From 34ea053b26891aa6900efc44f00b9bec1f399afe Mon Sep 17 00:00:00 2001
From: George Hotz <72895+geohot@users.noreply.github.com>
Date: Sun, 4 Jan 2026 02:06:15 -0500
Subject: [PATCH] assembly/amd: clean up pcode, jit pcode instead of static
 (#14001)

* assembly/amd: clean up pcode

* regen

* lil

* jit the pcode

* sendmsg

* cleanups

* inst prefetch lol
---
 extra/assembly/amd/autogen/cdna/gen_pcode.py  | 11328 ----------------
 extra/assembly/amd/autogen/cdna/str_pcode.py  |  1421 ++
 extra/assembly/amd/autogen/rdna3/gen_pcode.py | 10391 --------------
 extra/assembly/amd/autogen/rdna3/str_pcode.py |  1354 ++
 extra/assembly/amd/autogen/rdna4/gen_pcode.py |  9452 -------------
 extra/assembly/amd/autogen/rdna4/str_pcode.py |  1229 ++
 extra/assembly/amd/emu.py                     |    30 +-
 extra/assembly/amd/pcode.py                   |   715 +-
 extra/assembly/amd/pdf.py                     |   351 +-
 extra/assembly/amd/test/helpers.py            |     6 +-
 .../assembly/amd/test/test_mockgpu_invalid.py |     3 +-
 extra/assembly/amd/test/test_pcode.py         |    46 +-
 12 files changed, 4577 insertions(+), 31749 deletions(-)
 delete mode 100644 extra/assembly/amd/autogen/cdna/gen_pcode.py
 create mode 100644 extra/assembly/amd/autogen/cdna/str_pcode.py
 delete mode 100644 extra/assembly/amd/autogen/rdna3/gen_pcode.py
 create mode 100644 extra/assembly/amd/autogen/rdna3/str_pcode.py
 delete mode 100644 extra/assembly/amd/autogen/rdna4/gen_pcode.py
 create mode 100644 extra/assembly/amd/autogen/rdna4/str_pcode.py

diff --git a/extra/assembly/amd/autogen/cdna/gen_pcode.py b/extra/assembly/amd/autogen/cdna/gen_pcode.py
deleted file mode 100644
index 8d06afcc35..0000000000
--- a/extra/assembly/amd/autogen/cdna/gen_pcode.py
+++ /dev/null
@@ -1,11328 +0,0 @@
-# autogenerated by pdf.py - do not edit
-# to regenerate: python -m extra.assembly.amd.pdf --arch cdna
-# ruff: noqa: E501
-# mypy: ignore-errors
-from extra.assembly.amd.autogen.cdna.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3POp, VOPCOp, VOP3AOp, VOP3BOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp
-from extra.assembly.amd.pcode import ABSDIFF, BYTE_PERMUTE, DENORM, F, INF, OVERFLOW_F32, OVERFLOW_F64, PI, ROUND_MODE, Reg, SAT8, TWO_OVER_PI_1201, UNDERFLOW_F32, UNDERFLOW_F64, WAVE_MODE, _pack, _pack32, bf16_to_f32, cos, cvtToQuietNAN, exponent, f16_to_f32, f16_to_i16, f16_to_snorm, f16_to_u16, f16_to_unorm, f32_to_bf16, f32_to_f16, f32_to_f64, f32_to_i32, f32_to_snorm, f32_to_u32, f32_to_u8, f32_to_unorm, f64_to_f32, f64_to_i32, f64_to_u32, floor, fma, fract, i16_to_f16, i32_to_f32, i32_to_f64, i32_to_i16, isEven, isNAN, isQuietNAN, isSignalNAN, ldexp, log2, mantissa, pow, s_ff1_i32_b64, sign, signext, signext_from_bit, sin, sqrt, trunc, u16_to_f16, u32_to_f32, u32_to_f64, u32_to_u16, u4_to_u32, u8_to_u32, v_max3_f16, v_max3_f32, v_max3_i16, v_max3_i32, v_max3_u16, v_max3_u32, v_max_f16, v_max_f32, v_max_i16, v_max_i32, v_max_u16, v_max_u32, v_min3_f16, v_min3_f32, v_min_f16, v_min_f32, v_min_i16, v_min_i32, v_min_u16, v_min_u32, v_msad_u8, v_sad_u8
-
-def _SOP1Op_S_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.b32 = S0.b32
-  return {'D0': D0._val}
-
-def _SOP1Op_S_MOV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.b64 = S0.b64
-  return {'D0': D0._val}
-
-def _SOP1Op_S_CMOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0);
SCC=Reg(scc) - # --- compiled pseudocode --- - if SCC: - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _SOP1Op_S_CMOV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - if SCC: - D0.b64 = S0.b64 - return {'D0': D0._val} - -def _SOP1Op_S_NOT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~S0.u32 - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_NOT_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~S0.u64 - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_WQM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp[i] = S0.u32[(i & 60) + (4) - 1 : (i & 60)] != 0 - D0.u32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_WQM_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp[i] = S0.u64[(i & 60) + (4) - 1 : (i & 60)] != 0 - D0.u64 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[31 : 0] = S0.u32[0 : 31] - return {'D0': D0._val} - -def _SOP1Op_S_BREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[63 : 0] = S0.u64[0 : 63] - return {'D0': D0._val} - -def _SOP1Op_S_BCNT0_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp += ((1) if (S0.u32[i] == 0) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT0_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp += ((1) if (S0.u64[i] == 0) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT1_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp += ((1) if (S0.u32[i] == 1) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT1_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp += ((1) if (S0.u64[i] == 1) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u64 != 0) - return 
{'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_FF0_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(31)+1): - if S0.u32[i] == 0: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_FF0_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(63)+1): - if S0.u64[i] == 0: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_FF1_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(31)+1): - if S0.u32[i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_FF1_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(63)+1): - if S0.u64[i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_FLBIT_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(31)+1): - if S0.u32[31 - i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_FLBIT_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(63)+1): - if S0.u64[63 - i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_FLBIT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(1, int(31)+1): - if S0.u32[31 - i] != S0.u32[31]: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_FLBIT_I32_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(1, int(63)+1): - if S0.u64[63 - i] != S0.u64[63]: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_SEXT_I32_I8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i8)) - return {'D0': D0._val} - -def _SOP1Op_S_SEXT_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i16)) - return {'D0': D0._val} - -def _SOP1Op_S_BITSET0_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[S0.u32[4 : 0]] = 0 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET0_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[S0.u32[5 : 0]] = 0 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET1_B32(s0, s1, s2, d0, scc, vcc, 
laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[S0.u32[4 : 0]] = 1 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[S0.u32[5 : 0]] = 1 - return {'D0': D0._val} - -def _SOP1Op_S_GETPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.i64 = PC + 4 - return {'D0': D0._val} - -def _SOP1Op_S_SETPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - PC = Reg(S0.i64) - return {'PC': PC._val} - -def _SOP1Op_S_SWAPPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - jump_addr = S0.i64 - D0.i64 = PC + 4 - PC = Reg(jump_addr.i64) - return {'D0': D0._val, 'PC': PC._val} - -def _SOP1Op_S_RFE_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - PC = Reg(S0.i64) - return {'PC': PC._val} - -def _SOP1Op_S_AND_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 ^ EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_ANDN2_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 & ~EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_ORN2_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 | ~EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NAND_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, 
vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XNOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 ^ EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_QUADMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(7)+1): - tmp[i] = S0.u32[(i * 4) + (4) - 1 : (i * 4)] != 0 - D0.u32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_QUADMASK_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(15)+1): - tmp[i] = S0.u64[(i * 4) + (4) - 1 : (i * 4)] != 0 - D0.u64 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_ABS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = ((-S0.i32) if (S0.i32 < 0) else (S0.i32)) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_SET_GPR_IDX_IDX(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0) - # --- compiled pseudocode --- - M0[7 : 0] = S0.u32[7 : 0].b8 - return {} - -def _SOP1Op_S_ANDN1_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (~S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_ORN1_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (~S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_ANDN1_WREXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64 = (~S0.u64 & EXEC.u64) - D0.u64 = EXEC.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_ANDN2_WREXEC_B64(s0, s1, s2, d0, 
scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64 = (S0.u64 & ~EXEC.u64) - D0.u64 = EXEC.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_BITREPLICATE_B64_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.u32) - for i in range(0, int(31)+1): - D0.u64[i * 2] = tmp[i] - D0.u64[i * 2 + 1] = tmp[i] - return {'D0': D0._val} - -SOP1Op_FUNCTIONS = { - SOP1Op.S_MOV_B32: _SOP1Op_S_MOV_B32, - SOP1Op.S_MOV_B64: _SOP1Op_S_MOV_B64, - SOP1Op.S_CMOV_B32: _SOP1Op_S_CMOV_B32, - SOP1Op.S_CMOV_B64: _SOP1Op_S_CMOV_B64, - SOP1Op.S_NOT_B32: _SOP1Op_S_NOT_B32, - SOP1Op.S_NOT_B64: _SOP1Op_S_NOT_B64, - SOP1Op.S_WQM_B32: _SOP1Op_S_WQM_B32, - SOP1Op.S_WQM_B64: _SOP1Op_S_WQM_B64, - SOP1Op.S_BREV_B32: _SOP1Op_S_BREV_B32, - SOP1Op.S_BREV_B64: _SOP1Op_S_BREV_B64, - SOP1Op.S_BCNT0_I32_B32: _SOP1Op_S_BCNT0_I32_B32, - SOP1Op.S_BCNT0_I32_B64: _SOP1Op_S_BCNT0_I32_B64, - SOP1Op.S_BCNT1_I32_B32: _SOP1Op_S_BCNT1_I32_B32, - SOP1Op.S_BCNT1_I32_B64: _SOP1Op_S_BCNT1_I32_B64, - SOP1Op.S_FF0_I32_B32: _SOP1Op_S_FF0_I32_B32, - SOP1Op.S_FF0_I32_B64: _SOP1Op_S_FF0_I32_B64, - SOP1Op.S_FF1_I32_B32: _SOP1Op_S_FF1_I32_B32, - SOP1Op.S_FF1_I32_B64: _SOP1Op_S_FF1_I32_B64, - SOP1Op.S_FLBIT_I32_B32: _SOP1Op_S_FLBIT_I32_B32, - SOP1Op.S_FLBIT_I32_B64: _SOP1Op_S_FLBIT_I32_B64, - SOP1Op.S_FLBIT_I32: _SOP1Op_S_FLBIT_I32, - SOP1Op.S_FLBIT_I32_I64: _SOP1Op_S_FLBIT_I32_I64, - SOP1Op.S_SEXT_I32_I8: _SOP1Op_S_SEXT_I32_I8, - SOP1Op.S_SEXT_I32_I16: _SOP1Op_S_SEXT_I32_I16, - SOP1Op.S_BITSET0_B32: _SOP1Op_S_BITSET0_B32, - SOP1Op.S_BITSET0_B64: _SOP1Op_S_BITSET0_B64, - SOP1Op.S_BITSET1_B32: _SOP1Op_S_BITSET1_B32, - SOP1Op.S_BITSET1_B64: _SOP1Op_S_BITSET1_B64, - SOP1Op.S_GETPC_B64: _SOP1Op_S_GETPC_B64, - SOP1Op.S_SETPC_B64: _SOP1Op_S_SETPC_B64, - SOP1Op.S_SWAPPC_B64: _SOP1Op_S_SWAPPC_B64, - SOP1Op.S_RFE_B64: _SOP1Op_S_RFE_B64, - SOP1Op.S_AND_SAVEEXEC_B64: _SOP1Op_S_AND_SAVEEXEC_B64, - SOP1Op.S_OR_SAVEEXEC_B64: _SOP1Op_S_OR_SAVEEXEC_B64, - SOP1Op.S_XOR_SAVEEXEC_B64: _SOP1Op_S_XOR_SAVEEXEC_B64, - SOP1Op.S_ANDN2_SAVEEXEC_B64: _SOP1Op_S_ANDN2_SAVEEXEC_B64, - SOP1Op.S_ORN2_SAVEEXEC_B64: _SOP1Op_S_ORN2_SAVEEXEC_B64, - SOP1Op.S_NAND_SAVEEXEC_B64: _SOP1Op_S_NAND_SAVEEXEC_B64, - SOP1Op.S_NOR_SAVEEXEC_B64: _SOP1Op_S_NOR_SAVEEXEC_B64, - SOP1Op.S_XNOR_SAVEEXEC_B64: _SOP1Op_S_XNOR_SAVEEXEC_B64, - SOP1Op.S_QUADMASK_B32: _SOP1Op_S_QUADMASK_B32, - SOP1Op.S_QUADMASK_B64: _SOP1Op_S_QUADMASK_B64, - SOP1Op.S_ABS_I32: _SOP1Op_S_ABS_I32, - SOP1Op.S_SET_GPR_IDX_IDX: _SOP1Op_S_SET_GPR_IDX_IDX, - SOP1Op.S_ANDN1_SAVEEXEC_B64: _SOP1Op_S_ANDN1_SAVEEXEC_B64, - SOP1Op.S_ORN1_SAVEEXEC_B64: _SOP1Op_S_ORN1_SAVEEXEC_B64, - SOP1Op.S_ANDN1_WREXEC_B64: _SOP1Op_S_ANDN1_WREXEC_B64, - SOP1Op.S_ANDN2_WREXEC_B64: _SOP1Op_S_ANDN2_WREXEC_B64, - SOP1Op.S_BITREPLICATE_B64_B32: _SOP1Op_S_BITREPLICATE_B64_B32, -} - -def _SOP2Op_S_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_SUB_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); 
D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32) - SCC = Reg(((1) if (S1.u32 > S0.u32) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ADD_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.i32 + S1.i32) - SCC = Reg(((S0.u32[31] == S1.u32[31]) and (S0.u32[31] != tmp.u32[31]))) - D0.i32 = tmp.i32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_SUB_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.i32 - S1.i32) - SCC = Reg(((S0.u32[31] != S1.u32[31]) and (S0.u32[31] != tmp.u32[31]))) - D0.i32 = tmp.i32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ADDC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32) + SCC.u64) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_SUBB_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32 - SCC.u32) - SCC = Reg(((1) if ((S1.u32) + SCC.u64 > (S0.u32)) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 < S1.i32) - D0.i32 = ((S0.i32) if (SCC) else (S1.i32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 < S1.u32) - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 >= S1.i32) - D0.i32 = ((S0.i32) if (SCC) else (S1.i32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 >= S1.u32) - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_CSELECT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val} - -def _SOP2Op_S_CSELECT_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ((S0.u64) if (SCC) else (S1.u64)) - return {'D0': D0._val} - -def _SOP2Op_S_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 & S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 | S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 ^ S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ANDN2_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & ~S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ANDN2_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 & ~S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ORN2_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | ~S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ORN2_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 | ~S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NAND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 & S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NAND_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 & S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NOR_B32(s0, s1, s2, 
d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 | S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 | S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 ^ S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XNOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 ^ S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 << S1[4 : 0].u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 << S1[5 : 0].u32) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 >> S1[4 : 0].u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 >> S1[5 : 0].u32) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ASHR_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i32) >> S1[4 : 0].u32) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ASHR_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32) - SCC = Reg(D0.i64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((1 << S0[4 : 0].u32) - 1) << S1[4 : 0].u32) - return {'D0': D0._val} - -def _SOP2Op_S_BFM_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64 = (((1 << S0[5 : 0].u32) - 1) << S1[5 : 0].u32) - return {'D0': 
D0._val} - -def _SOP2Op_S_MUL_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = S0.i32 * S1.i32 - return {'D0': D0._val} - -def _SOP2Op_S_BFE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc); tmp=Reg(0) - # --- compiled pseudocode --- - tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - D0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc); tmp=Reg(0) - # --- compiled pseudocode --- - tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - D0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32) - SCC = Reg(D0.i64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ABSDIFF_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = S0.i32 - S1.i32 - if D0.i32 < 0: - D0.i32 = -D0.i32 - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MUL_HI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u32) * (S1.u32)) >> 32) - return {'D0': D0._val} - -def _SOP2Op_S_MUL_HI_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i32) * (S1.i32)) >> 32) - return {'D0': D0._val} - -def _SOP2Op_S_LSHL1_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 1) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL2_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 2) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL3_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 3) + 
(S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL4_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 4) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_PACK_LL_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[15 : 0].u16, S0[15 : 0].u16)) - return {} - -def _SOP2Op_S_PACK_LH_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[31 : 16].u16, S0[15 : 0].u16)) - return {} - -def _SOP2Op_S_PACK_HH_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[31 : 16].u16, S0[31 : 16].u16)) - return {} - -SOP2Op_FUNCTIONS = { - SOP2Op.S_ADD_U32: _SOP2Op_S_ADD_U32, - SOP2Op.S_SUB_U32: _SOP2Op_S_SUB_U32, - SOP2Op.S_ADD_I32: _SOP2Op_S_ADD_I32, - SOP2Op.S_SUB_I32: _SOP2Op_S_SUB_I32, - SOP2Op.S_ADDC_U32: _SOP2Op_S_ADDC_U32, - SOP2Op.S_SUBB_U32: _SOP2Op_S_SUBB_U32, - SOP2Op.S_MIN_I32: _SOP2Op_S_MIN_I32, - SOP2Op.S_MIN_U32: _SOP2Op_S_MIN_U32, - SOP2Op.S_MAX_I32: _SOP2Op_S_MAX_I32, - SOP2Op.S_MAX_U32: _SOP2Op_S_MAX_U32, - SOP2Op.S_CSELECT_B32: _SOP2Op_S_CSELECT_B32, - SOP2Op.S_CSELECT_B64: _SOP2Op_S_CSELECT_B64, - SOP2Op.S_AND_B32: _SOP2Op_S_AND_B32, - SOP2Op.S_AND_B64: _SOP2Op_S_AND_B64, - SOP2Op.S_OR_B32: _SOP2Op_S_OR_B32, - SOP2Op.S_OR_B64: _SOP2Op_S_OR_B64, - SOP2Op.S_XOR_B32: _SOP2Op_S_XOR_B32, - SOP2Op.S_XOR_B64: _SOP2Op_S_XOR_B64, - SOP2Op.S_ANDN2_B32: _SOP2Op_S_ANDN2_B32, - SOP2Op.S_ANDN2_B64: _SOP2Op_S_ANDN2_B64, - SOP2Op.S_ORN2_B32: _SOP2Op_S_ORN2_B32, - SOP2Op.S_ORN2_B64: _SOP2Op_S_ORN2_B64, - SOP2Op.S_NAND_B32: _SOP2Op_S_NAND_B32, - SOP2Op.S_NAND_B64: _SOP2Op_S_NAND_B64, - SOP2Op.S_NOR_B32: _SOP2Op_S_NOR_B32, - SOP2Op.S_NOR_B64: _SOP2Op_S_NOR_B64, - SOP2Op.S_XNOR_B32: _SOP2Op_S_XNOR_B32, - SOP2Op.S_XNOR_B64: _SOP2Op_S_XNOR_B64, - SOP2Op.S_LSHL_B32: _SOP2Op_S_LSHL_B32, - SOP2Op.S_LSHL_B64: _SOP2Op_S_LSHL_B64, - SOP2Op.S_LSHR_B32: _SOP2Op_S_LSHR_B32, - SOP2Op.S_LSHR_B64: _SOP2Op_S_LSHR_B64, - SOP2Op.S_ASHR_I32: _SOP2Op_S_ASHR_I32, - SOP2Op.S_ASHR_I64: _SOP2Op_S_ASHR_I64, - SOP2Op.S_BFM_B32: _SOP2Op_S_BFM_B32, - SOP2Op.S_BFM_B64: _SOP2Op_S_BFM_B64, - SOP2Op.S_MUL_I32: _SOP2Op_S_MUL_I32, - SOP2Op.S_BFE_U32: _SOP2Op_S_BFE_U32, - SOP2Op.S_BFE_I32: _SOP2Op_S_BFE_I32, - SOP2Op.S_BFE_U64: _SOP2Op_S_BFE_U64, - SOP2Op.S_BFE_I64: _SOP2Op_S_BFE_I64, - SOP2Op.S_ABSDIFF_I32: _SOP2Op_S_ABSDIFF_I32, - SOP2Op.S_MUL_HI_U32: _SOP2Op_S_MUL_HI_U32, - SOP2Op.S_MUL_HI_I32: _SOP2Op_S_MUL_HI_I32, - SOP2Op.S_LSHL1_ADD_U32: _SOP2Op_S_LSHL1_ADD_U32, - SOP2Op.S_LSHL2_ADD_U32: _SOP2Op_S_LSHL2_ADD_U32, - SOP2Op.S_LSHL3_ADD_U32: _SOP2Op_S_LSHL3_ADD_U32, - SOP2Op.S_LSHL4_ADD_U32: _SOP2Op_S_LSHL4_ADD_U32, - SOP2Op.S_PACK_LL_B32_B16: _SOP2Op_S_PACK_LL_B32_B16, - SOP2Op.S_PACK_LH_B32_B16: _SOP2Op_S_PACK_LH_B32_B16, - SOP2Op.S_PACK_HH_B32_B16: _SOP2Op_S_PACK_HH_B32_B16, -} - -def _SOPCOp_S_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, 
pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 == S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LG_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 != S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 > S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 >= S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 < S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 <= S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 == S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LG_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 != S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 > S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 >= S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 < S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 <= S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_BITCMP0_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32[S1.u32[4 : 0]] == 0) - return {'SCC': SCC._val} - -def _SOPCOp_S_BITCMP1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32[S1.u32[4 : 0]] == 1) - return {'SCC': SCC._val} - -def _SOPCOp_S_BITCMP0_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); 
SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u64[S1.u32[5 : 0]] == 0) - return {'SCC': SCC._val} - -def _SOPCOp_S_BITCMP1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u64[S1.u32[5 : 0]] == 1) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u64 == S1.u64) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LG_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u64 != S1.u64) - return {'SCC': SCC._val} - -SOPCOp_FUNCTIONS = { - SOPCOp.S_CMP_EQ_I32: _SOPCOp_S_CMP_EQ_I32, - SOPCOp.S_CMP_LG_I32: _SOPCOp_S_CMP_LG_I32, - SOPCOp.S_CMP_GT_I32: _SOPCOp_S_CMP_GT_I32, - SOPCOp.S_CMP_GE_I32: _SOPCOp_S_CMP_GE_I32, - SOPCOp.S_CMP_LT_I32: _SOPCOp_S_CMP_LT_I32, - SOPCOp.S_CMP_LE_I32: _SOPCOp_S_CMP_LE_I32, - SOPCOp.S_CMP_EQ_U32: _SOPCOp_S_CMP_EQ_U32, - SOPCOp.S_CMP_LG_U32: _SOPCOp_S_CMP_LG_U32, - SOPCOp.S_CMP_GT_U32: _SOPCOp_S_CMP_GT_U32, - SOPCOp.S_CMP_GE_U32: _SOPCOp_S_CMP_GE_U32, - SOPCOp.S_CMP_LT_U32: _SOPCOp_S_CMP_LT_U32, - SOPCOp.S_CMP_LE_U32: _SOPCOp_S_CMP_LE_U32, - SOPCOp.S_BITCMP0_B32: _SOPCOp_S_BITCMP0_B32, - SOPCOp.S_BITCMP1_B32: _SOPCOp_S_BITCMP1_B32, - SOPCOp.S_BITCMP0_B64: _SOPCOp_S_BITCMP0_B64, - SOPCOp.S_BITCMP1_B64: _SOPCOp_S_BITCMP1_B64, - SOPCOp.S_CMP_EQ_U64: _SOPCOp_S_CMP_EQ_U64, - SOPCOp.S_CMP_LG_U64: _SOPCOp_S_CMP_LG_U64, -} - -def _SOPKOp_S_MOVK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i16)) - return {'D0': D0._val} - -def _SOPKOp_S_CMOVK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - if SCC: - D0.i32 = (signext(S0.i16)) - return {'D0': D0._val} - -def _SOPKOp_S_CMPK_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 == (signext(S1.i16))) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LG_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 != (signext(S1.i16))) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 > (signext(S1.i16))) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 >= (signext(S1.i16))) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 < (signext(S1.i16))) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LE_I32(s0, s1, s2, 
d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 <= (signext(S1.i16))) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 == (S1.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LG_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 != (S1.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 > (S1.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 >= (S1.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 < (S1.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 <= (S1.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_ADDK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(D0.i32) - D0.i32 = D0.i32 + (signext(S0.i16)) - SCC = Reg(((tmp[31] == S0.i16[15]) and (tmp[31] != D0.i32[31]))) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOPKOp_S_MULK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = D0.i32 * (signext(S0.i16)) - return {'D0': D0._val} - -def _SOPKOp_S_CALL_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - D0.i64 = PC + 4 - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - return {'D0': D0._val, 'PC': PC._val} - -SOPKOp_FUNCTIONS = { - SOPKOp.S_MOVK_I32: _SOPKOp_S_MOVK_I32, - SOPKOp.S_CMOVK_I32: _SOPKOp_S_CMOVK_I32, - SOPKOp.S_CMPK_EQ_I32: _SOPKOp_S_CMPK_EQ_I32, - SOPKOp.S_CMPK_LG_I32: _SOPKOp_S_CMPK_LG_I32, - SOPKOp.S_CMPK_GT_I32: _SOPKOp_S_CMPK_GT_I32, - SOPKOp.S_CMPK_GE_I32: _SOPKOp_S_CMPK_GE_I32, - SOPKOp.S_CMPK_LT_I32: _SOPKOp_S_CMPK_LT_I32, - SOPKOp.S_CMPK_LE_I32: _SOPKOp_S_CMPK_LE_I32, - SOPKOp.S_CMPK_EQ_U32: _SOPKOp_S_CMPK_EQ_U32, - SOPKOp.S_CMPK_LG_U32: _SOPKOp_S_CMPK_LG_U32, - SOPKOp.S_CMPK_GT_U32: _SOPKOp_S_CMPK_GT_U32, - SOPKOp.S_CMPK_GE_U32: _SOPKOp_S_CMPK_GE_U32, - SOPKOp.S_CMPK_LT_U32: _SOPKOp_S_CMPK_LT_U32, - SOPKOp.S_CMPK_LE_U32: _SOPKOp_S_CMPK_LE_U32, - SOPKOp.S_ADDK_I32: _SOPKOp_S_ADDK_I32, - SOPKOp.S_MULK_I32: _SOPKOp_S_MULK_I32, - SOPKOp.S_CALL_B64: _SOPKOp_S_CALL_B64, -} - -def _SOPPOp_S_NOP(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, 
vdst_idx=0, pc=None): - SIMM16=Reg(literal) - # --- compiled pseudocode --- - for i in range(0, int(SIMM16.u16[3 : 0].u32)+1): - pass - return {} - -def _SOPPOp_S_BRANCH(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_SCC0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - SCC=Reg(scc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if SCC == 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'SCC': SCC._val, 'PC': PC._val} - -def _SOPPOp_S_CBRANCH_SCC1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - SCC=Reg(scc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if SCC == 1: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'SCC': SCC._val, 'PC': PC._val} - -def _SOPPOp_S_CBRANCH_VCCZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); VCCZ=Reg(1 if VCC._val == 0 else 0) - # --- compiled pseudocode --- - if VCCZ.u1 == 1: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_VCCNZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); VCCZ=Reg(1 if VCC._val == 0 else 0) - # --- compiled pseudocode --- - if VCCZ.u1 == 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_EXECZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); EXECZ=Reg(1 if EXEC._val == 0 else 0) - # --- compiled pseudocode --- - if EXECZ.u1 == 1: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_EXECNZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); EXECZ=Reg(1 if EXEC._val == 0 else 0) - # --- compiled pseudocode --- - if EXECZ.u1 == 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_TRAP(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_CDBGSYS(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if WAVE_STATUS.COND_DBG_SYS.u32 != 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_CDBGUSER(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if WAVE_STATUS.COND_DBG_USER.u32 != 0: - PC 
= Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_CDBGSYS_OR_USER(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if (WAVE_STATUS.COND_DBG_SYS or WAVE_STATUS.COND_DBG_USER): - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_CDBGSYS_AND_USER(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if (WAVE_STATUS.COND_DBG_SYS and WAVE_STATUS.COND_DBG_USER): - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_SET_GPR_IDX_MODE(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask); SIMM16=Reg(literal); VDST=Reg(vdst_idx) - # --- compiled pseudocode --- - SIMM16[1] = VSRC1_REL, SIMM16[2] = VSRC2_REL and SIMM16[3] = VDST_REL. - return {} - -SOPPOp_FUNCTIONS = { - SOPPOp.S_NOP: _SOPPOp_S_NOP, - SOPPOp.S_BRANCH: _SOPPOp_S_BRANCH, - SOPPOp.S_CBRANCH_SCC0: _SOPPOp_S_CBRANCH_SCC0, - SOPPOp.S_CBRANCH_SCC1: _SOPPOp_S_CBRANCH_SCC1, - SOPPOp.S_CBRANCH_VCCZ: _SOPPOp_S_CBRANCH_VCCZ, - SOPPOp.S_CBRANCH_VCCNZ: _SOPPOp_S_CBRANCH_VCCNZ, - SOPPOp.S_CBRANCH_EXECZ: _SOPPOp_S_CBRANCH_EXECZ, - SOPPOp.S_CBRANCH_EXECNZ: _SOPPOp_S_CBRANCH_EXECNZ, - SOPPOp.S_TRAP: _SOPPOp_S_TRAP, - SOPPOp.S_CBRANCH_CDBGSYS: _SOPPOp_S_CBRANCH_CDBGSYS, - SOPPOp.S_CBRANCH_CDBGUSER: _SOPPOp_S_CBRANCH_CDBGUSER, - SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: _SOPPOp_S_CBRANCH_CDBGSYS_OR_USER, - SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: _SOPPOp_S_CBRANCH_CDBGSYS_AND_USER, - SOPPOp.S_SET_GPR_IDX_MODE: _SOPPOp_S_SET_GPR_IDX_MODE, -} - -def _SMEMOp_S_LOAD_DWORD(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_LOAD_DWORDX2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_LOAD_DWORDX4(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - SDATA[95 : 64] = MEM[addr + 8].b32 - SDATA[127 : 96] = MEM[addr + 12].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_LOAD_DWORDX8(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - SDATA[95 : 64] = MEM[addr + 8].b32 - SDATA[127 : 96] = MEM[addr + 12].b32 - SDATA[159 : 128] = MEM[addr + 16].b32 - SDATA[191 : 160] = MEM[addr + 20].b32 - SDATA[223 : 192] = MEM[addr + 24].b32 - SDATA[255 : 224] = MEM[addr + 28].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_LOAD_DWORDX16(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - SDATA[95 : 64] = MEM[addr + 
8].b32 - SDATA[127 : 96] = MEM[addr + 12].b32 - SDATA[159 : 128] = MEM[addr + 16].b32 - SDATA[191 : 160] = MEM[addr + 20].b32 - SDATA[223 : 192] = MEM[addr + 24].b32 - SDATA[255 : 224] = MEM[addr + 28].b32 - SDATA[287 : 256] = MEM[addr + 32].b32 - SDATA[319 : 288] = MEM[addr + 36].b32 - SDATA[351 : 320] = MEM[addr + 40].b32 - SDATA[383 : 352] = MEM[addr + 44].b32 - SDATA[415 : 384] = MEM[addr + 48].b32 - SDATA[447 : 416] = MEM[addr + 52].b32 - SDATA[479 : 448] = MEM[addr + 56].b32 - SDATA[511 : 480] = MEM[addr + 60].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_SCRATCH_LOAD_DWORD(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_SCRATCH_LOAD_DWORDX2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_SCRATCH_LOAD_DWORDX4(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - SDATA[95 : 64] = MEM[addr + 8].b32 - SDATA[127 : 96] = MEM[addr + 12].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_DWORD(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_DWORDX2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_DWORDX4(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - SDATA[95 : 64] = MEM[addr + 8].b32 - SDATA[127 : 96] = MEM[addr + 12].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_DWORDX8(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - SDATA[95 : 64] = MEM[addr + 8].b32 - SDATA[127 : 96] = MEM[addr + 12].b32 - SDATA[159 : 128] = MEM[addr + 16].b32 - SDATA[191 : 160] = MEM[addr + 20].b32 - SDATA[223 : 192] = MEM[addr + 24].b32 - SDATA[255 : 224] = MEM[addr + 28].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_DWORDX16(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - SDATA[31 : 0] = MEM[addr].b32 - SDATA[63 : 32] = MEM[addr + 4].b32 - SDATA[95 : 64] = MEM[addr + 8].b32 - SDATA[127 : 96] = MEM[addr + 12].b32 - SDATA[159 : 128] = MEM[addr + 16].b32 - SDATA[191 : 160] = MEM[addr + 20].b32 - SDATA[223 : 192] = MEM[addr + 24].b32 - SDATA[255 : 224] = MEM[addr + 28].b32 - SDATA[287 : 256] = MEM[addr + 32].b32 - SDATA[319 : 288] = MEM[addr + 36].b32 - SDATA[351 : 320] = MEM[addr + 40].b32 - SDATA[383 : 352] = MEM[addr + 44].b32 - SDATA[415 : 384] = MEM[addr + 48].b32 - SDATA[447 : 416] = MEM[addr + 52].b32 - SDATA[479 
: 448] = MEM[addr + 56].b32 - SDATA[511 : 480] = MEM[addr + 60].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_STORE_DWORD(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - return {} - -def _SMEMOp_S_STORE_DWORDX2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - MEM[addr + 4].b32 = SDATA[63 : 32] - return {} - -def _SMEMOp_S_STORE_DWORDX4(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - MEM[addr + 4].b32 = SDATA[63 : 32] - MEM[addr + 8].b32 = SDATA[95 : 64] - MEM[addr + 12].b32 = SDATA[127 : 96] - return {} - -def _SMEMOp_S_SCRATCH_STORE_DWORD(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - return {} - -def _SMEMOp_S_SCRATCH_STORE_DWORDX2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - MEM[addr + 4].b32 = SDATA[63 : 32] - return {} - -def _SMEMOp_S_SCRATCH_STORE_DWORDX4(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - MEM[addr + 4].b32 = SDATA[63 : 32] - MEM[addr + 8].b32 = SDATA[95 : 64] - MEM[addr + 12].b32 = SDATA[127 : 96] - return {} - -def _SMEMOp_S_BUFFER_STORE_DWORD(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - return {} - -def _SMEMOp_S_BUFFER_STORE_DWORDX2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - MEM[addr + 4].b32 = SDATA[63 : 32] - return {} - -def _SMEMOp_S_BUFFER_STORE_DWORDX4(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - MEM[addr].b32 = SDATA[31 : 0] - MEM[addr + 4].b32 = SDATA[63 : 32] - MEM[addr + 8].b32 = SDATA[95 : 64] - MEM[addr + 12].b32 = SDATA[127 : 96] - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_SWAP(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = DATA.b32 - RETURN_DATA.b32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_CMPSWAP(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA[31 : 0].u32 - cmp = DATA[63 : 32].u32 - MEM[addr].u32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_ADD(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_SUB(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = 
CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_SMIN(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_UMIN(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_SMAX(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_UMAX(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_AND(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_OR(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_XOR(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_INC(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_DEC(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_SWAP_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_CMPSWAP_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA[63 : 0].u64 - cmp = DATA[127 : 64].u64 - MEM[addr].u64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_ADD_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # 
--- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_SUB_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_SMIN_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_UMIN_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_SMAX_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_UMAX_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_AND_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_OR_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_XOR_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_INC_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_BUFFER_ATOMIC_DEC_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_SWAP(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = DATA.b32 - RETURN_DATA.b32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_CMPSWAP(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - 
addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA[31 : 0].u32 - cmp = DATA[63 : 32].u32 - MEM[addr].u32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_ADD(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_SUB(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_SMIN(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_UMIN(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_SMAX(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_UMAX(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_AND(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_OR(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_XOR(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_INC(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_DEC(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {} - -def _SMEMOp_S_ATOMIC_SWAP_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, 
OFFSET.i32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_CMPSWAP_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA[63 : 0].u64 - cmp = DATA[127 : 64].u64 - MEM[addr].u64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_ADD_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_SUB_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_SMIN_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_UMIN_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_SMAX_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_UMAX_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_AND_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_OR_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_XOR_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_INC_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {} - -def _SMEMOp_S_ATOMIC_DEC_X2(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if 
(((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {} - -SMEMOp_FUNCTIONS = { - SMEMOp.S_LOAD_DWORD: _SMEMOp_S_LOAD_DWORD, - SMEMOp.S_LOAD_DWORDX2: _SMEMOp_S_LOAD_DWORDX2, - SMEMOp.S_LOAD_DWORDX4: _SMEMOp_S_LOAD_DWORDX4, - SMEMOp.S_LOAD_DWORDX8: _SMEMOp_S_LOAD_DWORDX8, - SMEMOp.S_LOAD_DWORDX16: _SMEMOp_S_LOAD_DWORDX16, - SMEMOp.S_SCRATCH_LOAD_DWORD: _SMEMOp_S_SCRATCH_LOAD_DWORD, - SMEMOp.S_SCRATCH_LOAD_DWORDX2: _SMEMOp_S_SCRATCH_LOAD_DWORDX2, - SMEMOp.S_SCRATCH_LOAD_DWORDX4: _SMEMOp_S_SCRATCH_LOAD_DWORDX4, - SMEMOp.S_BUFFER_LOAD_DWORD: _SMEMOp_S_BUFFER_LOAD_DWORD, - SMEMOp.S_BUFFER_LOAD_DWORDX2: _SMEMOp_S_BUFFER_LOAD_DWORDX2, - SMEMOp.S_BUFFER_LOAD_DWORDX4: _SMEMOp_S_BUFFER_LOAD_DWORDX4, - SMEMOp.S_BUFFER_LOAD_DWORDX8: _SMEMOp_S_BUFFER_LOAD_DWORDX8, - SMEMOp.S_BUFFER_LOAD_DWORDX16: _SMEMOp_S_BUFFER_LOAD_DWORDX16, - SMEMOp.S_STORE_DWORD: _SMEMOp_S_STORE_DWORD, - SMEMOp.S_STORE_DWORDX2: _SMEMOp_S_STORE_DWORDX2, - SMEMOp.S_STORE_DWORDX4: _SMEMOp_S_STORE_DWORDX4, - SMEMOp.S_SCRATCH_STORE_DWORD: _SMEMOp_S_SCRATCH_STORE_DWORD, - SMEMOp.S_SCRATCH_STORE_DWORDX2: _SMEMOp_S_SCRATCH_STORE_DWORDX2, - SMEMOp.S_SCRATCH_STORE_DWORDX4: _SMEMOp_S_SCRATCH_STORE_DWORDX4, - SMEMOp.S_BUFFER_STORE_DWORD: _SMEMOp_S_BUFFER_STORE_DWORD, - SMEMOp.S_BUFFER_STORE_DWORDX2: _SMEMOp_S_BUFFER_STORE_DWORDX2, - SMEMOp.S_BUFFER_STORE_DWORDX4: _SMEMOp_S_BUFFER_STORE_DWORDX4, - SMEMOp.S_BUFFER_ATOMIC_SWAP: _SMEMOp_S_BUFFER_ATOMIC_SWAP, - SMEMOp.S_BUFFER_ATOMIC_CMPSWAP: _SMEMOp_S_BUFFER_ATOMIC_CMPSWAP, - SMEMOp.S_BUFFER_ATOMIC_ADD: _SMEMOp_S_BUFFER_ATOMIC_ADD, - SMEMOp.S_BUFFER_ATOMIC_SUB: _SMEMOp_S_BUFFER_ATOMIC_SUB, - SMEMOp.S_BUFFER_ATOMIC_SMIN: _SMEMOp_S_BUFFER_ATOMIC_SMIN, - SMEMOp.S_BUFFER_ATOMIC_UMIN: _SMEMOp_S_BUFFER_ATOMIC_UMIN, - SMEMOp.S_BUFFER_ATOMIC_SMAX: _SMEMOp_S_BUFFER_ATOMIC_SMAX, - SMEMOp.S_BUFFER_ATOMIC_UMAX: _SMEMOp_S_BUFFER_ATOMIC_UMAX, - SMEMOp.S_BUFFER_ATOMIC_AND: _SMEMOp_S_BUFFER_ATOMIC_AND, - SMEMOp.S_BUFFER_ATOMIC_OR: _SMEMOp_S_BUFFER_ATOMIC_OR, - SMEMOp.S_BUFFER_ATOMIC_XOR: _SMEMOp_S_BUFFER_ATOMIC_XOR, - SMEMOp.S_BUFFER_ATOMIC_INC: _SMEMOp_S_BUFFER_ATOMIC_INC, - SMEMOp.S_BUFFER_ATOMIC_DEC: _SMEMOp_S_BUFFER_ATOMIC_DEC, - SMEMOp.S_BUFFER_ATOMIC_SWAP_X2: _SMEMOp_S_BUFFER_ATOMIC_SWAP_X2, - SMEMOp.S_BUFFER_ATOMIC_CMPSWAP_X2: _SMEMOp_S_BUFFER_ATOMIC_CMPSWAP_X2, - SMEMOp.S_BUFFER_ATOMIC_ADD_X2: _SMEMOp_S_BUFFER_ATOMIC_ADD_X2, - SMEMOp.S_BUFFER_ATOMIC_SUB_X2: _SMEMOp_S_BUFFER_ATOMIC_SUB_X2, - SMEMOp.S_BUFFER_ATOMIC_SMIN_X2: _SMEMOp_S_BUFFER_ATOMIC_SMIN_X2, - SMEMOp.S_BUFFER_ATOMIC_UMIN_X2: _SMEMOp_S_BUFFER_ATOMIC_UMIN_X2, - SMEMOp.S_BUFFER_ATOMIC_SMAX_X2: _SMEMOp_S_BUFFER_ATOMIC_SMAX_X2, - SMEMOp.S_BUFFER_ATOMIC_UMAX_X2: _SMEMOp_S_BUFFER_ATOMIC_UMAX_X2, - SMEMOp.S_BUFFER_ATOMIC_AND_X2: _SMEMOp_S_BUFFER_ATOMIC_AND_X2, - SMEMOp.S_BUFFER_ATOMIC_OR_X2: _SMEMOp_S_BUFFER_ATOMIC_OR_X2, - SMEMOp.S_BUFFER_ATOMIC_XOR_X2: _SMEMOp_S_BUFFER_ATOMIC_XOR_X2, - SMEMOp.S_BUFFER_ATOMIC_INC_X2: _SMEMOp_S_BUFFER_ATOMIC_INC_X2, - SMEMOp.S_BUFFER_ATOMIC_DEC_X2: _SMEMOp_S_BUFFER_ATOMIC_DEC_X2, - SMEMOp.S_ATOMIC_SWAP: _SMEMOp_S_ATOMIC_SWAP, - SMEMOp.S_ATOMIC_CMPSWAP: _SMEMOp_S_ATOMIC_CMPSWAP, - SMEMOp.S_ATOMIC_ADD: _SMEMOp_S_ATOMIC_ADD, - SMEMOp.S_ATOMIC_SUB: _SMEMOp_S_ATOMIC_SUB, - SMEMOp.S_ATOMIC_SMIN: _SMEMOp_S_ATOMIC_SMIN, - SMEMOp.S_ATOMIC_UMIN: _SMEMOp_S_ATOMIC_UMIN, - SMEMOp.S_ATOMIC_SMAX: _SMEMOp_S_ATOMIC_SMAX, - SMEMOp.S_ATOMIC_UMAX: _SMEMOp_S_ATOMIC_UMAX, - SMEMOp.S_ATOMIC_AND: _SMEMOp_S_ATOMIC_AND, - SMEMOp.S_ATOMIC_OR: _SMEMOp_S_ATOMIC_OR, - SMEMOp.S_ATOMIC_XOR: _SMEMOp_S_ATOMIC_XOR, - 
SMEMOp.S_ATOMIC_INC: _SMEMOp_S_ATOMIC_INC, - SMEMOp.S_ATOMIC_DEC: _SMEMOp_S_ATOMIC_DEC, - SMEMOp.S_ATOMIC_SWAP_X2: _SMEMOp_S_ATOMIC_SWAP_X2, - SMEMOp.S_ATOMIC_CMPSWAP_X2: _SMEMOp_S_ATOMIC_CMPSWAP_X2, - SMEMOp.S_ATOMIC_ADD_X2: _SMEMOp_S_ATOMIC_ADD_X2, - SMEMOp.S_ATOMIC_SUB_X2: _SMEMOp_S_ATOMIC_SUB_X2, - SMEMOp.S_ATOMIC_SMIN_X2: _SMEMOp_S_ATOMIC_SMIN_X2, - SMEMOp.S_ATOMIC_UMIN_X2: _SMEMOp_S_ATOMIC_UMIN_X2, - SMEMOp.S_ATOMIC_SMAX_X2: _SMEMOp_S_ATOMIC_SMAX_X2, - SMEMOp.S_ATOMIC_UMAX_X2: _SMEMOp_S_ATOMIC_UMAX_X2, - SMEMOp.S_ATOMIC_AND_X2: _SMEMOp_S_ATOMIC_AND_X2, - SMEMOp.S_ATOMIC_OR_X2: _SMEMOp_S_ATOMIC_OR_X2, - SMEMOp.S_ATOMIC_XOR_X2: _SMEMOp_S_ATOMIC_XOR_X2, - SMEMOp.S_ATOMIC_INC_X2: _SMEMOp_S_ATOMIC_INC_X2, - SMEMOp.S_ATOMIC_DEC_X2: _SMEMOp_S_ATOMIC_DEC_X2, -} - -def _VOP1Op_V_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _VOP1Op_V_READFIRSTLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); EXEC=Reg(exec_mask); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - if EXEC == 0x0: - lane = 0 - else: - lane = s_ff1_i32_b64(EXEC) - D0.b32 = VGPR[lane][SRC0.u32] - return {'D0': D0._val} - -def _VOP1Op_V_CVT_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f64_to_i32(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = i32_to_f64(S0.i32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = i32_to_f32(S0.i32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0.u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_U32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f32_to_u32(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = f32_to_f16(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f16_to_f32(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_RPI_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(floor(S0.f32 + 0.5)) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_FLR_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(floor(S0.f32)) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f64_to_f32(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F64_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = f32_to_f64(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_UBYTE0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[7 : 0].u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_UBYTE1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[15 : 8].u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_UBYTE2(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[23 : 16].u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_UBYTE3(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[31 : 24].u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_U32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f64_to_u32(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = u32_to_f64(S0.u32) - return {'D0': D0._val} - -def _VOP1Op_V_TRUNC_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_CEIL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - if ((S0.f64 > 0.0) and (S0.f64 != D0.f64)): - D0.f64 += 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_RNDNE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = floor(S0.f64 + 0.5) - if (isEven(floor(S0.f64)) and (fract(S0.f64) == 0.5)): - D0.f64 -= 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_FLOOR_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - if ((S0.f64 < 0.0) and (S0.f64 != D0.f64)): - D0.f64 += -1.0 - return {'D0': D0._val} - -def _VOP1Op_V_FRACT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + -floor(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_TRUNC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - 
S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CEIL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 > 0.0) and (S0.f32 != D0.f32)): - D0.f32 += 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_RNDNE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = floor(S0.f32 + 0.5) - if (isEven(F(floor(S0.f32))) and (fract(S0.f32) == 0.5)): - D0.f32 -= 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_FLOOR_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 < 0.0) and (S0.f32 != D0.f32)): - D0.f32 += -1.0 - return {'D0': D0._val} - -def _VOP1Op_V_EXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = pow(2.0, S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_LOG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = log2(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_RCP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / S0.f32 - return {'D0': D0._val} - -def _VOP1Op_V_RCP_IFLAG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / S0.f32 - return {'D0': D0._val} - -def _VOP1Op_V_RSQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / sqrt(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_RCP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = 1.0 / S0.f64 - return {'D0': D0._val} - -def _VOP1Op_V_RSQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = 1.0 / sqrt(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_SQRT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = sqrt(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_SQRT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = sqrt(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_SIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = sin(S0.f32 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP1Op_V_COS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = cos(S0.f32 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP1Op_V_NOT_B32(s0, s1, 
s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~S0.u32 - return {'D0': D0._val} - -def _VOP1Op_V_BFREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[31 : 0] = S0.u32[0 : 31] - return {'D0': D0._val} - -def _VOP1Op_V_FFBH_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(0, int(31)+1): - if S0.u32[31 - i] == 1: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP1Op_V_FFBL_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(0, int(31)+1): - if S0.u32[i] == 1: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP1Op_V_FFBH_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(1, int(31)+1): - if S0.i32[31 - i] != S0.i32[31]: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_EXP_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)): - D0.i32 = 0 - else: - D0.i32 = exponent(S0.f64) - 1023 + 1 - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_MANT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)): - D0.f64 = S0.f64 - else: - D0.f64 = mantissa(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_FRACT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 + -floor(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_EXP_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))): - D0.i32 = 0 - else: - D0.i32 = exponent(S0.f32) - 127 + 1 - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_MANT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))): - D0.f32 = S0.f32 - else: - D0.f32 = mantissa(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_MOV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b64 = S0.b64 - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F16_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = u16_to_f16(S0.u16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F16_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = i16_to_f16(S0.i16) - 
return {'D0': D0._val} - -def _VOP1Op_V_CVT_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = f16_to_u16(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = f16_to_i16(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_RCP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 1.0 / S0.f16 - return {'D0': D0._val} - -def _VOP1Op_V_SQRT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = sqrt(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_RSQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 1.0 / sqrt(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_LOG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = log2(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_EXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = pow(2.0, S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_MANT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))): - D0.f16 = S0.f16 - else: - D0.f16 = mantissa(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_EXP_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))): - D0.i16 = 0 - else: - D0.i16 = (exponent(S0.f16) - 15 + 1) - return {'D0': D0._val} - -def _VOP1Op_V_FLOOR_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 < 0.0) and (S0.f16 != D0.f16)): - D0.f16 += -1.0 - return {'D0': D0._val} - -def _VOP1Op_V_CEIL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 > 0.0) and (S0.f16 != D0.f16)): - D0.f16 += 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_TRUNC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_RNDNE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = floor(S0.f16 + 0.5) - if (isEven(F(floor(S0.f16))) and (fract(S0.f16) == 0.5)): - D0.f16 -= 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_FRACT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - 
S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + -floor(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_SIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = sin(S0.f16 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP1Op_V_COS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = cos(S0.f16 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_NORM_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = f16_to_snorm(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_NORM_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = f16_to_unorm(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_SAT_PK_U8_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(0) - tmp[7 : 0].u8 = SAT8(S0[15 : 0].i16) - tmp[15 : 8].u8 = SAT8(S0[31 : 16].i16) - D0.b16 = tmp.b16 - return {'D0': D0._val} - -def _VOP1Op_V_SWAP_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.b32) - D0.b32 = S0.b32 - S0.b32 = tmp - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - if SDWA_SRC0_SEL == BYTE1.b3: - D0.f32 = fp8_to_f32(S0[15 : 8].fp8) - elif SDWA_SRC0_SEL == BYTE2.b3: - D0.f32 = fp8_to_f32(S0[23 : 16].fp8) - elif SDWA_SRC0_SEL == BYTE3.b3: - D0.f32 = fp8_to_f32(S0[31 : 24].fp8) - else: - D0.f32 = fp8_to_f32(S0[7 : 0].fp8) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - if SDWA_SRC0_SEL == BYTE1.b3: - D0.f32 = bf8_to_f32(S0[15 : 8].bf8) - elif SDWA_SRC0_SEL == BYTE2.b3: - D0.f32 = bf8_to_f32(S0[23 : 16].bf8) - elif SDWA_SRC0_SEL == BYTE3.b3: - D0.f32 = bf8_to_f32(S0[31 : 24].bf8) - else: - D0.f32 = bf8_to_f32(S0[7 : 0].bf8) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_PK_F32_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); D1=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - tmp = Reg(((S0[31 : 16]) if (SDWA_SRC0_SEL[1 : 0] == WORD1.b2) else (S0[15 : 0]))) - D0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8) - D0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_PK_F32_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); D1=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - tmp = Reg(((S0[31 : 16]) if (SDWA_SRC0_SEL[1 : 0] == WORD1.b2) else (S0[15 : 0]))) - D0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8) - D0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8) - return {'D0': D0._val} - -def _VOP1Op_V_PRNG_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.u32) - D0.u32 = ((tmp.u32 << 1) ^ ((197) if (tmp.u32[31]) else (0))) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_BF16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = F(_pack(S0.b16, 0)) - return {'D0': D0._val} - -VOP1Op_FUNCTIONS = { - VOP1Op.V_MOV_B32: _VOP1Op_V_MOV_B32, - VOP1Op.V_READFIRSTLANE_B32: _VOP1Op_V_READFIRSTLANE_B32, - VOP1Op.V_CVT_I32_F64: _VOP1Op_V_CVT_I32_F64, - VOP1Op.V_CVT_F64_I32: _VOP1Op_V_CVT_F64_I32, - VOP1Op.V_CVT_F32_I32: _VOP1Op_V_CVT_F32_I32, - VOP1Op.V_CVT_F32_U32: _VOP1Op_V_CVT_F32_U32, - VOP1Op.V_CVT_U32_F32: _VOP1Op_V_CVT_U32_F32, - VOP1Op.V_CVT_I32_F32: _VOP1Op_V_CVT_I32_F32, - VOP1Op.V_CVT_F16_F32: _VOP1Op_V_CVT_F16_F32, - VOP1Op.V_CVT_F32_F16: _VOP1Op_V_CVT_F32_F16, - VOP1Op.V_CVT_RPI_I32_F32: _VOP1Op_V_CVT_RPI_I32_F32, - VOP1Op.V_CVT_FLR_I32_F32: _VOP1Op_V_CVT_FLR_I32_F32, - VOP1Op.V_CVT_F32_F64: _VOP1Op_V_CVT_F32_F64, - VOP1Op.V_CVT_F64_F32: _VOP1Op_V_CVT_F64_F32, - VOP1Op.V_CVT_F32_UBYTE0: _VOP1Op_V_CVT_F32_UBYTE0, - VOP1Op.V_CVT_F32_UBYTE1: _VOP1Op_V_CVT_F32_UBYTE1, - VOP1Op.V_CVT_F32_UBYTE2: _VOP1Op_V_CVT_F32_UBYTE2, - VOP1Op.V_CVT_F32_UBYTE3: _VOP1Op_V_CVT_F32_UBYTE3, - VOP1Op.V_CVT_U32_F64: _VOP1Op_V_CVT_U32_F64, - VOP1Op.V_CVT_F64_U32: _VOP1Op_V_CVT_F64_U32, - VOP1Op.V_TRUNC_F64: _VOP1Op_V_TRUNC_F64, - VOP1Op.V_CEIL_F64: _VOP1Op_V_CEIL_F64, - VOP1Op.V_RNDNE_F64: _VOP1Op_V_RNDNE_F64, - VOP1Op.V_FLOOR_F64: _VOP1Op_V_FLOOR_F64, - VOP1Op.V_FRACT_F32: _VOP1Op_V_FRACT_F32, - VOP1Op.V_TRUNC_F32: _VOP1Op_V_TRUNC_F32, - VOP1Op.V_CEIL_F32: _VOP1Op_V_CEIL_F32, - VOP1Op.V_RNDNE_F32: _VOP1Op_V_RNDNE_F32, - VOP1Op.V_FLOOR_F32: _VOP1Op_V_FLOOR_F32, - VOP1Op.V_EXP_F32: _VOP1Op_V_EXP_F32, - VOP1Op.V_LOG_F32: _VOP1Op_V_LOG_F32, - VOP1Op.V_RCP_F32: _VOP1Op_V_RCP_F32, - VOP1Op.V_RCP_IFLAG_F32: _VOP1Op_V_RCP_IFLAG_F32, - VOP1Op.V_RSQ_F32: _VOP1Op_V_RSQ_F32, - VOP1Op.V_RCP_F64: _VOP1Op_V_RCP_F64, - VOP1Op.V_RSQ_F64: _VOP1Op_V_RSQ_F64, - VOP1Op.V_SQRT_F32: _VOP1Op_V_SQRT_F32, - VOP1Op.V_SQRT_F64: _VOP1Op_V_SQRT_F64, - VOP1Op.V_SIN_F32: _VOP1Op_V_SIN_F32, - VOP1Op.V_COS_F32: _VOP1Op_V_COS_F32, - VOP1Op.V_NOT_B32: _VOP1Op_V_NOT_B32, - VOP1Op.V_BFREV_B32: _VOP1Op_V_BFREV_B32, - VOP1Op.V_FFBH_U32: _VOP1Op_V_FFBH_U32, - VOP1Op.V_FFBL_B32: _VOP1Op_V_FFBL_B32, - VOP1Op.V_FFBH_I32: _VOP1Op_V_FFBH_I32, - VOP1Op.V_FREXP_EXP_I32_F64: _VOP1Op_V_FREXP_EXP_I32_F64, - VOP1Op.V_FREXP_MANT_F64: _VOP1Op_V_FREXP_MANT_F64, - VOP1Op.V_FRACT_F64: _VOP1Op_V_FRACT_F64, - VOP1Op.V_FREXP_EXP_I32_F32: _VOP1Op_V_FREXP_EXP_I32_F32, - VOP1Op.V_FREXP_MANT_F32: _VOP1Op_V_FREXP_MANT_F32, - VOP1Op.V_MOV_B64: _VOP1Op_V_MOV_B64, - VOP1Op.V_CVT_F16_U16: _VOP1Op_V_CVT_F16_U16, - VOP1Op.V_CVT_F16_I16: _VOP1Op_V_CVT_F16_I16, - VOP1Op.V_CVT_U16_F16: _VOP1Op_V_CVT_U16_F16, - VOP1Op.V_CVT_I16_F16: _VOP1Op_V_CVT_I16_F16, - VOP1Op.V_RCP_F16: _VOP1Op_V_RCP_F16, - VOP1Op.V_SQRT_F16: _VOP1Op_V_SQRT_F16, - VOP1Op.V_RSQ_F16: _VOP1Op_V_RSQ_F16, - VOP1Op.V_LOG_F16: _VOP1Op_V_LOG_F16, - VOP1Op.V_EXP_F16: _VOP1Op_V_EXP_F16, - VOP1Op.V_FREXP_MANT_F16: _VOP1Op_V_FREXP_MANT_F16, - VOP1Op.V_FREXP_EXP_I16_F16: _VOP1Op_V_FREXP_EXP_I16_F16, - VOP1Op.V_FLOOR_F16: _VOP1Op_V_FLOOR_F16, - VOP1Op.V_CEIL_F16: _VOP1Op_V_CEIL_F16, - VOP1Op.V_TRUNC_F16: _VOP1Op_V_TRUNC_F16, - VOP1Op.V_RNDNE_F16: _VOP1Op_V_RNDNE_F16, - VOP1Op.V_FRACT_F16: _VOP1Op_V_FRACT_F16, - VOP1Op.V_SIN_F16: _VOP1Op_V_SIN_F16, - VOP1Op.V_COS_F16: _VOP1Op_V_COS_F16, - 
VOP1Op.V_CVT_NORM_I16_F16: _VOP1Op_V_CVT_NORM_I16_F16, - VOP1Op.V_CVT_NORM_U16_F16: _VOP1Op_V_CVT_NORM_U16_F16, - VOP1Op.V_SAT_PK_U8_I16: _VOP1Op_V_SAT_PK_U8_I16, - VOP1Op.V_SWAP_B32: _VOP1Op_V_SWAP_B32, - VOP1Op.V_CVT_F32_FP8: _VOP1Op_V_CVT_F32_FP8, - VOP1Op.V_CVT_F32_BF8: _VOP1Op_V_CVT_F32_BF8, - VOP1Op.V_CVT_PK_F32_FP8: _VOP1Op_V_CVT_PK_F32_FP8, - VOP1Op.V_CVT_PK_F32_BF8: _VOP1Op_V_CVT_PK_F32_BF8, - VOP1Op.V_PRNG_B32: _VOP1Op_V_PRNG_B32, - VOP1Op.V_CVT_F32_BF16: _VOP1Op_V_CVT_F32_BF16, -} - -def _VOP2Op_V_CNDMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - D0.u32 = ((S1.u32) if (VCC.u64[laneId]) else (S0.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 - S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S1.f32 - S0.f32 - return {'D0': D0._val} - -def _VOP2Op_V_FMAC_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = fma(S0.f64, S1.f64, D0.f64) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i24) * (S1.i24) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_HI_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i24) * (S1.i24)) >> 32) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u24) * (S1.u24) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_HI_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u24) * (S1.u24)) >> 32) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(F(S0.f32))): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif (WAVE_MODE.IEEE and isSignalNAN(F(S1.f32))): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif ((F(S0.f32) == +0.0) and (F(S1.f32) == -0.0)): - D0.f32 = S1.f32 - elif 
((F(S0.f32) == -0.0) and (F(S1.f32) == +0.0)): - D0.f32 = S0.f32 - else: - D0.f32 = ((S0.f32) if (S0.f32 < S1.f32) else (S1.f32)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(F(S0.f32))): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif (WAVE_MODE.IEEE and isSignalNAN(F(S1.f32))): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif ((F(S0.f32) == +0.0) and (F(S1.f32) == -0.0)): - D0.f32 = S0.f32 - elif ((F(S0.f32) == -0.0) and (F(S1.f32) == +0.0)): - D0.f32 = S1.f32 - elif WAVE_MODE.IEEE: - D0.f32 = ((S0.f32) if (S0.f32 >= S1.f32) else (S1.f32)) - else: - D0.f32 = ((S0.f32) if (S0.f32 > S1.f32) else (S1.f32)) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 < S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 >= S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 < S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 >= S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_LSHRREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_ASHRREV_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S1.i32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_LSHLREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 << S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_FMAMK_F32(s0, s1, s2, d0, scc, vcc, laneId, 
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32) - return {'D0': D0._val} - -def _VOP2Op_V_FMAAK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32) - return {'D0': D0._val} - -def _VOP2Op_V_ADD_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32)) - VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_SUB_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32) - VCC.u64[laneId] = ((1) if (S1.u32 > S0.u32) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_SUBREV_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg(S1.u32 - S0.u32) - VCC.u64[laneId] = ((1) if (S0.u32 > S1.u32) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_ADDC_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32) + VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_SUBB_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32 - VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if ((S1.u32) + VCC.u64[laneId] > (S0.u32)) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_SUBBREV_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg(S1.u32 - S0.u32 - VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if ((S0.u32) + VCC.u64[laneId] > (S1.u32)) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 - S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S1.f16 - S0.f16 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, 
vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_MAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.f16 * S1.f16 + D0.f16) - if OPSEL.u4[3]: - D0 = Reg(_pack(tmp.f16, D0[15 : 0])) - else: - D0 = Reg(_pack(0, tmp.f16)) - return {'D0': D0._val} - -def _VOP2Op_V_MADMK_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SIMM16=Reg(literal) - # --- compiled pseudocode --- - tmp = Reg(S0.f16 * SIMM16.f16 + S1.f16) - return {} - -def _VOP2Op_V_MADAK_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SIMM16=Reg(literal) - # --- compiled pseudocode --- - tmp = Reg(S0.f16 * S1.f16 + SIMM16.f16) - return {} - -def _VOP2Op_V_ADD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 + S1.u16 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 - S1.u16 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S1.u16 - S0.u16 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_LO_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 * S1.u16 - return {'D0': D0._val} - -def _VOP2Op_V_LSHLREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S1.u16 << S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_LSHRREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S1.u16 >> S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_ASHRREV_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = (S1.i16 >> S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(F(S0.f16))): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif (WAVE_MODE.IEEE and isSignalNAN(F(S1.f16))): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif ((F(S0.f16) == +0.0) and (F(S1.f16) == -0.0)): - D0.f16 = S0.f16 - elif ((F(S0.f16) == -0.0) and (F(S1.f16) == +0.0)): - D0.f16 = S1.f16 - elif WAVE_MODE.IEEE: - D0.f16 = ((S0.f16) if (S0.f16 >= S1.f16) else (S1.f16)) - else: - D0.f16 = ((S0.f16) if (S0.f16 > S1.f16) else (S1.f16)) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_F16(s0, s1, s2, d0, scc, vcc, laneId, 
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(F(S0.f16))): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif (WAVE_MODE.IEEE and isSignalNAN(F(S1.f16))): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif ((F(S0.f16) == +0.0) and (F(S1.f16) == -0.0)): - D0.f16 = S1.f16 - elif ((F(S0.f16) == -0.0) and (F(S1.f16) == +0.0)): - D0.f16 = S0.f16 - else: - D0.f16 = ((S0.f16) if (S0.f16 < S1.f16) else (S1.f16)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = ((S0.u16) if (S0.u16 >= S1.u16) else (S1.u16)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = ((S0.i16) if (S0.i16 >= S1.i16) else (S1.i16)) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = ((S0.u16) if (S0.u16 < S1.u16) else (S1.u16)) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = ((S0.i16) if (S0.i16 < S1.i16) else (S1.i16)) - return {'D0': D0._val} - -def _VOP2Op_V_LDEXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * F(2.0 ** (S1.i16)) - return {'D0': D0._val} - -def _VOP2Op_V_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 + S1.u32 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 - S1.u32 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S1.u32 - S0.u32 - return {'D0': D0._val} - -def _VOP2Op_V_DOT2C_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.f32) - tmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16) - tmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16) - D0.f32 = tmp - return {'D0': D0._val} - -def _VOP2Op_V_DOT2C_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.i32) - tmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16) - tmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP2Op_V_DOT4C_I32_I8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.i32) - tmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8) - tmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8) - tmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8) - tmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP2Op_V_DOT8C_I32_I4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.i32) - tmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4) - tmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4) - tmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4) - tmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4) - tmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4) - tmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4) - tmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4) - tmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP2Op_V_FMAC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, D0.f32) - return {'D0': D0._val} - -def _VOP2Op_V_PK_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16) - D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16) - return {'D0': D0._val} - -def _VOP2Op_V_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_DOT2C_F32_BF16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.f32) - tmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16) - tmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16) - D0.f32 = tmp - return {'D0': D0._val} - -VOP2Op_FUNCTIONS = { - VOP2Op.V_CNDMASK_B32: _VOP2Op_V_CNDMASK_B32, - VOP2Op.V_ADD_F32: _VOP2Op_V_ADD_F32, - VOP2Op.V_SUB_F32: _VOP2Op_V_SUB_F32, - VOP2Op.V_SUBREV_F32: _VOP2Op_V_SUBREV_F32, - VOP2Op.V_FMAC_F64: _VOP2Op_V_FMAC_F64, - VOP2Op.V_MUL_F32: _VOP2Op_V_MUL_F32, - VOP2Op.V_MUL_I32_I24: _VOP2Op_V_MUL_I32_I24, - VOP2Op.V_MUL_HI_I32_I24: _VOP2Op_V_MUL_HI_I32_I24, - VOP2Op.V_MUL_U32_U24: _VOP2Op_V_MUL_U32_U24, - VOP2Op.V_MUL_HI_U32_U24: _VOP2Op_V_MUL_HI_U32_U24, - VOP2Op.V_MIN_F32: _VOP2Op_V_MIN_F32, - VOP2Op.V_MAX_F32: _VOP2Op_V_MAX_F32, - VOP2Op.V_MIN_I32: _VOP2Op_V_MIN_I32, - VOP2Op.V_MAX_I32: _VOP2Op_V_MAX_I32, - VOP2Op.V_MIN_U32: _VOP2Op_V_MIN_U32, - VOP2Op.V_MAX_U32: _VOP2Op_V_MAX_U32, - VOP2Op.V_LSHRREV_B32: _VOP2Op_V_LSHRREV_B32, - VOP2Op.V_ASHRREV_I32: _VOP2Op_V_ASHRREV_I32, - VOP2Op.V_LSHLREV_B32: _VOP2Op_V_LSHLREV_B32, - VOP2Op.V_AND_B32: _VOP2Op_V_AND_B32, - VOP2Op.V_OR_B32: _VOP2Op_V_OR_B32, - VOP2Op.V_XOR_B32: _VOP2Op_V_XOR_B32, - VOP2Op.V_FMAMK_F32: _VOP2Op_V_FMAMK_F32, - VOP2Op.V_FMAAK_F32: _VOP2Op_V_FMAAK_F32, - VOP2Op.V_ADD_CO_U32: _VOP2Op_V_ADD_CO_U32, - VOP2Op.V_SUB_CO_U32: _VOP2Op_V_SUB_CO_U32, - 
VOP2Op.V_SUBREV_CO_U32: _VOP2Op_V_SUBREV_CO_U32, - VOP2Op.V_ADDC_CO_U32: _VOP2Op_V_ADDC_CO_U32, - VOP2Op.V_SUBB_CO_U32: _VOP2Op_V_SUBB_CO_U32, - VOP2Op.V_SUBBREV_CO_U32: _VOP2Op_V_SUBBREV_CO_U32, - VOP2Op.V_ADD_F16: _VOP2Op_V_ADD_F16, - VOP2Op.V_SUB_F16: _VOP2Op_V_SUB_F16, - VOP2Op.V_SUBREV_F16: _VOP2Op_V_SUBREV_F16, - VOP2Op.V_MUL_F16: _VOP2Op_V_MUL_F16, - VOP2Op.V_MAC_F16: _VOP2Op_V_MAC_F16, - VOP2Op.V_MADMK_F16: _VOP2Op_V_MADMK_F16, - VOP2Op.V_MADAK_F16: _VOP2Op_V_MADAK_F16, - VOP2Op.V_ADD_U16: _VOP2Op_V_ADD_U16, - VOP2Op.V_SUB_U16: _VOP2Op_V_SUB_U16, - VOP2Op.V_SUBREV_U16: _VOP2Op_V_SUBREV_U16, - VOP2Op.V_MUL_LO_U16: _VOP2Op_V_MUL_LO_U16, - VOP2Op.V_LSHLREV_B16: _VOP2Op_V_LSHLREV_B16, - VOP2Op.V_LSHRREV_B16: _VOP2Op_V_LSHRREV_B16, - VOP2Op.V_ASHRREV_I16: _VOP2Op_V_ASHRREV_I16, - VOP2Op.V_MAX_F16: _VOP2Op_V_MAX_F16, - VOP2Op.V_MIN_F16: _VOP2Op_V_MIN_F16, - VOP2Op.V_MAX_U16: _VOP2Op_V_MAX_U16, - VOP2Op.V_MAX_I16: _VOP2Op_V_MAX_I16, - VOP2Op.V_MIN_U16: _VOP2Op_V_MIN_U16, - VOP2Op.V_MIN_I16: _VOP2Op_V_MIN_I16, - VOP2Op.V_LDEXP_F16: _VOP2Op_V_LDEXP_F16, - VOP2Op.V_ADD_U32: _VOP2Op_V_ADD_U32, - VOP2Op.V_SUB_U32: _VOP2Op_V_SUB_U32, - VOP2Op.V_SUBREV_U32: _VOP2Op_V_SUBREV_U32, - VOP2Op.V_DOT2C_F32_F16: _VOP2Op_V_DOT2C_F32_F16, - VOP2Op.V_DOT2C_I32_I16: _VOP2Op_V_DOT2C_I32_I16, - VOP2Op.V_DOT4C_I32_I8: _VOP2Op_V_DOT4C_I32_I8, - VOP2Op.V_DOT8C_I32_I4: _VOP2Op_V_DOT8C_I32_I4, - VOP2Op.V_FMAC_F32: _VOP2Op_V_FMAC_F32, - VOP2Op.V_PK_FMAC_F16: _VOP2Op_V_PK_FMAC_F16, - VOP2Op.V_XNOR_B32: _VOP2Op_V_XNOR_B32, - VOP2Op.V_DOT2C_F32_BF16: _VOP2Op_V_DOT2C_F32_BF16, -} - -def _VOP3POp_V_PK_MAD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16 - tmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16 - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MUL_LO_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 - tmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 - D0.b32 = tmp.b32 - return {'D0': D0._val} - -def _VOP3POp_V_PK_ADD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16 - tmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16 - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_SUB_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16 - tmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16 - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_LSHLREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32) - tmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32) - D0.b32 = tmp.b32 - return {'D0': D0._val} - -def _VOP3POp_V_PK_LSHRREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); 
S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32) - tmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32) - D0.b32 = tmp.b32 - return {'D0': D0._val} - -def _VOP3POp_V_PK_ASHRREV_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32) - tmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32) - D0.b32 = tmp.b32 - return {'D0': D0._val} - -def _VOP3POp_V_PK_MAX_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = ((S0[15 : 0].i16) if (S0[15 : 0].i16 >= S1[15 : 0].i16) else (S1[15 : 0].i16)) - tmp[31 : 16].i16 = ((S0[31 : 16].i16) if (S0[31 : 16].i16 >= S1[31 : 16].i16) else (S1[31 : 16].i16)) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MIN_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = ((S0[15 : 0].i16) if (S0[15 : 0].i16 < S1[15 : 0].i16) else (S1[15 : 0].i16)) - tmp[31 : 16].i16 = ((S0[31 : 16].i16) if (S0[31 : 16].i16 < S1[31 : 16].i16) else (S1[31 : 16].i16)) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16 - tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16 - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_ADD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16 - tmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16 - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_SUB_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16 - tmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16 - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MAX_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = ((S0[15 : 0].u16) if (S0[15 : 0].u16 >= S1[15 : 0].u16) else (S1[15 : 0].u16)) - tmp[31 : 16].u16 = ((S0[31 : 16].u16) if (S0[31 : 16].u16 >= S1[31 : 16].u16) else (S1[31 : 16].u16)) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MIN_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = ((S0[15 : 0].u16) if (S0[15 : 0].u16 < S1[15 : 0].u16) else (S1[15 : 0].u16)) - tmp[31 : 16].u16 = ((S0[31 : 16].u16) if (S0[31 : 16].u16 < S1[31 : 16].u16) else (S1[31 : 16].u16)) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_FMA_F16(s0, s1, s2, d0, scc, vcc, laneId, 
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16) - tmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16 - tmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16 - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16 - tmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16 - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16) - tmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MAX_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16) - tmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_MAD_MIX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)] - # --- compiled pseudocode --- - for i in range(0, int(2)+1): - if not OPSEL_HI.u3[i]: - ins[i] = S[i].f32 - elif OPSEL.u3[i]: - ins[i] = f16_to_f32(S[i][31 : 16].f16) - else: - ins[i] = f16_to_f32(S[i][15 : 0].f16) - D0[31 : 0].f32 = ins[0] * ins[1] + ins[2] - return {'D0': D0._val} - -def _VOP3POp_V_MAD_MIXLO_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)] - # --- compiled pseudocode --- - for i in range(0, int(2)+1): - if not OPSEL_HI.u3[i]: - ins[i] = S[i].f32 - elif OPSEL.u3[i]: - ins[i] = f16_to_f32(S[i][31 : 16].f16) - else: - ins[i] = f16_to_f32(S[i][15 : 0].f16) - D0[15 : 0].f16 = f32_to_f16(ins[0] * ins[1] + ins[2]) - return {'D0': D0._val} - -def _VOP3POp_V_MAD_MIXHI_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)] - # --- compiled pseudocode --- - for i in range(0, int(2)+1): - if not OPSEL_HI.u3[i]: - ins[i] = S[i].f32 - elif OPSEL.u3[i]: - ins[i] = f16_to_f32(S[i][31 : 16].f16) - else: - ins[i] = f16_to_f32(S[i][15 : 0].f16) - D0[31 : 16].f16 = f32_to_f16(ins[0] * ins[1] + ins[2]) - return {'D0': D0._val} - -def _VOP3POp_V_DOT2_F32_F16(s0, s1, s2, d0, 
scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.f32) - tmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16) - tmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16) - D0.f32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_DOT2_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.i32) - tmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16) - tmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_DOT2_U32_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += u16_to_u32(S0[15 : 0].u16) * u16_to_u32(S1[15 : 0].u16) - tmp += u16_to_u32(S0[31 : 16].u16) * u16_to_u32(S1[31 : 16].u16) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_DOT4_I32_I8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.i32) - tmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8) - tmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8) - tmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8) - tmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_DOT4_U32_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8) - tmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8) - tmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8) - tmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_DOT8_I32_I4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.i32) - tmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4) - tmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4) - tmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4) - tmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4) - tmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4) - tmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4) - tmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4) - tmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_DOT8_U32_U4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4) - tmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4) - tmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4) - tmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4) - tmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4) - tmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4) - tmp += u4_to_u32(S0[27 : 
24].u4) * u4_to_u32(S1[27 : 24].u4) - tmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_FMA_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 0].f32 = fma(S0[31 : 0].f32, S1[31 : 0].f32, S2[31 : 0].f32) - tmp[63 : 32].f32 = fma(S0[63 : 32].f32, S1[63 : 32].f32, S2[63 : 32].f32) - D0.b64 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 0].f32 = S0[31 : 0].f32 * S1[31 : 0].f32 - tmp[63 : 32].f32 = S0[63 : 32].f32 * S1[63 : 32].f32 - D0.b64 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 0].f32 = S0[31 : 0].f32 + S1[31 : 0].f32 - tmp[63 : 32].f32 = S0[63 : 32].f32 + S1[63 : 32].f32 - D0.b64 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); OPSEL=Reg(opsel); tmp0=Reg(0); tmp1=Reg(0)  # opsel threaded through, as in the V_MAD_MIX_* handlers - # --- compiled pseudocode --- - tmp0.u32 = S0.u32[OPSEL[0].i32 * 32 + 31 : OPSEL[0].i32 * 32] - tmp1.u32 = S1.u32[OPSEL[1].i32 * 32 + 31 : OPSEL[1].i32 * 32] - D0.u32[31 : 0] = tmp0.u32 - D0.u32[63 : 32] = tmp1.u32 - return {'D0': D0._val} - -def _VOP3POp_V_DOT2_F32_BF16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(F(S0[15 : 0].bf16) * F(S1[15 : 0].bf16)) - tmp += F(S0[31 : 16].bf16) * F(S1[31 : 16].bf16) - tmp += S2.f32 - D0.f32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MINIMUM3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 16].f16 = F(v_minimum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16)) - tmp[15 : 0].f16 = F(v_minimum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16)) - D0.b32 = tmp.b32 - return {'D0': D0._val} - -def _VOP3POp_V_PK_MAXIMUM3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 16].f16 = F(v_maximum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16)) - tmp[15 : 0].f16 = F(v_maximum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16)) - D0.b32 = tmp.b32 - return {'D0': D0._val} - -VOP3POp_FUNCTIONS = { - VOP3POp.V_PK_MAD_I16: _VOP3POp_V_PK_MAD_I16, - VOP3POp.V_PK_MUL_LO_U16: _VOP3POp_V_PK_MUL_LO_U16, - VOP3POp.V_PK_ADD_I16: _VOP3POp_V_PK_ADD_I16, - VOP3POp.V_PK_SUB_I16: _VOP3POp_V_PK_SUB_I16, - VOP3POp.V_PK_LSHLREV_B16: _VOP3POp_V_PK_LSHLREV_B16, - VOP3POp.V_PK_LSHRREV_B16: _VOP3POp_V_PK_LSHRREV_B16, - VOP3POp.V_PK_ASHRREV_I16: _VOP3POp_V_PK_ASHRREV_I16, - VOP3POp.V_PK_MAX_I16: _VOP3POp_V_PK_MAX_I16, - VOP3POp.V_PK_MIN_I16: _VOP3POp_V_PK_MIN_I16, - VOP3POp.V_PK_MAD_U16: _VOP3POp_V_PK_MAD_U16, - VOP3POp.V_PK_ADD_U16: _VOP3POp_V_PK_ADD_U16, - VOP3POp.V_PK_SUB_U16: _VOP3POp_V_PK_SUB_U16, - VOP3POp.V_PK_MAX_U16: 
_VOP3POp_V_PK_MAX_U16, - VOP3POp.V_PK_MIN_U16: _VOP3POp_V_PK_MIN_U16, - VOP3POp.V_PK_FMA_F16: _VOP3POp_V_PK_FMA_F16, - VOP3POp.V_PK_ADD_F16: _VOP3POp_V_PK_ADD_F16, - VOP3POp.V_PK_MUL_F16: _VOP3POp_V_PK_MUL_F16, - VOP3POp.V_PK_MIN_F16: _VOP3POp_V_PK_MIN_F16, - VOP3POp.V_PK_MAX_F16: _VOP3POp_V_PK_MAX_F16, - VOP3POp.V_MAD_MIX_F32: _VOP3POp_V_MAD_MIX_F32, - VOP3POp.V_MAD_MIXLO_F16: _VOP3POp_V_MAD_MIXLO_F16, - VOP3POp.V_MAD_MIXHI_F16: _VOP3POp_V_MAD_MIXHI_F16, - VOP3POp.V_DOT2_F32_F16: _VOP3POp_V_DOT2_F32_F16, - VOP3POp.V_DOT2_I32_I16: _VOP3POp_V_DOT2_I32_I16, - VOP3POp.V_DOT2_U32_U16: _VOP3POp_V_DOT2_U32_U16, - VOP3POp.V_DOT4_I32_I8: _VOP3POp_V_DOT4_I32_I8, - VOP3POp.V_DOT4_U32_U8: _VOP3POp_V_DOT4_U32_U8, - VOP3POp.V_DOT8_I32_I4: _VOP3POp_V_DOT8_I32_I4, - VOP3POp.V_DOT8_U32_U4: _VOP3POp_V_DOT8_U32_U4, - VOP3POp.V_PK_FMA_F32: _VOP3POp_V_PK_FMA_F32, - VOP3POp.V_PK_MUL_F32: _VOP3POp_V_PK_MUL_F32, - VOP3POp.V_PK_ADD_F32: _VOP3POp_V_PK_ADD_F32, - VOP3POp.V_PK_MOV_B32: _VOP3POp_V_PK_MOV_B32, - VOP3POp.V_DOT2_F32_BF16: _VOP3POp_V_DOT2_F32_BF16, - VOP3POp.V_PK_MINIMUM3_F16: _VOP3POp_V_PK_MINIMUM3_F16, - VOP3POp.V_PK_MAXIMUM3_F16: _VOP3POp_V_PK_MAXIMUM3_F16, -} - -def _VOPCOp_V_CMP_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - EXEC.u64[laneId] = D0.u64[laneId] = result - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMP_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif 
isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - EXEC.u64[laneId] = D0.u64[laneId] = result - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMP_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - EXEC.u64[laneId] = D0.u64[laneId] = result - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMP_F_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 < S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 == S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 <= S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 > S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # 
--- compiled pseudocode --- - D0.u64[laneId] = S0.f16 != S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 >= S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 >= S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 != S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 > S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 <= S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 == S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 < S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_TRU_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_F_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def 
_VOPCOp_V_CMPX_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <= S1.f16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 != S1.f16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 >= S1.f16) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if 
pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 != S1.f16) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 > S1.f16) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 <= S1.f16) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 == S1.f16) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 < S1.f16) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_TRU_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMP_F_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 < S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 == S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 <= S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 > S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, 
VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 != S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 >= S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 >= S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 != S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 > S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 <= S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 == S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 < S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_TRU_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_F_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # 
--- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 != S1.f32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 >= S1.f32) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 != S1.f32) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 > S1.f32) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 <= S1.f32) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 == S1.f32) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 < S1.f32) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_TRU_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMP_F_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 < S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 == S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 <= S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 
> S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 != S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 >= S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 >= S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 != S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 > S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 <= S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 == S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 < S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_TRU_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_F_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, 
pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 != S1.f64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 >= S1.f64) - return {'D0': D0._val, 'EXEC': EXEC._val} - 
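The V_CMP_* and V_CMPX_* handlers above all reduce to a single per-lane bit write: V_CMP_* sets bit laneId of the destination mask, and V_CMPX_* mirrors the same bit into EXEC. A minimal standalone sketch of that pattern (plain Python, hypothetical names, independent of the Reg class used by the generated code):

import struct

# set or clear bit `lane` of a 64-bit mask -- the D0.u64[laneId] = ... pattern
def set_lane(mask: int, lane: int, bit: bool) -> int:
  return (mask & ~(1 << lane)) | (int(bit) << lane)

# V_CMP_LT_F32: compare one lane's two f32 register values, write one mask bit
def v_cmp_lt_f32(s0_bits: int, s1_bits: int, d0: int, lane: int) -> int:
  a = struct.unpack('<f', struct.pack('<I', s0_bits & 0xffffffff))[0]
  b = struct.unpack('<f', struct.pack('<I', s1_bits & 0xffffffff))[0]
  return set_lane(d0, lane, a < b)

# V_CMPX_LT_F32: same compare, but the result bit is also written to EXEC
def v_cmpx_lt_f32(s0_bits: int, s1_bits: int, d0: int, exec_mask: int, lane: int) -> tuple[int, int]:
  d0 = v_cmp_lt_f32(s0_bits, s1_bits, d0, lane)
  return d0, set_lane(exec_mask, lane, bool((d0 >> lane) & 1))

Python float comparison is already unordered-false on NaN, so the plain compares match the ordered V_CMP_LT/GT/LE/GE semantics; the N*-named forms are their negations, which is why they read `not (S0 < S1)` in the handlers above.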
-def _VOPCOp_V_CMPX_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 != S1.f64) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 > S1.f64) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 <= S1.f64) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 == S1.f64) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 < S1.f64) - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_TRU_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMP_F_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 < S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 == S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 <= S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); 
PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 > S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 != S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 >= S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_T_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_F_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 < S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 == S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 <= S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 > S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 != S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 >= S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_T_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_F_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); 
PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 != S1.i16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_F_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); 
EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 != S1.u16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMP_F_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 < S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 == S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 <= S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 > S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, 
VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 != S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 >= S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_T_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_F_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 < S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 == S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 <= S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 > S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 != S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 >= S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_T_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_F_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, 
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 != S1.i32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_F_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_U32(s0, s1, s2, 
d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 != S1.u32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMP_F_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 < S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 == S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 <= S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 > S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 != S1.i64 - return {'D0': D0._val} - -def 
_VOPCOp_V_CMP_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 >= S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_T_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_F_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 < S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 == S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 <= S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 > S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 != S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 >= S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_T_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_F_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = 
D0.u64[laneId] = S0.i64 < S1.i64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 != S1.i64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_F_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode 
--- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 != S1.u64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64 - return {'D0': D0._val, 'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val, 'EXEC': EXEC._val} - -VOPCOp_FUNCTIONS = { - VOPCOp.V_CMP_CLASS_F32: _VOPCOp_V_CMP_CLASS_F32, - VOPCOp.V_CMPX_CLASS_F32: _VOPCOp_V_CMPX_CLASS_F32, - VOPCOp.V_CMP_CLASS_F64: _VOPCOp_V_CMP_CLASS_F64, - VOPCOp.V_CMPX_CLASS_F64: _VOPCOp_V_CMPX_CLASS_F64, - VOPCOp.V_CMP_CLASS_F16: _VOPCOp_V_CMP_CLASS_F16, - VOPCOp.V_CMPX_CLASS_F16: _VOPCOp_V_CMPX_CLASS_F16, - VOPCOp.V_CMP_F_F16: _VOPCOp_V_CMP_F_F16, - VOPCOp.V_CMP_LT_F16: _VOPCOp_V_CMP_LT_F16, - VOPCOp.V_CMP_EQ_F16: _VOPCOp_V_CMP_EQ_F16, - VOPCOp.V_CMP_LE_F16: _VOPCOp_V_CMP_LE_F16, - VOPCOp.V_CMP_GT_F16: _VOPCOp_V_CMP_GT_F16, - VOPCOp.V_CMP_LG_F16: _VOPCOp_V_CMP_LG_F16, - VOPCOp.V_CMP_GE_F16: _VOPCOp_V_CMP_GE_F16, - VOPCOp.V_CMP_O_F16: _VOPCOp_V_CMP_O_F16, - VOPCOp.V_CMP_U_F16: _VOPCOp_V_CMP_U_F16, - VOPCOp.V_CMP_NGE_F16: _VOPCOp_V_CMP_NGE_F16, - VOPCOp.V_CMP_NLG_F16: _VOPCOp_V_CMP_NLG_F16, - VOPCOp.V_CMP_NGT_F16: _VOPCOp_V_CMP_NGT_F16, - VOPCOp.V_CMP_NLE_F16: _VOPCOp_V_CMP_NLE_F16, - VOPCOp.V_CMP_NEQ_F16: _VOPCOp_V_CMP_NEQ_F16, - VOPCOp.V_CMP_NLT_F16: _VOPCOp_V_CMP_NLT_F16, - VOPCOp.V_CMP_TRU_F16: _VOPCOp_V_CMP_TRU_F16, - VOPCOp.V_CMPX_F_F16: _VOPCOp_V_CMPX_F_F16, - VOPCOp.V_CMPX_LT_F16: _VOPCOp_V_CMPX_LT_F16, - VOPCOp.V_CMPX_EQ_F16: _VOPCOp_V_CMPX_EQ_F16, - VOPCOp.V_CMPX_LE_F16: _VOPCOp_V_CMPX_LE_F16, - VOPCOp.V_CMPX_GT_F16: _VOPCOp_V_CMPX_GT_F16, - VOPCOp.V_CMPX_LG_F16: _VOPCOp_V_CMPX_LG_F16, - VOPCOp.V_CMPX_GE_F16: _VOPCOp_V_CMPX_GE_F16, - VOPCOp.V_CMPX_O_F16: _VOPCOp_V_CMPX_O_F16, - VOPCOp.V_CMPX_U_F16: _VOPCOp_V_CMPX_U_F16, - VOPCOp.V_CMPX_NGE_F16: _VOPCOp_V_CMPX_NGE_F16, - VOPCOp.V_CMPX_NLG_F16: _VOPCOp_V_CMPX_NLG_F16, - VOPCOp.V_CMPX_NGT_F16: _VOPCOp_V_CMPX_NGT_F16, - VOPCOp.V_CMPX_NLE_F16: _VOPCOp_V_CMPX_NLE_F16, - VOPCOp.V_CMPX_NEQ_F16: _VOPCOp_V_CMPX_NEQ_F16, - VOPCOp.V_CMPX_NLT_F16: _VOPCOp_V_CMPX_NLT_F16, - VOPCOp.V_CMPX_TRU_F16: _VOPCOp_V_CMPX_TRU_F16, - 
VOPCOp.V_CMP_F_F32: _VOPCOp_V_CMP_F_F32, - VOPCOp.V_CMP_LT_F32: _VOPCOp_V_CMP_LT_F32, - VOPCOp.V_CMP_EQ_F32: _VOPCOp_V_CMP_EQ_F32, - VOPCOp.V_CMP_LE_F32: _VOPCOp_V_CMP_LE_F32, - VOPCOp.V_CMP_GT_F32: _VOPCOp_V_CMP_GT_F32, - VOPCOp.V_CMP_LG_F32: _VOPCOp_V_CMP_LG_F32, - VOPCOp.V_CMP_GE_F32: _VOPCOp_V_CMP_GE_F32, - VOPCOp.V_CMP_O_F32: _VOPCOp_V_CMP_O_F32, - VOPCOp.V_CMP_U_F32: _VOPCOp_V_CMP_U_F32, - VOPCOp.V_CMP_NGE_F32: _VOPCOp_V_CMP_NGE_F32, - VOPCOp.V_CMP_NLG_F32: _VOPCOp_V_CMP_NLG_F32, - VOPCOp.V_CMP_NGT_F32: _VOPCOp_V_CMP_NGT_F32, - VOPCOp.V_CMP_NLE_F32: _VOPCOp_V_CMP_NLE_F32, - VOPCOp.V_CMP_NEQ_F32: _VOPCOp_V_CMP_NEQ_F32, - VOPCOp.V_CMP_NLT_F32: _VOPCOp_V_CMP_NLT_F32, - VOPCOp.V_CMP_TRU_F32: _VOPCOp_V_CMP_TRU_F32, - VOPCOp.V_CMPX_F_F32: _VOPCOp_V_CMPX_F_F32, - VOPCOp.V_CMPX_LT_F32: _VOPCOp_V_CMPX_LT_F32, - VOPCOp.V_CMPX_EQ_F32: _VOPCOp_V_CMPX_EQ_F32, - VOPCOp.V_CMPX_LE_F32: _VOPCOp_V_CMPX_LE_F32, - VOPCOp.V_CMPX_GT_F32: _VOPCOp_V_CMPX_GT_F32, - VOPCOp.V_CMPX_LG_F32: _VOPCOp_V_CMPX_LG_F32, - VOPCOp.V_CMPX_GE_F32: _VOPCOp_V_CMPX_GE_F32, - VOPCOp.V_CMPX_O_F32: _VOPCOp_V_CMPX_O_F32, - VOPCOp.V_CMPX_U_F32: _VOPCOp_V_CMPX_U_F32, - VOPCOp.V_CMPX_NGE_F32: _VOPCOp_V_CMPX_NGE_F32, - VOPCOp.V_CMPX_NLG_F32: _VOPCOp_V_CMPX_NLG_F32, - VOPCOp.V_CMPX_NGT_F32: _VOPCOp_V_CMPX_NGT_F32, - VOPCOp.V_CMPX_NLE_F32: _VOPCOp_V_CMPX_NLE_F32, - VOPCOp.V_CMPX_NEQ_F32: _VOPCOp_V_CMPX_NEQ_F32, - VOPCOp.V_CMPX_NLT_F32: _VOPCOp_V_CMPX_NLT_F32, - VOPCOp.V_CMPX_TRU_F32: _VOPCOp_V_CMPX_TRU_F32, - VOPCOp.V_CMP_F_F64: _VOPCOp_V_CMP_F_F64, - VOPCOp.V_CMP_LT_F64: _VOPCOp_V_CMP_LT_F64, - VOPCOp.V_CMP_EQ_F64: _VOPCOp_V_CMP_EQ_F64, - VOPCOp.V_CMP_LE_F64: _VOPCOp_V_CMP_LE_F64, - VOPCOp.V_CMP_GT_F64: _VOPCOp_V_CMP_GT_F64, - VOPCOp.V_CMP_LG_F64: _VOPCOp_V_CMP_LG_F64, - VOPCOp.V_CMP_GE_F64: _VOPCOp_V_CMP_GE_F64, - VOPCOp.V_CMP_O_F64: _VOPCOp_V_CMP_O_F64, - VOPCOp.V_CMP_U_F64: _VOPCOp_V_CMP_U_F64, - VOPCOp.V_CMP_NGE_F64: _VOPCOp_V_CMP_NGE_F64, - VOPCOp.V_CMP_NLG_F64: _VOPCOp_V_CMP_NLG_F64, - VOPCOp.V_CMP_NGT_F64: _VOPCOp_V_CMP_NGT_F64, - VOPCOp.V_CMP_NLE_F64: _VOPCOp_V_CMP_NLE_F64, - VOPCOp.V_CMP_NEQ_F64: _VOPCOp_V_CMP_NEQ_F64, - VOPCOp.V_CMP_NLT_F64: _VOPCOp_V_CMP_NLT_F64, - VOPCOp.V_CMP_TRU_F64: _VOPCOp_V_CMP_TRU_F64, - VOPCOp.V_CMPX_F_F64: _VOPCOp_V_CMPX_F_F64, - VOPCOp.V_CMPX_LT_F64: _VOPCOp_V_CMPX_LT_F64, - VOPCOp.V_CMPX_EQ_F64: _VOPCOp_V_CMPX_EQ_F64, - VOPCOp.V_CMPX_LE_F64: _VOPCOp_V_CMPX_LE_F64, - VOPCOp.V_CMPX_GT_F64: _VOPCOp_V_CMPX_GT_F64, - VOPCOp.V_CMPX_LG_F64: _VOPCOp_V_CMPX_LG_F64, - VOPCOp.V_CMPX_GE_F64: _VOPCOp_V_CMPX_GE_F64, - VOPCOp.V_CMPX_O_F64: _VOPCOp_V_CMPX_O_F64, - VOPCOp.V_CMPX_U_F64: _VOPCOp_V_CMPX_U_F64, - VOPCOp.V_CMPX_NGE_F64: _VOPCOp_V_CMPX_NGE_F64, - VOPCOp.V_CMPX_NLG_F64: _VOPCOp_V_CMPX_NLG_F64, - VOPCOp.V_CMPX_NGT_F64: _VOPCOp_V_CMPX_NGT_F64, - VOPCOp.V_CMPX_NLE_F64: _VOPCOp_V_CMPX_NLE_F64, - VOPCOp.V_CMPX_NEQ_F64: _VOPCOp_V_CMPX_NEQ_F64, - VOPCOp.V_CMPX_NLT_F64: _VOPCOp_V_CMPX_NLT_F64, - VOPCOp.V_CMPX_TRU_F64: _VOPCOp_V_CMPX_TRU_F64, - VOPCOp.V_CMP_F_I16: _VOPCOp_V_CMP_F_I16, - VOPCOp.V_CMP_LT_I16: _VOPCOp_V_CMP_LT_I16, - VOPCOp.V_CMP_EQ_I16: _VOPCOp_V_CMP_EQ_I16, - VOPCOp.V_CMP_LE_I16: _VOPCOp_V_CMP_LE_I16, - VOPCOp.V_CMP_GT_I16: _VOPCOp_V_CMP_GT_I16, - VOPCOp.V_CMP_NE_I16: _VOPCOp_V_CMP_NE_I16, - VOPCOp.V_CMP_GE_I16: _VOPCOp_V_CMP_GE_I16, - VOPCOp.V_CMP_T_I16: _VOPCOp_V_CMP_T_I16, - VOPCOp.V_CMP_F_U16: _VOPCOp_V_CMP_F_U16, - VOPCOp.V_CMP_LT_U16: _VOPCOp_V_CMP_LT_U16, - VOPCOp.V_CMP_EQ_U16: _VOPCOp_V_CMP_EQ_U16, - VOPCOp.V_CMP_LE_U16: _VOPCOp_V_CMP_LE_U16, - VOPCOp.V_CMP_GT_U16: _VOPCOp_V_CMP_GT_U16, - 
VOPCOp.V_CMP_NE_U16: _VOPCOp_V_CMP_NE_U16, - VOPCOp.V_CMP_GE_U16: _VOPCOp_V_CMP_GE_U16, - VOPCOp.V_CMP_T_U16: _VOPCOp_V_CMP_T_U16, - VOPCOp.V_CMPX_F_I16: _VOPCOp_V_CMPX_F_I16, - VOPCOp.V_CMPX_LT_I16: _VOPCOp_V_CMPX_LT_I16, - VOPCOp.V_CMPX_EQ_I16: _VOPCOp_V_CMPX_EQ_I16, - VOPCOp.V_CMPX_LE_I16: _VOPCOp_V_CMPX_LE_I16, - VOPCOp.V_CMPX_GT_I16: _VOPCOp_V_CMPX_GT_I16, - VOPCOp.V_CMPX_NE_I16: _VOPCOp_V_CMPX_NE_I16, - VOPCOp.V_CMPX_GE_I16: _VOPCOp_V_CMPX_GE_I16, - VOPCOp.V_CMPX_T_I16: _VOPCOp_V_CMPX_T_I16, - VOPCOp.V_CMPX_F_U16: _VOPCOp_V_CMPX_F_U16, - VOPCOp.V_CMPX_LT_U16: _VOPCOp_V_CMPX_LT_U16, - VOPCOp.V_CMPX_EQ_U16: _VOPCOp_V_CMPX_EQ_U16, - VOPCOp.V_CMPX_LE_U16: _VOPCOp_V_CMPX_LE_U16, - VOPCOp.V_CMPX_GT_U16: _VOPCOp_V_CMPX_GT_U16, - VOPCOp.V_CMPX_NE_U16: _VOPCOp_V_CMPX_NE_U16, - VOPCOp.V_CMPX_GE_U16: _VOPCOp_V_CMPX_GE_U16, - VOPCOp.V_CMPX_T_U16: _VOPCOp_V_CMPX_T_U16, - VOPCOp.V_CMP_F_I32: _VOPCOp_V_CMP_F_I32, - VOPCOp.V_CMP_LT_I32: _VOPCOp_V_CMP_LT_I32, - VOPCOp.V_CMP_EQ_I32: _VOPCOp_V_CMP_EQ_I32, - VOPCOp.V_CMP_LE_I32: _VOPCOp_V_CMP_LE_I32, - VOPCOp.V_CMP_GT_I32: _VOPCOp_V_CMP_GT_I32, - VOPCOp.V_CMP_NE_I32: _VOPCOp_V_CMP_NE_I32, - VOPCOp.V_CMP_GE_I32: _VOPCOp_V_CMP_GE_I32, - VOPCOp.V_CMP_T_I32: _VOPCOp_V_CMP_T_I32, - VOPCOp.V_CMP_F_U32: _VOPCOp_V_CMP_F_U32, - VOPCOp.V_CMP_LT_U32: _VOPCOp_V_CMP_LT_U32, - VOPCOp.V_CMP_EQ_U32: _VOPCOp_V_CMP_EQ_U32, - VOPCOp.V_CMP_LE_U32: _VOPCOp_V_CMP_LE_U32, - VOPCOp.V_CMP_GT_U32: _VOPCOp_V_CMP_GT_U32, - VOPCOp.V_CMP_NE_U32: _VOPCOp_V_CMP_NE_U32, - VOPCOp.V_CMP_GE_U32: _VOPCOp_V_CMP_GE_U32, - VOPCOp.V_CMP_T_U32: _VOPCOp_V_CMP_T_U32, - VOPCOp.V_CMPX_F_I32: _VOPCOp_V_CMPX_F_I32, - VOPCOp.V_CMPX_LT_I32: _VOPCOp_V_CMPX_LT_I32, - VOPCOp.V_CMPX_EQ_I32: _VOPCOp_V_CMPX_EQ_I32, - VOPCOp.V_CMPX_LE_I32: _VOPCOp_V_CMPX_LE_I32, - VOPCOp.V_CMPX_GT_I32: _VOPCOp_V_CMPX_GT_I32, - VOPCOp.V_CMPX_NE_I32: _VOPCOp_V_CMPX_NE_I32, - VOPCOp.V_CMPX_GE_I32: _VOPCOp_V_CMPX_GE_I32, - VOPCOp.V_CMPX_T_I32: _VOPCOp_V_CMPX_T_I32, - VOPCOp.V_CMPX_F_U32: _VOPCOp_V_CMPX_F_U32, - VOPCOp.V_CMPX_LT_U32: _VOPCOp_V_CMPX_LT_U32, - VOPCOp.V_CMPX_EQ_U32: _VOPCOp_V_CMPX_EQ_U32, - VOPCOp.V_CMPX_LE_U32: _VOPCOp_V_CMPX_LE_U32, - VOPCOp.V_CMPX_GT_U32: _VOPCOp_V_CMPX_GT_U32, - VOPCOp.V_CMPX_NE_U32: _VOPCOp_V_CMPX_NE_U32, - VOPCOp.V_CMPX_GE_U32: _VOPCOp_V_CMPX_GE_U32, - VOPCOp.V_CMPX_T_U32: _VOPCOp_V_CMPX_T_U32, - VOPCOp.V_CMP_F_I64: _VOPCOp_V_CMP_F_I64, - VOPCOp.V_CMP_LT_I64: _VOPCOp_V_CMP_LT_I64, - VOPCOp.V_CMP_EQ_I64: _VOPCOp_V_CMP_EQ_I64, - VOPCOp.V_CMP_LE_I64: _VOPCOp_V_CMP_LE_I64, - VOPCOp.V_CMP_GT_I64: _VOPCOp_V_CMP_GT_I64, - VOPCOp.V_CMP_NE_I64: _VOPCOp_V_CMP_NE_I64, - VOPCOp.V_CMP_GE_I64: _VOPCOp_V_CMP_GE_I64, - VOPCOp.V_CMP_T_I64: _VOPCOp_V_CMP_T_I64, - VOPCOp.V_CMP_F_U64: _VOPCOp_V_CMP_F_U64, - VOPCOp.V_CMP_LT_U64: _VOPCOp_V_CMP_LT_U64, - VOPCOp.V_CMP_EQ_U64: _VOPCOp_V_CMP_EQ_U64, - VOPCOp.V_CMP_LE_U64: _VOPCOp_V_CMP_LE_U64, - VOPCOp.V_CMP_GT_U64: _VOPCOp_V_CMP_GT_U64, - VOPCOp.V_CMP_NE_U64: _VOPCOp_V_CMP_NE_U64, - VOPCOp.V_CMP_GE_U64: _VOPCOp_V_CMP_GE_U64, - VOPCOp.V_CMP_T_U64: _VOPCOp_V_CMP_T_U64, - VOPCOp.V_CMPX_F_I64: _VOPCOp_V_CMPX_F_I64, - VOPCOp.V_CMPX_LT_I64: _VOPCOp_V_CMPX_LT_I64, - VOPCOp.V_CMPX_EQ_I64: _VOPCOp_V_CMPX_EQ_I64, - VOPCOp.V_CMPX_LE_I64: _VOPCOp_V_CMPX_LE_I64, - VOPCOp.V_CMPX_GT_I64: _VOPCOp_V_CMPX_GT_I64, - VOPCOp.V_CMPX_NE_I64: _VOPCOp_V_CMPX_NE_I64, - VOPCOp.V_CMPX_GE_I64: _VOPCOp_V_CMPX_GE_I64, - VOPCOp.V_CMPX_T_I64: _VOPCOp_V_CMPX_T_I64, - VOPCOp.V_CMPX_F_U64: _VOPCOp_V_CMPX_F_U64, - VOPCOp.V_CMPX_LT_U64: _VOPCOp_V_CMPX_LT_U64, - VOPCOp.V_CMPX_EQ_U64: _VOPCOp_V_CMPX_EQ_U64, 
- VOPCOp.V_CMPX_LE_U64: _VOPCOp_V_CMPX_LE_U64, - VOPCOp.V_CMPX_GT_U64: _VOPCOp_V_CMPX_GT_U64, - VOPCOp.V_CMPX_NE_U64: _VOPCOp_V_CMPX_NE_U64, - VOPCOp.V_CMPX_GE_U64: _VOPCOp_V_CMPX_GE_U64, - VOPCOp.V_CMPX_T_U64: _VOPCOp_V_CMPX_T_U64, -} - -def _VOP3AOp_V_CMP_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - EXEC.u64[laneId] = D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - EXEC.u64[laneId] = D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif 
F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - EXEC.u64[laneId] = D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 < S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 == S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 <= S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 > S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 != S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 >= S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled 
pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 >= S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 != S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 > S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 <= S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 == S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 < S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_TRU_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = 
D0.u64[laneId] = S0.f16 <= S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 != S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 >= S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 != S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 > S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 <= S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - 
EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 == S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f16 < S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_TRU_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 < S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 == S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 <= S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 > S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 != S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 >= S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NGE_F32(s0, 
s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 >= S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 != S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 > S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 <= S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 == S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 < S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_TRU_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, 
VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 != S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 >= S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 != S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 > S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 <= S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 == S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, 
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f32 < S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_TRU_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 < S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 == S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 <= S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 > S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 != S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 >= S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc 
is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 >= S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 != S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 > S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 <= S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 == S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 < S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_TRU_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- 
compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 != S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 >= S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 != S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 > S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 <= S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 == S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- 
compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = not (S0.f64 < S1.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_TRU_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 < S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 == S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 <= S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 > S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 != S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 >= S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_T_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 < S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if 
pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 == S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 <= S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 > S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 != S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 >= S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_T_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- 
compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 != S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_T_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 != S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_T_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 < S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 == S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 <= S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 > S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 != S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 >= S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_T_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 < S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 == S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 <= S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 > S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 != S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 >= S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_T_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 != S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_T_I32(s0, s1, s2, d0, 
scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 != S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_T_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 < S1.i64 - return {'D0': D0._val} - -def 
_VOP3AOp_V_CMP_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 == S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 <= S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 > S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 != S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 >= S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_T_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_F_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 < S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 == S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 <= S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 > S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 
S0.u64 != S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 >= S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMP_T_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 != S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_T_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_F_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled 
pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 != S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_CMPX_T_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - EXEC.u64[laneId] = D0.u64[laneId] = 1
- return {'D0': D0._val} - -def _VOP3AOp_V_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _VOP3AOp_V_READFIRSTLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); EXEC=Reg(exec_mask); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - if EXEC == 0x0: - lane = 0 - else: - lane = s_ff1_i32_b64(EXEC) - D0.b32 = VGPR[lane][SRC0.u32] - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f64_to_i32(S0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = i32_to_f64(S0.i32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F32_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = i32_to_f32(S0.i32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_U32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f32_to_u32(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = f32_to_f16(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f16_to_f32(S0.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_RPI_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(floor(S0.f32 + 0.5)) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_FLR_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(floor(S0.f32)) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f64_to_f32(S0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F64_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = f32_to_f64(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F32_UBYTE0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, 
vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[7 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F32_UBYTE1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[15 : 8].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F32_UBYTE2(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[23 : 16].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F32_UBYTE3(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[31 : 24].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_U32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f64_to_u32(S0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = u32_to_f64(S0.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_TRUNC_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_CEIL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - if ((S0.f64 > 0.0) and (S0.f64 != D0.f64)): - D0.f64 += 1.0 - return {'D0': D0._val} - -def _VOP3AOp_V_RNDNE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = floor(S0.f64 + 0.5) - if (isEven(floor(S0.f64)) and (fract(S0.f64) == 0.5)): - D0.f64 -= 1.0 - return {'D0': D0._val} - -def _VOP3AOp_V_FLOOR_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - if ((S0.f64 < 0.0) and (S0.f64 != D0.f64)): - D0.f64 += -1.0 - return {'D0': D0._val} - -def _VOP3AOp_V_FRACT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + -floor(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_TRUNC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_CEIL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 > 0.0) and (S0.f32 != D0.f32)): - D0.f32 += 1.0 - return {'D0': D0._val} - -def _VOP3AOp_V_RNDNE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = floor(S0.f32 + 0.5) - if (isEven(F(floor(S0.f32))) and (fract(S0.f32) == 0.5)): - D0.f32 -= 1.0 - return {'D0': D0._val} - -def 
_VOP3AOp_V_FLOOR_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 < 0.0) and (S0.f32 != D0.f32)): - D0.f32 += -1.0 - return {'D0': D0._val} - -def _VOP3AOp_V_EXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = pow(2.0, S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_LOG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = log2(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_RCP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / S0.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_RCP_IFLAG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / S0.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_RSQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / sqrt(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_RCP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = 1.0 / S0.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_RSQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = 1.0 / sqrt(S0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_SQRT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = sqrt(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_SQRT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = sqrt(S0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_SIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = sin(S0.f32 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP3AOp_V_COS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = cos(S0.f32 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP3AOp_V_NOT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~S0.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_BFREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[31 : 0] = S0.u32[0 : 31] - return {'D0': D0._val} - -def _VOP3AOp_V_FFBH_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(0, int(31)+1): - if S0.u32[31 - i] == 1: - D0.i32 = i; break - 
return {'D0': D0._val} - -def _VOP3AOp_V_FFBL_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(0, int(31)+1): - if S0.u32[i] == 1: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP3AOp_V_FFBH_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(1, int(31)+1): - if S0.i32[31 - i] != S0.i32[31]: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP3AOp_V_FREXP_EXP_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)): - D0.i32 = 0 - else: - D0.i32 = exponent(S0.f64) - 1023 + 1 - return {'D0': D0._val} - -def _VOP3AOp_V_FREXP_MANT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)): - D0.f64 = S0.f64 - else: - D0.f64 = mantissa(S0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_FRACT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 + -floor(S0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_FREXP_EXP_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))): - D0.i32 = 0 - else: - D0.i32 = exponent(S0.f32) - 127 + 1 - return {'D0': D0._val} - -def _VOP3AOp_V_FREXP_MANT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))): - D0.f32 = S0.f32 - else: - D0.f32 = mantissa(S0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_MOV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b64 = S0.b64 - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F16_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = u16_to_f16(S0.u16) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_F16_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = i16_to_f16(S0.i16) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = f16_to_u16(S0.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = f16_to_i16(S0.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_RCP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 1.0 / 
S0.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_SQRT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = sqrt(S0.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_RSQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 1.0 / sqrt(S0.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_LOG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = log2(S0.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_EXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = pow(2.0, S0.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_CNDMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - D0.u32 = ((S1.u32) if (VCC.u64[laneId]) else (S0.u32)) - return {'D0': D0._val} - -def _VOP3AOp_V_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_SUB_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 - S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_SUBREV_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S1.f32 - S0.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_FMAC_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = fma(S0.f64, S1.f64, D0.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i24) * (S1.i24) - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_HI_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i24) * (S1.i24)) >> 32) - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u24) * (S1.u24) - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_HI_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u24) * (S1.u24)) >> 32) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(F(S0.f32))): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif (WAVE_MODE.IEEE and isSignalNAN(F(S1.f32))): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif ((F(S0.f32) == +0.0) and (F(S1.f32) == -0.0)): - D0.f32 = S1.f32 - elif ((F(S0.f32) == -0.0) and (F(S1.f32) == +0.0)): - D0.f32 = S0.f32 - else: - D0.f32 = ((S0.f32) if (S0.f32 < S1.f32) else (S1.f32)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(F(S0.f32))): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif (WAVE_MODE.IEEE and isSignalNAN(F(S1.f32))): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif ((F(S0.f32) == +0.0) and (F(S1.f32) == -0.0)): - D0.f32 = S0.f32 - elif ((F(S0.f32) == -0.0) and (F(S1.f32) == +0.0)): - D0.f32 = S1.f32 - elif WAVE_MODE.IEEE: - D0.f32 = ((S0.f32) if (S0.f32 >= S1.f32) else (S1.f32)) - else: - D0.f32 = ((S0.f32) if (S0.f32 > S1.f32) else (S1.f32)) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 < S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 >= S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 < S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 >= S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP3AOp_V_LSHRREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_ASHRREV_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S1.i32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_LSHLREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 << S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & S1.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_SUB_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 - S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_SUBREV_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S1.f16 - S0.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * S1.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_MAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.f16 * S1.f16 + D0.f16) - if OPSEL.u4[3]: - D0 = Reg(_pack(tmp.f16, D0[15 : 0])) - else: - D0 = Reg(_pack(0, tmp.f16)) - return {'D0': D0._val} - -def _VOP3AOp_V_ADD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 + S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_SUB_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 - S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_SUBREV_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S1.u16 - S0.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_LO_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 * S1.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_LSHLREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S1.u16 << S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_LSHRREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S1.u16 >> S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_ASHRREV_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = (S1.i16 >> S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(F(S0.f16))): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif (WAVE_MODE.IEEE and isSignalNAN(F(S1.f16))): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif ((F(S0.f16) == +0.0) and (F(S1.f16) == -0.0)): - D0.f16 = S0.f16 - elif ((F(S0.f16) == -0.0) and (F(S1.f16) == +0.0)): - D0.f16 = S1.f16 - elif WAVE_MODE.IEEE: - D0.f16 = ((S0.f16) if (S0.f16 >= S1.f16) else (S1.f16)) - else: - D0.f16 = ((S0.f16) if (S0.f16 > S1.f16) else (S1.f16)) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(F(S0.f16))): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif (WAVE_MODE.IEEE and isSignalNAN(F(S1.f16))): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif ((F(S0.f16) == +0.0) and (F(S1.f16) == -0.0)): - D0.f16 = S1.f16 - elif ((F(S0.f16) == -0.0) and (F(S1.f16) == +0.0)): - D0.f16 = S0.f16 - else: - D0.f16 = ((S0.f16) if (S0.f16 < S1.f16) else (S1.f16)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = ((S0.u16) if (S0.u16 >= S1.u16) else (S1.u16)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = ((S0.i16) if (S0.i16 >= S1.i16) else (S1.i16)) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = ((S0.u16) if (S0.u16 < S1.u16) else (S1.u16)) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = ((S0.i16) if (S0.i16 < S1.i16) else (S1.i16)) - return {'D0': D0._val} - -def _VOP3AOp_V_LDEXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * F(2.0 ** (S1.i16)) - return {'D0': D0._val} - -def _VOP3AOp_V_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 + S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_SUB_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 - S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_SUBREV_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S1.u32 - S0.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_DOT2C_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, 
vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.f32) - tmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16) - tmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16) - D0.f32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_DOT2C_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.i32) - tmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16) - tmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_DOT4C_I32_I8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.i32) - tmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8) - tmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8) - tmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8) - tmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_DOT8C_I32_I4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.i32) - tmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4) - tmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4) - tmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4) - tmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4) - tmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4) - tmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4) - tmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4) - tmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4) - D0.i32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_FMAC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, D0.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_PK_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16) - D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16) - return {'D0': D0._val} - -def _VOP3AOp_V_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i24) * (S1.i24) + S2.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u24) * (S1.u24) + S2.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CUBEID_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((abs(S2.f32) >= 
abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))): - if S2.f32 < 0.0: - D0.f32 = 5.0 - else: - D0.f32 = 4.0 - elif abs(S1.f32) >= abs(S0.f32): - if S1.f32 < 0.0: - D0.f32 = 3.0 - else: - D0.f32 = 2.0 - else: - if S0.f32 < 0.0: - D0.f32 = 1.0 - else: - D0.f32 = 0.0 - return {'D0': D0._val} - -def _VOP3AOp_V_CUBESC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))): - if S2.f32 < 0.0: - D0.f32 = -S0.f32 - else: - D0.f32 = S0.f32 - elif abs(S1.f32) >= abs(S0.f32): - D0.f32 = S0.f32 - else: - if S0.f32 < 0.0: - D0.f32 = S2.f32 - else: - D0.f32 = -S2.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CUBETC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))): - D0.f32 = -S1.f32 - elif abs(S1.f32) >= abs(S0.f32): - if S1.f32 < 0.0: - D0.f32 = -S2.f32 - else: - D0.f32 = S2.f32 - else: - D0.f32 = -S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_CUBEMA_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))): - D0.f32 = S2.f32 * 2.0 - elif abs(S1.f32) >= abs(S0.f32): - D0.f32 = S1.f32 * 2.0 - else: - D0.f32 = S0.f32 * 2.0 - return {'D0': D0._val} - -def _VOP3AOp_V_BFE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1)) - return {'D0': D0._val} - -def _VOP3AOp_V_BFE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1)) - D0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_BFI_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32)) - return {'D0': D0._val} - -def _VOP3AOp_V_FMA_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, S2.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_FMA_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = fma(S0.f64, S1.f64, S2.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_LERP_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1 << 24)) - tmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1 << 16) - tmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1 << 8) - tmp += ((S0.u32[7 : 0] + S1.u32[7 : 
0] + S2.u32[0].u8) >> 1) - D0.u32 = tmp.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_ALIGNBIT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((_pack32(S0.u32, S1.u32) >> S2.u32[4 : 0]) & 0xffffffff) - return {'D0': D0._val} - -def _VOP3AOp_V_ALIGNBYTE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((_pack32(S0.u32, S1.u32) >> (S2.u32[1 : 0] * 8)) & 0xffffffff) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_min_f32(v_min_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_max_f32(v_max_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_MED3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if (isNAN(F(S0.f32)) or isNAN(F(S1.f32)) or isNAN(F(S2.f32))): - D0.f32 = v_min3_f32(S0.f32, S1.f32, S2.f32) - elif v_max3_f32(S0.f32, S1.f32, S2.f32) == S0.f32: - D0.f32 = v_max_f32(S1.f32, S2.f32) - elif v_max3_f32(S0.f32, S1.f32, S2.f32) == S1.f32: - D0.f32 = v_max_f32(S0.f32, S2.f32) - else: - D0.f32 = v_max_f32(S0.f32, S1.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_MED3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32: - D0.i32 = v_max_i32(S1.i32, S2.i32) - elif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32: - D0.i32 = v_max_i32(S0.i32, S2.i32) - else: - D0.i32 = v_max_i32(S0.i32, S1.i32) - return {'D0': D0._val} - -def _VOP3AOp_V_MED3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); 
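
# V_MED3_F32/I32 above select the median of three operands by discarding
# whichever operand equals the three-way maximum. A standalone sketch of the
# same identity (illustrative helper, not part of this file):
def med3(a, b, c):
    """Median of three: drop the operand that equals max(a, b, c)."""
    m = max(a, b, c)
    if m == a: return max(b, c)
    if m == b: return max(a, c)
    return max(a, b)

assert med3(3, 1, 2) == 2
assert med3(5, 5, 1) == 5
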
D0=Reg(d0) - # --- compiled pseudocode --- - if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32: - D0.u32 = v_max_u32(S1.u32, S2.u32) - elif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32: - D0.u32 = v_max_u32(S0.u32, S2.u32) - else: - D0.u32 = v_max_u32(S0.u32, S1.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_SAD_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += (ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0])) - tmp += (ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8])) - tmp += (ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16])) - tmp += (ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24])) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_SAD_HI_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((v_sad_u8(S0, S1, 0)) << 16) + S2.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_SAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16) - tmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_SAD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_PK_U8_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg((S2.u32 & (~(0xff << (S1.u32[1 : 0].u32 * 8))))) - tmp = Reg((tmp | (((f32_to_u8(S0.f32)) & 255) << (S1.u32[1 : 0].u32 * 8)))) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_DIV_FIXUP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - sign_out = (sign(S1.f32) ^ sign(S2.f32)) - if isNAN(F(S2.f32)): - D0.f32 = F(cvtToQuietNAN(F(S2.f32))) - elif isNAN(F(S1.f32)): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif ((F(S1.f32) == 0.0) and (F(S2.f32) == 0.0)): - D0.f32 = F(0xffc00000) - elif ((F(abs(S1.f32)) == INF) and (F(abs(S2.f32)) == INF)): - D0.f32 = F(0xffc00000) - elif ((F(S1.f32) == 0.0) or (F(abs(S2.f32)) == INF)): - D0.f32 = (((-INF).f32) if (sign_out) else (INF.f32)) - elif ((F(abs(S1.f32)) == INF) or (F(S2.f32) == 0.0)): - D0.f32 = ((-0.0) if (sign_out) else (0.0)) - elif exponent(S2.f32) - exponent(S1.f32) < -150: - D0.f32 = ((-UNDERFLOW_F32) if (sign_out) else (UNDERFLOW_F32)) - elif exponent(S1.f32) == 255: - D0.f32 = ((-OVERFLOW_F32) if (sign_out) else (OVERFLOW_F32)) - else: - D0.f32 = ((-OVERFLOW_F32) if (sign_out) else (OVERFLOW_F32)) if isNAN(S0.f32) else ((-abs(S0.f32)) if (sign_out) else (abs(S0.f32))) - return {'D0': D0._val} - -def _VOP3AOp_V_DIV_FIXUP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - sign_out = (sign(S1.f64) ^ sign(S2.f64)) - if isNAN(S2.f64): - D0.f64 = cvtToQuietNAN(S2.f64) - elif isNAN(S1.f64): - D0.f64 = cvtToQuietNAN(S1.f64) - elif 
((S1.f64 == 0.0) and (S2.f64 == 0.0)): - D0.f64 = F(0xfff8000000000000) - elif ((abs(S1.f64) == INF) and (abs(S2.f64) == INF)): - D0.f64 = F(0xfff8000000000000) - elif ((S1.f64 == 0.0) or (abs(S2.f64) == INF)): - D0.f64 = (((-INF)) if (sign_out) else (INF)) - elif ((abs(S1.f64) == INF) or (S2.f64 == 0.0)): - D0.f64 = ((-0.0) if (sign_out) else (0.0)) - elif exponent(S2.f64) - exponent(S1.f64) < -1075: - D0.f64 = ((-UNDERFLOW_F64) if (sign_out) else (UNDERFLOW_F64)) - elif exponent(S1.f64) == 2047: - D0.f64 = ((-OVERFLOW_F64) if (sign_out) else (OVERFLOW_F64)) - else: - D0.f64 = ((-OVERFLOW_F64) if (sign_out) else (OVERFLOW_F64)) if isNAN(S0.f64) else ((-abs(S0.f64)) if (sign_out) else (abs(S0.f64))) - return {'D0': D0._val} - -def _VOP3AOp_V_DIV_FMAS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - if VCC.u64[laneId]: - D0.f32 = (2.0 ** 64 if exponent(S2.f32) > 127 else 2.0 ** -64) * fma(S0.f32, S1.f32, S2.f32) - else: - D0.f32 = fma(S0.f32, S1.f32, S2.f32) - return {'D0': D0._val} - -def _VOP3AOp_V_DIV_FMAS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - if VCC.u64[laneId]: - D0.f64 = (2.0 ** 128 if exponent(S2.f64) > 1023 else 2.0 ** -128) * fma(S0.f64, S1.f64, S2.f64) - else: - D0.f64 = fma(S0.f64, S1.f64, S2.f64) - return {'D0': D0._val} - -def _VOP3AOp_V_MSAD_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += ((0) if (S1.u32[7 : 0] == 0) else ((ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0])))) - tmp += ((0) if (S1.u32[15 : 8] == 0) else ((ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8])))) - tmp += ((0) if (S1.u32[23 : 16] == 0) else ((ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16])))) - tmp += ((0) if (S1.u32[31 : 24] == 0) else ((ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24])))) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_QSAD_PK_U16_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[63 : 48] = (v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32)) - tmp[47 : 32] = (v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32)) - tmp[31 : 16] = (v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32)) - tmp[15 : 0] = (v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32)) - D0.b64 = tmp.b64 - return {'D0': D0._val} - -def _VOP3AOp_V_MQSAD_PK_U16_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[63 : 48] = (v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32)) - tmp[47 : 32] = (v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32)) - tmp[31 : 16] = (v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32)) - tmp[15 : 0] = (v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32)) - D0.b64 = tmp.b64 - return {'D0': D0._val} - -def _VOP3AOp_V_MQSAD_U32_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[127 : 96] = (v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32)) - 
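
# V_MSAD_U8 above is the masked variant of V_SAD_U8: reference bytes of S1
# that are zero are skipped, and V_MQSAD_U32_U8 applies the same reduction to
# four byte-shifted windows of a 64-bit source. A standalone sketch of the
# masked sum of absolute differences (illustrative helper, not part of this
# file):
def msad_u8(a: int, b: int, acc: int) -> int:
    """Sum |a_i - b_i| over packed unsigned bytes, skipping zero bytes of b."""
    for shift in (0, 8, 16, 24):
        ref = (b >> shift) & 0xff
        if ref: acc += abs(((a >> shift) & 0xff) - ref)
    return acc

assert msad_u8(0x01020304, 0x04000201, 0) == 3 + 1 + 3  # byte 2 is masked out
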
tmp[95 : 64] = (v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32)) - tmp[63 : 32] = (v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32)) - tmp[31 : 0] = (v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32)) - D0.b128 = tmp.b128 - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_LEGACY_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.f16 * S1.f16 + S2.f16) - if OPSEL.u4[3]: - D0 = Reg(_pack(tmp.f16, D0[15 : 0])) - else: - D0 = Reg(_pack(0, tmp.f16)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_LEGACY_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.u16 * S1.u16 + S2.u16) - if OPSEL.u4[3]: - D0 = Reg(_pack(tmp.u16, D0[15 : 0])) - else: - D0 = Reg(_pack(0, tmp.u16)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_LEGACY_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.i16 * S1.i16 + S2.i16) - if OPSEL.u4[3]: - D0 = Reg(_pack(tmp.i16, D0[15 : 0])) - else: - D0 = Reg(_pack(0, tmp.i16)) - return {'D0': D0._val} - -def _VOP3AOp_V_PERM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0[31 : 24] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[31 : 24]) - D0[23 : 16] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[23 : 16]) - D0[15 : 8] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[15 : 8]) - D0[7 : 0] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[7 : 0]) - return {'D0': D0._val} - -def _VOP3AOp_V_FMA_LEGACY_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(fma(S0.f16, S1.f16, S2.f16)) - if OPSEL.u4[3]: - D0 = Reg(_pack(tmp.f16, D0[15 : 0])) - else: - D0 = Reg(_pack(0, tmp.f16)) - return {'D0': D0._val} - -def _VOP3AOp_V_DIV_FIXUP_LEGACY_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - sign_out = (sign(S1.f16) ^ sign(S2.f16)) - if isNAN(F(S2.f16)): - tmp = Reg(cvtToQuietNAN(F(S2.f16))) - elif isNAN(F(S1.f16)): - tmp = Reg(cvtToQuietNAN(F(S1.f16))) - elif ((F(S1.f16) == 0.0) and (F(S2.f16) == 0.0)): - tmp = Reg(F(0xfe00)) - elif ((F(abs(S1.f16)) == INF) and (F(abs(S2.f16)) == INF)): - tmp = Reg(F(0xfe00)) - elif ((F(S1.f16) == 0.0) or (F(abs(S2.f16)) == INF)): - tmp = Reg((((-INF)) if (sign_out) else (INF))) - elif ((F(abs(S1.f16)) == INF) or (F(S2.f16) == 0.0)): - tmp = Reg(((-0.0) if (sign_out) else (0.0))) - else: - tmp = Reg(((-abs(S0.f16)) if (sign_out) else (abs(S0.f16)))) - if OPSEL.u4[3]: - D0 = Reg(_pack(tmp.f16, D0[15 : 0])) - else: - D0 = Reg(_pack(0, tmp.f16)) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_PKACCUM_U8_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - byte = S1.u32[1 : 0] - bit = byte.u32 * 8 - D0.u32[bit + 7 : bit] = (f32_to_u8(S0.f32)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_U32_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u16) * (S1.u16) + S2.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i16) * (S1.i16) + S2.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_XAD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) + S2.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_MIN3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_min_f16(v_min_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16) - return {'D0': D0._val} - -def _VOP3AOp_V_MIN3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_max_f16(v_max_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16) - return {'D0': D0._val} - -def _VOP3AOp_V_MED3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if (isNAN(F(S0.f16)) or isNAN(F(S1.f16)) or isNAN(F(S2.f16))): - D0.f16 = v_min3_f16(S0.f16, S1.f16, S2.f16) - elif v_max3_f16(S0.f16, S1.f16, S2.f16) == S0.f16: - D0.f16 = v_max_f16(S1.f16, S2.f16) - elif v_max3_f16(S0.f16, S1.f16, S2.f16) == S1.f16: - D0.f16 = v_max_f16(S0.f16, S2.f16) - else: - D0.f16 = v_max_f16(S0.f16, S1.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_MED3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16: - D0.i16 = v_max_i16(S1.i16, S2.i16) - elif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16: - D0.i16 = v_max_i16(S0.i16, S2.i16) - else: - D0.i16 = v_max_i16(S0.i16, S1.i16) - return {'D0': D0._val} - -def _VOP3AOp_V_MED3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, 
pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16: - D0.u16 = v_max_u16(S1.u16, S2.u16) - elif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16: - D0.u16 = v_max_u16(S0.u16, S2.u16) - else: - D0.u16 = v_max_u16(S0.u16, S1.u16) - return {'D0': D0._val} - -def _VOP3AOp_V_LSHL_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_ADD_LSHL_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_ADD3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 + S1.u32 + S2.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_LSHL_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_AND_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 & S1.u32) | S2.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_OR3_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32 | S2.u32) - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * S1.f16 + S2.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 * S1.u16 + S2.u16 - return {'D0': D0._val} - -def _VOP3AOp_V_MAD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = S0.i16 * S1.i16 + S2.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_FMA_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, S1.f16, S2.f16) - return {'D0': D0._val} - -def _VOP3AOp_V_DIV_FIXUP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - sign_out = (sign(S1.f16) ^ sign(S2.f16)) - if isNAN(F(S2.f16)): - D0.f16 = F(cvtToQuietNAN(F(S2.f16))) - elif isNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif ((F(S1.f16) == 0.0) and (F(S2.f16) == 0.0)): - D0.f16 = F(0xfe00) - elif ((F(abs(S1.f16)) == INF) and (F(abs(S2.f16)) == INF)): - D0.f16 = 
F(0xfe00) - elif ((F(S1.f16) == 0.0) or (F(abs(S2.f16)) == INF)): - D0.f16 = (((-INF).f16) if (sign_out) else (INF.f16)) - elif ((F(abs(S1.f16)) == INF) or (F(S2.f16) == 0.0)): - D0.f16 = ((-0.0) if (sign_out) else (0.0)) - else: - D0.f16 = ((-abs(S0.f16)) if (sign_out) else (abs(S0.f16))) - return {'D0': D0._val} - -def _VOP3AOp_V_LSHL_ADD_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 << S1.u32[2 : 0].u32) + S2.u64 - return {'D0': D0._val} - -def _VOP3AOp_V_ADD_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 + S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 * S1.f64 - return {'D0': D0._val} - -def _VOP3AOp_V_MIN_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(S0.f64)): - D0.f64 = cvtToQuietNAN(S0.f64) - elif (WAVE_MODE.IEEE and isSignalNAN(S1.f64)): - D0.f64 = cvtToQuietNAN(S1.f64) - elif isNAN(S0.f64): - D0.f64 = S1.f64 - elif isNAN(S1.f64): - D0.f64 = S0.f64 - elif ((S0.f64 == +0.0) and (S1.f64 == -0.0)): - D0.f64 = S1.f64 - elif ((S0.f64 == -0.0) and (S1.f64 == +0.0)): - D0.f64 = S0.f64 - else: - D0.f64 = ((S0.f64) if (S0.f64 < S1.f64) else (S1.f64)) - return {'D0': D0._val} - -def _VOP3AOp_V_MAX_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (WAVE_MODE.IEEE and isSignalNAN(S0.f64)): - D0.f64 = cvtToQuietNAN(S0.f64) - elif (WAVE_MODE.IEEE and isSignalNAN(S1.f64)): - D0.f64 = cvtToQuietNAN(S1.f64) - elif isNAN(S0.f64): - D0.f64 = S1.f64 - elif isNAN(S1.f64): - D0.f64 = S0.f64 - elif ((S0.f64 == +0.0) and (S1.f64 == -0.0)): - D0.f64 = S0.f64 - elif ((S0.f64 == -0.0) and (S1.f64 == +0.0)): - D0.f64 = S1.f64 - elif WAVE_MODE.IEEE: - D0.f64 = ((S0.f64) if (S0.f64 >= S1.f64) else (S1.f64)) - else: - D0.f64 = ((S0.f64) if (S0.f64 > S1.f64) else (S1.f64)) - return {'D0': D0._val} - -def _VOP3AOp_V_LDEXP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 * 2.0 ** S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_LO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 * S1.u32 - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_HI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u32) * (S1.u32)) >> 32) - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_HI_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i32) * (S1.i32)) >> 32) - return {'D0': D0._val} - -def _VOP3AOp_V_LDEXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 * 2.0 ** S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_READLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - lane = S1.u32[5 : 0] - D0.b32 = VGPR[lane][SRC0.u32] - return {'D0': D0._val} - -def _VOP3AOp_V_BCNT_U32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S1.u32) - for i in range(0, int(31)+1): - tmp += S0[i].u32 - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_LSHLREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64 = (S1.u64 << S0[5 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_LSHRREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64 = (S1.u64 >> S0[5 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_ASHRREV_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i64 = (S1.i64 >> S0[5 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_TRIG_PREOP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - shift = (S1[4 : 0].u32) * 53 - if exponent(S0.f64) > 1077: - shift += exponent(S0.f64) - 1077 - result = float(((TWO_OVER_PI_1201[1200 : 0] << int(shift)) >> (1201 - 53)) & 0x1fffffffffffff) - scale = -53 - shift - if exponent(S0.f64) >= 1968: - scale += 128 - D0.f64 = ldexp(result, scale) - return {'D0': D0._val} - -def _VOP3AOp_V_BFM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((1 << S0[4 : 0].u32) - 1) << S1[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_PKNORM_I16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = f32_to_snorm(S0.f32) - tmp[31 : 16].i16 = f32_to_snorm(S1.f32) - return {} - -def _VOP3AOp_V_CVT_PKNORM_U16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = f32_to_unorm(S0.f32) - tmp[31 : 16].u16 = f32_to_unorm(S1.f32) - return {} - -def _VOP3AOp_V_CVT_PKRTZ_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - prev_mode = ROUND_MODE - tmp[15 : 0].f16 = f32_to_f16(S0.f32) - tmp[31 : 16].f16 = f32_to_f16(S1.f32) - return {} - -def _VOP3AOp_V_CVT_PK_U16_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = u32_to_u16(S0.u32) - tmp[31 : 16].u16 = u32_to_u16(S1.u32) - return {} - -def _VOP3AOp_V_CVT_PK_I16_I32(s0, s1, s2, d0, 
scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = i32_to_i16(S0.i32) - tmp[31 : 16].i16 = i32_to_i16(S1.i32) - return {} - -def _VOP3AOp_V_CVT_PKNORM_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = f16_to_snorm(S0.f16) - tmp[31 : 16].i16 = f16_to_snorm(S1.f16) - return {} - -def _VOP3AOp_V_CVT_PKNORM_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = f16_to_unorm(S0.f16) - tmp[31 : 16].u16 = f16_to_unorm(S1.f16) - return {} - -def _VOP3AOp_V_ADD_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = S0.i32 + S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_SUB_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = S0.i32 - S1.i32 - return {'D0': D0._val} - -def _VOP3AOp_V_ADD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = S0.i16 + S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_SUB_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = S0.i16 - S1.i16 - return {'D0': D0._val} - -def _VOP3AOp_V_PACK_B32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0[31 : 16].f16 = S1.f16 - D0[15 : 0].f16 = S0.f16 - return {'D0': D0._val} - -def _VOP3AOp_V_MUL_LEGACY_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)): - D0.f32 = 0.0 - else: - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP3AOp_V_DOT2C_F32_BF16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.f32) - tmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16) - tmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16) - D0.f32 = tmp - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_SCALEF32_PK_F32_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcword = OPSEL[0].i32 * 16 - src = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16 - D0[31 : 0].f32 = tmp0 - D0[63 : 32].f32 = tmp1 - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_SCALEF32_PK_F32_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcword = OPSEL[0].i32 * 16 - src = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16 - D0[31 : 
0].f32 = tmp0 - D0[63 : 32].f32 = tmp1 - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_SCALEF32_F32_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcbyte = OPSEL[1 : 0].i32 * 8 - src = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8 - tmp = Reg(fp8_to_f32_scale(src, scale.u8)) - return {} - -def _VOP3AOp_V_CVT_SCALEF32_F32_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcbyte = OPSEL[1 : 0].i32 * 8 - src = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8 - tmp = Reg(bf8_to_f32_scale(src, scale.u8)) - return {} - -def _VOP3AOp_V_CVT_SCALEF32_PK_F32_FP4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcbyte = OPSEL[1 : 0].i32 * 8 - src = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8 - D0[31 : 0].f32 = tmp0 - D0[63 : 32].f32 = tmp1 - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_SCALEF32_PK_F16_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcword = OPSEL[0].i32 * 16 - src = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16 - D0[15 : 0].f16 = tmp0 - D0[31 : 16].f16 = tmp1 - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_SCALEF32_PK_F16_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcword = OPSEL[0].i32 * 16 - src = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16 - D0[15 : 0].f16 = tmp0 - D0[31 : 16].f16 = tmp1 - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_SCALEF32_F16_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcbyte = OPSEL[1 : 0].i32 * 8 - src = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8 - tmp = Reg(fp8_to_f16_scale(src, scale.u8)) - return {} - -def _VOP3AOp_V_CVT_SCALEF32_F16_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcbyte = OPSEL[1 : 0].i32 * 8 - src = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8 - tmp = Reg(bf8_to_f16_scale(src, scale.u8)) - return {} - -def _VOP3AOp_V_CVT_SCALEF32_PK_F16_FP4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcbyte = OPSEL[1 : 0].i32 * 8 - src = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8 - D0[15 : 0].f16 = tmp0 - D0[31 : 16].f16 = tmp1 - return {'D0': D0._val} - -def _VOP3AOp_V_CVT_SCALEF32_PK_BF16_FP4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - scale = (exponent(S1.f32)) - srcbyte = OPSEL[1 : 0].i32 * 8 - src = 
-def _VOP3AOp_V_CVT_SCALEF32_PK_BF16_FP4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx)
- # --- compiled pseudocode ---
- scale = (exponent(S1.f32))
- srcbyte = OPSEL[1 : 0].i32 * 8
- src = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8
- D0[15 : 0].bf16 = tmp0
- D0[31 : 16].bf16 = tmp1
- return {'D0': D0._val}
-
-def _VOP3AOp_V_ASHR_PK_I8_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
- # --- compiled pseudocode ---
- tmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32)
- tmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32)
- D0[15 : 0] = tmp
- return {'D0': D0._val}
-
-def _VOP3AOp_V_ASHR_PK_U8_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
- # --- compiled pseudocode ---
- tmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32)
- tmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32)
- D0[15 : 0] = tmp
- return {'D0': D0._val}
-
-def _VOP3AOp_V_CVT_PK_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); tmp=Reg(0)
- # --- compiled pseudocode ---
- prev_mode = ROUND_MODE
- tmp[15 : 0].f16 = f32_to_f16(S0.f32)
- tmp[31 : 16].f16 = f32_to_f16(S1.f32)
- return {}
-
-def _VOP3AOp_V_CVT_PK_BF16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); tmp=Reg(0)
- # --- compiled pseudocode ---
- prev_mode = ROUND_MODE
- tmp[15 : 0].bf16 = f32_to_bf16(S0.f32)
- tmp[31 : 16].bf16 = f32_to_bf16(S1.f32)
- return {}
-
-def _VOP3AOp_V_CVT_SCALEF32_PK_BF16_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx)
- # --- compiled pseudocode ---
- scale = (exponent(S1.f32))
- srcword = OPSEL[0].i32 * 16
- src = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16
- D0[15 : 0].bf16 = tmp0.bf16
- D0[31 : 16].bf16 = tmp1.bf16
- return {'D0': D0._val}
-
-def _VOP3AOp_V_CVT_SCALEF32_PK_BF16_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S1=Reg(s1); D0=Reg(d0); tmp=Reg(0); SRC0=Reg(src0_idx)
- # --- compiled pseudocode ---
- scale = (exponent(S1.f32))
- srcword = OPSEL[0].i32 * 16
- src = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16
- D0[15 : 0].bf16 = tmp0.bf16
- D0[31 : 16].bf16 = tmp1.bf16
- return {'D0': D0._val}
-
-def _VOP3AOp_V_MINIMUM3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = F(v_minimum_f32(v_minimum_f32(S0.f32, S1.f32), S2.f32))
- return {'D0': D0._val}
-
-def _VOP3AOp_V_MAXIMUM3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = F(v_maximum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32))
- return {'D0': D0._val}
-
-VOP3AOp_FUNCTIONS = {
- VOP3AOp.V_CMP_CLASS_F32: _VOP3AOp_V_CMP_CLASS_F32,
- VOP3AOp.V_CMPX_CLASS_F32: _VOP3AOp_V_CMPX_CLASS_F32,
- VOP3AOp.V_CMP_CLASS_F64: _VOP3AOp_V_CMP_CLASS_F64,
- VOP3AOp.V_CMPX_CLASS_F64: _VOP3AOp_V_CMPX_CLASS_F64,
- VOP3AOp.V_CMP_CLASS_F16: _VOP3AOp_V_CMP_CLASS_F16,
- VOP3AOp.V_CMPX_CLASS_F16: _VOP3AOp_V_CMPX_CLASS_F16,
- VOP3AOp.V_CMP_F_F16: _VOP3AOp_V_CMP_F_F16,
- VOP3AOp.V_CMP_LT_F16: _VOP3AOp_V_CMP_LT_F16,
- VOP3AOp.V_CMP_EQ_F16: _VOP3AOp_V_CMP_EQ_F16,
- VOP3AOp.V_CMP_LE_F16: _VOP3AOp_V_CMP_LE_F16,
- VOP3AOp.V_CMP_GT_F16: _VOP3AOp_V_CMP_GT_F16,
- VOP3AOp.V_CMP_LG_F16: _VOP3AOp_V_CMP_LG_F16,
- VOP3AOp.V_CMP_GE_F16: _VOP3AOp_V_CMP_GE_F16,
- VOP3AOp.V_CMP_O_F16: _VOP3AOp_V_CMP_O_F16,
- VOP3AOp.V_CMP_U_F16: _VOP3AOp_V_CMP_U_F16,
- VOP3AOp.V_CMP_NGE_F16: _VOP3AOp_V_CMP_NGE_F16,
- VOP3AOp.V_CMP_NLG_F16: _VOP3AOp_V_CMP_NLG_F16,
- VOP3AOp.V_CMP_NGT_F16: _VOP3AOp_V_CMP_NGT_F16,
- VOP3AOp.V_CMP_NLE_F16: _VOP3AOp_V_CMP_NLE_F16,
- VOP3AOp.V_CMP_NEQ_F16: _VOP3AOp_V_CMP_NEQ_F16,
- VOP3AOp.V_CMP_NLT_F16: _VOP3AOp_V_CMP_NLT_F16,
- VOP3AOp.V_CMP_TRU_F16: _VOP3AOp_V_CMP_TRU_F16,
- VOP3AOp.V_CMPX_F_F16: _VOP3AOp_V_CMPX_F_F16,
- VOP3AOp.V_CMPX_LT_F16: _VOP3AOp_V_CMPX_LT_F16,
- VOP3AOp.V_CMPX_EQ_F16: _VOP3AOp_V_CMPX_EQ_F16,
- VOP3AOp.V_CMPX_LE_F16: _VOP3AOp_V_CMPX_LE_F16,
- VOP3AOp.V_CMPX_GT_F16: _VOP3AOp_V_CMPX_GT_F16,
- VOP3AOp.V_CMPX_LG_F16: _VOP3AOp_V_CMPX_LG_F16,
- VOP3AOp.V_CMPX_GE_F16: _VOP3AOp_V_CMPX_GE_F16,
- VOP3AOp.V_CMPX_O_F16: _VOP3AOp_V_CMPX_O_F16,
- VOP3AOp.V_CMPX_U_F16: _VOP3AOp_V_CMPX_U_F16,
- VOP3AOp.V_CMPX_NGE_F16: _VOP3AOp_V_CMPX_NGE_F16,
- VOP3AOp.V_CMPX_NLG_F16: _VOP3AOp_V_CMPX_NLG_F16,
- VOP3AOp.V_CMPX_NGT_F16: _VOP3AOp_V_CMPX_NGT_F16,
- VOP3AOp.V_CMPX_NLE_F16: _VOP3AOp_V_CMPX_NLE_F16,
- VOP3AOp.V_CMPX_NEQ_F16: _VOP3AOp_V_CMPX_NEQ_F16,
- VOP3AOp.V_CMPX_NLT_F16: _VOP3AOp_V_CMPX_NLT_F16,
- VOP3AOp.V_CMPX_TRU_F16: _VOP3AOp_V_CMPX_TRU_F16,
- VOP3AOp.V_CMP_F_F32: _VOP3AOp_V_CMP_F_F32,
- VOP3AOp.V_CMP_LT_F32: _VOP3AOp_V_CMP_LT_F32,
- VOP3AOp.V_CMP_EQ_F32: _VOP3AOp_V_CMP_EQ_F32,
- VOP3AOp.V_CMP_LE_F32: _VOP3AOp_V_CMP_LE_F32,
- VOP3AOp.V_CMP_GT_F32: _VOP3AOp_V_CMP_GT_F32,
- VOP3AOp.V_CMP_LG_F32: _VOP3AOp_V_CMP_LG_F32,
- VOP3AOp.V_CMP_GE_F32: _VOP3AOp_V_CMP_GE_F32,
- VOP3AOp.V_CMP_O_F32: _VOP3AOp_V_CMP_O_F32,
- VOP3AOp.V_CMP_U_F32: _VOP3AOp_V_CMP_U_F32,
- VOP3AOp.V_CMP_NGE_F32: _VOP3AOp_V_CMP_NGE_F32,
- VOP3AOp.V_CMP_NLG_F32: _VOP3AOp_V_CMP_NLG_F32,
- VOP3AOp.V_CMP_NGT_F32: _VOP3AOp_V_CMP_NGT_F32,
- VOP3AOp.V_CMP_NLE_F32: _VOP3AOp_V_CMP_NLE_F32,
- VOP3AOp.V_CMP_NEQ_F32: _VOP3AOp_V_CMP_NEQ_F32,
- VOP3AOp.V_CMP_NLT_F32: _VOP3AOp_V_CMP_NLT_F32,
- VOP3AOp.V_CMP_TRU_F32: _VOP3AOp_V_CMP_TRU_F32,
- VOP3AOp.V_CMPX_F_F32: _VOP3AOp_V_CMPX_F_F32,
- VOP3AOp.V_CMPX_LT_F32: _VOP3AOp_V_CMPX_LT_F32,
- VOP3AOp.V_CMPX_EQ_F32: _VOP3AOp_V_CMPX_EQ_F32,
- VOP3AOp.V_CMPX_LE_F32: _VOP3AOp_V_CMPX_LE_F32,
- VOP3AOp.V_CMPX_GT_F32: _VOP3AOp_V_CMPX_GT_F32,
- VOP3AOp.V_CMPX_LG_F32: _VOP3AOp_V_CMPX_LG_F32,
- VOP3AOp.V_CMPX_GE_F32: _VOP3AOp_V_CMPX_GE_F32,
- VOP3AOp.V_CMPX_O_F32: _VOP3AOp_V_CMPX_O_F32,
- VOP3AOp.V_CMPX_U_F32: _VOP3AOp_V_CMPX_U_F32,
- VOP3AOp.V_CMPX_NGE_F32: _VOP3AOp_V_CMPX_NGE_F32,
- VOP3AOp.V_CMPX_NLG_F32: _VOP3AOp_V_CMPX_NLG_F32,
- VOP3AOp.V_CMPX_NGT_F32: _VOP3AOp_V_CMPX_NGT_F32,
- VOP3AOp.V_CMPX_NLE_F32: _VOP3AOp_V_CMPX_NLE_F32,
- VOP3AOp.V_CMPX_NEQ_F32: _VOP3AOp_V_CMPX_NEQ_F32,
- VOP3AOp.V_CMPX_NLT_F32: _VOP3AOp_V_CMPX_NLT_F32,
- VOP3AOp.V_CMPX_TRU_F32: _VOP3AOp_V_CMPX_TRU_F32,
- VOP3AOp.V_CMP_F_F64: _VOP3AOp_V_CMP_F_F64,
- VOP3AOp.V_CMP_LT_F64: _VOP3AOp_V_CMP_LT_F64,
- VOP3AOp.V_CMP_EQ_F64: _VOP3AOp_V_CMP_EQ_F64,
- VOP3AOp.V_CMP_LE_F64: _VOP3AOp_V_CMP_LE_F64,
- VOP3AOp.V_CMP_GT_F64: _VOP3AOp_V_CMP_GT_F64,
- VOP3AOp.V_CMP_LG_F64: _VOP3AOp_V_CMP_LG_F64,
- VOP3AOp.V_CMP_GE_F64: _VOP3AOp_V_CMP_GE_F64,
- VOP3AOp.V_CMP_O_F64: _VOP3AOp_V_CMP_O_F64,
- VOP3AOp.V_CMP_U_F64: _VOP3AOp_V_CMP_U_F64,
- VOP3AOp.V_CMP_NGE_F64: _VOP3AOp_V_CMP_NGE_F64,
- VOP3AOp.V_CMP_NLG_F64: _VOP3AOp_V_CMP_NLG_F64,
- VOP3AOp.V_CMP_NGT_F64: _VOP3AOp_V_CMP_NGT_F64,
- VOP3AOp.V_CMP_NLE_F64: _VOP3AOp_V_CMP_NLE_F64,
- VOP3AOp.V_CMP_NEQ_F64: _VOP3AOp_V_CMP_NEQ_F64,
- VOP3AOp.V_CMP_NLT_F64: _VOP3AOp_V_CMP_NLT_F64,
- VOP3AOp.V_CMP_TRU_F64: _VOP3AOp_V_CMP_TRU_F64,
- VOP3AOp.V_CMPX_F_F64: _VOP3AOp_V_CMPX_F_F64,
- VOP3AOp.V_CMPX_LT_F64: _VOP3AOp_V_CMPX_LT_F64,
- VOP3AOp.V_CMPX_EQ_F64: _VOP3AOp_V_CMPX_EQ_F64,
- VOP3AOp.V_CMPX_LE_F64: _VOP3AOp_V_CMPX_LE_F64,
- VOP3AOp.V_CMPX_GT_F64: _VOP3AOp_V_CMPX_GT_F64,
- VOP3AOp.V_CMPX_LG_F64: _VOP3AOp_V_CMPX_LG_F64,
- VOP3AOp.V_CMPX_GE_F64: _VOP3AOp_V_CMPX_GE_F64,
- VOP3AOp.V_CMPX_O_F64: _VOP3AOp_V_CMPX_O_F64,
- VOP3AOp.V_CMPX_U_F64: _VOP3AOp_V_CMPX_U_F64,
- VOP3AOp.V_CMPX_NGE_F64: _VOP3AOp_V_CMPX_NGE_F64,
- VOP3AOp.V_CMPX_NLG_F64: _VOP3AOp_V_CMPX_NLG_F64,
- VOP3AOp.V_CMPX_NGT_F64: _VOP3AOp_V_CMPX_NGT_F64,
- VOP3AOp.V_CMPX_NLE_F64: _VOP3AOp_V_CMPX_NLE_F64,
- VOP3AOp.V_CMPX_NEQ_F64: _VOP3AOp_V_CMPX_NEQ_F64,
- VOP3AOp.V_CMPX_NLT_F64: _VOP3AOp_V_CMPX_NLT_F64,
- VOP3AOp.V_CMPX_TRU_F64: _VOP3AOp_V_CMPX_TRU_F64,
- VOP3AOp.V_CMP_F_I16: _VOP3AOp_V_CMP_F_I16,
- VOP3AOp.V_CMP_LT_I16: _VOP3AOp_V_CMP_LT_I16,
- VOP3AOp.V_CMP_EQ_I16: _VOP3AOp_V_CMP_EQ_I16,
- VOP3AOp.V_CMP_LE_I16: _VOP3AOp_V_CMP_LE_I16,
- VOP3AOp.V_CMP_GT_I16: _VOP3AOp_V_CMP_GT_I16,
- VOP3AOp.V_CMP_NE_I16: _VOP3AOp_V_CMP_NE_I16,
- VOP3AOp.V_CMP_GE_I16: _VOP3AOp_V_CMP_GE_I16,
- VOP3AOp.V_CMP_T_I16: _VOP3AOp_V_CMP_T_I16,
- VOP3AOp.V_CMP_F_U16: _VOP3AOp_V_CMP_F_U16,
- VOP3AOp.V_CMP_LT_U16: _VOP3AOp_V_CMP_LT_U16,
- VOP3AOp.V_CMP_EQ_U16: _VOP3AOp_V_CMP_EQ_U16,
- VOP3AOp.V_CMP_LE_U16: _VOP3AOp_V_CMP_LE_U16,
- VOP3AOp.V_CMP_GT_U16: _VOP3AOp_V_CMP_GT_U16,
- VOP3AOp.V_CMP_NE_U16: _VOP3AOp_V_CMP_NE_U16,
- VOP3AOp.V_CMP_GE_U16: _VOP3AOp_V_CMP_GE_U16,
- VOP3AOp.V_CMP_T_U16: _VOP3AOp_V_CMP_T_U16,
- VOP3AOp.V_CMPX_F_I16: _VOP3AOp_V_CMPX_F_I16,
- VOP3AOp.V_CMPX_LT_I16: _VOP3AOp_V_CMPX_LT_I16,
- VOP3AOp.V_CMPX_EQ_I16: _VOP3AOp_V_CMPX_EQ_I16,
- VOP3AOp.V_CMPX_LE_I16: _VOP3AOp_V_CMPX_LE_I16,
- VOP3AOp.V_CMPX_GT_I16: _VOP3AOp_V_CMPX_GT_I16,
- VOP3AOp.V_CMPX_NE_I16: _VOP3AOp_V_CMPX_NE_I16,
- VOP3AOp.V_CMPX_GE_I16: _VOP3AOp_V_CMPX_GE_I16,
- VOP3AOp.V_CMPX_T_I16: _VOP3AOp_V_CMPX_T_I16,
- VOP3AOp.V_CMPX_F_U16: _VOP3AOp_V_CMPX_F_U16,
- VOP3AOp.V_CMPX_LT_U16: _VOP3AOp_V_CMPX_LT_U16,
- VOP3AOp.V_CMPX_EQ_U16: _VOP3AOp_V_CMPX_EQ_U16,
- VOP3AOp.V_CMPX_LE_U16: _VOP3AOp_V_CMPX_LE_U16,
- VOP3AOp.V_CMPX_GT_U16: _VOP3AOp_V_CMPX_GT_U16,
- VOP3AOp.V_CMPX_NE_U16: _VOP3AOp_V_CMPX_NE_U16,
- VOP3AOp.V_CMPX_GE_U16: _VOP3AOp_V_CMPX_GE_U16,
- VOP3AOp.V_CMPX_T_U16: _VOP3AOp_V_CMPX_T_U16,
- VOP3AOp.V_CMP_F_I32: _VOP3AOp_V_CMP_F_I32,
- VOP3AOp.V_CMP_LT_I32: _VOP3AOp_V_CMP_LT_I32,
- VOP3AOp.V_CMP_EQ_I32: _VOP3AOp_V_CMP_EQ_I32,
- VOP3AOp.V_CMP_LE_I32: _VOP3AOp_V_CMP_LE_I32,
- VOP3AOp.V_CMP_GT_I32: _VOP3AOp_V_CMP_GT_I32,
- VOP3AOp.V_CMP_NE_I32: _VOP3AOp_V_CMP_NE_I32,
- VOP3AOp.V_CMP_GE_I32: _VOP3AOp_V_CMP_GE_I32,
- VOP3AOp.V_CMP_T_I32: _VOP3AOp_V_CMP_T_I32,
- VOP3AOp.V_CMP_F_U32: _VOP3AOp_V_CMP_F_U32,
- VOP3AOp.V_CMP_LT_U32: _VOP3AOp_V_CMP_LT_U32,
- VOP3AOp.V_CMP_EQ_U32: _VOP3AOp_V_CMP_EQ_U32,
- VOP3AOp.V_CMP_LE_U32: _VOP3AOp_V_CMP_LE_U32,
- VOP3AOp.V_CMP_GT_U32: _VOP3AOp_V_CMP_GT_U32,
- VOP3AOp.V_CMP_NE_U32: _VOP3AOp_V_CMP_NE_U32,
- VOP3AOp.V_CMP_GE_U32: _VOP3AOp_V_CMP_GE_U32,
- VOP3AOp.V_CMP_T_U32: _VOP3AOp_V_CMP_T_U32,
- VOP3AOp.V_CMPX_F_I32: _VOP3AOp_V_CMPX_F_I32,
- VOP3AOp.V_CMPX_LT_I32: _VOP3AOp_V_CMPX_LT_I32,
- VOP3AOp.V_CMPX_EQ_I32: _VOP3AOp_V_CMPX_EQ_I32,
- VOP3AOp.V_CMPX_LE_I32: _VOP3AOp_V_CMPX_LE_I32,
- VOP3AOp.V_CMPX_GT_I32: _VOP3AOp_V_CMPX_GT_I32,
- VOP3AOp.V_CMPX_NE_I32: _VOP3AOp_V_CMPX_NE_I32,
- VOP3AOp.V_CMPX_GE_I32: _VOP3AOp_V_CMPX_GE_I32,
- VOP3AOp.V_CMPX_T_I32: _VOP3AOp_V_CMPX_T_I32,
- VOP3AOp.V_CMPX_F_U32: _VOP3AOp_V_CMPX_F_U32,
- VOP3AOp.V_CMPX_LT_U32: _VOP3AOp_V_CMPX_LT_U32,
- VOP3AOp.V_CMPX_EQ_U32: _VOP3AOp_V_CMPX_EQ_U32,
- VOP3AOp.V_CMPX_LE_U32: _VOP3AOp_V_CMPX_LE_U32,
- VOP3AOp.V_CMPX_GT_U32: _VOP3AOp_V_CMPX_GT_U32,
- VOP3AOp.V_CMPX_NE_U32: _VOP3AOp_V_CMPX_NE_U32,
- VOP3AOp.V_CMPX_GE_U32: _VOP3AOp_V_CMPX_GE_U32,
- VOP3AOp.V_CMPX_T_U32: _VOP3AOp_V_CMPX_T_U32,
- VOP3AOp.V_CMP_F_I64: _VOP3AOp_V_CMP_F_I64,
- VOP3AOp.V_CMP_LT_I64: _VOP3AOp_V_CMP_LT_I64,
- VOP3AOp.V_CMP_EQ_I64: _VOP3AOp_V_CMP_EQ_I64,
- VOP3AOp.V_CMP_LE_I64: _VOP3AOp_V_CMP_LE_I64,
- VOP3AOp.V_CMP_GT_I64: _VOP3AOp_V_CMP_GT_I64,
- VOP3AOp.V_CMP_NE_I64: _VOP3AOp_V_CMP_NE_I64,
- VOP3AOp.V_CMP_GE_I64: _VOP3AOp_V_CMP_GE_I64,
- VOP3AOp.V_CMP_T_I64: _VOP3AOp_V_CMP_T_I64,
- VOP3AOp.V_CMP_F_U64: _VOP3AOp_V_CMP_F_U64,
- VOP3AOp.V_CMP_LT_U64: _VOP3AOp_V_CMP_LT_U64,
- VOP3AOp.V_CMP_EQ_U64: _VOP3AOp_V_CMP_EQ_U64,
- VOP3AOp.V_CMP_LE_U64: _VOP3AOp_V_CMP_LE_U64,
- VOP3AOp.V_CMP_GT_U64: _VOP3AOp_V_CMP_GT_U64,
- VOP3AOp.V_CMP_NE_U64: _VOP3AOp_V_CMP_NE_U64,
- VOP3AOp.V_CMP_GE_U64: _VOP3AOp_V_CMP_GE_U64,
- VOP3AOp.V_CMP_T_U64: _VOP3AOp_V_CMP_T_U64,
- VOP3AOp.V_CMPX_F_I64: _VOP3AOp_V_CMPX_F_I64,
- VOP3AOp.V_CMPX_LT_I64: _VOP3AOp_V_CMPX_LT_I64,
- VOP3AOp.V_CMPX_EQ_I64: _VOP3AOp_V_CMPX_EQ_I64,
- VOP3AOp.V_CMPX_LE_I64: _VOP3AOp_V_CMPX_LE_I64,
- VOP3AOp.V_CMPX_GT_I64: _VOP3AOp_V_CMPX_GT_I64,
- VOP3AOp.V_CMPX_NE_I64: _VOP3AOp_V_CMPX_NE_I64,
- VOP3AOp.V_CMPX_GE_I64: _VOP3AOp_V_CMPX_GE_I64,
- VOP3AOp.V_CMPX_T_I64: _VOP3AOp_V_CMPX_T_I64,
- VOP3AOp.V_CMPX_F_U64: _VOP3AOp_V_CMPX_F_U64,
- VOP3AOp.V_CMPX_LT_U64: _VOP3AOp_V_CMPX_LT_U64,
- VOP3AOp.V_CMPX_EQ_U64: _VOP3AOp_V_CMPX_EQ_U64,
- VOP3AOp.V_CMPX_LE_U64: _VOP3AOp_V_CMPX_LE_U64,
- VOP3AOp.V_CMPX_GT_U64: _VOP3AOp_V_CMPX_GT_U64,
- VOP3AOp.V_CMPX_NE_U64: _VOP3AOp_V_CMPX_NE_U64,
- VOP3AOp.V_CMPX_GE_U64: _VOP3AOp_V_CMPX_GE_U64,
- VOP3AOp.V_CMPX_T_U64: _VOP3AOp_V_CMPX_T_U64,
- VOP3AOp.V_MOV_B32: _VOP3AOp_V_MOV_B32,
- VOP3AOp.V_READFIRSTLANE_B32: _VOP3AOp_V_READFIRSTLANE_B32,
- VOP3AOp.V_CVT_I32_F64: _VOP3AOp_V_CVT_I32_F64,
- VOP3AOp.V_CVT_F64_I32: _VOP3AOp_V_CVT_F64_I32,
- VOP3AOp.V_CVT_F32_I32: _VOP3AOp_V_CVT_F32_I32,
- VOP3AOp.V_CVT_F32_U32: _VOP3AOp_V_CVT_F32_U32,
- VOP3AOp.V_CVT_U32_F32: _VOP3AOp_V_CVT_U32_F32,
- VOP3AOp.V_CVT_I32_F32: _VOP3AOp_V_CVT_I32_F32,
- VOP3AOp.V_CVT_F16_F32: _VOP3AOp_V_CVT_F16_F32,
- VOP3AOp.V_CVT_F32_F16: _VOP3AOp_V_CVT_F32_F16,
- VOP3AOp.V_CVT_RPI_I32_F32: _VOP3AOp_V_CVT_RPI_I32_F32,
- VOP3AOp.V_CVT_FLR_I32_F32: _VOP3AOp_V_CVT_FLR_I32_F32,
- VOP3AOp.V_CVT_F32_F64: _VOP3AOp_V_CVT_F32_F64,
- VOP3AOp.V_CVT_F64_F32: _VOP3AOp_V_CVT_F64_F32,
- VOP3AOp.V_CVT_F32_UBYTE0: _VOP3AOp_V_CVT_F32_UBYTE0,
- VOP3AOp.V_CVT_F32_UBYTE1: _VOP3AOp_V_CVT_F32_UBYTE1,
- VOP3AOp.V_CVT_F32_UBYTE2: _VOP3AOp_V_CVT_F32_UBYTE2,
- VOP3AOp.V_CVT_F32_UBYTE3: _VOP3AOp_V_CVT_F32_UBYTE3,
- VOP3AOp.V_CVT_U32_F64: _VOP3AOp_V_CVT_U32_F64,
- VOP3AOp.V_CVT_F64_U32: _VOP3AOp_V_CVT_F64_U32,
- VOP3AOp.V_TRUNC_F64: _VOP3AOp_V_TRUNC_F64,
- VOP3AOp.V_CEIL_F64: _VOP3AOp_V_CEIL_F64,
- VOP3AOp.V_RNDNE_F64: _VOP3AOp_V_RNDNE_F64,
- VOP3AOp.V_FLOOR_F64: _VOP3AOp_V_FLOOR_F64,
- VOP3AOp.V_FRACT_F32: _VOP3AOp_V_FRACT_F32,
- VOP3AOp.V_TRUNC_F32: _VOP3AOp_V_TRUNC_F32,
- VOP3AOp.V_CEIL_F32: _VOP3AOp_V_CEIL_F32,
- VOP3AOp.V_RNDNE_F32: _VOP3AOp_V_RNDNE_F32,
- VOP3AOp.V_FLOOR_F32: _VOP3AOp_V_FLOOR_F32,
- VOP3AOp.V_EXP_F32: _VOP3AOp_V_EXP_F32,
- VOP3AOp.V_LOG_F32: _VOP3AOp_V_LOG_F32,
- VOP3AOp.V_RCP_F32: _VOP3AOp_V_RCP_F32,
- VOP3AOp.V_RCP_IFLAG_F32: _VOP3AOp_V_RCP_IFLAG_F32,
- VOP3AOp.V_RSQ_F32: _VOP3AOp_V_RSQ_F32,
- VOP3AOp.V_RCP_F64: _VOP3AOp_V_RCP_F64,
- VOP3AOp.V_RSQ_F64: _VOP3AOp_V_RSQ_F64,
- VOP3AOp.V_SQRT_F32: _VOP3AOp_V_SQRT_F32,
- VOP3AOp.V_SQRT_F64: _VOP3AOp_V_SQRT_F64,
- VOP3AOp.V_SIN_F32: _VOP3AOp_V_SIN_F32,
- VOP3AOp.V_COS_F32: _VOP3AOp_V_COS_F32,
- VOP3AOp.V_NOT_B32: _VOP3AOp_V_NOT_B32,
- VOP3AOp.V_BFREV_B32: _VOP3AOp_V_BFREV_B32,
- VOP3AOp.V_FFBH_U32: _VOP3AOp_V_FFBH_U32,
- VOP3AOp.V_FFBL_B32: _VOP3AOp_V_FFBL_B32,
- VOP3AOp.V_FFBH_I32: _VOP3AOp_V_FFBH_I32,
- VOP3AOp.V_FREXP_EXP_I32_F64: _VOP3AOp_V_FREXP_EXP_I32_F64,
- VOP3AOp.V_FREXP_MANT_F64: _VOP3AOp_V_FREXP_MANT_F64,
- VOP3AOp.V_FRACT_F64: _VOP3AOp_V_FRACT_F64,
- VOP3AOp.V_FREXP_EXP_I32_F32: _VOP3AOp_V_FREXP_EXP_I32_F32,
- VOP3AOp.V_FREXP_MANT_F32: _VOP3AOp_V_FREXP_MANT_F32,
- VOP3AOp.V_MOV_B64: _VOP3AOp_V_MOV_B64,
- VOP3AOp.V_CVT_F16_U16: _VOP3AOp_V_CVT_F16_U16,
- VOP3AOp.V_CVT_F16_I16: _VOP3AOp_V_CVT_F16_I16,
- VOP3AOp.V_CVT_U16_F16: _VOP3AOp_V_CVT_U16_F16,
- VOP3AOp.V_CVT_I16_F16: _VOP3AOp_V_CVT_I16_F16,
- VOP3AOp.V_RCP_F16: _VOP3AOp_V_RCP_F16,
- VOP3AOp.V_SQRT_F16: _VOP3AOp_V_SQRT_F16,
- VOP3AOp.V_RSQ_F16: _VOP3AOp_V_RSQ_F16,
- VOP3AOp.V_LOG_F16: _VOP3AOp_V_LOG_F16,
- VOP3AOp.V_EXP_F16: _VOP3AOp_V_EXP_F16,
- VOP3AOp.V_CNDMASK_B32: _VOP3AOp_V_CNDMASK_B32,
- VOP3AOp.V_ADD_F32: _VOP3AOp_V_ADD_F32,
- VOP3AOp.V_SUB_F32: _VOP3AOp_V_SUB_F32,
- VOP3AOp.V_SUBREV_F32: _VOP3AOp_V_SUBREV_F32,
- VOP3AOp.V_FMAC_F64: _VOP3AOp_V_FMAC_F64,
- VOP3AOp.V_MUL_F32: _VOP3AOp_V_MUL_F32,
- VOP3AOp.V_MUL_I32_I24: _VOP3AOp_V_MUL_I32_I24,
- VOP3AOp.V_MUL_HI_I32_I24: _VOP3AOp_V_MUL_HI_I32_I24,
- VOP3AOp.V_MUL_U32_U24: _VOP3AOp_V_MUL_U32_U24,
- VOP3AOp.V_MUL_HI_U32_U24: _VOP3AOp_V_MUL_HI_U32_U24,
- VOP3AOp.V_MIN_F32: _VOP3AOp_V_MIN_F32,
- VOP3AOp.V_MAX_F32: _VOP3AOp_V_MAX_F32,
- VOP3AOp.V_MIN_I32: _VOP3AOp_V_MIN_I32,
- VOP3AOp.V_MAX_I32: _VOP3AOp_V_MAX_I32,
- VOP3AOp.V_MIN_U32: _VOP3AOp_V_MIN_U32,
- VOP3AOp.V_MAX_U32: _VOP3AOp_V_MAX_U32,
- VOP3AOp.V_LSHRREV_B32: _VOP3AOp_V_LSHRREV_B32,
- VOP3AOp.V_ASHRREV_I32: _VOP3AOp_V_ASHRREV_I32,
- VOP3AOp.V_LSHLREV_B32: _VOP3AOp_V_LSHLREV_B32,
- VOP3AOp.V_AND_B32: _VOP3AOp_V_AND_B32,
- VOP3AOp.V_OR_B32: _VOP3AOp_V_OR_B32,
- VOP3AOp.V_XOR_B32: _VOP3AOp_V_XOR_B32,
- VOP3AOp.V_ADD_F16: _VOP3AOp_V_ADD_F16,
- VOP3AOp.V_SUB_F16: _VOP3AOp_V_SUB_F16,
- VOP3AOp.V_SUBREV_F16: _VOP3AOp_V_SUBREV_F16,
- VOP3AOp.V_MUL_F16: _VOP3AOp_V_MUL_F16,
- VOP3AOp.V_MAC_F16: _VOP3AOp_V_MAC_F16,
- VOP3AOp.V_ADD_U16: _VOP3AOp_V_ADD_U16,
- VOP3AOp.V_SUB_U16: _VOP3AOp_V_SUB_U16,
- VOP3AOp.V_SUBREV_U16: _VOP3AOp_V_SUBREV_U16,
- VOP3AOp.V_MUL_LO_U16: _VOP3AOp_V_MUL_LO_U16,
- VOP3AOp.V_LSHLREV_B16: _VOP3AOp_V_LSHLREV_B16,
- VOP3AOp.V_LSHRREV_B16: _VOP3AOp_V_LSHRREV_B16,
- VOP3AOp.V_ASHRREV_I16: _VOP3AOp_V_ASHRREV_I16,
- VOP3AOp.V_MAX_F16: _VOP3AOp_V_MAX_F16,
- VOP3AOp.V_MIN_F16: _VOP3AOp_V_MIN_F16,
- VOP3AOp.V_MAX_U16: _VOP3AOp_V_MAX_U16,
- VOP3AOp.V_MAX_I16: _VOP3AOp_V_MAX_I16,
- VOP3AOp.V_MIN_U16: _VOP3AOp_V_MIN_U16,
- VOP3AOp.V_MIN_I16: _VOP3AOp_V_MIN_I16,
- VOP3AOp.V_LDEXP_F16: _VOP3AOp_V_LDEXP_F16,
- VOP3AOp.V_ADD_U32: _VOP3AOp_V_ADD_U32,
- VOP3AOp.V_SUB_U32: _VOP3AOp_V_SUB_U32,
- VOP3AOp.V_SUBREV_U32: _VOP3AOp_V_SUBREV_U32,
- VOP3AOp.V_DOT2C_F32_F16: _VOP3AOp_V_DOT2C_F32_F16,
- VOP3AOp.V_DOT2C_I32_I16: _VOP3AOp_V_DOT2C_I32_I16,
- VOP3AOp.V_DOT4C_I32_I8: _VOP3AOp_V_DOT4C_I32_I8,
- VOP3AOp.V_DOT8C_I32_I4: _VOP3AOp_V_DOT8C_I32_I4,
- VOP3AOp.V_FMAC_F32: _VOP3AOp_V_FMAC_F32,
- VOP3AOp.V_PK_FMAC_F16: _VOP3AOp_V_PK_FMAC_F16,
- VOP3AOp.V_XNOR_B32: _VOP3AOp_V_XNOR_B32,
- VOP3AOp.V_MAD_I32_I24: _VOP3AOp_V_MAD_I32_I24,
- VOP3AOp.V_MAD_U32_U24: _VOP3AOp_V_MAD_U32_U24,
- VOP3AOp.V_CUBEID_F32: _VOP3AOp_V_CUBEID_F32,
- VOP3AOp.V_CUBESC_F32: _VOP3AOp_V_CUBESC_F32,
- VOP3AOp.V_CUBETC_F32: _VOP3AOp_V_CUBETC_F32,
- VOP3AOp.V_CUBEMA_F32: _VOP3AOp_V_CUBEMA_F32,
- VOP3AOp.V_BFE_U32: _VOP3AOp_V_BFE_U32,
- VOP3AOp.V_BFE_I32: _VOP3AOp_V_BFE_I32,
- VOP3AOp.V_BFI_B32: _VOP3AOp_V_BFI_B32,
- VOP3AOp.V_FMA_F32: _VOP3AOp_V_FMA_F32,
- VOP3AOp.V_FMA_F64: _VOP3AOp_V_FMA_F64,
- VOP3AOp.V_LERP_U8: _VOP3AOp_V_LERP_U8,
- VOP3AOp.V_ALIGNBIT_B32: _VOP3AOp_V_ALIGNBIT_B32,
- VOP3AOp.V_ALIGNBYTE_B32: _VOP3AOp_V_ALIGNBYTE_B32,
- VOP3AOp.V_MIN3_F32: _VOP3AOp_V_MIN3_F32,
- VOP3AOp.V_MIN3_I32: _VOP3AOp_V_MIN3_I32,
- VOP3AOp.V_MIN3_U32: _VOP3AOp_V_MIN3_U32,
- VOP3AOp.V_MAX3_F32: _VOP3AOp_V_MAX3_F32,
- VOP3AOp.V_MAX3_I32: _VOP3AOp_V_MAX3_I32,
- VOP3AOp.V_MAX3_U32: _VOP3AOp_V_MAX3_U32,
- VOP3AOp.V_MED3_F32: _VOP3AOp_V_MED3_F32,
- VOP3AOp.V_MED3_I32: _VOP3AOp_V_MED3_I32,
- VOP3AOp.V_MED3_U32: _VOP3AOp_V_MED3_U32,
- VOP3AOp.V_SAD_U8: _VOP3AOp_V_SAD_U8,
- VOP3AOp.V_SAD_HI_U8: _VOP3AOp_V_SAD_HI_U8,
- VOP3AOp.V_SAD_U16: _VOP3AOp_V_SAD_U16,
- VOP3AOp.V_SAD_U32: _VOP3AOp_V_SAD_U32,
- VOP3AOp.V_CVT_PK_U8_F32: _VOP3AOp_V_CVT_PK_U8_F32,
- VOP3AOp.V_DIV_FIXUP_F32: _VOP3AOp_V_DIV_FIXUP_F32,
- VOP3AOp.V_DIV_FIXUP_F64: _VOP3AOp_V_DIV_FIXUP_F64,
- VOP3AOp.V_DIV_FMAS_F32: _VOP3AOp_V_DIV_FMAS_F32,
- VOP3AOp.V_DIV_FMAS_F64: _VOP3AOp_V_DIV_FMAS_F64,
- VOP3AOp.V_MSAD_U8: _VOP3AOp_V_MSAD_U8,
- VOP3AOp.V_QSAD_PK_U16_U8: _VOP3AOp_V_QSAD_PK_U16_U8,
- VOP3AOp.V_MQSAD_PK_U16_U8: _VOP3AOp_V_MQSAD_PK_U16_U8,
- VOP3AOp.V_MQSAD_U32_U8: _VOP3AOp_V_MQSAD_U32_U8,
- VOP3AOp.V_MAD_LEGACY_F16: _VOP3AOp_V_MAD_LEGACY_F16,
- VOP3AOp.V_MAD_LEGACY_U16: _VOP3AOp_V_MAD_LEGACY_U16,
- VOP3AOp.V_MAD_LEGACY_I16: _VOP3AOp_V_MAD_LEGACY_I16,
- VOP3AOp.V_PERM_B32: _VOP3AOp_V_PERM_B32,
- VOP3AOp.V_FMA_LEGACY_F16: _VOP3AOp_V_FMA_LEGACY_F16,
- VOP3AOp.V_DIV_FIXUP_LEGACY_F16: _VOP3AOp_V_DIV_FIXUP_LEGACY_F16,
- VOP3AOp.V_CVT_PKACCUM_U8_F32: _VOP3AOp_V_CVT_PKACCUM_U8_F32,
- VOP3AOp.V_MAD_U32_U16: _VOP3AOp_V_MAD_U32_U16,
- VOP3AOp.V_MAD_I32_I16: _VOP3AOp_V_MAD_I32_I16,
- VOP3AOp.V_XAD_U32: _VOP3AOp_V_XAD_U32,
- VOP3AOp.V_MIN3_F16: _VOP3AOp_V_MIN3_F16,
- VOP3AOp.V_MIN3_I16: _VOP3AOp_V_MIN3_I16,
- VOP3AOp.V_MIN3_U16: _VOP3AOp_V_MIN3_U16,
- VOP3AOp.V_MAX3_F16: _VOP3AOp_V_MAX3_F16,
- VOP3AOp.V_MAX3_I16: _VOP3AOp_V_MAX3_I16,
- VOP3AOp.V_MAX3_U16: _VOP3AOp_V_MAX3_U16,
- VOP3AOp.V_MED3_F16: _VOP3AOp_V_MED3_F16,
- VOP3AOp.V_MED3_I16: _VOP3AOp_V_MED3_I16,
- VOP3AOp.V_MED3_U16: _VOP3AOp_V_MED3_U16,
- VOP3AOp.V_LSHL_ADD_U32: _VOP3AOp_V_LSHL_ADD_U32,
- VOP3AOp.V_ADD_LSHL_U32: _VOP3AOp_V_ADD_LSHL_U32,
- VOP3AOp.V_ADD3_U32: _VOP3AOp_V_ADD3_U32,
- VOP3AOp.V_LSHL_OR_B32: _VOP3AOp_V_LSHL_OR_B32,
- VOP3AOp.V_AND_OR_B32: _VOP3AOp_V_AND_OR_B32,
- VOP3AOp.V_OR3_B32: _VOP3AOp_V_OR3_B32,
- VOP3AOp.V_MAD_F16: _VOP3AOp_V_MAD_F16,
- VOP3AOp.V_MAD_U16: _VOP3AOp_V_MAD_U16,
- VOP3AOp.V_MAD_I16: _VOP3AOp_V_MAD_I16,
- VOP3AOp.V_FMA_F16: _VOP3AOp_V_FMA_F16,
- VOP3AOp.V_DIV_FIXUP_F16: _VOP3AOp_V_DIV_FIXUP_F16,
- VOP3AOp.V_LSHL_ADD_U64: _VOP3AOp_V_LSHL_ADD_U64,
- VOP3AOp.V_ADD_F64: _VOP3AOp_V_ADD_F64,
- VOP3AOp.V_MUL_F64: _VOP3AOp_V_MUL_F64,
- VOP3AOp.V_MIN_F64: _VOP3AOp_V_MIN_F64,
- VOP3AOp.V_MAX_F64: _VOP3AOp_V_MAX_F64,
- VOP3AOp.V_LDEXP_F64: _VOP3AOp_V_LDEXP_F64,
- VOP3AOp.V_MUL_LO_U32: _VOP3AOp_V_MUL_LO_U32,
- VOP3AOp.V_MUL_HI_U32: _VOP3AOp_V_MUL_HI_U32,
- VOP3AOp.V_MUL_HI_I32: _VOP3AOp_V_MUL_HI_I32,
- VOP3AOp.V_LDEXP_F32: _VOP3AOp_V_LDEXP_F32,
- VOP3AOp.V_READLANE_B32: _VOP3AOp_V_READLANE_B32,
- VOP3AOp.V_BCNT_U32_B32: _VOP3AOp_V_BCNT_U32_B32,
- VOP3AOp.V_LSHLREV_B64: _VOP3AOp_V_LSHLREV_B64,
- VOP3AOp.V_LSHRREV_B64: _VOP3AOp_V_LSHRREV_B64,
- VOP3AOp.V_ASHRREV_I64: _VOP3AOp_V_ASHRREV_I64,
- VOP3AOp.V_TRIG_PREOP_F64: _VOP3AOp_V_TRIG_PREOP_F64,
- VOP3AOp.V_BFM_B32: _VOP3AOp_V_BFM_B32,
- VOP3AOp.V_CVT_PKNORM_I16_F32: _VOP3AOp_V_CVT_PKNORM_I16_F32,
- VOP3AOp.V_CVT_PKNORM_U16_F32: _VOP3AOp_V_CVT_PKNORM_U16_F32,
- VOP3AOp.V_CVT_PKRTZ_F16_F32: _VOP3AOp_V_CVT_PKRTZ_F16_F32,
- VOP3AOp.V_CVT_PK_U16_U32: _VOP3AOp_V_CVT_PK_U16_U32,
- VOP3AOp.V_CVT_PK_I16_I32: _VOP3AOp_V_CVT_PK_I16_I32,
- VOP3AOp.V_CVT_PKNORM_I16_F16: _VOP3AOp_V_CVT_PKNORM_I16_F16,
- VOP3AOp.V_CVT_PKNORM_U16_F16: _VOP3AOp_V_CVT_PKNORM_U16_F16,
- VOP3AOp.V_ADD_I32: _VOP3AOp_V_ADD_I32,
- VOP3AOp.V_SUB_I32: _VOP3AOp_V_SUB_I32,
- VOP3AOp.V_ADD_I16: _VOP3AOp_V_ADD_I16,
- VOP3AOp.V_SUB_I16: _VOP3AOp_V_SUB_I16,
- VOP3AOp.V_PACK_B32_F16: _VOP3AOp_V_PACK_B32_F16,
- VOP3AOp.V_MUL_LEGACY_F32: _VOP3AOp_V_MUL_LEGACY_F32,
- VOP3AOp.V_DOT2C_F32_BF16: _VOP3AOp_V_DOT2C_F32_BF16,
- VOP3AOp.V_CVT_SCALEF32_PK_F32_FP8: _VOP3AOp_V_CVT_SCALEF32_PK_F32_FP8,
- VOP3AOp.V_CVT_SCALEF32_PK_F32_BF8: _VOP3AOp_V_CVT_SCALEF32_PK_F32_BF8,
- VOP3AOp.V_CVT_SCALEF32_F32_FP8: _VOP3AOp_V_CVT_SCALEF32_F32_FP8,
- VOP3AOp.V_CVT_SCALEF32_F32_BF8: _VOP3AOp_V_CVT_SCALEF32_F32_BF8,
- VOP3AOp.V_CVT_SCALEF32_PK_F32_FP4: _VOP3AOp_V_CVT_SCALEF32_PK_F32_FP4,
- VOP3AOp.V_CVT_SCALEF32_PK_F16_FP8: _VOP3AOp_V_CVT_SCALEF32_PK_F16_FP8,
- VOP3AOp.V_CVT_SCALEF32_PK_F16_BF8: _VOP3AOp_V_CVT_SCALEF32_PK_F16_BF8,
- VOP3AOp.V_CVT_SCALEF32_F16_FP8: _VOP3AOp_V_CVT_SCALEF32_F16_FP8,
- VOP3AOp.V_CVT_SCALEF32_F16_BF8: _VOP3AOp_V_CVT_SCALEF32_F16_BF8,
- VOP3AOp.V_CVT_SCALEF32_PK_F16_FP4: _VOP3AOp_V_CVT_SCALEF32_PK_F16_FP4,
- VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP4: _VOP3AOp_V_CVT_SCALEF32_PK_BF16_FP4,
- VOP3AOp.V_ASHR_PK_I8_I32: _VOP3AOp_V_ASHR_PK_I8_I32,
- VOP3AOp.V_ASHR_PK_U8_I32: _VOP3AOp_V_ASHR_PK_U8_I32,
- VOP3AOp.V_CVT_PK_F16_F32: _VOP3AOp_V_CVT_PK_F16_F32,
- VOP3AOp.V_CVT_PK_BF16_F32: _VOP3AOp_V_CVT_PK_BF16_F32,
- VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP8: _VOP3AOp_V_CVT_SCALEF32_PK_BF16_FP8,
- VOP3AOp.V_CVT_SCALEF32_PK_BF16_BF8: _VOP3AOp_V_CVT_SCALEF32_PK_BF16_BF8,
- VOP3AOp.V_MINIMUM3_F32: _VOP3AOp_V_MINIMUM3_F32,
- VOP3AOp.V_MAXIMUM3_F32: _VOP3AOp_V_MAXIMUM3_F32,
-}
-
-def _VOP3BOp_V_ADD_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
- # --- compiled pseudocode ---
- tmp = Reg((S0.u32) + (S1.u32))
- VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0))
- D0.u32 = tmp.u32
- return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3BOp_V_SUB_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
- # --- compiled pseudocode ---
- tmp = Reg(S0.u32 - S1.u32)
- VCC.u64[laneId] = ((1) if (S1.u32 > S0.u32) else (0))
- D0.u32 = tmp.u32
- return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3BOp_V_SUBREV_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
- # --- compiled pseudocode ---
- tmp = Reg(S1.u32 - S0.u32)
- VCC.u64[laneId] = ((1) if (S0.u32 > S1.u32) else (0))
- D0.u32 = tmp.u32
- return {'D0': D0._val, 'VCC': VCC._val}
-
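The V_ADD_CO_U32/V_ADDC_CO_U32 pair implements wide integer adds as a carry chain: the carry-out of each 32-bit add lands in this lane's bit of VCC and the next add consumes it. A self-contained sketch of the per-lane arithmetic these handlers compute:

def add_co_u32(a: int, b: int, carry_in: int = 0) -> tuple[int, int]:
    # returns (32-bit result, carry-out bit), same test as the pcode above
    full = (a & 0xffffffff) + (b & 0xffffffff) + carry_in
    return full & 0xffffffff, 1 if full >= 0x100000000 else 0

lo, c = add_co_u32(0xffffffff, 1)       # low dword wraps to 0, carry out = 1
hi, _ = add_co_u32(0, 0, carry_in=c)    # high dword picks up the carry
assert (hi << 32) | lo == 0x100000000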
-def _VOP3BOp_V_ADDC_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
- # --- compiled pseudocode ---
- tmp = Reg((S0.u32) + (S1.u32) + VCC.u64[laneId])
- VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0))
- D0.u32 = tmp.u32
- return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3BOp_V_SUBB_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
- # --- compiled pseudocode ---
- tmp = Reg(S0.u32 - S1.u32 - VCC.u64[laneId])
- VCC.u64[laneId] = ((1) if ((S1.u32) + VCC.u64[laneId] > (S0.u32)) else (0))
- D0.u32 = tmp.u32
- return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3BOp_V_SUBBREV_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
- # --- compiled pseudocode ---
- tmp = Reg(S1.u32 - S0.u32 - VCC.u64[laneId])
- VCC.u64[laneId] = ((1) if ((S0.u32) + VCC.u64[laneId] > (S1.u32)) else (0))
- D0.u32 = tmp.u32
- return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3BOp_V_DIV_SCALE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(s0); VCC=Reg(vcc)
- # --- compiled pseudocode ---
- VCC = Reg(0x0)
- if ((F(S2.f32) == 0.0) or (F(S1.f32) == 0.0)):
- VCC = Reg(0x1); D0.f32 = float("nan")
- elif exponent(S2.f32) - exponent(S1.f32) >= 96:
- VCC = Reg(0x1)
- if S0.f32 == S1.f32:
- D0.f32 = ldexp(S0.f32, 64)
- elif False:
- pass
- elif ((1.0 / F(S1.f32) == DENORM.f64) and (S2.f32 / S1.f32 == DENORM.f32)):
- VCC = Reg(0x1)
- if S0.f32 == S1.f32:
- D0.f32 = ldexp(S0.f32, 64)
- elif 1.0 / F(S1.f32) == DENORM.f64:
- D0.f32 = ldexp(S0.f32, -64)
- elif S2.f32 / S1.f32 == DENORM.f32:
- VCC = Reg(0x1)
- elif exponent(S2.f32) <= 23:
- VCC = Reg(0x1); D0.f32 = ldexp(S0.f32, 64)
- if S1.f32 == DENORM.f32:
- D0.f32 = float("nan")
- return {'D0': D0._val}
-
-def _VOP3BOp_V_DIV_SCALE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(s0); VCC=Reg(vcc)
- # --- compiled pseudocode ---
- VCC = Reg(0x0)
- if ((S2.f64 == 0.0) or (S1.f64 == 0.0)):
- VCC = Reg(0x1); D0.f64 = float("nan")
- elif exponent(S2.f64) - exponent(S1.f64) >= 768:
- VCC = Reg(0x1)
- if S0.f64 == S1.f64:
- D0.f64 = ldexp(S0.f64, 128)
- elif False:
- pass
- elif ((1.0 / S1.f64 == DENORM.f64) and (S2.f64 / S1.f64 == DENORM.f64)):
- VCC = Reg(0x1)
- if S0.f64 == S1.f64:
- D0.f64 = ldexp(S0.f64, 128)
- elif 1.0 / S1.f64 == DENORM.f64:
- D0.f64 = ldexp(S0.f64, -128)
- elif S2.f64 / S1.f64 == DENORM.f64:
- VCC = Reg(0x1)
- elif exponent(S2.f64) <= 53:
- D0.f64 = ldexp(S0.f64, 128)
- if S1.f64 == DENORM.f64:
- D0.f64 = float("nan")
- return {'D0': D0._val}
-
-def _VOP3BOp_V_MAD_U64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); D1=Reg(0)
- # --- compiled pseudocode ---
- _full = ((S0.u32) * (S1.u32) + (S2.u64))
- D0.u64 = int(_full) & 0xffffffffffffffff
- D1 = Reg((int(_full) >> 64) & 1)
- return {'D0': D0._val, 'D1': D1._val}
-
-def _VOP3BOp_V_MAD_I64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); D1=Reg(0)
- # --- compiled pseudocode ---
- _full = ((S0.i32) * (S1.i32) + (S2.i64))
- D0.u64 = int(_full) & 0xffffffffffffffff
- D1 = Reg((int(_full) >> 64) & 1)
- return {'D0': D0._val, 'D1': D1._val}
-
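V_MAD_U64_U32 multiplies two 32-bit operands, adds a 64-bit accumulator, keeps the low 64 bits in D0, and exposes bit 64 of the exact result as a 1-bit carry in D1. A quick arbitrary-precision check of those semantics, exactly as the handler above computes them:

def mad_u64_u32(s0: int, s1: int, s2: int) -> tuple[int, int]:
    # Python ints are unbounded, so the bit-64 overflow is easy to extract
    full = s0 * s1 + s2
    return full & 0xffffffffffffffff, (full >> 64) & 1

d0, d1 = mad_u64_u32(0xffffffff, 0xffffffff, 0xffffffffffffffff)
assert d1 == 1  # 0xffffffff**2 + (2**64 - 1) overflows 64 bits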
-VOP3BOp_FUNCTIONS = {
- VOP3BOp.V_ADD_CO_U32: _VOP3BOp_V_ADD_CO_U32,
- VOP3BOp.V_SUB_CO_U32: _VOP3BOp_V_SUB_CO_U32,
- VOP3BOp.V_SUBREV_CO_U32: _VOP3BOp_V_SUBREV_CO_U32,
- VOP3BOp.V_ADDC_CO_U32: _VOP3BOp_V_ADDC_CO_U32,
- VOP3BOp.V_SUBB_CO_U32: _VOP3BOp_V_SUBB_CO_U32,
- VOP3BOp.V_SUBBREV_CO_U32: _VOP3BOp_V_SUBBREV_CO_U32,
- VOP3BOp.V_DIV_SCALE_F32: _VOP3BOp_V_DIV_SCALE_F32,
- VOP3BOp.V_DIV_SCALE_F64: _VOP3BOp_V_DIV_SCALE_F64,
- VOP3BOp.V_MAD_U64_U32: _VOP3BOp_V_MAD_U64_U32,
- VOP3BOp.V_MAD_I64_I32: _VOP3BOp_V_MAD_I64_I32,
-}
-
-def _DSOp_DS_ADD_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- MEM[addr].u32 += DATA.u32
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_SUB_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- MEM[addr].u32 -= DATA.u32
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_RSUB_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- MEM[addr].u32 = DATA.u32 - MEM[addr].u32
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_INC_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- src = DATA.u32
- MEM[addr].u32 = ((0) if (tmp >= src) else (tmp + 1))
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_DEC_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- src = DATA.u32
- MEM[addr].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1))
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MIN_I32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].i32)
- src = DATA.i32
- MEM[addr].i32 = ((src) if (src < tmp) else (tmp))
- RETURN_DATA.i32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MAX_I32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].i32)
- src = DATA.i32
- MEM[addr].i32 = ((src) if (src >= tmp) else (tmp))
- RETURN_DATA.i32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
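DS_INC_U32 and DS_DEC_U32 above are wrapping counters rather than plain adds: INC resets to 0 once the stored value reaches the operand, DEC reloads the operand on 0 or overshoot, and like the other DS atomics they hand the pre-op memory value back in RETURN_DATA. A dictionary-backed sketch of the INC rule:

def ds_inc_u32(mem: dict[int, int], addr: int, src: int) -> int:
    tmp = mem.get(addr, 0)                    # pre-op value, returned to the lane
    mem[addr] = 0 if tmp >= src else tmp + 1  # wrap at src, matching the pcode
    return tmp

mem = {0x10: 3}
assert ds_inc_u32(mem, 0x10, 4) == 3 and mem[0x10] == 4
assert ds_inc_u32(mem, 0x10, 4) == 4 and mem[0x10] == 0  # wrapped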
-def _DSOp_DS_MIN_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- src = DATA.u32
- MEM[addr].u32 = ((src) if (src < tmp) else (tmp))
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MAX_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- src = DATA.u32
- MEM[addr].u32 = ((src) if (src >= tmp) else (tmp))
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_AND_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = (tmp & DATA.b32)
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_OR_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = (tmp | DATA.b32)
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_XOR_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = (tmp ^ DATA.b32)
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MSKOR_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32)
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_WRITE_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- MEM[addr + OFFSET.u32].b32 = DATA[31 : 0]
- return {}
-
-def _DSOp_DS_WRITE2_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- MEM[addr + OFFSET0.u32 * 4].b32 = DATA[31 : 0]
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- MEM[addr + OFFSET1.u32 * 4].b32 = DATA2[31 : 0]
- return {}
-
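DS_WRITE2_B32 stores two dwords at independent offsets scaled by the element size (offset * 4 bytes); the ST64 variants below scale by 64 elements instead (offset * 256 bytes for b32). A small sketch of the two address computations, assuming a flat byte-addressed LDS:

def write2_addrs(base: int, off0: int, off1: int, st64: bool = False) -> tuple[int, int]:
    # b32 element: stride 4 bytes normally, 64*4 = 256 bytes for the ST64 form
    stride = 256 if st64 else 4
    return base + off0 * stride, base + off1 * stride

assert write2_addrs(0x100, 0, 1) == (0x100, 0x104)
assert write2_addrs(0x100, 0, 1, st64=True) == (0x100, 0x200)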
-def _DSOp_DS_WRITE2ST64_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- MEM[addr + OFFSET0.u32 * 256].b32 = DATA[31 : 0]
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- MEM[addr + OFFSET1.u32 * 256].b32 = DATA2[31 : 0]
- return {}
-
-def _DSOp_DS_CMPST_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- src = DATA2.b32
- cmp = DATA.b32
- MEM[addr].b32 = ((src) if (tmp == cmp) else (tmp))
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_CMPST_F32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].f32)
- src = DATA2.f32
- cmp = DATA.f32
- MEM[addr].f32 = ((src) if (tmp == cmp) else (tmp))
- RETURN_DATA.f32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MIN_F32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].f32)
- src = DATA.f32
- MEM[addr].f32 = ((src) if (src < tmp) else (tmp))
- RETURN_DATA.f32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MAX_F32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].f32)
- src = DATA.f32
- MEM[addr].f32 = ((src) if (src > tmp) else (tmp))
- RETURN_DATA.f32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_ADD_F32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].f32)
- MEM[addr].f32 += DATA.f32
- RETURN_DATA.f32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_PK_ADD_F16(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0
- # --- compiled pseudocode ---
- tmp = Reg(MEM[ADDR])
- src = DATA
- dst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16
- dst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16
- MEM[ADDR] = dst.b32
- RETURN_DATA = tmp
- return {}
-
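DS_PK_ADD_F16 treats the 32-bit memory word as two independent f16 lanes and adds DATA's halves element-wise. A sketch of that packing arithmetic using numpy's float16 to stand in for the .f16 register views (numpy is an assumption of this sketch, not something the emulator uses here):

import numpy as np

def pk_add_f16(word: int, data: int) -> int:
    # split each 32-bit word into its [lo, hi] f16 pair, add, and repack
    halves = np.array([word, data], dtype=np.uint32).view(np.float16).reshape(2, 2)
    return int((halves[0] + halves[1]).view(np.uint32)[0])

one_one = int(np.array([1.0, 1.0], dtype=np.float16).view(np.uint32)[0])
two_two = int(np.array([2.0, 2.0], dtype=np.float16).view(np.uint32)[0])
assert pk_add_f16(one_one, one_one) == two_two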
-def _DSOp_DS_PK_ADD_BF16(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0
- # --- compiled pseudocode ---
- tmp = Reg(MEM[ADDR])
- src = DATA
- dst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16
- dst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16
- MEM[ADDR] = dst.b32
- RETURN_DATA = tmp
- return {}
-
-def _DSOp_DS_WRITE_B8(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0
- # --- compiled pseudocode ---
- MEM[ADDR].b8 = DATA[7 : 0]
- return {}
-
-def _DSOp_DS_WRITE_B16(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0
- # --- compiled pseudocode ---
- MEM[ADDR].b16 = DATA[15 : 0]
- return {}
-
-def _DSOp_DS_ADD_RTN_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- MEM[addr].u32 += DATA.u32
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_SUB_RTN_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- MEM[addr].u32 -= DATA.u32
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_RSUB_RTN_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- MEM[addr].u32 = DATA.u32 - MEM[addr].u32
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_INC_RTN_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- src = DATA.u32
- MEM[addr].u32 = ((0) if (tmp >= src) else (tmp + 1))
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_DEC_RTN_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- src = DATA.u32
- MEM[addr].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1))
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MIN_RTN_I32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].i32)
- src = DATA.i32
- MEM[addr].i32 = ((src) if (src < tmp) else (tmp))
- RETURN_DATA.i32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MAX_RTN_I32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].i32)
- src = DATA.i32
- MEM[addr].i32 = ((src) if (src >= tmp) else (tmp))
- RETURN_DATA.i32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MIN_RTN_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- src = DATA.u32
- MEM[addr].u32 = ((src) if (src < tmp) else (tmp))
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MAX_RTN_U32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u32)
- src = DATA.u32
- MEM[addr].u32 = ((src) if (src >= tmp) else (tmp))
- RETURN_DATA.u32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_AND_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = (tmp & DATA.b32)
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_OR_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = (tmp | DATA.b32)
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_XOR_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = (tmp ^ DATA.b32)
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MSKOR_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32)
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_WRXCHG_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- MEM[addr].b32 = DATA.b32
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_WRXCHG2_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR
- # --- compiled pseudocode ---
- addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4
- addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4
- tmp1 = MEM[addr1].b32
- tmp2 = MEM[addr2].b32
- MEM[addr1].b32 = DATA.b32
- MEM[addr2].b32 = DATA2.b32
- RETURN_DATA[31 : 0] = tmp1
- RETURN_DATA[63 : 32] = tmp2
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_WRXCHG2ST64_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR
- # --- compiled pseudocode ---
- addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256
- addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256
- tmp1 = MEM[addr1].b32
- tmp2 = MEM[addr2].b32
- MEM[addr1].b32 = DATA.b32
- MEM[addr2].b32 = DATA2.b32
- RETURN_DATA[31 : 0] = tmp1
- RETURN_DATA[63 : 32] = tmp2
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_CMPST_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b32)
- src = DATA2.b32
- cmp = DATA.b32
- MEM[addr].b32 = ((src) if (tmp == cmp) else (tmp))
- RETURN_DATA.b32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_CMPST_RTN_F32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].f32)
- src = DATA2.f32
- cmp = DATA.f32
- MEM[addr].f32 = ((src) if (tmp == cmp) else (tmp))
- RETURN_DATA.f32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MIN_RTN_F32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].f32)
- src = DATA.f32
- MEM[addr].f32 = ((src) if (src < tmp) else (tmp))
- RETURN_DATA.f32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MAX_RTN_F32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].f32)
- src = DATA.f32
- MEM[addr].f32 = ((src) if (src > tmp) else (tmp))
- RETURN_DATA.f32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_WRAP_RTN_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1
- # --- compiled pseudocode ---
- tmp = Reg(MEM[ADDR].u32)
- MEM[ADDR].u32 = ((tmp - DATA.u32) if (tmp >= DATA.u32) else (tmp + DATA2.u32))
- RETURN_DATA = tmp
- return {}
-
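DS_WRAP_RTN_B32 above is the odd one out among these atomics: it subtracts DATA when the stored value is at least DATA and otherwise adds DATA2, which makes it a ring-buffer pointer update in a single atomic. A sketch, again with a dict standing in for LDS:

def ds_wrap_rtn_b32(mem: dict[int, int], addr: int, data: int, data2: int) -> int:
    tmp = mem.get(addr, 0)
    mem[addr] = (tmp - data) if tmp >= data else (tmp + data2)
    return tmp  # pre-op value, as with the other RTN atomics

mem = {0: 10}
ds_wrap_rtn_b32(mem, 0, 8, 3)   # 10 >= 8 -> 10 - 8 = 2
assert mem[0] == 2
ds_wrap_rtn_b32(mem, 0, 8, 3)   # 2 < 8  -> 2 + 3 = 5
assert mem[0] == 5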
-def _DSOp_DS_ADD_RTN_F32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].f32)
- MEM[addr].f32 += DATA.f32
- RETURN_DATA.f32 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_READ_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- RETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_READ2_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- RETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4].b32
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- RETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4].b32
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_READ2ST64_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- RETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256].b32
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- RETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256].b32
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_READ_I8(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0
- # --- compiled pseudocode ---
- RETURN_DATA.i32 = (signext(MEM[ADDR].i8))
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_READ_U8(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0
- # --- compiled pseudocode ---
- RETURN_DATA.u32 = (_pack(0, MEM[ADDR].u8))
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_READ_I16(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0
- # --- compiled pseudocode ---
- RETURN_DATA.i32 = (signext(MEM[ADDR].i16))
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_READ_U16(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0
- # --- compiled pseudocode ---
- RETURN_DATA.u32 = (_pack(0, MEM[ADDR].u16))
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_PERMUTE_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- for i in range(0, int(63)+1):
- tmp[i] = 0x0
- for i in range(0, int(63)+1):
- if EXEC[i].u1:
- dst_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4 % 64
- tmp[dst_lane] = VGPR[i][DATA0]
- for i in range(0, int(63)+1):
- if EXEC[i].u1:
- VGPR[i][VDST] = tmp[i]
- return {}
-
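DS_PERMUTE_B32 is the forward (scatter) permute: lane i computes a destination lane and pushes its value there. DS_BPERMUTE_B32 below is the backward (gather) form, where lane i pulls from a computed source lane. A 4-lane sketch of the two directions, assuming the index has already been divided down to a lane number:

def ds_permute(vals: list[int], dst_lane: list[int]) -> list[int]:
    out = [0] * len(vals)
    for i, v in enumerate(vals):   # forward: lane i writes tmp[dst_lane[i]]
        out[dst_lane[i]] = v
    return out

def ds_bpermute(vals: list[int], src_lane: list[int]) -> list[int]:
    # backward: lane i reads vals[src_lane[i]]
    return [vals[s] for s in src_lane]

assert ds_permute([10, 11, 12, 13], [1, 2, 3, 0]) == [13, 10, 11, 12]
assert ds_bpermute([10, 11, 12, 13], [1, 2, 3, 0]) == [11, 12, 13, 10]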
-def _DSOp_DS_BPERMUTE_B32(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- for i in range(0, int(63)+1):
- tmp[i] = 0x0
- for i in range(0, int(63)+1):
- src_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4 % 64
- if EXEC[src_lane].u1:
- tmp[i] = VGPR[src_lane][DATA0]
- for i in range(0, int(63)+1):
- if EXEC[i].u1:
- VGPR[i][VDST] = tmp[i]
- return {}
-
-def _DSOp_DS_ADD_U64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u64)
- MEM[addr].u64 += DATA.u64
- RETURN_DATA.u64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_SUB_U64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u64)
- MEM[addr].u64 -= DATA.u64
- RETURN_DATA.u64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_RSUB_U64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u64)
- MEM[addr].u64 = DATA.u64 - MEM[addr].u64
- RETURN_DATA.u64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_INC_U64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u64)
- src = DATA.u64
- MEM[addr].u64 = ((0) if (tmp >= src) else (tmp + 1))
- RETURN_DATA.u64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_DEC_U64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u64)
- src = DATA.u64
- MEM[addr].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1))
- RETURN_DATA.u64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MIN_I64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].i64)
- src = DATA.i64
- MEM[addr].i64 = ((src) if (src < tmp) else (tmp))
- RETURN_DATA.i64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MAX_I64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].i64)
- src = DATA.i64
- MEM[addr].i64 = ((src) if (src >= tmp) else (tmp))
- RETURN_DATA.i64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MIN_U64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u64)
- src = DATA.u64
- MEM[addr].u64 = ((src) if (src < tmp) else (tmp))
- RETURN_DATA.u64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MAX_U64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].u64)
- src = DATA.u64
- MEM[addr].u64 = ((src) if (src >= tmp) else (tmp))
- RETURN_DATA.u64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_AND_B64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b64)
- MEM[addr].b64 = (tmp & DATA.b64)
- RETURN_DATA.b64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_OR_B64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b64)
- MEM[addr].b64 = (tmp | DATA.b64)
- RETURN_DATA.b64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_XOR_B64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b64)
- MEM[addr].b64 = (tmp ^ DATA.b64)
- RETURN_DATA.b64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_MSKOR_B64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32)
- tmp = Reg(MEM[addr].b64)
- MEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64)
- RETURN_DATA.b64 = tmp
- return {'RETURN_DATA': RETURN_DATA._val}
-
-def _DSOp_DS_WRITE_B64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- MEM[addr + OFFSET.u32].b32 = DATA[31 : 0]
- MEM[addr + OFFSET.u32 + 4].b32 = DATA[63 : 32]
- return {}
-
-def _DSOp_DS_WRITE2_B64(MEM, addr, data0, data1, offset0, offset1):
- ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0
- # --- compiled pseudocode ---
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- MEM[addr + OFFSET0.u32 * 8].b32 = DATA[31 : 0]
- MEM[addr + OFFSET0.u32 * 8 + 4].b32 = DATA[63 : 32]
- addr = CalcDsAddr(ADDR.b32, 0x0, 0x0)
- MEM[addr + OFFSET1.u32 * 8].b32 = DATA2[31 : 0]
- MEM[addr + OFFSET1.u32 * 8 + 4].b32 = DATA2[63 : 32]
- return {}
-
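DS_MSKOR_B32/B64 above perform a masked overwrite in one atomic: bits set in DATA are cleared out of the old value, then DATA2 is ORed in, i.e. new = (old & ~mask) | value. A minimal sketch:

def ds_mskor(old: int, mask: int, value: int) -> int:
    # keep bits outside the mask, splice `value` into the masked field
    return (old & ~mask) | value

assert ds_mskor(0xAABBCCDD, 0x0000FF00, 0x00001100) == 0xAABB11DD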
_DSOp_DS_WRITE2ST64_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - MEM[addr + OFFSET0.u32 * 512].b32 = DATA[31 : 0] - MEM[addr + OFFSET0.u32 * 512 + 4].b32 = DATA[63 : 32] - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - MEM[addr + OFFSET1.u32 * 512].b32 = DATA2[31 : 0] - MEM[addr + OFFSET1.u32 * 512 + 4].b32 = DATA2[63 : 32] - return {} - -def _DSOp_DS_CMPST_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].b64) - src = DATA2.b64 - cmp = DATA.b64 - MEM[addr].b64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPST_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].f64) - src = DATA2.f64 - cmp = DATA.f64 - MEM[addr].f64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].f64) - src = DATA.f64 - MEM[addr].f64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].f64) - src = DATA.f64 - MEM[addr].f64 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_WRITE_B8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b8 = DATA[23 : 16] - return {} - -def _DSOp_DS_WRITE_B16_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b16 = DATA[31 : 16] - return {} - -def _DSOp_DS_READ_U8_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].u16 = (_pack(0, MEM[ADDR].u8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_READ_U8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); 
RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].u16 = (_pack(0, MEM[ADDR].u8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_READ_I8_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].i16 = (signext(MEM[ADDR].i8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_READ_I8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].i16 = (signext(MEM[ADDR].i8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_READ_U16_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_READ_U16_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - MEM[ADDR].f64 += DATA.f64 - RETURN_DATA = tmp - return {} - -def _DSOp_DS_ADD_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 = DATA.u64 - MEM[addr].u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def 
_DSOp_DS_DEC_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = 
Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_WRXCHG_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_WRXCHG2_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8 - tmp1 = MEM[addr1].b64 - tmp2 = MEM[addr2].b64 - MEM[addr1].b64 = DATA.b64 - MEM[addr2].b64 = DATA2.b64 - RETURN_DATA[63 : 0] = tmp1 - RETURN_DATA[127 : 64] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_WRXCHG2ST64_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512 - tmp1 = MEM[addr1].b64 - tmp2 = MEM[addr2].b64 - MEM[addr1].b64 = DATA.b64 - MEM[addr2].b64 = DATA2.b64 - RETURN_DATA[63 : 0] = tmp1 - RETURN_DATA[127 : 64] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPST_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].b64) - src = DATA2.b64 - cmp = DATA.b64 - MEM[addr].b64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPST_RTN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].f64) - src = DATA2.f64 - cmp = DATA.f64 - MEM[addr].f64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].f64) - src = DATA.f64 - MEM[addr].f64 = ((src) if 
(src < tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32) - tmp = Reg(MEM[addr].f64) - src = DATA.f64 - MEM[addr].f64 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_READ_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_READ2_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8 + 4].b32 - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - RETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8].b32 - RETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_READ2ST64_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512 + 4].b32 - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - RETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512].b32 - RETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_RTN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - MEM[ADDR].f64 += DATA.f64 - RETURN_DATA = tmp - return {} - -def _DSOp_DS_CONDXCHG32_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - ADDR = S0.u32 - DATA = S1.u64 - offset = _pack(OFFSET1, OFFSET0) - RETURN_DATA[0] = LDS[ADDR0].u32 - if DATA[31]: - LDS[ADDR0] = _pack(0, DATA[30 : 0]) - RETURN_DATA[1] = LDS[ADDR1].u32 - if DATA[63]: - LDS[ADDR1] = _pack(0, DATA[62 : 32]) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_PK_ADD_RTN_F16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR]) - src = DATA - dst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16 - dst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16 - MEM[ADDR] = dst.b32 - RETURN_DATA = tmp - return {} - -def _DSOp_DS_PK_ADD_RTN_BF16(MEM, 
addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR]) - src = DATA - dst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16 - dst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16 - MEM[ADDR] = dst.b32 - RETURN_DATA = tmp - return {} - -def 
_DSOp_DS_WRITE_B96(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - MEM[addr + OFFSET.u32].b32 = DATA[31 : 0] - MEM[addr + OFFSET.u32 + 4].b32 = DATA[63 : 32] - MEM[addr + OFFSET.u32 + 8].b32 = DATA[95 : 64] - return {} - -def 
_DSOp_DS_WRITE_B128(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - MEM[addr + OFFSET.u32].b32 = DATA[31 : 0] - MEM[addr + OFFSET.u32 + 4].b32 = DATA[63 : 32] - MEM[addr + OFFSET.u32 + 8].b32 = DATA[95 : 64] - MEM[addr + OFFSET.u32 + 12].b32 = DATA[127 : 96] - return {} - -def 
_DSOp_DS_READ_B96(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4].b32 - RETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def 
_DSOp_DS_READ_B128(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(ADDR.b32, 0x0, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4].b32 - RETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8].b32 - RETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12].b32 - return {'RETURN_DATA': RETURN_DATA._val} - 
-DSOp_FUNCTIONS = { - DSOp.DS_ADD_U32: _DSOp_DS_ADD_U32, - DSOp.DS_SUB_U32: _DSOp_DS_SUB_U32, - DSOp.DS_RSUB_U32: _DSOp_DS_RSUB_U32, - DSOp.DS_INC_U32: _DSOp_DS_INC_U32, - DSOp.DS_DEC_U32: _DSOp_DS_DEC_U32, - DSOp.DS_MIN_I32: _DSOp_DS_MIN_I32, - DSOp.DS_MAX_I32: _DSOp_DS_MAX_I32, - DSOp.DS_MIN_U32: _DSOp_DS_MIN_U32, - DSOp.DS_MAX_U32: _DSOp_DS_MAX_U32, - DSOp.DS_AND_B32: _DSOp_DS_AND_B32, - DSOp.DS_OR_B32: _DSOp_DS_OR_B32, - DSOp.DS_XOR_B32: _DSOp_DS_XOR_B32, - DSOp.DS_MSKOR_B32: _DSOp_DS_MSKOR_B32, - DSOp.DS_WRITE_B32: _DSOp_DS_WRITE_B32, - DSOp.DS_WRITE2_B32: _DSOp_DS_WRITE2_B32, - DSOp.DS_WRITE2ST64_B32: _DSOp_DS_WRITE2ST64_B32, - DSOp.DS_CMPST_B32: _DSOp_DS_CMPST_B32, - DSOp.DS_CMPST_F32: _DSOp_DS_CMPST_F32, - DSOp.DS_MIN_F32: _DSOp_DS_MIN_F32, - DSOp.DS_MAX_F32: _DSOp_DS_MAX_F32, - DSOp.DS_ADD_F32: _DSOp_DS_ADD_F32, - DSOp.DS_PK_ADD_F16: _DSOp_DS_PK_ADD_F16, - DSOp.DS_PK_ADD_BF16: _DSOp_DS_PK_ADD_BF16, - DSOp.DS_WRITE_B8: _DSOp_DS_WRITE_B8, - DSOp.DS_WRITE_B16: _DSOp_DS_WRITE_B16, - DSOp.DS_ADD_RTN_U32: _DSOp_DS_ADD_RTN_U32, - DSOp.DS_SUB_RTN_U32: _DSOp_DS_SUB_RTN_U32, - DSOp.DS_RSUB_RTN_U32: _DSOp_DS_RSUB_RTN_U32, - DSOp.DS_INC_RTN_U32: 
_DSOp_DS_INC_RTN_U32, - DSOp.DS_DEC_RTN_U32: _DSOp_DS_DEC_RTN_U32, - DSOp.DS_MIN_RTN_I32: _DSOp_DS_MIN_RTN_I32, - DSOp.DS_MAX_RTN_I32: _DSOp_DS_MAX_RTN_I32, - DSOp.DS_MIN_RTN_U32: _DSOp_DS_MIN_RTN_U32, - DSOp.DS_MAX_RTN_U32: _DSOp_DS_MAX_RTN_U32, - DSOp.DS_AND_RTN_B32: _DSOp_DS_AND_RTN_B32, - DSOp.DS_OR_RTN_B32: _DSOp_DS_OR_RTN_B32, - DSOp.DS_XOR_RTN_B32: _DSOp_DS_XOR_RTN_B32, - DSOp.DS_MSKOR_RTN_B32: _DSOp_DS_MSKOR_RTN_B32, - DSOp.DS_WRXCHG_RTN_B32: _DSOp_DS_WRXCHG_RTN_B32, - DSOp.DS_WRXCHG2_RTN_B32: _DSOp_DS_WRXCHG2_RTN_B32, - DSOp.DS_WRXCHG2ST64_RTN_B32: _DSOp_DS_WRXCHG2ST64_RTN_B32, - DSOp.DS_CMPST_RTN_B32: _DSOp_DS_CMPST_RTN_B32, - DSOp.DS_CMPST_RTN_F32: _DSOp_DS_CMPST_RTN_F32, - DSOp.DS_MIN_RTN_F32: _DSOp_DS_MIN_RTN_F32, - DSOp.DS_MAX_RTN_F32: _DSOp_DS_MAX_RTN_F32, - DSOp.DS_WRAP_RTN_B32: _DSOp_DS_WRAP_RTN_B32, - DSOp.DS_ADD_RTN_F32: _DSOp_DS_ADD_RTN_F32, - DSOp.DS_READ_B32: _DSOp_DS_READ_B32, - DSOp.DS_READ2_B32: _DSOp_DS_READ2_B32, - DSOp.DS_READ2ST64_B32: _DSOp_DS_READ2ST64_B32, - DSOp.DS_READ_I8: _DSOp_DS_READ_I8, - DSOp.DS_READ_U8: _DSOp_DS_READ_U8, - DSOp.DS_READ_I16: _DSOp_DS_READ_I16, - DSOp.DS_READ_U16: _DSOp_DS_READ_U16, - DSOp.DS_PERMUTE_B32: _DSOp_DS_PERMUTE_B32, - DSOp.DS_BPERMUTE_B32: _DSOp_DS_BPERMUTE_B32, - DSOp.DS_ADD_U64: _DSOp_DS_ADD_U64, - DSOp.DS_SUB_U64: _DSOp_DS_SUB_U64, - DSOp.DS_RSUB_U64: _DSOp_DS_RSUB_U64, - DSOp.DS_INC_U64: _DSOp_DS_INC_U64, - DSOp.DS_DEC_U64: _DSOp_DS_DEC_U64, - DSOp.DS_MIN_I64: _DSOp_DS_MIN_I64, - DSOp.DS_MAX_I64: _DSOp_DS_MAX_I64, - DSOp.DS_MIN_U64: _DSOp_DS_MIN_U64, - DSOp.DS_MAX_U64: _DSOp_DS_MAX_U64, - DSOp.DS_AND_B64: _DSOp_DS_AND_B64, - DSOp.DS_OR_B64: _DSOp_DS_OR_B64, - DSOp.DS_XOR_B64: _DSOp_DS_XOR_B64, - DSOp.DS_MSKOR_B64: _DSOp_DS_MSKOR_B64, - DSOp.DS_WRITE_B64: _DSOp_DS_WRITE_B64, - DSOp.DS_WRITE2_B64: _DSOp_DS_WRITE2_B64, - DSOp.DS_WRITE2ST64_B64: _DSOp_DS_WRITE2ST64_B64, - DSOp.DS_CMPST_B64: _DSOp_DS_CMPST_B64, - DSOp.DS_CMPST_F64: _DSOp_DS_CMPST_F64, - DSOp.DS_MIN_F64: _DSOp_DS_MIN_F64, - DSOp.DS_MAX_F64: _DSOp_DS_MAX_F64, - DSOp.DS_WRITE_B8_D16_HI: _DSOp_DS_WRITE_B8_D16_HI, - DSOp.DS_WRITE_B16_D16_HI: _DSOp_DS_WRITE_B16_D16_HI, - DSOp.DS_READ_U8_D16: _DSOp_DS_READ_U8_D16, - DSOp.DS_READ_U8_D16_HI: _DSOp_DS_READ_U8_D16_HI, - DSOp.DS_READ_I8_D16: _DSOp_DS_READ_I8_D16, - DSOp.DS_READ_I8_D16_HI: _DSOp_DS_READ_I8_D16_HI, - DSOp.DS_READ_U16_D16: _DSOp_DS_READ_U16_D16, - DSOp.DS_READ_U16_D16_HI: _DSOp_DS_READ_U16_D16_HI, - DSOp.DS_ADD_F64: _DSOp_DS_ADD_F64, - DSOp.DS_ADD_RTN_U64: _DSOp_DS_ADD_RTN_U64, - DSOp.DS_SUB_RTN_U64: _DSOp_DS_SUB_RTN_U64, - DSOp.DS_RSUB_RTN_U64: _DSOp_DS_RSUB_RTN_U64, - DSOp.DS_INC_RTN_U64: _DSOp_DS_INC_RTN_U64, - DSOp.DS_DEC_RTN_U64: _DSOp_DS_DEC_RTN_U64, - DSOp.DS_MIN_RTN_I64: _DSOp_DS_MIN_RTN_I64, - DSOp.DS_MAX_RTN_I64: _DSOp_DS_MAX_RTN_I64, - DSOp.DS_MIN_RTN_U64: _DSOp_DS_MIN_RTN_U64, - DSOp.DS_MAX_RTN_U64: _DSOp_DS_MAX_RTN_U64, - DSOp.DS_AND_RTN_B64: _DSOp_DS_AND_RTN_B64, - DSOp.DS_OR_RTN_B64: _DSOp_DS_OR_RTN_B64, - DSOp.DS_XOR_RTN_B64: _DSOp_DS_XOR_RTN_B64, - DSOp.DS_MSKOR_RTN_B64: _DSOp_DS_MSKOR_RTN_B64, - DSOp.DS_WRXCHG_RTN_B64: _DSOp_DS_WRXCHG_RTN_B64, - DSOp.DS_WRXCHG2_RTN_B64: _DSOp_DS_WRXCHG2_RTN_B64, - DSOp.DS_WRXCHG2ST64_RTN_B64: _DSOp_DS_WRXCHG2ST64_RTN_B64, - DSOp.DS_CMPST_RTN_B64: _DSOp_DS_CMPST_RTN_B64, - DSOp.DS_CMPST_RTN_F64: _DSOp_DS_CMPST_RTN_F64, - DSOp.DS_MIN_RTN_F64: _DSOp_DS_MIN_RTN_F64, - DSOp.DS_MAX_RTN_F64: _DSOp_DS_MAX_RTN_F64, - DSOp.DS_READ_B64: _DSOp_DS_READ_B64, - DSOp.DS_READ2_B64: _DSOp_DS_READ2_B64, - DSOp.DS_READ2ST64_B64: _DSOp_DS_READ2ST64_B64, - DSOp.DS_ADD_RTN_F64: 
_DSOp_DS_ADD_RTN_F64, - DSOp.DS_CONDXCHG32_RTN_B64: _DSOp_DS_CONDXCHG32_RTN_B64, - DSOp.DS_PK_ADD_RTN_F16: _DSOp_DS_PK_ADD_RTN_F16, - DSOp.DS_PK_ADD_RTN_BF16: _DSOp_DS_PK_ADD_RTN_BF16, - DSOp.DS_WRITE_B96: _DSOp_DS_WRITE_B96, - DSOp.DS_WRITE_B128: _DSOp_DS_WRITE_B128, - DSOp.DS_READ_B96: _DSOp_DS_READ_B96, - DSOp.DS_READ_B128: _DSOp_DS_READ_B128, -} - -def _FLATOp_FLAT_LOAD_UBYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA.u32 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_SBYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA.i32 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_USHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA.u32 = (_pack(0, MEM[addr].u16)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_SSHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA.i32 = (signext(MEM[addr].i16)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_DWORD(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_DWORDX2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = MEM[addr + 4].b32 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_DWORDX3(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = MEM[addr + 4].b32 - VDATA[95 : 64] = MEM[addr + 8].b32 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_DWORDX4(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = MEM[addr + 4].b32 - VDATA[95 : 64] = MEM[addr + 8].b32 - VDATA[127 : 96] = MEM[addr + 12].b32 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_STORE_BYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - MEM[addr].b8 = VDATA[7 : 0] - return {} - -def _FLATOp_FLAT_STORE_BYTE_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - MEM[addr].b8 = VDATA[23 : 16] - return {} - -def _FLATOp_FLAT_STORE_SHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - MEM[addr].b16 = VDATA[15 : 0] - 
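# Editor's note (not part of the deleted file): the FLAT handlers above all drive memory
# through one protocol -- MEM[addr] yields a typed view supporting reads and writes such
# as .b32, .u16 or .u8. The real Mem object lives in extra/assembly/amd/pcode.py; the
# dict-backed sketch below only illustrates that protocol, and the class names are made up.
class _SketchMemView:
  _WIDTH = {'b8': 1, 'u8': 1, 'i8': 1, 'b16': 2, 'u16': 2, 'i16': 2, 'b32': 4, 'u32': 4, 'i32': 4, 'b64': 8, 'u64': 8, 'i64': 8}
  def __init__(self, mem, addr):
    object.__setattr__(self, '_mem', mem); object.__setattr__(self, '_addr', addr)
  def __getattr__(self, ty):
    n = self._WIDTH[ty]
    val = int.from_bytes(bytes(self._mem.data.get(self._addr + i, 0) for i in range(n)), 'little')
    return val - (1 << 8 * n) if ty[0] == 'i' and val >> (8 * n - 1) else val  # sign-extend i8/i16/...
  def __setattr__(self, ty, val):
    n = self._WIDTH[ty]
    for i, b in enumerate(int(val & ((1 << 8 * n) - 1)).to_bytes(n, 'little')): self._mem.data[self._addr + i] = b
class _SketchMem:
  def __init__(self): self.data = {}
  def __getitem__(self, addr): return _SketchMemView(self, addr)
# usage, mirroring what e.g. _FLATOp_FLAT_STORE_SHORT / _FLATOp_FLAT_LOAD_USHORT do:
mem = _SketchMem()
mem[0x100].b16 = 0xBEEF          # MEM[addr].b16 = VDATA[15 : 0]
assert mem[0x100].u16 == 0xBEEF  # VDATA.u32 = (_pack(0, MEM[addr].u16))
assert mem[0x101].i8 == -0x42    # high byte 0xBE read back sign-extended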
return {} - -def _FLATOp_FLAT_STORE_SHORT_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - MEM[addr].b16 = VDATA[31 : 16] - return {} - -def _FLATOp_FLAT_STORE_DWORD(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - return {} - -def _FLATOp_FLAT_STORE_DWORDX2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - return {} - -def _FLATOp_FLAT_STORE_DWORDX3(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - MEM[addr + 8].b32 = VDATA[95 : 64] - return {} - -def _FLATOp_FLAT_STORE_DWORDX4(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - MEM[addr + 8].b32 = VDATA[95 : 64] - MEM[addr + 12].b32 = VDATA[127 : 96] - return {} - -def _FLATOp_FLAT_LOAD_UBYTE_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[15 : 0].u16 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_UBYTE_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[31 : 16].u16 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_SBYTE_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[15 : 0].i16 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_SBYTE_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[31 : 16].i16 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_SHORT_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[15 : 0].b16 = MEM[addr].b16 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_SHORT_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - VDATA[31 : 16].b16 = MEM[addr].b16 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_ATOMIC_SWAP(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = DATA.b32 - RETURN_DATA.b32 = tmp - 
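# Editor's note (not part of the deleted file): the *_D16 / *_D16_HI loads above write
# only half of the 32-bit destination VGPR (VDATA[15 : 0] for D16, VDATA[31 : 16] for
# D16_HI) and must preserve the other half. In plain integers the Reg slice-assignment
# amounts to this merge (hypothetical helper, shown only to spell out the semantics):
def d16_hi_merge(vdata, loaded16):
  return (vdata & 0x0000FFFF) | ((loaded16 & 0xFFFF) << 16)  # keep low half, replace high half
assert d16_hi_merge(0x12345678, 0xBEEF) == 0xBEEF5678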
return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_CMPSWAP(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA[31 : 0].u32 - cmp = DATA[63 : 32].u32 - MEM[addr].u32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_ADD(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SUB(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SMIN(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_UMIN(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SMAX(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_UMAX(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_AND(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_OR(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_XOR(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp ^ 
DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_INC(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_DEC(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_ADD_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - MEM[ADDR].f32 += DATA.f32 - RETURN_DATA = tmp - return {} - -def _FLATOp_FLAT_ATOMIC_PK_ADD_F16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR]) - src = DATA - dst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16 - dst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16 - MEM[ADDR] = dst.b32 - RETURN_DATA = tmp - return {} - -def _FLATOp_FLAT_ATOMIC_ADD_F64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - MEM[ADDR].f64 += DATA.f64 - RETURN_DATA = tmp - return {} - -def _FLATOp_FLAT_ATOMIC_MIN_F64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].f64) - src = DATA.f64 - MEM[addr].f64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MAX_F64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].f64) - src = DATA.f64 - MEM[addr].f64 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_PK_ADD_BF16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR]) - src = DATA - dst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16 - dst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16 - MEM[ADDR] = dst.b32 - RETURN_DATA = tmp - return {} - -def _FLATOp_FLAT_ATOMIC_SWAP_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_CMPSWAP_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA[63 : 0].u64 - cmp = DATA[127 : 64].u64 - MEM[addr].u64 = ((src) if (tmp == cmp) else (tmp)) 
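# Editor's note (not part of the deleted file): two of the FLAT atomic update rules are
# easy to misread. FLAT_ATOMIC_CMPSWAP packs the store value into the low half of DATA and
# the compare value into the high half (src = DATA[31 : 0], cmp = DATA[63 : 32]; the _X2
# form doubles both widths). FLAT_ATOMIC_INC wraps to zero once memory reaches the operand,
# and FLAT_ATOMIC_DEC reloads the operand on underflow. In every case the handler returns
# the pre-op memory value through RETURN_DATA. A pure-Python restatement of the three
# update rules (function names are illustrative only):
def cmpswap32(tmp, data):
  src, cmp_ = data & 0xFFFFFFFF, data >> 32  # src in low dword, cmp in high dword
  return src if tmp == cmp_ else tmp
def atomic_inc(tmp, src):
  return 0 if tmp >= src else tmp + 1
def atomic_dec(tmp, src):
  return src if (tmp == 0 or tmp > src) else tmp - 1
assert cmpswap32(7, (7 << 32) | 9) == 9                          # compare hit: store new value
assert cmpswap32(8, (7 << 32) | 9) == 8                          # compare miss: memory unchanged
assert [atomic_inc(v, 3) for v in (0, 1, 2, 3)] == [1, 2, 3, 0]  # wraps to 0 at the operand
assert [atomic_dec(v, 3) for v in (0, 1, 4)] == [3, 0, 3]        # reloads operand on underflow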
- RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_ADD_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SUB_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SMIN_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_UMIN_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SMAX_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_UMAX_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_AND_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_OR_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_XOR_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_INC_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((0) if 
(tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_DEC_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcFlatAddr(ADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -FLATOp_FUNCTIONS = { - FLATOp.FLAT_LOAD_UBYTE: _FLATOp_FLAT_LOAD_UBYTE, - FLATOp.FLAT_LOAD_SBYTE: _FLATOp_FLAT_LOAD_SBYTE, - FLATOp.FLAT_LOAD_USHORT: _FLATOp_FLAT_LOAD_USHORT, - FLATOp.FLAT_LOAD_SSHORT: _FLATOp_FLAT_LOAD_SSHORT, - FLATOp.FLAT_LOAD_DWORD: _FLATOp_FLAT_LOAD_DWORD, - FLATOp.FLAT_LOAD_DWORDX2: _FLATOp_FLAT_LOAD_DWORDX2, - FLATOp.FLAT_LOAD_DWORDX3: _FLATOp_FLAT_LOAD_DWORDX3, - FLATOp.FLAT_LOAD_DWORDX4: _FLATOp_FLAT_LOAD_DWORDX4, - FLATOp.FLAT_STORE_BYTE: _FLATOp_FLAT_STORE_BYTE, - FLATOp.FLAT_STORE_BYTE_D16_HI: _FLATOp_FLAT_STORE_BYTE_D16_HI, - FLATOp.FLAT_STORE_SHORT: _FLATOp_FLAT_STORE_SHORT, - FLATOp.FLAT_STORE_SHORT_D16_HI: _FLATOp_FLAT_STORE_SHORT_D16_HI, - FLATOp.FLAT_STORE_DWORD: _FLATOp_FLAT_STORE_DWORD, - FLATOp.FLAT_STORE_DWORDX2: _FLATOp_FLAT_STORE_DWORDX2, - FLATOp.FLAT_STORE_DWORDX3: _FLATOp_FLAT_STORE_DWORDX3, - FLATOp.FLAT_STORE_DWORDX4: _FLATOp_FLAT_STORE_DWORDX4, - FLATOp.FLAT_LOAD_UBYTE_D16: _FLATOp_FLAT_LOAD_UBYTE_D16, - FLATOp.FLAT_LOAD_UBYTE_D16_HI: _FLATOp_FLAT_LOAD_UBYTE_D16_HI, - FLATOp.FLAT_LOAD_SBYTE_D16: _FLATOp_FLAT_LOAD_SBYTE_D16, - FLATOp.FLAT_LOAD_SBYTE_D16_HI: _FLATOp_FLAT_LOAD_SBYTE_D16_HI, - FLATOp.FLAT_LOAD_SHORT_D16: _FLATOp_FLAT_LOAD_SHORT_D16, - FLATOp.FLAT_LOAD_SHORT_D16_HI: _FLATOp_FLAT_LOAD_SHORT_D16_HI, - FLATOp.FLAT_ATOMIC_SWAP: _FLATOp_FLAT_ATOMIC_SWAP, - FLATOp.FLAT_ATOMIC_CMPSWAP: _FLATOp_FLAT_ATOMIC_CMPSWAP, - FLATOp.FLAT_ATOMIC_ADD: _FLATOp_FLAT_ATOMIC_ADD, - FLATOp.FLAT_ATOMIC_SUB: _FLATOp_FLAT_ATOMIC_SUB, - FLATOp.FLAT_ATOMIC_SMIN: _FLATOp_FLAT_ATOMIC_SMIN, - FLATOp.FLAT_ATOMIC_UMIN: _FLATOp_FLAT_ATOMIC_UMIN, - FLATOp.FLAT_ATOMIC_SMAX: _FLATOp_FLAT_ATOMIC_SMAX, - FLATOp.FLAT_ATOMIC_UMAX: _FLATOp_FLAT_ATOMIC_UMAX, - FLATOp.FLAT_ATOMIC_AND: _FLATOp_FLAT_ATOMIC_AND, - FLATOp.FLAT_ATOMIC_OR: _FLATOp_FLAT_ATOMIC_OR, - FLATOp.FLAT_ATOMIC_XOR: _FLATOp_FLAT_ATOMIC_XOR, - FLATOp.FLAT_ATOMIC_INC: _FLATOp_FLAT_ATOMIC_INC, - FLATOp.FLAT_ATOMIC_DEC: _FLATOp_FLAT_ATOMIC_DEC, - FLATOp.FLAT_ATOMIC_ADD_F32: _FLATOp_FLAT_ATOMIC_ADD_F32, - FLATOp.FLAT_ATOMIC_PK_ADD_F16: _FLATOp_FLAT_ATOMIC_PK_ADD_F16, - FLATOp.FLAT_ATOMIC_ADD_F64: _FLATOp_FLAT_ATOMIC_ADD_F64, - FLATOp.FLAT_ATOMIC_MIN_F64: _FLATOp_FLAT_ATOMIC_MIN_F64, - FLATOp.FLAT_ATOMIC_MAX_F64: _FLATOp_FLAT_ATOMIC_MAX_F64, - FLATOp.FLAT_ATOMIC_PK_ADD_BF16: _FLATOp_FLAT_ATOMIC_PK_ADD_BF16, - FLATOp.FLAT_ATOMIC_SWAP_X2: _FLATOp_FLAT_ATOMIC_SWAP_X2, - FLATOp.FLAT_ATOMIC_CMPSWAP_X2: _FLATOp_FLAT_ATOMIC_CMPSWAP_X2, - FLATOp.FLAT_ATOMIC_ADD_X2: _FLATOp_FLAT_ATOMIC_ADD_X2, - FLATOp.FLAT_ATOMIC_SUB_X2: _FLATOp_FLAT_ATOMIC_SUB_X2, - FLATOp.FLAT_ATOMIC_SMIN_X2: _FLATOp_FLAT_ATOMIC_SMIN_X2, - FLATOp.FLAT_ATOMIC_UMIN_X2: _FLATOp_FLAT_ATOMIC_UMIN_X2, - FLATOp.FLAT_ATOMIC_SMAX_X2: _FLATOp_FLAT_ATOMIC_SMAX_X2, - FLATOp.FLAT_ATOMIC_UMAX_X2: _FLATOp_FLAT_ATOMIC_UMAX_X2, - FLATOp.FLAT_ATOMIC_AND_X2: _FLATOp_FLAT_ATOMIC_AND_X2, - FLATOp.FLAT_ATOMIC_OR_X2: _FLATOp_FLAT_ATOMIC_OR_X2, - FLATOp.FLAT_ATOMIC_XOR_X2: _FLATOp_FLAT_ATOMIC_XOR_X2, - FLATOp.FLAT_ATOMIC_INC_X2: _FLATOp_FLAT_ATOMIC_INC_X2, - FLATOp.FLAT_ATOMIC_DEC_X2: 
_FLATOp_FLAT_ATOMIC_DEC_X2, -} - -def _GLOBALOp_GLOBAL_LOAD_UBYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA.u32 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_SBYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA.i32 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_USHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA.u32 = (_pack(0, MEM[addr].u16)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_SSHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA.i32 = (signext(MEM[addr].i16)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_DWORD(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_DWORDX2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = MEM[addr + 4].b32 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_DWORDX3(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = MEM[addr + 4].b32 - VDATA[95 : 64] = MEM[addr + 8].b32 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_DWORDX4(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = MEM[addr + 4].b32 - VDATA[95 : 64] = MEM[addr + 8].b32 - VDATA[127 : 96] = MEM[addr + 12].b32 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_STORE_BYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b8 = VDATA[7 : 0] - return {} - -def _GLOBALOp_GLOBAL_STORE_BYTE_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b8 = VDATA[23 : 16] - return {} - -def _GLOBALOp_GLOBAL_STORE_SHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b16 = VDATA[15 : 0] - return {} - -def _GLOBALOp_GLOBAL_STORE_SHORT_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); 
RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b16 = VDATA[31 : 16] - return {} - -def _GLOBALOp_GLOBAL_STORE_DWORD(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - return {} - -def _GLOBALOp_GLOBAL_STORE_DWORDX2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - return {} - -def _GLOBALOp_GLOBAL_STORE_DWORDX3(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - MEM[addr + 8].b32 = VDATA[95 : 64] - return {} - -def _GLOBALOp_GLOBAL_STORE_DWORDX4(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - MEM[addr + 8].b32 = VDATA[95 : 64] - MEM[addr + 12].b32 = VDATA[127 : 96] - return {} - -def _GLOBALOp_GLOBAL_LOAD_UBYTE_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[15 : 0].u16 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_UBYTE_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 16].u16 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_SBYTE_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[15 : 0].i16 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_SBYTE_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 16].i16 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_SHORT_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[15 : 0].b16 = MEM[addr].b16 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_SHORT_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 16].b16 = MEM[addr].b16 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SWAP(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - 
tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = DATA.b32 - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA[31 : 0].u32 - cmp = DATA[63 : 32].u32 - MEM[addr].u32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_ADD(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SUB(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SMIN(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_UMIN(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SMAX(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_UMAX(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_AND(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_OR(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def 
_GLOBALOp_GLOBAL_ATOMIC_XOR(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_INC(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_DEC(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_ADD_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - MEM[ADDR].f32 += DATA.f32 - RETURN_DATA = tmp - return {} - -def _GLOBALOp_GLOBAL_ATOMIC_PK_ADD_F16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR]) - src = DATA - dst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16 - dst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16 - MEM[ADDR] = dst.b32 - RETURN_DATA = tmp - return {} - -def _GLOBALOp_GLOBAL_ATOMIC_ADD_F64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - MEM[ADDR].f64 += DATA.f64 - RETURN_DATA = tmp - return {} - -def _GLOBALOp_GLOBAL_ATOMIC_MIN_F64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].f64) - src = DATA.f64 - MEM[addr].f64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MAX_F64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].f64) - src = DATA.f64 - MEM[addr].f64 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_PK_ADD_BF16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR]) - src = DATA - dst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16 - dst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16 - MEM[ADDR] = dst.b32 - RETURN_DATA = tmp - return {} - -def _GLOBALOp_GLOBAL_ATOMIC_SWAP_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return 
{'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA[63 : 0].u64 - cmp = DATA[127 : 64].u64 - MEM[addr].u64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_ADD_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SUB_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SMIN_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_UMIN_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SMAX_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_UMAX_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_AND_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_OR_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_XOR_X2(MEM, addr, vdata, vdst): - ADDR=addr; 
VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_INC_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_DEC_X2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -GLOBALOp_FUNCTIONS = { - GLOBALOp.GLOBAL_LOAD_UBYTE: _GLOBALOp_GLOBAL_LOAD_UBYTE, - GLOBALOp.GLOBAL_LOAD_SBYTE: _GLOBALOp_GLOBAL_LOAD_SBYTE, - GLOBALOp.GLOBAL_LOAD_USHORT: _GLOBALOp_GLOBAL_LOAD_USHORT, - GLOBALOp.GLOBAL_LOAD_SSHORT: _GLOBALOp_GLOBAL_LOAD_SSHORT, - GLOBALOp.GLOBAL_LOAD_DWORD: _GLOBALOp_GLOBAL_LOAD_DWORD, - GLOBALOp.GLOBAL_LOAD_DWORDX2: _GLOBALOp_GLOBAL_LOAD_DWORDX2, - GLOBALOp.GLOBAL_LOAD_DWORDX3: _GLOBALOp_GLOBAL_LOAD_DWORDX3, - GLOBALOp.GLOBAL_LOAD_DWORDX4: _GLOBALOp_GLOBAL_LOAD_DWORDX4, - GLOBALOp.GLOBAL_STORE_BYTE: _GLOBALOp_GLOBAL_STORE_BYTE, - GLOBALOp.GLOBAL_STORE_BYTE_D16_HI: _GLOBALOp_GLOBAL_STORE_BYTE_D16_HI, - GLOBALOp.GLOBAL_STORE_SHORT: _GLOBALOp_GLOBAL_STORE_SHORT, - GLOBALOp.GLOBAL_STORE_SHORT_D16_HI: _GLOBALOp_GLOBAL_STORE_SHORT_D16_HI, - GLOBALOp.GLOBAL_STORE_DWORD: _GLOBALOp_GLOBAL_STORE_DWORD, - GLOBALOp.GLOBAL_STORE_DWORDX2: _GLOBALOp_GLOBAL_STORE_DWORDX2, - GLOBALOp.GLOBAL_STORE_DWORDX3: _GLOBALOp_GLOBAL_STORE_DWORDX3, - GLOBALOp.GLOBAL_STORE_DWORDX4: _GLOBALOp_GLOBAL_STORE_DWORDX4, - GLOBALOp.GLOBAL_LOAD_UBYTE_D16: _GLOBALOp_GLOBAL_LOAD_UBYTE_D16, - GLOBALOp.GLOBAL_LOAD_UBYTE_D16_HI: _GLOBALOp_GLOBAL_LOAD_UBYTE_D16_HI, - GLOBALOp.GLOBAL_LOAD_SBYTE_D16: _GLOBALOp_GLOBAL_LOAD_SBYTE_D16, - GLOBALOp.GLOBAL_LOAD_SBYTE_D16_HI: _GLOBALOp_GLOBAL_LOAD_SBYTE_D16_HI, - GLOBALOp.GLOBAL_LOAD_SHORT_D16: _GLOBALOp_GLOBAL_LOAD_SHORT_D16, - GLOBALOp.GLOBAL_LOAD_SHORT_D16_HI: _GLOBALOp_GLOBAL_LOAD_SHORT_D16_HI, - GLOBALOp.GLOBAL_ATOMIC_SWAP: _GLOBALOp_GLOBAL_ATOMIC_SWAP, - GLOBALOp.GLOBAL_ATOMIC_CMPSWAP: _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP, - GLOBALOp.GLOBAL_ATOMIC_ADD: _GLOBALOp_GLOBAL_ATOMIC_ADD, - GLOBALOp.GLOBAL_ATOMIC_SUB: _GLOBALOp_GLOBAL_ATOMIC_SUB, - GLOBALOp.GLOBAL_ATOMIC_SMIN: _GLOBALOp_GLOBAL_ATOMIC_SMIN, - GLOBALOp.GLOBAL_ATOMIC_UMIN: _GLOBALOp_GLOBAL_ATOMIC_UMIN, - GLOBALOp.GLOBAL_ATOMIC_SMAX: _GLOBALOp_GLOBAL_ATOMIC_SMAX, - GLOBALOp.GLOBAL_ATOMIC_UMAX: _GLOBALOp_GLOBAL_ATOMIC_UMAX, - GLOBALOp.GLOBAL_ATOMIC_AND: _GLOBALOp_GLOBAL_ATOMIC_AND, - GLOBALOp.GLOBAL_ATOMIC_OR: _GLOBALOp_GLOBAL_ATOMIC_OR, - GLOBALOp.GLOBAL_ATOMIC_XOR: _GLOBALOp_GLOBAL_ATOMIC_XOR, - GLOBALOp.GLOBAL_ATOMIC_INC: _GLOBALOp_GLOBAL_ATOMIC_INC, - GLOBALOp.GLOBAL_ATOMIC_DEC: _GLOBALOp_GLOBAL_ATOMIC_DEC, - GLOBALOp.GLOBAL_ATOMIC_ADD_F32: _GLOBALOp_GLOBAL_ATOMIC_ADD_F32, - GLOBALOp.GLOBAL_ATOMIC_PK_ADD_F16: _GLOBALOp_GLOBAL_ATOMIC_PK_ADD_F16, - GLOBALOp.GLOBAL_ATOMIC_ADD_F64: _GLOBALOp_GLOBAL_ATOMIC_ADD_F64, - 
GLOBALOp.GLOBAL_ATOMIC_MIN_F64: _GLOBALOp_GLOBAL_ATOMIC_MIN_F64, - GLOBALOp.GLOBAL_ATOMIC_MAX_F64: _GLOBALOp_GLOBAL_ATOMIC_MAX_F64, - GLOBALOp.GLOBAL_ATOMIC_PK_ADD_BF16: _GLOBALOp_GLOBAL_ATOMIC_PK_ADD_BF16, - GLOBALOp.GLOBAL_ATOMIC_SWAP_X2: _GLOBALOp_GLOBAL_ATOMIC_SWAP_X2, - GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_X2: _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP_X2, - GLOBALOp.GLOBAL_ATOMIC_ADD_X2: _GLOBALOp_GLOBAL_ATOMIC_ADD_X2, - GLOBALOp.GLOBAL_ATOMIC_SUB_X2: _GLOBALOp_GLOBAL_ATOMIC_SUB_X2, - GLOBALOp.GLOBAL_ATOMIC_SMIN_X2: _GLOBALOp_GLOBAL_ATOMIC_SMIN_X2, - GLOBALOp.GLOBAL_ATOMIC_UMIN_X2: _GLOBALOp_GLOBAL_ATOMIC_UMIN_X2, - GLOBALOp.GLOBAL_ATOMIC_SMAX_X2: _GLOBALOp_GLOBAL_ATOMIC_SMAX_X2, - GLOBALOp.GLOBAL_ATOMIC_UMAX_X2: _GLOBALOp_GLOBAL_ATOMIC_UMAX_X2, - GLOBALOp.GLOBAL_ATOMIC_AND_X2: _GLOBALOp_GLOBAL_ATOMIC_AND_X2, - GLOBALOp.GLOBAL_ATOMIC_OR_X2: _GLOBALOp_GLOBAL_ATOMIC_OR_X2, - GLOBALOp.GLOBAL_ATOMIC_XOR_X2: _GLOBALOp_GLOBAL_ATOMIC_XOR_X2, - GLOBALOp.GLOBAL_ATOMIC_INC_X2: _GLOBALOp_GLOBAL_ATOMIC_INC_X2, - GLOBALOp.GLOBAL_ATOMIC_DEC_X2: _GLOBALOp_GLOBAL_ATOMIC_DEC_X2, -} - -def _SCRATCHOp_SCRATCH_LOAD_UBYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA.u32 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_SBYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA.i32 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_USHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA.u32 = (_pack(0, MEM[addr].u16)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_SSHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA.i32 = (signext(MEM[addr].i16)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_DWORD(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_DWORDX2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = MEM[addr + 4].b32 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_DWORDX3(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = MEM[addr + 4].b32 - VDATA[95 : 64] = MEM[addr + 8].b32 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_DWORDX4(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 0] = MEM[addr].b32 - VDATA[63 : 32] = 
MEM[addr + 4].b32 - VDATA[95 : 64] = MEM[addr + 8].b32 - VDATA[127 : 96] = MEM[addr + 12].b32 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_STORE_BYTE(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b8 = VDATA[7 : 0] - return {} - -def _SCRATCHOp_SCRATCH_STORE_BYTE_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b8 = VDATA[23 : 16] - return {} - -def _SCRATCHOp_SCRATCH_STORE_SHORT(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b16 = VDATA[15 : 0] - return {} - -def _SCRATCHOp_SCRATCH_STORE_SHORT_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b16 = VDATA[31 : 16] - return {} - -def _SCRATCHOp_SCRATCH_STORE_DWORD(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - return {} - -def _SCRATCHOp_SCRATCH_STORE_DWORDX2(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - return {} - -def _SCRATCHOp_SCRATCH_STORE_DWORDX3(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - MEM[addr + 8].b32 = VDATA[95 : 64] - return {} - -def _SCRATCHOp_SCRATCH_STORE_DWORDX4(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - MEM[addr].b32 = VDATA[31 : 0] - MEM[addr + 4].b32 = VDATA[63 : 32] - MEM[addr + 8].b32 = VDATA[95 : 64] - MEM[addr + 12].b32 = VDATA[127 : 96] - return {} - -def _SCRATCHOp_SCRATCH_LOAD_UBYTE_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[15 : 0].u16 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_UBYTE_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 16].u16 = (_pack(0, MEM[addr].u8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_SBYTE_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[15 : 0].i16 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_SBYTE_D16_HI(MEM, 
addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 16].i16 = (signext(MEM[addr].i8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_SHORT_D16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[15 : 0].b16 = MEM[addr].b16 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_SHORT_D16_HI(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32) - VDATA[31 : 16].b16 = MEM[addr].b16 - return {'VDATA': VDATA._val} - -SCRATCHOp_FUNCTIONS = { - SCRATCHOp.SCRATCH_LOAD_UBYTE: _SCRATCHOp_SCRATCH_LOAD_UBYTE, - SCRATCHOp.SCRATCH_LOAD_SBYTE: _SCRATCHOp_SCRATCH_LOAD_SBYTE, - SCRATCHOp.SCRATCH_LOAD_USHORT: _SCRATCHOp_SCRATCH_LOAD_USHORT, - SCRATCHOp.SCRATCH_LOAD_SSHORT: _SCRATCHOp_SCRATCH_LOAD_SSHORT, - SCRATCHOp.SCRATCH_LOAD_DWORD: _SCRATCHOp_SCRATCH_LOAD_DWORD, - SCRATCHOp.SCRATCH_LOAD_DWORDX2: _SCRATCHOp_SCRATCH_LOAD_DWORDX2, - SCRATCHOp.SCRATCH_LOAD_DWORDX3: _SCRATCHOp_SCRATCH_LOAD_DWORDX3, - SCRATCHOp.SCRATCH_LOAD_DWORDX4: _SCRATCHOp_SCRATCH_LOAD_DWORDX4, - SCRATCHOp.SCRATCH_STORE_BYTE: _SCRATCHOp_SCRATCH_STORE_BYTE, - SCRATCHOp.SCRATCH_STORE_BYTE_D16_HI: _SCRATCHOp_SCRATCH_STORE_BYTE_D16_HI, - SCRATCHOp.SCRATCH_STORE_SHORT: _SCRATCHOp_SCRATCH_STORE_SHORT, - SCRATCHOp.SCRATCH_STORE_SHORT_D16_HI: _SCRATCHOp_SCRATCH_STORE_SHORT_D16_HI, - SCRATCHOp.SCRATCH_STORE_DWORD: _SCRATCHOp_SCRATCH_STORE_DWORD, - SCRATCHOp.SCRATCH_STORE_DWORDX2: _SCRATCHOp_SCRATCH_STORE_DWORDX2, - SCRATCHOp.SCRATCH_STORE_DWORDX3: _SCRATCHOp_SCRATCH_STORE_DWORDX3, - SCRATCHOp.SCRATCH_STORE_DWORDX4: _SCRATCHOp_SCRATCH_STORE_DWORDX4, - SCRATCHOp.SCRATCH_LOAD_UBYTE_D16: _SCRATCHOp_SCRATCH_LOAD_UBYTE_D16, - SCRATCHOp.SCRATCH_LOAD_UBYTE_D16_HI: _SCRATCHOp_SCRATCH_LOAD_UBYTE_D16_HI, - SCRATCHOp.SCRATCH_LOAD_SBYTE_D16: _SCRATCHOp_SCRATCH_LOAD_SBYTE_D16, - SCRATCHOp.SCRATCH_LOAD_SBYTE_D16_HI: _SCRATCHOp_SCRATCH_LOAD_SBYTE_D16_HI, - SCRATCHOp.SCRATCH_LOAD_SHORT_D16: _SCRATCHOp_SCRATCH_LOAD_SHORT_D16, - SCRATCHOp.SCRATCH_LOAD_SHORT_D16_HI: _SCRATCHOp_SCRATCH_LOAD_SHORT_D16_HI, -} - -COMPILED_FUNCTIONS = { - SOP1Op: SOP1Op_FUNCTIONS, - SOP2Op: SOP2Op_FUNCTIONS, - SOPCOp: SOPCOp_FUNCTIONS, - SOPKOp: SOPKOp_FUNCTIONS, - SOPPOp: SOPPOp_FUNCTIONS, - SMEMOp: SMEMOp_FUNCTIONS, - VOP1Op: VOP1Op_FUNCTIONS, - VOP2Op: VOP2Op_FUNCTIONS, - VOP3POp: VOP3POp_FUNCTIONS, - VOPCOp: VOPCOp_FUNCTIONS, - VOP3AOp: VOP3AOp_FUNCTIONS, - VOP3BOp: VOP3BOp_FUNCTIONS, - DSOp: DSOp_FUNCTIONS, - FLATOp: FLATOp_FUNCTIONS, - GLOBALOp: GLOBALOp_FUNCTIONS, - SCRATCHOp: SCRATCHOp_FUNCTIONS, -}
\ No newline at end of file
diff --git a/extra/assembly/amd/autogen/cdna/str_pcode.py b/extra/assembly/amd/autogen/cdna/str_pcode.py
new file mode 100644
index 0000000000..63c1d1fe6a
--- /dev/null
+++ b/extra/assembly/amd/autogen/cdna/str_pcode.py
@@ -0,0 +1,1421 @@
+# autogenerated by pdf.py - do not edit +# to regenerate: python -m extra.assembly.amd.pdf --arch cdna +# ruff: noqa: E501 +from extra.assembly.amd.autogen.cdna.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3POp, VOPCOp, VOP3AOp, VOP3BOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp + +SOP1Op_PCODE = { + SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32', + 
SOP1Op.S_MOV_B64: 'D0.b64 = S0.b64', + SOP1Op.S_CMOV_B32: 'if SCC then\nD0.b32 = S0.b32\nendif', + SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif', + SOP1Op.S_NOT_B32: 'D0.u32 = ~S0.u32;\nSCC = D0.u32 != 0U', + SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_WQM_B32: "tmp = 0U;\ndeclare i : 6'U;\nfor i in 6'0U : 6'31U do\ntmp[i] = S0.u32[i & 6'60U +: 6'4U] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]', + SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_FF0_I32_B32: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'0U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FF0_I32_B64: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'0U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FF1_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FF1_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FLBIT_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FLBIT_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FLBIT_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_FLBIT_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))", + SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U", + SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U", + SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U", + SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U", + SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL', + SOP1Op.S_SETPC_B64: 'PC = S0.i64', + SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64', + SOP1Op.S_RFE_B64: 'PC = S0.i64', + SOP1Op.S_AND_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 
= saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XOR_SAVEEXEC_B64: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ANDN2_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ORN2_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NAND_SAVEEXEC_B64: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NOR_SAVEEXEC_B64: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XNOR_SAVEEXEC_B64: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U', + SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = SGPR[addr].b32', + SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b64 = SGPR[addr].b64', + SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b32 = S0.b32', + SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b64 = S0.b64', + SOP1Op.S_CBRANCH_JOIN: "saved_csp = S0.u32;\nif WAVE_MODE.CSP.u32 == saved_csp then\nPC += 4LL;\nelse\nWAVE_MODE.CSP -= 3'1U;\n{ PC, EXEC } = SGPR[WAVE_MODE.CSP.u32 * 4U].b128;\nendif", + SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? 
-S0.i32 : S0.i32;\nSCC = D0.i32 != 0', + SOP1Op.S_SET_GPR_IDX_IDX: 'M0[7 : 0] = S0.u32[7 : 0].b8', + SOP1Op.S_ANDN1_SAVEEXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ORN1_SAVEEXEC_B64: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ANDN1_WREXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ANDN2_WREXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor', +} + +SOP2Op_PCODE = { + SOP2Op.S_ADD_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_SUB_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_ADD_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', + SOP2Op.S_SUB_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', + SOP2Op.S_ADDC_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_SUBB_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MAX_U32: 'SCC = S0.u32 >= S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_CSELECT_B32: 'D0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_CSELECT_B64: 'D0.u64 = SCC ? 
S0.u64 : S1.u64', + SOP2Op.S_AND_B32: 'D0.u32 = (S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_AND_B64: 'D0.u64 = (S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_OR_B32: 'D0.u32 = (S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_OR_B64: 'D0.u64 = (S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XOR_B64: 'D0.u64 = (S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ANDN2_B32: 'D0.u32 = (S0.u32 & ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_ANDN2_B64: 'D0.u64 = (S0.u64 & ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ORN2_B32: 'D0.u32 = (S0.u32 | ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_ORN2_B64: 'D0.u64 = (S0.u64 | ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NAND_B32: 'D0.u32 = ~(S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NAND_B64: 'D0.u64 = ~(S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NOR_B32: 'D0.u32 = ~(S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NOR_B64: 'D0.u64 = ~(S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XNOR_B64: 'D0.u64 = ~(S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0", + SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + SOP2Op.S_BFM_B64: 'D0.u64 = (((1ULL << S0[5 : 0].u32) - 1ULL) << S1[5 : 0].u32)', + SOP2Op.S_MUL_I32: 'D0.i32 = S0.i32 * S1.i32', + SOP2Op.S_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S1[22 : 16].u32) - 1U));\nSCC = D0.u32 != 0U', + SOP2Op.S_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32);\nSCC = D0.i32 != 0', + SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL', + SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_CBRANCH_G_FORK: "S0 = compare mask (VCC or any SGPR) and S1 = 64-bit byte address of target instruction. See also\nmask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\nif mask_pass == EXEC.u64 then\nPC = 64'I(S1.u64)\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { S1.u64, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = 64'I(S1.u64)\nendif", + SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0', + SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 
1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_PACK_LL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_LH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_HH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[31 : 16].u16 }', +} + +SOPCOp_PCODE = { + SOPCOp.S_CMP_EQ_I32: 'SCC = S0.i32 == S1.i32', + SOPCOp.S_CMP_LG_I32: 'SCC = S0.i32 <> S1.i32', + SOPCOp.S_CMP_GT_I32: 'SCC = S0.i32 > S1.i32', + SOPCOp.S_CMP_GE_I32: 'SCC = S0.i32 >= S1.i32', + SOPCOp.S_CMP_LT_I32: 'SCC = S0.i32 < S1.i32', + SOPCOp.S_CMP_LE_I32: 'SCC = S0.i32 <= S1.i32', + SOPCOp.S_CMP_EQ_U32: 'SCC = S0.u32 == S1.u32', + SOPCOp.S_CMP_LG_U32: 'SCC = S0.u32 <> S1.u32', + SOPCOp.S_CMP_GT_U32: 'SCC = S0.u32 > S1.u32', + SOPCOp.S_CMP_GE_U32: 'SCC = S0.u32 >= S1.u32', + SOPCOp.S_CMP_LT_U32: 'SCC = S0.u32 < S1.u32', + SOPCOp.S_CMP_LE_U32: 'SCC = S0.u32 <= S1.u32', + SOPCOp.S_BITCMP0_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U", + SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U", + SOPCOp.S_SETVSKIP: 'VSKIP = S0.u32[S1.u32[4 : 0]]', + SOPCOp.S_SET_GPR_IDX_ON: 'specified in the SRC0 operand. The raw bits of the SRC1 field are read and used to set the enable bits. S1[0] =\nVSRC0_REL, S1[1] = VSRC1_REL, S1[2] = VSRC2_REL and S1[3] = VDST_REL.\nM0[7 : 0] = S0.u32[7 : 0].b8;\n// this is the direct content of raw S1 field', + SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64', + SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64', +} + +SOPKOp_PCODE = { + SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(S0.i16))", + SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(S0.i16))\nendif", + SOPKOp.S_CMPK_EQ_I32: "SCC = S0.i32 == 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_LG_I32: "SCC = S0.i32 != 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_GT_I32: "SCC = S0.i32 > 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_GE_I32: "SCC = S0.i32 >= 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_LT_I32: "SCC = S0.i32 < 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_LE_I32: "SCC = S0.i32 <= 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_EQ_U32: "SCC = S0.u32 == 32'U(S1.u16)", + SOPKOp.S_CMPK_LG_U32: "SCC = S0.u32 != 32'U(S1.u16)", + SOPKOp.S_CMPK_GT_U32: "SCC = S0.u32 > 32'U(S1.u16)", + SOPKOp.S_CMPK_GE_U32: "SCC = S0.u32 >= 32'U(S1.u16)", + SOPKOp.S_CMPK_LT_U32: "SCC = S0.u32 < 32'U(S1.u16)", + SOPKOp.S_CMPK_LE_U32: "SCC = S0.u32 <= 32'U(S1.u16)", + SOPKOp.S_ADDK_I32: "tmp = D0.i32;\nD0.i32 = D0.i32 + 32'I(signext(S0.i16));\nSCC = ((tmp[31] == S0.i16[15]) && (tmp[31] != D0.i32[31]));", + SOPKOp.S_MULK_I32: "D0.i32 = D0.i32 * 32'I(signext(S0.i16))", + SOPKOp.S_CBRANCH_I_FORK: "S0 = compare mask (VCC or any SGPR), and SIMM16 = signed DWORD branch offset relative to next\nmask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\ntarget_addr = PC + signext(SIMM16.i32 * 4) + 4LL;\nif mask_pass == EXEC.u64 then\nPC = target_addr\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { target_addr, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 
4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = target_addr\nendif", + SOPKOp.S_GETREG_B32: "offset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", + SOPKOp.S_SETREG_B32: "offset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_SETREG_IMM32_B32: "offset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL", +} + +SOPPOp_PCODE = { + SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nendfor', + SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;", + SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCZ: "If VCCZ is 1 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCNZ: "If VCCZ is 0 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_TRAP: '// PC passed into trap handler points to S_TRAP itself,\nPC = TBA.i64;\n// trap base address', + SOPPOp.S_CBRANCH_CDBGSYS: "if WAVE_STATUS.COND_DBG_SYS.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGUSER: "if WAVE_STATUS.COND_DBG_USER.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: "if (WAVE_STATUS.COND_DBG_SYS || WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: "if (WAVE_STATUS.COND_DBG_SYS && WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_SET_GPR_IDX_MODE: 'SIMM16[1] = VSRC1_REL, SIMM16[2] = VSRC2_REL and SIMM16[3] = VDST_REL.\nGet Doorbell ID 10 - Returns doorbell into EXEC, with the doorbell physical address in bits', +} + +SMEMOp_PCODE = { + SMEMOp.S_LOAD_DWORD: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32', + SMEMOp.S_LOAD_DWORDX2: 'addr = 
CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', + SMEMOp.S_LOAD_DWORDX4: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', + SMEMOp.S_LOAD_DWORDX8: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32', + SMEMOp.S_LOAD_DWORDX16: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32', + SMEMOp.S_SCRATCH_LOAD_DWORD: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32', + SMEMOp.S_SCRATCH_LOAD_DWORDX2: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', + SMEMOp.S_SCRATCH_LOAD_DWORDX4: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', + SMEMOp.S_BUFFER_LOAD_DWORD: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32', + SMEMOp.S_BUFFER_LOAD_DWORDX2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', + SMEMOp.S_BUFFER_LOAD_DWORDX4: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', + SMEMOp.S_BUFFER_LOAD_DWORDX8: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32', + SMEMOp.S_BUFFER_LOAD_DWORDX16: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 
40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32', + SMEMOp.S_STORE_DWORD: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0]', + SMEMOp.S_STORE_DWORDX2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32]', + SMEMOp.S_STORE_DWORDX4: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32];\nMEM[addr + 8U].b32 = SDATA[95 : 64];\nMEM[addr + 12U].b32 = SDATA[127 : 96]', + SMEMOp.S_SCRATCH_STORE_DWORD: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0]', + SMEMOp.S_SCRATCH_STORE_DWORDX2: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32]', + SMEMOp.S_SCRATCH_STORE_DWORDX4: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32];\nMEM[addr + 8U].b32 = SDATA[95 : 64];\nMEM[addr + 12U].b32 = SDATA[127 : 96]', + SMEMOp.S_BUFFER_STORE_DWORD: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0]', + SMEMOp.S_BUFFER_STORE_DWORDX2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32]', + SMEMOp.S_BUFFER_STORE_DWORDX4: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32];\nMEM[addr + 8U].b32 = SDATA[95 : 64];\nMEM[addr + 12U].b32 = SDATA[127 : 96]', + SMEMOp.S_BUFFER_ATOMIC_SWAP: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_CMPSWAP: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_ADD: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_SUB: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_SMIN: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_UMIN: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_SMAX: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_UMAX: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_AND: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_OR: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_XOR: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_INC: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_DEC: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_BUFFER_ATOMIC_SWAP_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_CMPSWAP_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_ADD_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_SUB_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_SMIN_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_UMIN_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_SMAX_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_UMAX_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_AND_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_OR_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_XOR_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_INC_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_BUFFER_ATOMIC_DEC_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? 
src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_ATOMIC_SWAP: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + SMEMOp.S_ATOMIC_CMPSWAP: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_ATOMIC_ADD: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_ATOMIC_SUB: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_ATOMIC_SMIN: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + SMEMOp.S_ATOMIC_UMIN: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_ATOMIC_SMAX: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + SMEMOp.S_ATOMIC_UMAX: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_ATOMIC_AND: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + SMEMOp.S_ATOMIC_OR: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + SMEMOp.S_ATOMIC_XOR: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + SMEMOp.S_ATOMIC_INC: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_ATOMIC_DEC: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + SMEMOp.S_ATOMIC_SWAP_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + SMEMOp.S_ATOMIC_CMPSWAP_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_ATOMIC_ADD_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_ATOMIC_SUB_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_ATOMIC_SMIN_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? 
src : tmp;\nRETURN_DATA.i64 = tmp', + SMEMOp.S_ATOMIC_UMIN_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_ATOMIC_SMAX_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + SMEMOp.S_ATOMIC_UMAX_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_ATOMIC_AND_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + SMEMOp.S_ATOMIC_OR_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + SMEMOp.S_ATOMIC_XOR_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + SMEMOp.S_ATOMIC_INC_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + SMEMOp.S_ATOMIC_DEC_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', +} + +VOP1Op_PCODE = { + VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32', + VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'I;\nif EXEC == 0x0LL then\nlane = 0;\n// Force lane 0 if all lanes are disabled\nelse\nlane = s_ff1_i32_b64(EXEC);\n// Lowest active lane\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', + VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', + VOP1Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + VOP1Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + VOP1Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + VOP1Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + VOP1Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + VOP1Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + VOP1Op.V_CVT_RPI_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', + VOP1Op.V_CVT_FLR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', + VOP1Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. 
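The scalar atomic entries above share one subtlety worth calling out: S_ATOMIC_INC and S_ATOMIC_DEC are not plain add/sub. Per the pcode, INC wraps the counter to zero once the old value reaches the DATA operand, DEC reloads DATA on zero or unsigned overflow, and the old value is always returned. A minimal Python sketch of just that rule (the dict-as-memory and the function names are illustrative, not the emulator's):

def s_atomic_inc(mem: dict, addr: int, data: int) -> int:
    tmp = mem[addr]                                    # old value is returned
    mem[addr] = 0 if tmp >= data else (tmp + 1) & 0xFFFFFFFF
    return tmp

def s_atomic_dec(mem: dict, addr: int, data: int) -> int:
    tmp = mem[addr]
    mem[addr] = data if (tmp == 0 or tmp > data) else tmp - 1
    return tmp

mem = {0x1000: 3}
assert s_atomic_inc(mem, 0x1000, 3) == 3 and mem[0x1000] == 0   # counter wrapped
assert s_atomic_dec(mem, 0x1000, 3) == 0 and mem[0x1000] == 3   # zero reloads DATA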
Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP1Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', + VOP1Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', + VOP1Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', + VOP1Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', + VOP1Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', + VOP1Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', + VOP1Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', + VOP1Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', + VOP1Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', + VOP1Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', + VOP1Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP1Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP1Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP1Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', + VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', + VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", + VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", + VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP1Op.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP1Op.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP1Op.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', + VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', + VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', + VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", + VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", + VOP1Op.V_MOV_B64: 'D0.b64 = S0.b64', + VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', + VOP1Op.V_CVT_F16_I16: 'D0.f16 = 
i16_to_f16(S0.i16)', + VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", + VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', + VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", + VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', + VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", + VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", + VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", + VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", + VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', + VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", + VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", + VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', + VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', + VOP1Op.V_SAT_PK_U8_I16: "tmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", + VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp', + VOP1Op.V_CVT_F32_FP8: 'if SDWA_SRC0_SEL == BYTE1.b3 then\nD0.f32 = fp8_to_f32(S0[15 : 8].fp8)\nelsif SDWA_SRC0_SEL == BYTE2.b3 then\nD0.f32 = fp8_to_f32(S0[23 : 16].fp8)\nelsif SDWA_SRC0_SEL == BYTE3.b3 then\nD0.f32 = fp8_to_f32(S0[31 : 24].fp8)\nelse\n// BYTE0 implied\nD0.f32 = fp8_to_f32(S0[7 : 0].fp8)\nendif', + VOP1Op.V_CVT_F32_BF8: 'if SDWA_SRC0_SEL == BYTE1.b3 then\nD0.f32 = bf8_to_f32(S0[15 : 8].bf8)\nelsif SDWA_SRC0_SEL == BYTE2.b3 then\nD0.f32 = bf8_to_f32(S0[23 : 16].bf8)\nelsif SDWA_SRC0_SEL == BYTE3.b3 then\nD0.f32 = bf8_to_f32(S0[31 : 24].bf8)\nelse\n// BYTE0 implied\nD0.f32 = bf8_to_f32(S0[7 : 0].bf8)\nendif', + VOP1Op.V_CVT_PK_F32_FP8: 'tmp = SDWA_SRC0_SEL[1 : 0] == WORD1.b2 ? S0[31 : 16] : S0[15 : 0];\nD0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8);\nD0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)', + VOP1Op.V_CVT_PK_F32_BF8: 'tmp = SDWA_SRC0_SEL[1 : 0] == WORD1.b2 ? S0[31 : 16] : S0[15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)', + VOP1Op.V_PRNG_B32: 'in = S0.u32;\nD0.u32 = ((in << 1U) ^ (in[31] ? 197U : 0U))', + VOP1Op.V_PERMLANE16_SWAP_B32: 'for pass in 0 : 1 do\nfor lane in 0 : 15 do\ntmp = VGPR[pass * 32 + lane][SRC0.u32];\nVGPR[pass * 32 + lane][SRC0.u32] = VGPR[pass * 32 + lane + 16][VDST.u32];\nVGPR[pass * 32 + lane + 16][VDST.u32] = tmp\nendfor\nendfor', + VOP1Op.V_PERMLANE32_SWAP_B32: 'for lane in 0 : 31 do\ntmp = VGPR[lane][SRC0.u32];\nVGPR[lane][SRC0.u32] = VGPR[lane + 32][VDST.u32];\nVGPR[lane + 32][VDST.u32] = tmp\nendfor', + VOP1Op.V_CVT_F32_BF16: "D0.f32 = 32'F({ S0.b16, 16'0U })", +} + +VOP2Op_PCODE = { + VOP2Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? 
S1.u32 : S0.u32', + VOP2Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + VOP2Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + VOP2Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', + VOP2Op.V_FMAC_F64: 'D0.f64 = fma(S0.f64, S1.f64, D0.f64)', + VOP2Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + VOP2Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", + VOP2Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", + VOP2Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", + VOP2Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", + VOP2Op.V_MIN_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S1.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S0.f32\nelse\nD0.f32 = S0.f32 < S1.f32 ? S0.f32 : S1.f32\nendif", + VOP2Op.V_MAX_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S1.f32\nelsif WAVE_MODE.IEEE then\nD0.f32 = S0.f32 >= S1.f32 ? S0.f32 : S1.f32\nelse\nD0.f32 = S0.f32 > S1.f32 ? S0.f32 : S1.f32\nendif", + VOP2Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', + VOP2Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', + VOP2Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', + VOP2Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? S0.u32 : S1.u32', + VOP2Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', + VOP2Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', + VOP2Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOP2Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', + VOP2Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', + VOP2Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP2Op.V_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', + VOP2Op.V_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', + VOP2Op.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_ADDC_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUBB_CO_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUBBREV_CO_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + VOP2Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + VOP2Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', + VOP2Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + VOP2Op.V_MAC_F16: "tmp = S0.f16 * S1.f16 + D0.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif", + VOP2Op.V_MADMK_F16: 'tmp = S0.f16 * SIMM16.f16 + S1.f16;', + VOP2Op.V_MADAK_F16: 'tmp = S0.f16 * S1.f16 + SIMM16.f16;', + VOP2Op.V_ADD_U16: 'D0.u16 = S0.u16 + S1.u16', + VOP2Op.V_SUB_U16: 'D0.u16 = S0.u16 - S1.u16', + VOP2Op.V_SUBREV_U16: 'D0.u16 = S1.u16 - S0.u16', + VOP2Op.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16', + VOP2Op.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)', + VOP2Op.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)', + VOP2Op.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)', + VOP2Op.V_MAX_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S1.f16\nelsif WAVE_MODE.IEEE then\nD0.f16 = S0.f16 >= S1.f16 ? S0.f16 : S1.f16\nelse\nD0.f16 = S0.f16 > S1.f16 ? S0.f16 : S1.f16\nendif", + VOP2Op.V_MIN_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S1.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S0.f16\nelse\nD0.f16 = S0.f16 < S1.f16 ? S0.f16 : S1.f16\nendif", + VOP2Op.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16', + VOP2Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16', + VOP2Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16', + VOP2Op.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? 
S0.i16 : S1.i16', + VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP2Op.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32', + VOP2Op.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32', + VOP2Op.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32', + VOP2Op.V_DOT2C_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOP2Op.V_DOT2C_I32_I16: 'tmp = D0.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp', + VOP2Op.V_DOT4C_I32_I8: 'tmp = D0.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp', + VOP2Op.V_DOT8C_I32_I4: 'tmp = D0.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp', + VOP2Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOP2Op.V_PK_FMAC_F16: 'D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16);\nD0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16)', + VOP2Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', + VOP2Op.V_DOT2C_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', +} + +VOP3POp_PCODE = { + VOP3POp.V_PK_MAD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MUL_LO_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ADD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_SUB_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_LSHLREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_LSHRREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ASHRREV_I16: 'tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 >= S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 >= S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MIN_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 < S1[15 : 0].i16 ? 
S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 < S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MAD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_ADD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_SUB_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_MAX_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 >= S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 >= S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_MIN_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 < S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 < S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_FMA_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16);\ntmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_ADD_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\nD0.b32 = tmp", + VOP3POp.V_PK_MUL_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.b32 = tmp", + VOP3POp.V_PK_MIN_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_MAX_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_MAD_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = in[0] * in[1] + in[2]", + VOP3POp.V_MAD_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(in[0] * in[1] + in[2])", + VOP3POp.V_MAD_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(in[0] * in[1] + in[2])", + VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOP3POp.V_DOT2_I32_I16: 'tmp = S2.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp', + VOP3POp.V_DOT2_U32_U16: 'tmp = S2.u32;\ntmp += u16_to_u32(S0[15 : 0].u16) * u16_to_u32(S1[15 : 0].u16);\ntmp += u16_to_u32(S0[31 : 16].u16) * u16_to_u32(S1[31 : 
16].u16);\nD0.u32 = tmp', + VOP3POp.V_DOT4_I32_I8: 'tmp = S2.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp', + VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp', + VOP3POp.V_DOT8_I32_I4: 'tmp = S2.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp', + VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp', + VOP3POp.V_PK_FMA_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = fma(S0[31 : 0].f32, S1[31 : 0].f32, S2[31 : 0].f32);\ntmp[63 : 32].f32 = fma(S0[63 : 32].f32, S1[63 : 32].f32, S2[63 : 32].f32);\nD0.b64 = tmp", + VOP3POp.V_PK_MUL_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 * S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 * S1[63 : 32].f32;\nD0.b64 = tmp", + VOP3POp.V_PK_ADD_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 + S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 + S1[63 : 32].f32;\nD0.b64 = tmp", + VOP3POp.V_PK_MOV_B32: 'tmp0.u32 = S0.u32[OPSEL[0].i32 * 32 + 31 : OPSEL[0].i32 * 32];\ntmp1.u32 = S1.u32[OPSEL[1].i32 * 32 + 31 : OPSEL[1].i32 * 32];\nD0.u32[31 : 0] = tmp0.u32;\nD0.u32[63 : 32] = tmp1.u32', + VOP3POp.V_DOT2_F32_BF16: "tmp = 32'F(S0[15 : 0].bf16) * 32'F(S1[15 : 0].bf16);\ntmp += 32'F(S0[31 : 16].bf16) * 32'F(S1[31 : 16].bf16);\ntmp += S2.f32;\nD0.f32 = tmp", + VOP3POp.V_PK_MINIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_minimum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_minimum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32", + VOP3POp.V_PK_MAXIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_maximum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_maximum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32", +} + +VOPCOp_PCODE = { + VOPCOp.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. 
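The packed dot products above (V_DOT2/V_DOT4/V_DOT8 in their signed and unsigned forms) all follow one pattern: slice the 32-bit sources into lanes, sign- or zero-extend each lane, multiply pairwise, and accumulate into S2. A sketch of V_DOT4_I32_I8 in plain Python; the _i8 helper is made up here, the emulator's own conversions (i8_to_i32 and friends) live in pcode.py:

def _i8(x: int) -> int:
    # sign-extend an 8-bit field to a Python int
    return x - 256 if x >= 128 else x

def v_dot4_i32_i8(s0: int, s1: int, s2: int) -> int:
    tmp = s2
    for byte in range(4):
        a = _i8((s0 >> (8 * byte)) & 0xFF)
        b = _i8((s1 >> (8 * byte)) & 0xFF)
        tmp += a * b
    return tmp & 0xFFFFFFFF  # 32-bit register view of the accumulator

assert v_dot4_i32_i8(0x01020304, 0x01010101, 0) == 1 + 2 + 3 + 4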
Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask and\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOPCOp.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. 
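The class compares test S0 against a ten-bit class mask in S1, and exactly one mask bit is selected per input, following the if/elsif chain in the entries above. A sketch of the F32 bit selection, assuming Python's struct/math view of the float (signaling NANs are folded into the quiet-NAN bit here):

import math, struct

def f32_class_bit(x: float) -> int:
    # Which of the ten S1 mask bits V_CMP_CLASS_F32 would test for this input.
    bits = struct.unpack('<I', struct.pack('<f', x))[0]
    sign = bits >> 31
    exp = (bits >> 23) & 0xFF
    if math.isnan(x): return 1                        # quiet NAN (signaling not modeled)
    if exp == 255: return 2 if sign else 9            # +-INF
    if exp > 0: return 3 if sign else 8               # +-normal
    if bits & 0x7FFFFFFF: return 4 if sign else 7     # +-denormal
    return 5 if sign else 6                           # +-0.0

def v_cmp_class_f32(s0: float, s1_mask: int) -> bool:
    return bool((s1_mask >> f32_class_bit(s0)) & 1)

assert v_cmp_class_f32(float('inf'), 1 << 9)   # positive infinity
assert v_cmp_class_f32(-0.0, 1 << 5)           # negative zero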
Store the result into the EXEC mask\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOPCOp.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask and\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOPCOp.V_CMP_F_F16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_TRU_F16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_F16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_O_F16: "EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_U_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NEQ_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_TRU_F16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_F32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. 
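The O/U pair and the N-prefixed compares differ from the plain ones only when a NAN is involved: !(S0 >= S1) is true for a NAN input while S0 < S1 is false, which is exactly what the "With NAN inputs this is not the same operation as" comments warn about. Python floats follow the same IEEE rules, so the distinction can be checked directly:

import math

nan = float('nan')
v_cmp_lt  = lambda a, b: a < b          # false when either input is NAN
v_cmp_nge = lambda a, b: not (a >= b)   # true when either input is NAN
v_cmp_o   = lambda a, b: not (math.isnan(a) or math.isnan(b))  # "orderable"
v_cmp_u   = lambda a, b: math.isnan(a) or math.isnan(b)

assert v_cmp_lt(nan, 1.0) is False and v_cmp_nge(nan, 1.0) is True
assert v_cmp_u(nan, 1.0) and not v_cmp_o(nan, 1.0)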
Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_TRU_F32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_F32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_O_F32: "EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_U_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NEQ_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_TRU_F32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_F64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. 
Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_TRU_F64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_F64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_O_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NEQ_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_TRU_F64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_I16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I16: "Set the per-lane condition code to 1. 
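Every CMPX variant performs the same compare as its CMP twin but additionally writes the per-lane result into EXEC, turning the compare into a lane mask for subsequent instructions. A wave-level sketch with a 64-lane mask (the list-of-ints operands and the function name are illustrative):

def v_cmpx_lt_i32(s0: list, s1: list, exec_mask: int) -> tuple:
    # Returns (VCC, EXEC); only lanes active in the incoming EXEC participate.
    vcc = 0
    for lane in range(64):
        if (exec_mask >> lane) & 1 and s0[lane] < s1[lane]:
            vcc |= 1 << lane
    return vcc, vcc  # CMPX: D0 (VCC) and EXEC receive the same mask

vcc, ex = v_cmpx_lt_i32([0] * 64, [1] * 64, 0xF)
assert vcc == 0xF and ex == 0xF   # only the four active lanes compared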
Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_I16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_I16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_U16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_U16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_I32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U32: "Set the per-lane condition code to 1. 
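One notational trap when translating these strings mechanically: `<>` is pcode's not-equal, and for floats it is the ordered kind, so a textual `<>` to `!=` mapping is only safe for the integer compares here (Python's `!=` is true for NAN operands, pcode's `<>` is not). A regex sketch of that mapping, illustrative rather than the actual compiler in pcode.py:

import re

def translate_expr(pcode: str) -> str:
    # Map a simple pcode comparison to Python syntax.
    expr = pcode.replace('<>', '!=')               # fine for integer compares only;
                                                   # float <> needs an ordered check
    expr = re.sub(r'\bS(\d)\.\w+', r's\1', expr)   # drop type suffixes: S0.i32 -> s0
    return expr

assert translate_expr('S0.i32 <> S1.i32') == 's0 != s1'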
Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_I32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_I32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_U32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_U32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_I64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_I64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_I64: "Set the per-lane condition code to 1. 
Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_U64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_U64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nVDST = Destination VGPR 0- 255.", +} + +VOP3AOp_PCODE = { + VOP3AOp.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask and\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 
2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOP3AOp.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOP3AOp.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. 
Store the result into the EXEC mask and\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOP3AOp.V_CMP_F_F16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. 
Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_TRU_F16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_F16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_O_F16: "EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_U_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NEQ_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_TRU_F16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_F_F32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_TRU_F32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_F32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_O_F32: "EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_U_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NEQ_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_TRU_F32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_F_F64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. 
Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_TRU_F64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_F64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_O_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NEQ_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NLT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_TRU_F64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_F_I16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_T_I16: "Set the per-lane condition code to 1. 
Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_F_U16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_T_U16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_I16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NE_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_T_I16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_U16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NE_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_T_U16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_F_I32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_T_I32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_F_U32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_T_U32: "Set the per-lane condition code to 1. 
Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_I32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NE_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_T_I32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_U32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NE_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_T_U32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_F_I64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_T_I64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_F_U64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMP_T_U64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_I64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NE_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_T_I64: "Set the per-lane condition code to 1. 
Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_F_U64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3AOp.V_CMPX_LT_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GT_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_NE_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3AOp.V_CMPX_T_U64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nVDST = Destination VGPR 0- 255.", + VOP3AOp.V_MOV_B32: 'D0.b32 = S0.b32', + VOP3AOp.V_READFIRSTLANE_B32: "declare lane : 32'I;\nif EXEC == 0x0LL then\nlane = 0;\n// Force lane 0 if all lanes are disabled\nelse\nlane = s_ff1_i32_b64(EXEC);\n// Lowest active lane\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP3AOp.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', + VOP3AOp.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', + VOP3AOp.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + VOP3AOp.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + VOP3AOp.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + VOP3AOp.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + VOP3AOp.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + VOP3AOp.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + VOP3AOp.V_CVT_RPI_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', + VOP3AOp.V_CVT_FLR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', + VOP3AOp.V_CVT_OFF_F32_I4: "Used for interpolation in shader. 
Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP3AOp.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', + VOP3AOp.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', + VOP3AOp.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', + VOP3AOp.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', + VOP3AOp.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', + VOP3AOp.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', + VOP3AOp.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', + VOP3AOp.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', + VOP3AOp.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', + VOP3AOp.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', + VOP3AOp.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP3AOp.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP3AOp.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP3AOp.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP3AOp.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP3AOp.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP3AOp.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP3AOp.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', + VOP3AOp.V_LOG_F32: 'D0.f32 = log2(S0.f32)', + VOP3AOp.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP3AOp.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + VOP3AOp.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP3AOp.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP3AOp.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP3AOp.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP3AOp.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP3AOp.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", + VOP3AOp.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", + VOP3AOp.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP3AOp.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP3AOp.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP3AOp.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP3AOp.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', + VOP3AOp.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', + VOP3AOp.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', + VOP3AOp.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP3AOp.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", + VOP3AOp.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", + VOP3AOp.V_MOV_B64: 'D0.b64 = S0.b64', + VOP3AOp.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', 
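+ # The compare entries above all reduce to two templates: plain VOPC/VOP3A compares
+ # write one bit of a 64-bit lane mask ("D0.u64[laneId] = <cond>"), while the CMPX
+ # variants additionally write that bit into the EXEC mask
+ # ("EXEC.u64[laneId] = D0.u64[laneId] = <cond>"). A minimal standalone model of that
+ # mask update, with hypothetical helper names (a sketch of the semantics the strings
+ # describe, not part of the generated pcode):
+ #
+ #   def write_lane_bit(mask: int, lane: int, bit: bool) -> int:
+ #       # set or clear bit `lane` of a 64-bit wavefront mask
+ #       return (mask & ~(1 << lane)) | (int(bit) << lane)
+ #
+ #   def v_cmp_lt_u32(vcc: int, s0: int, s1: int, lane: int) -> int:
+ #       # D0.u64[laneId] = S0.u32 < S1.u32  (D0 = VCC in VOPC encoding)
+ #       return write_lane_bit(vcc, lane, (s0 & 0xffffffff) < (s1 & 0xffffffff))
+ #
+ #   def v_cmpx_lt_u32(vcc: int, exec_mask: int, s0: int, s1: int, lane: int):
+ #       # EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32
+ #       bit = (s0 & 0xffffffff) < (s1 & 0xffffffff)
+ #       return write_lane_bit(vcc, lane, bit), write_lane_bit(exec_mask, lane, bit)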
+ VOP3AOp.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', + VOP3AOp.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP3AOp.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP3AOp.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", + VOP3AOp.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', + VOP3AOp.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", + VOP3AOp.V_LOG_F16: 'D0.f16 = log2(S0.f16)', + VOP3AOp.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", + VOP3AOp.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32', + VOP3AOp.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + VOP3AOp.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + VOP3AOp.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', + VOP3AOp.V_FMAC_F64: 'D0.f64 = fma(S0.f64, S1.f64, D0.f64)', + VOP3AOp.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + VOP3AOp.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", + VOP3AOp.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", + VOP3AOp.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", + VOP3AOp.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", + VOP3AOp.V_MIN_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S1.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S0.f32\nelse\nD0.f32 = S0.f32 < S1.f32 ? S0.f32 : S1.f32\nendif", + VOP3AOp.V_MAX_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S1.f32\nelsif WAVE_MODE.IEEE then\nD0.f32 = S0.f32 >= S1.f32 ? S0.f32 : S1.f32\nelse\nD0.f32 = S0.f32 > S1.f32 ? S0.f32 : S1.f32\nendif", + VOP3AOp.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', + VOP3AOp.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', + VOP3AOp.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', + VOP3AOp.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? 
S0.u32 : S1.u32', + VOP3AOp.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', + VOP3AOp.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', + VOP3AOp.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOP3AOp.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', + VOP3AOp.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', + VOP3AOp.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP3AOp.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + VOP3AOp.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + VOP3AOp.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', + VOP3AOp.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + VOP3AOp.V_MAC_F16: "tmp = S0.f16 * S1.f16 + D0.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif", + VOP3AOp.V_ADD_U16: 'D0.u16 = S0.u16 + S1.u16', + VOP3AOp.V_SUB_U16: 'D0.u16 = S0.u16 - S1.u16', + VOP3AOp.V_SUBREV_U16: 'D0.u16 = S1.u16 - S0.u16', + VOP3AOp.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16', + VOP3AOp.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)', + VOP3AOp.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)', + VOP3AOp.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)', + VOP3AOp.V_MAX_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S1.f16\nelsif WAVE_MODE.IEEE then\nD0.f16 = S0.f16 >= S1.f16 ? S0.f16 : S1.f16\nelse\nD0.f16 = S0.f16 > S1.f16 ? S0.f16 : S1.f16\nendif", + VOP3AOp.V_MIN_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S1.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S0.f16\nelse\nD0.f16 = S0.f16 < S1.f16 ? S0.f16 : S1.f16\nendif", + VOP3AOp.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16', + VOP3AOp.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16', + VOP3AOp.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16', + VOP3AOp.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? 
S0.i16 : S1.i16', + VOP3AOp.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP3AOp.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32', + VOP3AOp.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32', + VOP3AOp.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32', + VOP3AOp.V_DOT2C_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOP3AOp.V_DOT2C_I32_I16: 'tmp = D0.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp', + VOP3AOp.V_DOT4C_I32_I8: 'tmp = D0.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp', + VOP3AOp.V_DOT8C_I32_I4: 'tmp = D0.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp', + VOP3AOp.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOP3AOp.V_PK_FMAC_F16: 'D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16);\nD0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16)', + VOP3AOp.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', + VOP3AOp.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32", + VOP3AOp.V_MAD_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24) + S2.u32", + VOP3AOp.V_CUBEID_F32: '// Set D0.f = cubemap face ID ({0.0, 1.0, ..., 5.0}).\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = 5.0F\nelse\nD0.f32 = 4.0F\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = 3.0F\nelse\nD0.f32 = 2.0F\nendif\nelse\nif S0.f32 < 0.0F then\nD0.f32 = 1.0F\nelse\nD0.f32 = 0.0F\nendif\nendif', + VOP3AOp.V_CUBESC_F32: '// D0.f = cubemap S coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = -S0.f32\nelse\nD0.f32 = S0.f32\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S0.f32\nelse\nif S0.f32 < 0.0F then\nD0.f32 = S2.f32\nelse\nD0.f32 = -S2.f32\nendif\nendif', + VOP3AOp.V_CUBETC_F32: '// D0.f = cubemap T coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = -S1.f32\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = -S2.f32\nelse\nD0.f32 = S2.f32\nendif\nelse\nD0.f32 = -S1.f32\nendif', + VOP3AOp.V_CUBEMA_F32: '// D0.f = 2.0 * cubemap major axis.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = S2.f32 * 2.0F\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S1.f32 * 2.0F\nelse\nD0.f32 = S0.f32 * 
2.0F\nendif', + VOP3AOp.V_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S2[4 : 0].u32) - 1U))', + VOP3AOp.V_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32)', + VOP3AOp.V_BFI_B32: 'D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32))', + VOP3AOp.V_FMA_F32: 'D0.f32 = fma(S0.f32, S1.f32, S2.f32)', + VOP3AOp.V_FMA_F64: 'D0.f64 = fma(S0.f64, S1.f64, S2.f64)', + VOP3AOp.V_LERP_U8: 'tmp = ((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1U << 24U);\ntmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1U << 16U);\ntmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1U << 8U);\ntmp += ((S0.u32[7 : 0] + S1.u32[7 : 0] + S2.u32[0].u8) >> 1U);\nD0.u32 = tmp.u32', + VOP3AOp.V_ALIGNBIT_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> S2.u32[4 : 0]) & 0xffffffffLL)", + VOP3AOp.V_ALIGNBYTE_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> (S2.u32[1 : 0] * 8U)) & 0xffffffffLL)", + VOP3AOp.V_MIN3_F32: 'D0.f32 = v_min_f32(v_min_f32(S0.f32, S1.f32), S2.f32)', + VOP3AOp.V_MIN3_I32: 'D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', + VOP3AOp.V_MIN3_U32: 'D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', + VOP3AOp.V_MAX3_F32: 'D0.f32 = v_max_f32(v_max_f32(S0.f32, S1.f32), S2.f32)', + VOP3AOp.V_MAX3_I32: 'D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', + VOP3AOp.V_MAX3_U32: 'D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', + VOP3AOp.V_MED3_F32: "if (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)) || isNAN(64'F(S2.f32))) then\nD0.f32 = v_min3_f32(S0.f32, S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S0.f32 then\nD0.f32 = v_max_f32(S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S1.f32 then\nD0.f32 = v_max_f32(S0.f32, S2.f32)\nelse\nD0.f32 = v_max_f32(S0.f32, S1.f32)\nendif", + VOP3AOp.V_MED3_I32: 'if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32 then\nD0.i32 = v_max_i32(S1.i32, S2.i32)\nelsif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32 then\nD0.i32 = v_max_i32(S0.i32, S2.i32)\nelse\nD0.i32 = v_max_i32(S0.i32, S1.i32)\nendif', + VOP3AOp.V_MED3_U32: 'if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32 then\nD0.u32 = v_max_u32(S1.u32, S2.u32)\nelsif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32 then\nD0.u32 = v_max_u32(S0.u32, S2.u32)\nelse\nD0.u32 = v_max_u32(S0.u32, S1.u32)\nendif', + VOP3AOp.V_SAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3AOp.V_SAD_HI_U8: "D0.u32 = (32'U(v_sad_u8(S0, S1, 0U)) << 16U) + S2.u32", + VOP3AOp.V_SAD_U16: '// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp', + VOP3AOp.V_SAD_U32: '// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32', + VOP3AOp.V_CVT_PK_U8_F32: "tmp = (S2.u32 & 32'U(~(0xff << (S1.u32[1 : 0].u32 * 8U))));\ntmp = (tmp | ((32'U(f32_to_u8(S0.f32)) & 255U) << (S1.u32[1 : 0].u32 * 8U)));\nD0.u32 = tmp", + VOP3AOp.V_DIV_FIXUP_F32: "sign_out = (sign(S1.f32) ^ sign(S2.f32));\nif isNAN(64'F(S2.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S2.f32)))\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif ((64'F(S1.f32) == 0.0) && (64'F(S2.f32) == 0.0)) then\n// 0/0\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(abs(S1.f32)) == +INF) && (64'F(abs(S2.f32)) == +INF)) 
then\n// inf/inf\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(S1.f32) == 0.0) || (64'F(abs(S2.f32)) == +INF)) then\n// x/0, or inf/y\nD0.f32 = sign_out ? -INF.f32 : +INF.f32\nelsif ((64'F(abs(S1.f32)) == +INF) || (64'F(S2.f32) == 0.0)) then\n// x/inf, 0/y\nD0.f32 = sign_out ? -0.0F : 0.0F\nelsif exponent(S2.f32) - exponent(S1.f32) < -150 then\nD0.f32 = sign_out ? -UNDERFLOW_F32 : UNDERFLOW_F32\nelsif exponent(S1.f32) == 255 then\nD0.f32 = sign_out ? -OVERFLOW_F32 : OVERFLOW_F32\nelse\nD0.f32 = sign_out ? -abs(S0.f32) : abs(S0.f32)\nendif", + VOP3AOp.V_DIV_FIXUP_F64: "sign_out = (sign(S1.f64) ^ sign(S2.f64));\nif isNAN(S2.f64) then\nD0.f64 = cvtToQuietNAN(S2.f64)\nelsif isNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif ((S1.f64 == 0.0) && (S2.f64 == 0.0)) then\n// 0/0\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((abs(S1.f64) == +INF) && (abs(S2.f64) == +INF)) then\n// inf/inf\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((S1.f64 == 0.0) || (abs(S2.f64) == +INF)) then\n// x/0, or inf/y\nD0.f64 = sign_out ? -INF : +INF\nelsif ((abs(S1.f64) == +INF) || (S2.f64 == 0.0)) then\n// x/inf, 0/y\nD0.f64 = sign_out ? -0.0 : 0.0\nelsif exponent(S2.f64) - exponent(S1.f64) < -1075 then\nD0.f64 = sign_out ? -UNDERFLOW_F64 : UNDERFLOW_F64\nelsif exponent(S1.f64) == 2047 then\nD0.f64 = sign_out ? -OVERFLOW_F64 : OVERFLOW_F64\nelse\nD0.f64 = sign_out ? -abs(S0.f64) : abs(S0.f64)\nendif", + VOP3AOp.V_DIV_FMAS_F32: 'if VCC.u64[laneId] then\nD0.f32 = 2.0F ** 32 * fma(S0.f32, S1.f32, S2.f32)\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif', + VOP3AOp.V_DIV_FMAS_F64: 'if VCC.u64[laneId] then\nD0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)\nelse\nD0.f64 = fma(S0.f64, S1.f64, S2.f64)\nendif', + VOP3AOp.V_MSAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 
0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3AOp.V_QSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", + VOP3AOp.V_MQSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", + VOP3AOp.V_MQSAD_U32_U8: "tmp[127 : 96] = 32'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32));\ntmp[95 : 64] = 32'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32));\ntmp[63 : 32] = 32'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32));\ntmp[31 : 0] = 32'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32));\nD0.b128 = tmp.b128", + VOP3AOp.V_MAD_LEGACY_F16: "tmp = S0.f16 * S1.f16 + S2.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif", + VOP3AOp.V_MAD_LEGACY_U16: "tmp = S0.u16 * S1.u16 + S2.u16;\nif OPSEL.u4[3] then\nD0 = { tmp.u16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.u16 }\nendif", + VOP3AOp.V_MAD_LEGACY_I16: "tmp = S0.i16 * S1.i16 + S2.i16;\nif OPSEL.u4[3] then\nD0 = { tmp.i16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.i16 }\nendif", + VOP3AOp.V_PERM_B32: 'D0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])', + VOP3AOp.V_FMA_LEGACY_F16: "tmp = fma(S0.f16, S1.f16, S2.f16);\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif", + VOP3AOp.V_DIV_FIXUP_LEGACY_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\ntmp = cvtToQuietNAN(64'F(S2.f16))\nelsif isNAN(64'F(S1.f16)) then\ntmp = cvtToQuietNAN(64'F(S1.f16))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\ntmp = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\ntmp = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\ntmp = sign_out ? -INF : +INF\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\ntmp = sign_out ? -0.0 : 0.0\nelse\ntmp = sign_out ? 
-abs(S0.f16) : abs(S0.f16)\nendif;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif", + VOP3AOp.V_CVT_PKACCUM_U8_F32: "byte = S1.u32[1 : 0];\nbit = byte.u32 * 8U;\nD0.u32[bit + 7U : bit] = 32'U(f32_to_u8(S0.f32))", + VOP3AOp.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32", + VOP3AOp.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32", + VOP3AOp.V_XAD_U32: 'D0.u32 = (S0.u32 ^ S1.u32) + S2.u32', + VOP3AOp.V_MIN3_F16: 'D0.f16 = v_min_f16(v_min_f16(S0.f16, S1.f16), S2.f16)', + VOP3AOp.V_MIN3_I16: 'D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16)', + VOP3AOp.V_MIN3_U16: 'D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16)', + VOP3AOp.V_MAX3_F16: 'D0.f16 = v_max_f16(v_max_f16(S0.f16, S1.f16), S2.f16)', + VOP3AOp.V_MAX3_I16: 'D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16)', + VOP3AOp.V_MAX3_U16: 'D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16)', + VOP3AOp.V_MED3_F16: "if (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)) || isNAN(64'F(S2.f16))) then\nD0.f16 = v_min3_f16(S0.f16, S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S0.f16 then\nD0.f16 = v_max_f16(S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S1.f16 then\nD0.f16 = v_max_f16(S0.f16, S2.f16)\nelse\nD0.f16 = v_max_f16(S0.f16, S1.f16)\nendif", + VOP3AOp.V_MED3_I16: 'if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16 then\nD0.i16 = v_max_i16(S1.i16, S2.i16)\nelsif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16 then\nD0.i16 = v_max_i16(S0.i16, S2.i16)\nelse\nD0.i16 = v_max_i16(S0.i16, S1.i16)\nendif', + VOP3AOp.V_MED3_U16: 'if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16 then\nD0.u16 = v_max_u16(S1.u16, S2.u16)\nelsif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16 then\nD0.u16 = v_max_u16(S0.u16, S2.u16)\nelse\nD0.u16 = v_max_u16(S0.u16, S1.u16)\nendif', + VOP3AOp.V_LSHL_ADD_U32: 'D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32', + VOP3AOp.V_ADD_LSHL_U32: 'D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32)', + VOP3AOp.V_ADD3_U32: 'D0.u32 = S0.u32 + S1.u32 + S2.u32', + VOP3AOp.V_LSHL_OR_B32: 'D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32)', + VOP3AOp.V_AND_OR_B32: 'D0.u32 = ((S0.u32 & S1.u32) | S2.u32)', + VOP3AOp.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)', + VOP3AOp.V_MAD_F16: 'D0.f16 = S0.f16 * S1.f16 + S2.f16', + VOP3AOp.V_MAD_U16: 'D0.u16 = S0.u16 * S1.u16 + S2.u16', + VOP3AOp.V_MAD_I16: 'D0.i16 = S0.i16 * S1.i16 + S2.i16', + VOP3AOp.V_FMA_F16: 'D0.f16 = fma(S0.f16, S1.f16, S2.f16)', + VOP3AOp.V_DIV_FIXUP_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S2.f16)))\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\nD0.f16 = sign_out ? -INF.f16 : +INF.f16\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\nD0.f16 = sign_out ? -16'0.0 : 16'0.0\nelse\nD0.f16 = sign_out ? 
-abs(S0.f16) : abs(S0.f16)\nendif", + VOP3AOp.V_LSHL_ADD_U64: 'D0.u64 = (S0.u64 << S1.u32[2 : 0].u32) + S2.u64', + VOP3AOp.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64', + VOP3AOp.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64', + VOP3AOp.V_MIN_F64: 'if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S1.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = S0.f64 < S1.f64 ? S0.f64 : S1.f64\nendif', + VOP3AOp.V_MAX_F64: 'if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S1.f64\nelsif WAVE_MODE.IEEE then\nD0.f64 = S0.f64 >= S1.f64 ? S0.f64 : S1.f64\nelse\nD0.f64 = S0.f64 > S1.f64 ? S0.f64 : S1.f64\nendif', + VOP3AOp.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32', + VOP3AOp.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32', + VOP3AOp.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + VOP3AOp.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + VOP3AOp.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32', + VOP3AOp.V_READLANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nD0.b32 = VGPR[lane][SRC0.u32]', + VOP3AOp.V_WRITELANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nVGPR[lane][VDST.u32] = S0.b32', + VOP3AOp.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp", + VOP3AOp.V_MBCNT_LO_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', + VOP3AOp.V_MBCNT_HI_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', + VOP3AOp.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)', + VOP3AOp.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)', + VOP3AOp.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)', + VOP3AOp.V_TRIG_PREOP_F64: "shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)", + VOP3AOp.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + VOP3AOp.V_CVT_PKNORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);", + VOP3AOp.V_CVT_PKNORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);", + VOP3AOp.V_CVT_PKRTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + VOP3AOp.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);", + VOP3AOp.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = 
i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);", + VOP3AOp.V_CVT_PKNORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);", + VOP3AOp.V_CVT_PKNORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);", + VOP3AOp.V_ADD_I32: 'D0.i32 = S0.i32 + S1.i32', + VOP3AOp.V_SUB_I32: 'D0.i32 = S0.i32 - S1.i32', + VOP3AOp.V_ADD_I16: 'D0.i16 = S0.i16 + S1.i16', + VOP3AOp.V_SUB_I16: 'D0.i16 = S0.i16 - S1.i16', + VOP3AOp.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16', + VOP3AOp.V_MUL_LEGACY_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP3AOp.V_CVT_PK_FP8_F32: 'prev_mode = ROUND_MODE;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[15:0] are preserved\nendif;', + VOP3AOp.V_CVT_PK_BF8_F32: 'prev_mode = ROUND_MODE;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[15:0] are preserved\nendif;', + VOP3AOp.V_CVT_SR_FP8_F32: "prev_mode = ROUND_MODE;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 12].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].fp8 = f32_to_fp8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].fp8 = f32_to_fp8(tmp.f32)\nendif;", + VOP3AOp.V_CVT_SR_BF8_F32: "prev_mode = ROUND_MODE;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 11].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].bf8 = f32_to_bf8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].bf8 = f32_to_bf8(tmp.f32)\nendif;", + VOP3AOp.V_DOT2C_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', + VOP3AOp.V_BITOP3_B16: "tmp = 16'0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 16'U(~S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 16'U(~S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 16'U(~S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 16'U(~S0.b16 & S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 16'U(S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 16'U(S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 16'U(S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 
16'U(S0.b16 & S1.b16 & S2.b16) : 16'0U));", + VOP3AOp.V_BITOP3_B32: "tmp = 0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 32'U(~S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 32'U(~S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 32'U(~S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 32'U(~S0.b32 & S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 32'U(S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 32'U(S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 32'U(S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 32'U(S0.b32 & S1.b32 & S2.b32) : 0U));", + VOP3AOp.V_CVT_SCALEF32_PK_FP8_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_fp8_scale(S0.f32, scale.u8);\ntmp1 = f32_to_fp8_scale(S1.f32, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_BF8_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_bf8_scale(S0.f32, scale.u8);\ntmp1 = f32_to_bf8_scale(S1.f32, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_SR_FP8_F32: "scale = 32'U(exponent(S2.f32));\ntmp = f32_to_fp8_sr_scale(S0.f32, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_SR_BF8_F32: "scale = 32'U(exponent(S2.f32));\ntmp = f32_to_bf8_sr_scale(S0.f32, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].bf8 = tmp;\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_F32_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1", + VOP3AOp.V_CVT_SCALEF32_PK_F32_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1", + VOP3AOp.V_CVT_SCALEF32_F32_FP8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8;\ntmp = fp8_to_f32_scale(src, scale.u8);", + VOP3AOp.V_CVT_SCALEF32_F32_BF8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8;\ntmp = bf8_to_f32_scale(src, scale.u8);", + VOP3AOp.V_CVT_SCALEF32_PK_FP4_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_fp4_scale(S0.f32, scale.u8);\ntmp1 = f32_to_fp4_scale(S1.f32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = f32_to_fp4_sr_scale(S0[31 : 0].f32, randomVal, scale.u8);\ntmp1 = f32_to_fp4_sr_scale(S0[63 : 32].f32, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_F32_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\nD0[31 : 0].f32 = tmp0;\nD0[63 : 
32].f32 = tmp1", + VOP3AOp.V_CVT_SCALEF32_PK_FP8_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_fp8_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_fp8_scale(S0[31 : 16].f16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_BF8_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_bf8_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_bf8_scale(S0[31 : 16].f16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_SR_FP8_F16: "scale = 32'U(exponent(S2.f32));\ntmp = f16_to_fp8_sr_scale(S0.f16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_SR_BF8_F16: "scale = 32'U(exponent(S2.f32));\ntmp = f16_to_bf8_sr_scale(S0.f16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].bf8 = tmp;\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_FP8_BF16: "scale = 32'U(exponent(S1.f32));\ntmp0 = bf16_to_fp8_scale(S0[15 : 0].bf16, scale.u8);\ntmp1 = bf16_to_fp8_scale(S0[31 : 16].bf16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_BF8_BF16: "scale = 32'U(exponent(S1.f32));\ntmp0 = bf16_to_bf8_scale(S0[15 : 0].bf16, scale.u8);\ntmp1 = bf16_to_bf8_scale(S0[31 : 16].bf16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_SR_FP8_BF16: "scale = 32'U(exponent(S2.f32));\ntmp = bf16_to_fp8_sr_scale(S0.bf16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_SR_BF8_BF16: "scale = 32'U(exponent(S2.f32));\ntmp = bf16_to_bf8_sr_scale(S0.bf16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].bf8 = tmp;\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_F16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1", + VOP3AOp.V_CVT_SCALEF32_PK_F16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1", + VOP3AOp.V_CVT_SCALEF32_F16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8;\ntmp = fp8_to_f16_scale(src, scale.u8);\n// OPSEL[3] controls destination hi/lo", + VOP3AOp.V_CVT_SCALEF32_F16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8;\ntmp = bf8_to_f16_scale(src, scale.u8);\n// OPSEL[3] controls destination hi/lo", + VOP3AOp.V_CVT_SCALEF32_PK_FP4_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_fp4_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_fp4_scale(S0[31 : 16].f16, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", 
+ VOP3AOp.V_CVT_SCALEF32_PK_FP4_BF16: "scale = 32'U(exponent(S1.f32));\ntmp0 = bf16_to_fp4_scale(S0[15 : 0].bf16, scale.u8);\ntmp1 = bf16_to_fp4_scale(S0[31 : 16].bf16, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
+ VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = f16_to_fp4_sr_scale(S0[15 : 0].f16, randomVal, scale.u8);\ntmp1 = f16_to_fp4_sr_scale(S0[31 : 16].f16, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
+ VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = bf16_to_fp4_sr_scale(S0[15 : 0].bf16, randomVal, scale.u8);\ntmp1 = bf16_to_fp4_sr_scale(S0[31 : 16].bf16, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
+ VOP3AOp.V_CVT_SCALEF32_PK_F16_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1",
+ VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\nD0[15 : 0].bf16 = tmp0;\nD0[31 : 16].bf16 = tmp1",
+ VOP3AOp.V_CVT_SCALEF32_2XPK16_FP6_F32: "scale = 32'U(exponent(S2.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 15 do\n// Note that S0 and S1 inputs are interleaved in the packed result.\ntmp[dOffset + 5 : dOffset].fp6 = f32_to_fp6_scale(S0[sOffset + 31 : sOffset].f32, scale.u8);\ntmp[dOffset + 11 : dOffset + 6].fp6 = f32_to_fp6_scale(S1[sOffset + 31 : sOffset].f32, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_2XPK16_BF6_F32: "scale = 32'U(exponent(S2.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 15 do\n// Note that S0 and S1 inputs are interleaved in the packed result.\ntmp[dOffset + 5 : dOffset].bf6 = f32_to_bf6_scale(S0[sOffset + 31 : sOffset].f32, scale.u8);\ntmp[dOffset + 11 : dOffset + 6].bf6 = f32_to_bf6_scale(S1[sOffset + 31 : sOffset].f32, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].fp6 = f32_to_fp6_sr_scale(S0[sOffset + 31 : sOffset].f32, randomVal, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = f32_to_bf6_sr_scale(S0[sOffset + 31 : sOffset].f32, randomVal, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_PK32_F32_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 1024'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 31 : dOffset].f32 = fp6_to_f32_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[1023 : 0] = tmp.b1024",
+ VOP3AOp.V_CVT_SCALEF32_PK32_F32_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 1024'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 31 : dOffset].f32 = bf6_to_f32_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[1023 : 0] = tmp.b1024",
+ VOP3AOp.V_CVT_SCALEF32_PK32_FP6_BF16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].fp6 = bf16_to_fp6_scale(S0[sOffset + 15 : sOffset].bf16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_PK32_BF6_F16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = f16_to_bf6_scale(S0[sOffset + 15 : sOffset].f16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_PK32_BF6_BF16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = bf16_to_bf6_scale(S0[sOffset + 15 : sOffset].bf16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].fp6 = f16_to_fp6_sr_scale(S0[sOffset + 15 : sOffset].f16, randomVal, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].fp6 = bf16_to_fp6_sr_scale(S0[sOffset + 15 : sOffset].bf16, randomVal, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = f16_to_bf6_sr_scale(S0[sOffset + 15 : sOffset].f16, randomVal, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = bf16_to_bf6_sr_scale(S0[sOffset + 15 : sOffset].bf16, randomVal, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
+ VOP3AOp.V_CVT_SCALEF32_PK32_F16_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 15 : dOffset].f16 = fp6_to_f16_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512",
+ VOP3AOp.V_CVT_SCALEF32_PK32_BF16_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 15 : dOffset].bf16 = fp6_to_bf16_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512",
+ VOP3AOp.V_CVT_SCALEF32_PK32_F16_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 15 : dOffset].f16 = bf6_to_f16_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512",
+ VOP3AOp.V_CVT_SCALEF32_PK32_BF16_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 15 : dOffset].bf16 = bf6_to_bf16_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512",
+ VOP3AOp.V_ASHR_PK_I8_I32: "declare tmp : 16'B;\ntmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32);\ntmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32);\nD0[15 : 0] = tmp",
+ VOP3AOp.V_ASHR_PK_U8_I32: "declare tmp : 16'B;\ntmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32);\ntmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32);\nD0[15 : 0] = tmp",
+ VOP3AOp.V_CVT_PK_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);',
+ VOP3AOp.V_CVT_PK_BF16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].bf16 = f32_to_bf16(S0.f32);\ntmp[31 : 16].bf16 = f32_to_bf16(S1.f32);',
+ VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[15 : 0].bf16 = tmp0.bf16;\nD0[31 : 16].bf16 = tmp1.bf16",
+ VOP3AOp.V_CVT_SCALEF32_PK_BF16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 
16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[15 : 0].bf16 = tmp0.bf16;\nD0[31 : 16].bf16 = tmp1.bf16",
+ VOP3AOp.V_CVT_SR_F16_F32: "prev_mode = ROUND_MODE;\nif OPSEL[3].u2 == 2'0U then\nVGPR[laneId][VDST.u32][15 : 0].f16 = 16'F(f32_to_f16_sr(S0.f32, S1.u32))\nelse\nVGPR[laneId][VDST.u32][31 : 16].f16 = 16'F(f32_to_f16_sr(S0.f32, S1.u32))\nendif;",
+ VOP3AOp.V_CVT_SR_BF16_F32: "prev_mode = ROUND_MODE;\nif OPSEL[3].u2 == 2'0U then\nVGPR[laneId][VDST.u32][15 : 0].bf16 = 16'BF(f32_to_bf16_sr(S0.f32, S1.u32))\nelse\nVGPR[laneId][VDST.u32][31 : 16].bf16 = 16'BF(f32_to_bf16_sr(S0.f32, S1.u32))\nendif;",
+ VOP3AOp.V_MINIMUM3_F32: "D0.f32 = 32'F(v_minimum_f32(v_minimum_f32(S0.f32, S1.f32), S2.f32))",
+ VOP3AOp.V_MAXIMUM3_F32: "D0.f32 = 32'F(v_maximum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32))",
+}
+
+VOP3BOp_PCODE = {
+ VOP3BOp.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32",
+ VOP3BOp.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
+ VOP3BOp.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
+ VOP3BOp.V_ADDC_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32",
+ VOP3BOp.V_SUBB_CO_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
+ VOP3BOp.V_SUBBREV_CO_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32", + VOP3BOp.V_DIV_SCALE_F32: "VCC = 0x0LL;\nif ((64'F(S2.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\nD0.f32 = NAN.f32\nelsif exponent(S2.f32) - exponent(S1.f32) >= 96 then\n// N/D near MAX_FLOAT_F32\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif S1.f32 == DENORM.f32 then\nD0.f32 = ldexp(S0.f32, 64)\nelsif ((1.0 / 64'F(S1.f32) == DENORM.f64) && (S2.f32 / S1.f32 == DENORM.f32)) then\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif 1.0 / 64'F(S1.f32) == DENORM.f64 then\nD0.f32 = ldexp(S0.f32, -64)\nelsif S2.f32 / S1.f32 == DENORM.f32 then\nVCC = 0x1LL;\nif S0.f32 == S2.f32 then\n// Only scale the numerator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif exponent(S2.f32) <= 23 then\n// Numerator is tiny\nD0.f32 = ldexp(S0.f32, 64)\nendif", + VOP3BOp.V_DIV_SCALE_F64: 'VCC = 0x0LL;\nif ((S2.f64 == 0.0) || (S1.f64 == 0.0)) then\nD0.f64 = NAN.f64\nelsif exponent(S2.f64) - exponent(S1.f64) >= 768 then\n// N/D near MAX_FLOAT_F64\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, 128)\nelsif ((1.0 / S1.f64 == DENORM.f64) && (S2.f64 / S1.f64 == DENORM.f64)) then\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif 1.0 / S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, -128)\nelsif S2.f64 / S1.f64 == DENORM.f64 then\nVCC = 0x1LL;\nif S0.f64 == S2.f64 then\n// Only scale the numerator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif exponent(S2.f64) <= 53 then\n// Numerator is tiny\nD0.f64 = ldexp(S0.f64, 128)\nendif', + VOP3BOp.V_MAD_U64_U32: "{ D1.u1, D0.u64 } = 65'B(65'U(S0.u32) * 65'U(S1.u32) + 65'U(S2.u64))", + VOP3BOp.V_MAD_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))", +} + +DSOp_PCODE = { + DSOp.DS_ADD_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_WRITE_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]', + DSOp.DS_WRITE2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]', + DSOp.DS_WRITE2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]', + DSOp.DS_CMPST_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', + DSOp.DS_CMPST_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MIN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MAX_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? 
src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_ADD_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + DSOp.DS_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + DSOp.DS_WRITE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32", + DSOp.DS_WRITE_B8: 'MEM[ADDR].b8 = DATA[7 : 0]', + DSOp.DS_WRITE_B16: 'MEM[ADDR].b16 = DATA[15 : 0]', + DSOp.DS_ADD_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_RTN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_RTN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_WRXCHG_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + DSOp.DS_WRXCHG2_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_WRXCHG2ST64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_CMPST_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', + DSOp.DS_CMPST_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MIN_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MAX_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_WRAP_RTN_B32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = tmp >= DATA.u32 ? 
tmp - DATA.u32 : tmp + DATA2.u32;\nRETURN_DATA = tmp', + DSOp.DS_ADD_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_READ_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32', + DSOp.DS_READ2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4U].b32', + DSOp.DS_READ2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256U].b32', + DSOp.DS_READ_I8: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i8))", + DSOp.DS_READ_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + DSOp.DS_READ_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))", + DSOp.DS_READ_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;', + DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\nif EXEC[i].u1 then\ndst_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_BPERMUTE_B32: "Note that EXEC mask is applied to both VGPR read and write. If src_lane selects a disabled thread then zero is\n// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\nsrc_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. 
Some source\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_ADD_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_WRITE_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]', + DSOp.DS_WRITE2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_WRITE2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_CMPST_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', + DSOp.DS_CMPST_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MIN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_WRITE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]', + DSOp.DS_WRITE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]', + DSOp.DS_READ_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + DSOp.DS_READ_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + DSOp.DS_READ_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", + DSOp.DS_READ_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", + DSOp.DS_READ_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;', + DSOp.DS_READ_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;', + DSOp.DS_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + DSOp.DS_ADD_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_RTN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_RTN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_WRXCHG_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + DSOp.DS_WRXCHG2_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_WRXCHG2ST64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_CMPST_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', + DSOp.DS_CMPST_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MIN_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_READ_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32', + DSOp.DS_READ2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8U + 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8U + 4U].b32', + DSOp.DS_READ2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512U + 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512U + 4U].b32', + DSOp.DS_ADD_RTN_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", + DSOp.DS_GWS_SEMA_RELEASE_ALL: '// Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\n// Incr the state counter of the resource', + DSOp.DS_GWS_INIT: '// Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\n// Get the value to use in init\nindex = find_first_valid(vector mask)\nvalue = DATA[thread: index]\n// Set the state of the resource', + DSOp.DS_GWS_SEMA_V: '//Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\n//Incr the state counter of the resource', + DSOp.DS_GWS_SEMA_BR: '//Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\nindex = find first valid (vector mask)\ncount = DATA[thread: index];\n//Add count to the resource state counter', + DSOp.DS_GWS_SEMA_P: '//Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\nstate[rid].counter -= 1;', + DSOp.DS_GWS_BARRIER: '//Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + OFFSET0[5:0];\nindex = find first valid (vector mask);\nvalue = DATA[thread: index];\n// Input Decision Machine\nthread[rid].flag = state[rid].flag;\nstate[rid].flag = !state.flag;\nstate[rid].counter = value;\nelse\nstate[rid].counter -= 1;\n// Release Machine\nendif;', + DSOp.DS_READ_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32", + DSOp.DS_PK_ADD_RTN_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + DSOp.DS_PK_ADD_RTN_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + DSOp.DS_WRITE_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]', + DSOp.DS_WRITE_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + 
OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]', + DSOp.DS_READ_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32', + DSOp.DS_READ_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32', +} + +FLATOp_PCODE = { + FLATOp.FLAT_LOAD_UBYTE: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + FLATOp.FLAT_LOAD_SBYTE: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + FLATOp.FLAT_LOAD_USHORT: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + FLATOp.FLAT_LOAD_SSHORT: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + FLATOp.FLAT_LOAD_DWORD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', + FLATOp.FLAT_LOAD_DWORDX2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + FLATOp.FLAT_LOAD_DWORDX3: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + FLATOp.FLAT_LOAD_DWORDX4: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + FLATOp.FLAT_STORE_BYTE: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', + FLATOp.FLAT_STORE_BYTE_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', + FLATOp.FLAT_STORE_SHORT: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', + FLATOp.FLAT_STORE_SHORT_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', + FLATOp.FLAT_STORE_DWORD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', + FLATOp.FLAT_STORE_DWORDX2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + FLATOp.FLAT_STORE_DWORDX3: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + FLATOp.FLAT_STORE_DWORDX4: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + FLATOp.FLAT_LOAD_UBYTE_D16: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });", + FLATOp.FLAT_LOAD_UBYTE_D16_HI: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });", + FLATOp.FLAT_LOAD_SBYTE_D16: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));", +
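A note on the wrapping inc/dec atomics: DS_INC_U64/DS_DEC_U64 above (and the FLAT_ATOMIC_INC/FLAT_ATOMIC_DEC entries below) do not saturate; DATA acts as a wrap bound. A minimal Python model of just that update rule, with illustrative names that are not part of the emulator:

MASK64 = (1 << 64) - 1

def atomic_inc_u64(tmp: int, src: int) -> int:
  # tmp is the old memory value, src is DATA; the counter wraps to 0 once tmp reaches src
  return 0 if tmp >= src else (tmp + 1) & MASK64

def atomic_dec_u64(tmp: int, src: int) -> int:
  # reloads src when the counter is 0 or has overshot src, otherwise decrements
  return src if tmp == 0 or tmp > src else (tmp - 1) & MASK64

assert atomic_inc_u64(5, 5) == 0 and atomic_inc_u64(4, 5) == 5
assert atomic_dec_u64(0, 7) == 7 and atomic_dec_u64(3, 7) == 2

In every case RETURN_DATA gets tmp, the pre-op value, not the result of the update.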
FLATOp.FLAT_LOAD_SBYTE_D16_HI: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));", + FLATOp.FLAT_LOAD_SHORT_D16: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;', + FLATOp.FLAT_LOAD_SHORT_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;', + FLATOp.FLAT_ATOMIC_SWAP: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_CMPSWAP: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_ADD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_SUB: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_SMIN: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + FLATOp.FLAT_ATOMIC_UMIN: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_SMAX: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + FLATOp.FLAT_ATOMIC_UMAX: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_AND: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_OR: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_XOR: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_INC: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_DEC: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp', + FLATOp.FLAT_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + FLATOp.FLAT_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + FLATOp.FLAT_ATOMIC_MIN_F64: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + FLATOp.FLAT_ATOMIC_MAX_F64: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', + FLATOp.FLAT_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + FLATOp.FLAT_ATOMIC_SWAP_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_CMPSWAP_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_ADD_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_SUB_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_SMIN_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + FLATOp.FLAT_ATOMIC_UMIN_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_SMAX_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + FLATOp.FLAT_ATOMIC_UMAX_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_AND_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_OR_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_XOR_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_INC_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_DEC_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? 
src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', +} + +GLOBALOp_PCODE = { + GLOBALOp.GLOBAL_LOAD_UBYTE: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + GLOBALOp.GLOBAL_LOAD_SBYTE: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + GLOBALOp.GLOBAL_LOAD_USHORT: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + GLOBALOp.GLOBAL_LOAD_SSHORT: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + GLOBALOp.GLOBAL_LOAD_DWORD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', + GLOBALOp.GLOBAL_LOAD_DWORDX2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + GLOBALOp.GLOBAL_LOAD_DWORDX3: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + GLOBALOp.GLOBAL_LOAD_DWORDX4: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + GLOBALOp.GLOBAL_STORE_BYTE: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', + GLOBALOp.GLOBAL_STORE_BYTE_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', + GLOBALOp.GLOBAL_STORE_SHORT: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', + GLOBALOp.GLOBAL_STORE_SHORT_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', + GLOBALOp.GLOBAL_STORE_DWORD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', + GLOBALOp.GLOBAL_STORE_DWORDX2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + GLOBALOp.GLOBAL_STORE_DWORDX3: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + GLOBALOp.GLOBAL_STORE_DWORDX4: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + GLOBALOp.GLOBAL_LOAD_UBYTE_D16: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });", + GLOBALOp.GLOBAL_LOAD_UBYTE_D16_HI: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });", + GLOBALOp.GLOBAL_LOAD_SBYTE_D16: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));", + GLOBALOp.GLOBAL_LOAD_SBYTE_D16_HI: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));", + GLOBALOp.GLOBAL_LOAD_SHORT_D16: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;', + GLOBALOp.GLOBAL_LOAD_SHORT_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;', + GLOBALOp.GLOBAL_ATOMIC_SWAP: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + 
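One detail worth calling out before the GLOBAL_ATOMIC_CMPSWAP entry that follows: the single-dword CMPSWAP ops in these tables pack both operands into one 64-bit DATA, the new value in DATA[31:0] and the compare value in DATA[63:32], and like every returning atomic here they hand back the pre-op memory value. A self-contained sketch of that rule (the dict-backed memory is an assumption for illustration, not the emulator's MEM):

def atomic_cmpswap_u32(mem: dict, addr: int, data: int) -> int:
  src = data & 0xffffffff          # DATA[31 : 0] = value to store
  cmp = (data >> 32) & 0xffffffff  # DATA[63 : 32] = expected value
  tmp = mem.get(addr, 0)           # old memory value
  if tmp == cmp: mem[addr] = src   # store only on match
  return tmp                       # RETURN_DATA is always the old value

m = {0x10: 7}
assert atomic_cmpswap_u32(m, 0x10, (7 << 32) | 9) == 7 and m[0x10] == 9  # matched: swapped
assert atomic_cmpswap_u32(m, 0x10, (7 << 32) | 1) == 9 and m[0x10] == 9  # no match: unchanged

The X2 variants follow the same shape with src in DATA[63:0] and cmp in DATA[127:64].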
GLOBALOp.GLOBAL_ATOMIC_CMPSWAP: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SUB: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SMIN: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_UMIN: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SMAX: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_UMAX: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_AND: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_OR: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_XOR: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_INC: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_DEC: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp', + GLOBALOp.GLOBAL_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + GLOBALOp.GLOBAL_ATOMIC_MIN_F64: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MAX_F64: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + GLOBALOp.GLOBAL_ATOMIC_SWAP_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SUB_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SMIN_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_UMIN_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SMAX_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_UMAX_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_AND_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_OR_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_XOR_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_INC_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_DEC_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? 
src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', +} + +SCRATCHOp_PCODE = { + SCRATCHOp.SCRATCH_LOAD_UBYTE: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + SCRATCHOp.SCRATCH_LOAD_SBYTE: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + SCRATCHOp.SCRATCH_LOAD_USHORT: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + SCRATCHOp.SCRATCH_LOAD_SSHORT: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + SCRATCHOp.SCRATCH_LOAD_DWORD: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', + SCRATCHOp.SCRATCH_LOAD_DWORDX2: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + SCRATCHOp.SCRATCH_LOAD_DWORDX3: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + SCRATCHOp.SCRATCH_LOAD_DWORDX4: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + SCRATCHOp.SCRATCH_STORE_BYTE: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', + SCRATCHOp.SCRATCH_STORE_BYTE_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', + SCRATCHOp.SCRATCH_STORE_SHORT: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', + SCRATCHOp.SCRATCH_STORE_SHORT_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', + SCRATCHOp.SCRATCH_STORE_DWORD: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', + SCRATCHOp.SCRATCH_STORE_DWORDX2: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + SCRATCHOp.SCRATCH_STORE_DWORDX3: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + SCRATCHOp.SCRATCH_STORE_DWORDX4: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + SCRATCHOp.SCRATCH_LOAD_UBYTE_D16: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });", + SCRATCHOp.SCRATCH_LOAD_UBYTE_D16_HI: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });", + SCRATCHOp.SCRATCH_LOAD_SBYTE_D16: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));", + SCRATCHOp.SCRATCH_LOAD_SBYTE_D16_HI: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));", + SCRATCHOp.SCRATCH_LOAD_SHORT_D16: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;', + SCRATCHOp.SCRATCH_LOAD_SHORT_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;', +} + +PSEUDOCODE_STRINGS = { + SOP1Op: SOP1Op_PCODE, + SOP2Op: SOP2Op_PCODE, + SOPCOp: SOPCOp_PCODE, + SOPKOp: 
SOPKOp_PCODE, + SOPPOp: SOPPOp_PCODE, + SMEMOp: SMEMOp_PCODE, + VOP1Op: VOP1Op_PCODE, + VOP2Op: VOP2Op_PCODE, + VOP3POp: VOP3POp_PCODE, + VOPCOp: VOPCOp_PCODE, + VOP3AOp: VOP3AOp_PCODE, + VOP3BOp: VOP3BOp_PCODE, + DSOp: DSOp_PCODE, + FLATOp: FLATOp_PCODE, + GLOBALOp: GLOBALOp_PCODE, + SCRATCHOp: SCRATCHOp_PCODE, +} \ No newline at end of file diff --git a/extra/assembly/amd/autogen/rdna3/gen_pcode.py b/extra/assembly/amd/autogen/rdna3/gen_pcode.py deleted file mode 100644 index e804e7dc1c..0000000000 --- a/extra/assembly/amd/autogen/rdna3/gen_pcode.py +++ /dev/null @@ -1,10391 +0,0 @@ -# autogenerated by pdf.py - do not edit -# to regenerate: python -m extra.assembly.amd.pdf --arch rdna3 -# ruff: noqa: E501 -# mypy: ignore-errors -from extra.assembly.amd.autogen.rdna3.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp -from extra.assembly.amd.pcode import ABSDIFF, BYTE_PERMUTE, DENORM, F, GT_NEG_ZERO, INF, LT_NEG_ZERO, MAX_FLOAT_F32, OVERFLOW_F32, OVERFLOW_F64, PI, ROUND_MODE, Reg, SAT8, SliceProxy, TWO_OVER_PI_1201, UNDERFLOW_F32, UNDERFLOW_F64, WAVE32, WAVE64, WAVE_MODE, _pack, _pack32, bf16_to_f32, cos, cvtToQuietNAN, exponent, f16_to_f32, f16_to_i16, f16_to_snorm, f16_to_u16, f16_to_unorm, f32_to_f16, f32_to_f64, f32_to_i32, f32_to_snorm, f32_to_u32, f32_to_u8, f32_to_unorm, f64_to_f32, f64_to_i32, f64_to_u32, floor, fma, fract, i16_to_f16, i32_to_f32, i32_to_f64, i32_to_i16, isEven, isNAN, isQuietNAN, isSignalNAN, ldexp, log2, mantissa, pow, s_ff1_i32_b32, s_ff1_i32_b64, sign, signext, signext_from_bit, sin, sqrt, trunc, u16_to_f16, u32_to_f32, u32_to_f64, u32_to_u16, u4_to_u32, u8_to_u32, v_cvt_i16_f32, v_cvt_u16_f32, v_max3_f16, v_max3_f32, v_max3_i16, v_max3_i32, v_max3_u16, v_max3_u32, v_max_f16, v_max_f32, v_max_i16, v_max_i32, v_max_u16, v_max_u32, v_min3_f16, v_min3_f32, v_min_f16, v_min_f32, v_min_i16, v_min_i32, v_min_u16, v_min_u32, v_msad_u8, v_sad_u8 - -def _SOP1Op_S_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _SOP1Op_S_MOV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b64 = S0.b64 - return {'D0': D0._val} - -def _SOP1Op_S_CMOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - if SCC: - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _SOP1Op_S_CMOV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - if SCC: - D0.b64 = S0.b64 - return {'D0': D0._val} - -def _SOP1Op_S_BREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[31 : 0] = S0.u32[0 : 31] - return {'D0': D0._val} - -def _SOP1Op_S_BREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[63 : 0] = S0.u64[0 : 63] - return {'D0': D0._val} - -def _SOP1Op_S_CTZ_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- 
compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(31)+1): - if S0.u32[i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CTZ_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(63)+1): - if S0.u64[i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CLZ_I32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(31)+1): - if S0.u32[31 - i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CLZ_I32_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(63)+1): - if S0.u64[63 - i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CLS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(1, int(31)+1): - if S0.u32[31 - i] != S0.u32[31]: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CLS_I32_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(1, int(63)+1): - if S0.u64[63 - i] != S0.u64[63]: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_SEXT_I32_I8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i8)) - return {'D0': D0._val} - -def _SOP1Op_S_SEXT_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i16)) - return {'D0': D0._val} - -def _SOP1Op_S_BITSET0_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[S0.u32[4 : 0]] = 0 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET0_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[S0.u32[5 : 0]] = 0 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[S0.u32[4 : 0]] = 1 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[S0.u32[5 : 0]] = 1 - return {'D0': D0._val} - -def _SOP1Op_S_BITREPLICATE_B64_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.u32) - for i in range(0, int(31)+1): - D0.u64[i * 2] = tmp[i] - D0.u64[i * 2 + 1] = tmp[i] - return {'D0': D0._val} - -def _SOP1Op_S_ABS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = ((-S0.i32) if (S0.i32 < 0) else (S0.i32)) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT0_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp += ((1) if (S0.u32[i] == 0) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT0_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp += ((1) if (S0.u64[i] == 0) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT1_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp += ((1) if (S0.u32[i] == 1) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT1_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp += ((1) if (S0.u64[i] == 1) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_QUADMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(7)+1): - tmp[i] = S0.u32[(i * 4) + (4) - 1 : (i * 4)] != 0 - D0.u32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_QUADMASK_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(15)+1): - tmp[i] = S0.u64[(i * 4) + (4) - 1 : (i * 4)] != 0 - D0.u64 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_WQM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp[i] = S0.u32[(i & 60) + (4) - 1 : (i & 60)] != 0 - D0.u32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_WQM_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp[i] = S0.u64[(i & 60) + (4) - 1 : (i & 60)] != 0 - D0.u64 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_NOT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~S0.u32 - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_NOT_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, 
VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~S0.u64 - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_AND_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 & EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 | EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XOR_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 ^ EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 ^ EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NAND_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = ~(S0.u32 & EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NAND_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NOR_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- 
- saveexec = Reg(EXEC.u32) - EXEC.u32 = ~(S0.u32 | EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XNOR_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = ~(S0.u32 ^ EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XNOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 ^ EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT0_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (~S0.u32 & EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT0_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (~S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_NOT0_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (~S0.u32 | EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_NOT0_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (~S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT1_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 & ~EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT1_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled 
pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 & ~EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_NOT1_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 | ~EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_NOT1_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 | ~EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT0_WREXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u32 = (~S0.u32 & EXEC.u32) - D0.u32 = EXEC.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT0_WREXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64 = (~S0.u64 & EXEC.u64) - D0.u64 = EXEC.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT1_WREXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u32 = (S0.u32 & ~EXEC.u32) - D0.u32 = EXEC.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT1_WREXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64 = (S0.u64 & ~EXEC.u64) - D0.u64 = EXEC.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_GETPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.i64 = PC + 4 - return {'D0': D0._val} - -def _SOP1Op_S_SETPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - PC = Reg(S0.i64) - return {'PC': PC._val} - -def _SOP1Op_S_SWAPPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - jump_addr = S0.i64 - D0.i64 = PC + 4 - PC = Reg(jump_addr.i64) - return {'D0': D0._val, 'PC': PC._val} - -def _SOP1Op_S_RFE_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - PC = Reg(S0.i64) - return {'PC': PC._val} - -def _SOP1Op_S_SENDMSG_RTN_B32(s0, s1, 
s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - VCC=Reg(vcc) - # --- compiled pseudocode --- - return {} - -def _SOP1Op_S_SENDMSG_RTN_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - VCC=Reg(vcc) - # --- compiled pseudocode --- - return {} - -def _SOP1Op_S_CEIL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 > 0.0) and (S0.f32 != D0.f32)): - D0.f32 += 1.0 - return {'D0': D0._val} - -def _SOP1Op_S_FLOOR_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 < 0.0) and (S0.f32 != D0.f32)): - D0.f32 += -1.0 - return {'D0': D0._val} - -def _SOP1Op_S_TRUNC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - return {'D0': D0._val} - -def _SOP1Op_S_RNDNE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = floor(S0.f32 + 0.5) - if (isEven(F(floor(S0.f32))) and (fract(S0.f32) == 0.5)): - D0.f32 -= 1.0 - return {'D0': D0._val} - -def _SOP1Op_S_CVT_F32_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = i32_to_f32(S0.i32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_F32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0.u32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(S0.f32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_U32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f32_to_u32(S0.f32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = f32_to_f16(S0.f32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f16_to_f32(S0.f16) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_HI_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f16_to_f32(S0[31 : 16].f16) - return {'D0': D0._val} - -def _SOP1Op_S_CEIL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 > 0.0) and (S0.f16 != D0.f16)): - D0.f16 += 1.0 - return {'D0': D0._val} - -def _SOP1Op_S_FLOOR_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- 
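# A quick plain-float check (illustrative only) that the S_RNDNE_* logic above is
# round-half-to-even: floor(x + 0.5), stepped back by one when x sits exactly on a
# half and floor(x) is already even. Python's round() uses the same tie-breaking.
import math
def rndne(x: float) -> float:
    d = math.floor(x + 0.5)
    if math.floor(x) % 2 == 0 and x - math.floor(x) == 0.5: d -= 1.0
    return d
assert all(rndne(x) == round(x) for x in (0.5, 1.5, 2.5, -0.5, -1.5, 3.7))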
compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 < 0.0) and (S0.f16 != D0.f16)): - D0.f16 += -1.0 - return {'D0': D0._val} - -def _SOP1Op_S_TRUNC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - return {'D0': D0._val} - -def _SOP1Op_S_RNDNE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = floor(S0.f16 + 0.5) - if (isEven(F(floor(S0.f16))) and (fract(S0.f16) == 0.5)): - D0.f16 -= 1.0 - return {'D0': D0._val} - -SOP1Op_FUNCTIONS = { - SOP1Op.S_MOV_B32: _SOP1Op_S_MOV_B32, - SOP1Op.S_MOV_B64: _SOP1Op_S_MOV_B64, - SOP1Op.S_CMOV_B32: _SOP1Op_S_CMOV_B32, - SOP1Op.S_CMOV_B64: _SOP1Op_S_CMOV_B64, - SOP1Op.S_BREV_B32: _SOP1Op_S_BREV_B32, - SOP1Op.S_BREV_B64: _SOP1Op_S_BREV_B64, - SOP1Op.S_CTZ_I32_B32: _SOP1Op_S_CTZ_I32_B32, - SOP1Op.S_CTZ_I32_B64: _SOP1Op_S_CTZ_I32_B64, - SOP1Op.S_CLZ_I32_U32: _SOP1Op_S_CLZ_I32_U32, - SOP1Op.S_CLZ_I32_U64: _SOP1Op_S_CLZ_I32_U64, - SOP1Op.S_CLS_I32: _SOP1Op_S_CLS_I32, - SOP1Op.S_CLS_I32_I64: _SOP1Op_S_CLS_I32_I64, - SOP1Op.S_SEXT_I32_I8: _SOP1Op_S_SEXT_I32_I8, - SOP1Op.S_SEXT_I32_I16: _SOP1Op_S_SEXT_I32_I16, - SOP1Op.S_BITSET0_B32: _SOP1Op_S_BITSET0_B32, - SOP1Op.S_BITSET0_B64: _SOP1Op_S_BITSET0_B64, - SOP1Op.S_BITSET1_B32: _SOP1Op_S_BITSET1_B32, - SOP1Op.S_BITSET1_B64: _SOP1Op_S_BITSET1_B64, - SOP1Op.S_BITREPLICATE_B64_B32: _SOP1Op_S_BITREPLICATE_B64_B32, - SOP1Op.S_ABS_I32: _SOP1Op_S_ABS_I32, - SOP1Op.S_BCNT0_I32_B32: _SOP1Op_S_BCNT0_I32_B32, - SOP1Op.S_BCNT0_I32_B64: _SOP1Op_S_BCNT0_I32_B64, - SOP1Op.S_BCNT1_I32_B32: _SOP1Op_S_BCNT1_I32_B32, - SOP1Op.S_BCNT1_I32_B64: _SOP1Op_S_BCNT1_I32_B64, - SOP1Op.S_QUADMASK_B32: _SOP1Op_S_QUADMASK_B32, - SOP1Op.S_QUADMASK_B64: _SOP1Op_S_QUADMASK_B64, - SOP1Op.S_WQM_B32: _SOP1Op_S_WQM_B32, - SOP1Op.S_WQM_B64: _SOP1Op_S_WQM_B64, - SOP1Op.S_NOT_B32: _SOP1Op_S_NOT_B32, - SOP1Op.S_NOT_B64: _SOP1Op_S_NOT_B64, - SOP1Op.S_AND_SAVEEXEC_B32: _SOP1Op_S_AND_SAVEEXEC_B32, - SOP1Op.S_AND_SAVEEXEC_B64: _SOP1Op_S_AND_SAVEEXEC_B64, - SOP1Op.S_OR_SAVEEXEC_B32: _SOP1Op_S_OR_SAVEEXEC_B32, - SOP1Op.S_OR_SAVEEXEC_B64: _SOP1Op_S_OR_SAVEEXEC_B64, - SOP1Op.S_XOR_SAVEEXEC_B32: _SOP1Op_S_XOR_SAVEEXEC_B32, - SOP1Op.S_XOR_SAVEEXEC_B64: _SOP1Op_S_XOR_SAVEEXEC_B64, - SOP1Op.S_NAND_SAVEEXEC_B32: _SOP1Op_S_NAND_SAVEEXEC_B32, - SOP1Op.S_NAND_SAVEEXEC_B64: _SOP1Op_S_NAND_SAVEEXEC_B64, - SOP1Op.S_NOR_SAVEEXEC_B32: _SOP1Op_S_NOR_SAVEEXEC_B32, - SOP1Op.S_NOR_SAVEEXEC_B64: _SOP1Op_S_NOR_SAVEEXEC_B64, - SOP1Op.S_XNOR_SAVEEXEC_B32: _SOP1Op_S_XNOR_SAVEEXEC_B32, - SOP1Op.S_XNOR_SAVEEXEC_B64: _SOP1Op_S_XNOR_SAVEEXEC_B64, - SOP1Op.S_AND_NOT0_SAVEEXEC_B32: _SOP1Op_S_AND_NOT0_SAVEEXEC_B32, - SOP1Op.S_AND_NOT0_SAVEEXEC_B64: _SOP1Op_S_AND_NOT0_SAVEEXEC_B64, - SOP1Op.S_OR_NOT0_SAVEEXEC_B32: _SOP1Op_S_OR_NOT0_SAVEEXEC_B32, - SOP1Op.S_OR_NOT0_SAVEEXEC_B64: _SOP1Op_S_OR_NOT0_SAVEEXEC_B64, - SOP1Op.S_AND_NOT1_SAVEEXEC_B32: _SOP1Op_S_AND_NOT1_SAVEEXEC_B32, - SOP1Op.S_AND_NOT1_SAVEEXEC_B64: _SOP1Op_S_AND_NOT1_SAVEEXEC_B64, - SOP1Op.S_OR_NOT1_SAVEEXEC_B32: _SOP1Op_S_OR_NOT1_SAVEEXEC_B32, - SOP1Op.S_OR_NOT1_SAVEEXEC_B64: _SOP1Op_S_OR_NOT1_SAVEEXEC_B64, - SOP1Op.S_AND_NOT0_WREXEC_B32: _SOP1Op_S_AND_NOT0_WREXEC_B32, - SOP1Op.S_AND_NOT0_WREXEC_B64: _SOP1Op_S_AND_NOT0_WREXEC_B64, - SOP1Op.S_AND_NOT1_WREXEC_B32: _SOP1Op_S_AND_NOT1_WREXEC_B32, - SOP1Op.S_AND_NOT1_WREXEC_B64: _SOP1Op_S_AND_NOT1_WREXEC_B64, - SOP1Op.S_GETPC_B64: 
_SOP1Op_S_GETPC_B64, - SOP1Op.S_SETPC_B64: _SOP1Op_S_SETPC_B64, - SOP1Op.S_SWAPPC_B64: _SOP1Op_S_SWAPPC_B64, - SOP1Op.S_RFE_B64: _SOP1Op_S_RFE_B64, - SOP1Op.S_SENDMSG_RTN_B32: _SOP1Op_S_SENDMSG_RTN_B32, - SOP1Op.S_SENDMSG_RTN_B64: _SOP1Op_S_SENDMSG_RTN_B64, - SOP1Op.S_CEIL_F32: _SOP1Op_S_CEIL_F32, - SOP1Op.S_FLOOR_F32: _SOP1Op_S_FLOOR_F32, - SOP1Op.S_TRUNC_F32: _SOP1Op_S_TRUNC_F32, - SOP1Op.S_RNDNE_F32: _SOP1Op_S_RNDNE_F32, - SOP1Op.S_CVT_F32_I32: _SOP1Op_S_CVT_F32_I32, - SOP1Op.S_CVT_F32_U32: _SOP1Op_S_CVT_F32_U32, - SOP1Op.S_CVT_I32_F32: _SOP1Op_S_CVT_I32_F32, - SOP1Op.S_CVT_U32_F32: _SOP1Op_S_CVT_U32_F32, - SOP1Op.S_CVT_F16_F32: _SOP1Op_S_CVT_F16_F32, - SOP1Op.S_CVT_F32_F16: _SOP1Op_S_CVT_F32_F16, - SOP1Op.S_CVT_HI_F32_F16: _SOP1Op_S_CVT_HI_F32_F16, - SOP1Op.S_CEIL_F16: _SOP1Op_S_CEIL_F16, - SOP1Op.S_FLOOR_F16: _SOP1Op_S_FLOOR_F16, - SOP1Op.S_TRUNC_F16: _SOP1Op_S_TRUNC_F16, - SOP1Op.S_RNDNE_F16: _SOP1Op_S_RNDNE_F16, -} - -def _SOP2Op_S_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_SUB_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32) - SCC = Reg(((1) if (S1.u32 > S0.u32) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ADD_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.i32 + S1.i32) - SCC = Reg(((S0.u32[31] == S1.u32[31]) and (S0.u32[31] != tmp.u32[31]))) - D0.i32 = tmp.i32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_SUB_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.i32 - S1.i32) - SCC = Reg(((S0.u32[31] != S1.u32[31]) and (S0.u32[31] != tmp.u32[31]))) - D0.i32 = tmp.i32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ADDC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32) + SCC.u64) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_SUBB_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32 - SCC.u32) - SCC = Reg(((1) if ((S1.u32) + SCC.u64 > (S0.u32)) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ABSDIFF_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = S0.i32 - S1.i32 - if D0.i32 < 0: - D0.i32 = -D0.i32 - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); 
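# An illustrative model (assumed, plain ints) of how the S_ADD_U32 / S_ADDC_U32 ops
# above derive SCC: the sum is formed at full width, SCC is the carry out of bit 31,
# and the destination keeps only the low 32 bits.
def s_add_u32(a: int, b: int, carry_in: int = 0):
    tmp = a + b + carry_in
    return tmp & 0xffffffff, int(tmp >= 0x100000000)

assert s_add_u32(0xffffffff, 1) == (0, 1)  # wraps, carry out
assert s_add_u32(7, 8) == (15, 0)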
S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 << S1[4 : 0].u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 << S1[5 : 0].u32) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 >> S1[4 : 0].u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 >> S1[5 : 0].u32) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ASHR_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i32) >> S1[4 : 0].u32) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ASHR_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32) - SCC = Reg(D0.i64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL1_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 1) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL2_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 2) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL3_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 3) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL4_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 4) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 < S1.i32) - D0.i32 = ((S0.i32) if (SCC) else (S1.i32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, 
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 < S1.u32) - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 >= S1.i32) - D0.i32 = ((S0.i32) if (SCC) else (S1.i32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 >= S1.u32) - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 & S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 | S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 ^ S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NAND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 & S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NAND_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 & S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 | S1.u32) - SCC = Reg(D0.u32 != 0) - return 
{'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 | S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 ^ S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XNOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 ^ S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_NOT1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & ~S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_NOT1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 & ~S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_NOT1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | ~S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_NOT1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 | ~S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc); tmp=Reg(0) - # --- compiled pseudocode --- - tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - D0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc); tmp=Reg(0) - # --- compiled pseudocode --- - tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - D0.i64 = signext_from_bit(tmp.i64, 
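# A small stand-alone model (assumed) of S_BFE_U32 above: S1 packs the bit offset in
# bits [4:0] and the field width in bits [22:16]; the field is shifted down and masked.
# A width of zero extracts nothing, since (1 << 0) - 1 == 0.
def s_bfe_u32(s0: int, s1: int) -> int:
    offset, width = s1 & 0x1f, (s1 >> 16) & 0x7f
    return (s0 >> offset) & ((1 << width) - 1)

assert s_bfe_u32(0xdeadbeef, (8 << 16) | 8) == 0xbe  # 8-bit field at offset 8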
S1[22 : 16].u32) - SCC = Reg(D0.i64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((1 << S0[4 : 0].u32) - 1) << S1[4 : 0].u32) - return {'D0': D0._val} - -def _SOP2Op_S_BFM_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64 = (((1 << S0[5 : 0].u32) - 1) << S1[5 : 0].u32) - return {'D0': D0._val} - -def _SOP2Op_S_MUL_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = S0.i32 * S1.i32 - return {'D0': D0._val} - -def _SOP2Op_S_MUL_HI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u32) * (S1.u32)) >> 32) - return {'D0': D0._val} - -def _SOP2Op_S_MUL_HI_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i32) * (S1.i32)) >> 32) - return {'D0': D0._val} - -def _SOP2Op_S_CSELECT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val} - -def _SOP2Op_S_CSELECT_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ((S0.u64) if (SCC) else (S1.u64)) - return {'D0': D0._val} - -def _SOP2Op_S_PACK_LL_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[15 : 0].u16, S0[15 : 0].u16)) - return {'D0': D0._val} - -def _SOP2Op_S_PACK_LH_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[31 : 16].u16, S0[15 : 0].u16)) - return {'D0': D0._val} - -def _SOP2Op_S_PACK_HH_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[31 : 16].u16, S0[31 : 16].u16)) - return {'D0': D0._val} - -def _SOP2Op_S_PACK_HL_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[15 : 0].u16, S0[31 : 16].u16)) - return {'D0': D0._val} - -def _SOP2Op_S_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + S1.f32 - return {'D0': D0._val} - -def _SOP2Op_S_SUB_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 - S1.f32 - return {'D0': D0._val} - -def _SOP2Op_S_MIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0,
pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f32)): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isSignalNAN(F(S1.f32)): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isQuietNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isQuietNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif LT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - else: - if isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif LT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _SOP2Op_S_MAX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f32)): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isSignalNAN(F(S1.f32)): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isQuietNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isQuietNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif GT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - else: - if isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif GT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _SOP2Op_S_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _SOP2Op_S_FMAAK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32) - return {'D0': D0._val} - -def _SOP2Op_S_FMAMK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32) - return {'D0': D0._val} - -def _SOP2Op_S_FMAC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, D0.f32) - return {'D0': D0._val} - -def _SOP2Op_S_CVT_PK_RTZ_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - prev_mode = ROUND_MODE - tmp[15 : 0].f16 = f32_to_f16(S0.f32) - tmp[31 : 16].f16 = f32_to_f16(S1.f32) - D0.b32 = tmp.b32 - return {'D0': D0._val} - -def _SOP2Op_S_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + S1.f16 - return {'D0': D0._val} - -def _SOP2Op_S_SUB_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 - S1.f16 - return {'D0': D0._val} - -def _SOP2Op_S_MIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f16)): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif isSignalNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isQuietNAN(F(S1.f16)):
- D0.f16 = S0.f16 - elif isQuietNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif LT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - else: - if isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif LT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - return {'D0': D0._val} - -def _SOP2Op_S_MAX_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f16)): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif isSignalNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isQuietNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isQuietNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif GT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - else: - if isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif GT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - return {'D0': D0._val} - -def _SOP2Op_S_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * S1.f16 - return {'D0': D0._val} - -def _SOP2Op_S_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, S1.f16, D0.f16) - return {'D0': D0._val} - -SOP2Op_FUNCTIONS = { - SOP2Op.S_ADD_U32: _SOP2Op_S_ADD_U32, - SOP2Op.S_SUB_U32: _SOP2Op_S_SUB_U32, - SOP2Op.S_ADD_I32: _SOP2Op_S_ADD_I32, - SOP2Op.S_SUB_I32: _SOP2Op_S_SUB_I32, - SOP2Op.S_ADDC_U32: _SOP2Op_S_ADDC_U32, - SOP2Op.S_SUBB_U32: _SOP2Op_S_SUBB_U32, - SOP2Op.S_ABSDIFF_I32: _SOP2Op_S_ABSDIFF_I32, - SOP2Op.S_LSHL_B32: _SOP2Op_S_LSHL_B32, - SOP2Op.S_LSHL_B64: _SOP2Op_S_LSHL_B64, - SOP2Op.S_LSHR_B32: _SOP2Op_S_LSHR_B32, - SOP2Op.S_LSHR_B64: _SOP2Op_S_LSHR_B64, - SOP2Op.S_ASHR_I32: _SOP2Op_S_ASHR_I32, - SOP2Op.S_ASHR_I64: _SOP2Op_S_ASHR_I64, - SOP2Op.S_LSHL1_ADD_U32: _SOP2Op_S_LSHL1_ADD_U32, - SOP2Op.S_LSHL2_ADD_U32: _SOP2Op_S_LSHL2_ADD_U32, - SOP2Op.S_LSHL3_ADD_U32: _SOP2Op_S_LSHL3_ADD_U32, - SOP2Op.S_LSHL4_ADD_U32: _SOP2Op_S_LSHL4_ADD_U32, - SOP2Op.S_MIN_I32: _SOP2Op_S_MIN_I32, - SOP2Op.S_MIN_U32: _SOP2Op_S_MIN_U32, - SOP2Op.S_MAX_I32: _SOP2Op_S_MAX_I32, - SOP2Op.S_MAX_U32: _SOP2Op_S_MAX_U32, - SOP2Op.S_AND_B32: _SOP2Op_S_AND_B32, - SOP2Op.S_AND_B64: _SOP2Op_S_AND_B64, - SOP2Op.S_OR_B32: _SOP2Op_S_OR_B32, - SOP2Op.S_OR_B64: _SOP2Op_S_OR_B64, - SOP2Op.S_XOR_B32: _SOP2Op_S_XOR_B32, - SOP2Op.S_XOR_B64: _SOP2Op_S_XOR_B64, - SOP2Op.S_NAND_B32: _SOP2Op_S_NAND_B32, - SOP2Op.S_NAND_B64: _SOP2Op_S_NAND_B64, - SOP2Op.S_NOR_B32: _SOP2Op_S_NOR_B32, - SOP2Op.S_NOR_B64: _SOP2Op_S_NOR_B64, - SOP2Op.S_XNOR_B32: _SOP2Op_S_XNOR_B32, - SOP2Op.S_XNOR_B64: _SOP2Op_S_XNOR_B64, - SOP2Op.S_AND_NOT1_B32: _SOP2Op_S_AND_NOT1_B32, - SOP2Op.S_AND_NOT1_B64: _SOP2Op_S_AND_NOT1_B64, - SOP2Op.S_OR_NOT1_B32: _SOP2Op_S_OR_NOT1_B32, - SOP2Op.S_OR_NOT1_B64: _SOP2Op_S_OR_NOT1_B64, - SOP2Op.S_BFE_U32: _SOP2Op_S_BFE_U32, - SOP2Op.S_BFE_I32: _SOP2Op_S_BFE_I32, - SOP2Op.S_BFE_U64: _SOP2Op_S_BFE_U64, - SOP2Op.S_BFE_I64: _SOP2Op_S_BFE_I64, - SOP2Op.S_BFM_B32: _SOP2Op_S_BFM_B32, - SOP2Op.S_BFM_B64: _SOP2Op_S_BFM_B64, - SOP2Op.S_MUL_I32: _SOP2Op_S_MUL_I32, - SOP2Op.S_MUL_HI_U32: _SOP2Op_S_MUL_HI_U32, - SOP2Op.S_MUL_HI_I32: _SOP2Op_S_MUL_HI_I32, - SOP2Op.S_CSELECT_B32: _SOP2Op_S_CSELECT_B32, - 
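# An illustrative reduction (assumed; the signed-zero ordering done by LT_NEG_ZERO is
# ignored here) of the non-IEEE branch of the S_MIN_F32/S_MIN_F16 ops above: a NaN
# operand is simply dropped in favour of the other input, unlike Python's min().
import math
def s_min_nonieee(a: float, b: float) -> float:
    if math.isnan(b): return a
    if math.isnan(a): return b
    return a if a < b else b

nan = float("nan")
assert s_min_nonieee(nan, 2.0) == 2.0 and s_min_nonieee(2.0, nan) == 2.0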
SOP2Op.S_CSELECT_B64: _SOP2Op_S_CSELECT_B64, - SOP2Op.S_PACK_LL_B32_B16: _SOP2Op_S_PACK_LL_B32_B16, - SOP2Op.S_PACK_LH_B32_B16: _SOP2Op_S_PACK_LH_B32_B16, - SOP2Op.S_PACK_HH_B32_B16: _SOP2Op_S_PACK_HH_B32_B16, - SOP2Op.S_PACK_HL_B32_B16: _SOP2Op_S_PACK_HL_B32_B16, - SOP2Op.S_ADD_F32: _SOP2Op_S_ADD_F32, - SOP2Op.S_SUB_F32: _SOP2Op_S_SUB_F32, - SOP2Op.S_MIN_F32: _SOP2Op_S_MIN_F32, - SOP2Op.S_MAX_F32: _SOP2Op_S_MAX_F32, - SOP2Op.S_MUL_F32: _SOP2Op_S_MUL_F32, - SOP2Op.S_FMAAK_F32: _SOP2Op_S_FMAAK_F32, - SOP2Op.S_FMAMK_F32: _SOP2Op_S_FMAMK_F32, - SOP2Op.S_FMAC_F32: _SOP2Op_S_FMAC_F32, - SOP2Op.S_CVT_PK_RTZ_F16_F32: _SOP2Op_S_CVT_PK_RTZ_F16_F32, - SOP2Op.S_ADD_F16: _SOP2Op_S_ADD_F16, - SOP2Op.S_SUB_F16: _SOP2Op_S_SUB_F16, - SOP2Op.S_MIN_F16: _SOP2Op_S_MIN_F16, - SOP2Op.S_MAX_F16: _SOP2Op_S_MAX_F16, - SOP2Op.S_MUL_F16: _SOP2Op_S_MUL_F16, - SOP2Op.S_FMAC_F16: _SOP2Op_S_FMAC_F16, -} - -def _SOPCOp_S_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 == S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LG_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 != S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 > S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 >= S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 < S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 <= S1.i32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 == S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LG_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 != S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 > S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 >= S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- 
compiled pseudocode --- - SCC = Reg(S0.u32 < S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 <= S1.u32) - return {'SCC': SCC._val} - -def _SOPCOp_S_BITCMP0_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32[S1.u32[4 : 0]] == 0) - return {'SCC': SCC._val} - -def _SOPCOp_S_BITCMP1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32[S1.u32[4 : 0]] == 1) - return {'SCC': SCC._val} - -def _SOPCOp_S_BITCMP0_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u64[S1.u32[5 : 0]] == 0) - return {'SCC': SCC._val} - -def _SOPCOp_S_BITCMP1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u64[S1.u32[5 : 0]] == 1) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u64 == S1.u64) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LG_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u64 != S1.u64) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f32 < S1.f32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f16 < S1.f16) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f32 == S1.f32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f16 == S1.f16) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f32 <= S1.f32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f16 <= S1.f16) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled 
pseudocode --- - SCC = Reg(S0.f32 > S1.f32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f16 > S1.f16) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f32 != S1.f32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f16 != S1.f16) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f32 >= S1.f32) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.f16 >= S1.f16) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32)))) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16)))) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg((isNAN(F(S0.f32)) or isNAN(F(S1.f32)))) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg((isNAN(F(S0.f16)) or isNAN(F(S1.f16)))) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f32 >= S1.f32)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f16 >= S1.f16)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f32 != S1.f32)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f16 != S1.f16)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
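# A short demonstration (plain floats, illustrative only) of why the inverted compares
# defined here (S_CMP_NGE through S_CMP_NLT) exist as distinct opcodes: with a NaN
# operand, "not (a < b)" is true while "a >= b" is false, so S_CMP_NLT_F32 and
# S_CMP_GE_F32 differ exactly on NaN inputs.
nan = float("nan")
assert (not (nan < 1.0)) is True  # S_CMP_NLT_F32 -> SCC = 1
assert (nan >= 1.0) is False      # S_CMP_GE_F32  -> SCC = 0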
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f32 > S1.f32)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f16 > S1.f16)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f32 <= S1.f32)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f16 <= S1.f16)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f32 == S1.f32)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f16 == S1.f16)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f32 < S1.f32)) - return {'SCC': SCC._val} - -def _SOPCOp_S_CMP_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg( not (S0.f16 < S1.f16)) - return {'SCC': SCC._val} - -SOPCOp_FUNCTIONS = { - SOPCOp.S_CMP_EQ_I32: _SOPCOp_S_CMP_EQ_I32, - SOPCOp.S_CMP_LG_I32: _SOPCOp_S_CMP_LG_I32, - SOPCOp.S_CMP_GT_I32: _SOPCOp_S_CMP_GT_I32, - SOPCOp.S_CMP_GE_I32: _SOPCOp_S_CMP_GE_I32, - SOPCOp.S_CMP_LT_I32: _SOPCOp_S_CMP_LT_I32, - SOPCOp.S_CMP_LE_I32: _SOPCOp_S_CMP_LE_I32, - SOPCOp.S_CMP_EQ_U32: _SOPCOp_S_CMP_EQ_U32, - SOPCOp.S_CMP_LG_U32: _SOPCOp_S_CMP_LG_U32, - SOPCOp.S_CMP_GT_U32: _SOPCOp_S_CMP_GT_U32, - SOPCOp.S_CMP_GE_U32: _SOPCOp_S_CMP_GE_U32, - SOPCOp.S_CMP_LT_U32: _SOPCOp_S_CMP_LT_U32, - SOPCOp.S_CMP_LE_U32: _SOPCOp_S_CMP_LE_U32, - SOPCOp.S_BITCMP0_B32: _SOPCOp_S_BITCMP0_B32, - SOPCOp.S_BITCMP1_B32: _SOPCOp_S_BITCMP1_B32, - SOPCOp.S_BITCMP0_B64: _SOPCOp_S_BITCMP0_B64, - SOPCOp.S_BITCMP1_B64: _SOPCOp_S_BITCMP1_B64, - SOPCOp.S_CMP_EQ_U64: _SOPCOp_S_CMP_EQ_U64, - SOPCOp.S_CMP_LG_U64: _SOPCOp_S_CMP_LG_U64, - SOPCOp.S_CMP_LT_F32: _SOPCOp_S_CMP_LT_F32, - SOPCOp.S_CMP_LT_F16: _SOPCOp_S_CMP_LT_F16, - SOPCOp.S_CMP_EQ_F32: _SOPCOp_S_CMP_EQ_F32, - SOPCOp.S_CMP_EQ_F16: _SOPCOp_S_CMP_EQ_F16, - SOPCOp.S_CMP_LE_F32: _SOPCOp_S_CMP_LE_F32, - SOPCOp.S_CMP_LE_F16: _SOPCOp_S_CMP_LE_F16, - SOPCOp.S_CMP_GT_F32: _SOPCOp_S_CMP_GT_F32, - SOPCOp.S_CMP_GT_F16: _SOPCOp_S_CMP_GT_F16, - SOPCOp.S_CMP_LG_F32: _SOPCOp_S_CMP_LG_F32, - SOPCOp.S_CMP_LG_F16: _SOPCOp_S_CMP_LG_F16, - SOPCOp.S_CMP_GE_F32: _SOPCOp_S_CMP_GE_F32, - SOPCOp.S_CMP_GE_F16: _SOPCOp_S_CMP_GE_F16, - SOPCOp.S_CMP_O_F32: _SOPCOp_S_CMP_O_F32, - SOPCOp.S_CMP_O_F16: _SOPCOp_S_CMP_O_F16, - SOPCOp.S_CMP_U_F32: _SOPCOp_S_CMP_U_F32, - SOPCOp.S_CMP_U_F16: _SOPCOp_S_CMP_U_F16, - SOPCOp.S_CMP_NGE_F32: 
_SOPCOp_S_CMP_NGE_F32, - SOPCOp.S_CMP_NGE_F16: _SOPCOp_S_CMP_NGE_F16, - SOPCOp.S_CMP_NLG_F32: _SOPCOp_S_CMP_NLG_F32, - SOPCOp.S_CMP_NLG_F16: _SOPCOp_S_CMP_NLG_F16, - SOPCOp.S_CMP_NGT_F32: _SOPCOp_S_CMP_NGT_F32, - SOPCOp.S_CMP_NGT_F16: _SOPCOp_S_CMP_NGT_F16, - SOPCOp.S_CMP_NLE_F32: _SOPCOp_S_CMP_NLE_F32, - SOPCOp.S_CMP_NLE_F16: _SOPCOp_S_CMP_NLE_F16, - SOPCOp.S_CMP_NEQ_F32: _SOPCOp_S_CMP_NEQ_F32, - SOPCOp.S_CMP_NEQ_F16: _SOPCOp_S_CMP_NEQ_F16, - SOPCOp.S_CMP_NLT_F32: _SOPCOp_S_CMP_NLT_F32, - SOPCOp.S_CMP_NLT_F16: _SOPCOp_S_CMP_NLT_F16, -} - -def _SOPKOp_S_MOVK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); SIMM16=Reg(literal) - # --- compiled pseudocode --- - D0.i32 = (signext(SIMM16.i16)) - return {'D0': D0._val} - -def _SOPKOp_S_VERSION(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - # --- compiled pseudocode --- - return {} - -def _SOPKOp_S_CMOVK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - if SCC: - D0.i32 = (signext(SIMM16.i16)) - return {'D0': D0._val} - -def _SOPKOp_S_CMPK_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg((S0.i32) == signext(SIMM16.i16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LG_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg((S0.i32) != signext(SIMM16.i16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg((S0.i32) > signext(SIMM16.i16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg((S0.i32) >= signext(SIMM16.i16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg((S0.i32) < signext(SIMM16.i16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg((S0.i32) <= signext(SIMM16.i16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 == (SIMM16.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LG_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 != (SIMM16.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); 
SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 > (SIMM16.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 >= (SIMM16.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 < (SIMM16.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_CMPK_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 <= (SIMM16.u16)) - return {'SCC': SCC._val} - -def _SOPKOp_S_ADDK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); SCC=Reg(scc); SIMM16=Reg(literal) - # --- compiled pseudocode --- - tmp = Reg(D0.i32) - D0.i32 = ((D0.i32) + signext(SIMM16.i16)) - SCC = Reg(((tmp[31] == SIMM16.i16[15]) and (tmp[31] != D0.i32[31]))) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOPKOp_S_MULK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); SIMM16=Reg(literal) - # --- compiled pseudocode --- - D0.i32 = ((D0.i32) * signext(SIMM16.i16)) - return {'D0': D0._val} - -def _SOPKOp_S_CALL_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - D0.i64 = PC + 4 - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - return {'D0': D0._val, 'PC': PC._val} - -SOPKOp_FUNCTIONS = { - SOPKOp.S_MOVK_I32: _SOPKOp_S_MOVK_I32, - SOPKOp.S_VERSION: _SOPKOp_S_VERSION, - SOPKOp.S_CMOVK_I32: _SOPKOp_S_CMOVK_I32, - SOPKOp.S_CMPK_EQ_I32: _SOPKOp_S_CMPK_EQ_I32, - SOPKOp.S_CMPK_LG_I32: _SOPKOp_S_CMPK_LG_I32, - SOPKOp.S_CMPK_GT_I32: _SOPKOp_S_CMPK_GT_I32, - SOPKOp.S_CMPK_GE_I32: _SOPKOp_S_CMPK_GE_I32, - SOPKOp.S_CMPK_LT_I32: _SOPKOp_S_CMPK_LT_I32, - SOPKOp.S_CMPK_LE_I32: _SOPKOp_S_CMPK_LE_I32, - SOPKOp.S_CMPK_EQ_U32: _SOPKOp_S_CMPK_EQ_U32, - SOPKOp.S_CMPK_LG_U32: _SOPKOp_S_CMPK_LG_U32, - SOPKOp.S_CMPK_GT_U32: _SOPKOp_S_CMPK_GT_U32, - SOPKOp.S_CMPK_GE_U32: _SOPKOp_S_CMPK_GE_U32, - SOPKOp.S_CMPK_LT_U32: _SOPKOp_S_CMPK_LT_U32, - SOPKOp.S_CMPK_LE_U32: _SOPKOp_S_CMPK_LE_U32, - SOPKOp.S_ADDK_I32: _SOPKOp_S_ADDK_I32, - SOPKOp.S_MULK_I32: _SOPKOp_S_MULK_I32, - SOPKOp.S_CALL_B64: _SOPKOp_S_CALL_B64, -} - -def _SOPPOp_S_NOP(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - SIMM16=Reg(literal) - # --- compiled pseudocode --- - for i in range(0, int(SIMM16.u16[3 : 0].u32)+1): - pass - return {} - -def _SOPPOp_S_DELAY_ALU(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - return {} - -def _SOPPOp_S_TRAP(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - return {'PC': PC._val} - -def _SOPPOp_S_BRANCH(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled 
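# A stand-alone sketch (assumed) of the signext(SIMM16.i16) step used by the SOPK ops
# above: the 16-bit literal is sign-extended with the usual flip-and-subtract idiom
# before being compared or added at 32 bits.
def signext16(x: int) -> int:
    return ((x & 0xffff) ^ 0x8000) - 0x8000

assert signext16(0x7fff) == 32767
assert signext16(0x8000) == -32768
assert signext16(0xffff) == -1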
pseudocode --- - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_SCC0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - SCC=Reg(scc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if SCC == 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'SCC': SCC._val, 'PC': PC._val} - -def _SOPPOp_S_CBRANCH_SCC1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - SCC=Reg(scc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if SCC == 1: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'SCC': SCC._val, 'PC': PC._val} - -def _SOPPOp_S_CBRANCH_VCCZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); VCCZ=Reg(1 if VCC._val == 0 else 0) - # --- compiled pseudocode --- - if VCCZ.u1 == 1: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_VCCNZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); VCCZ=Reg(1 if VCC._val == 0 else 0) - # --- compiled pseudocode --- - if VCCZ.u1 == 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_EXECZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); EXECZ=Reg(1 if EXEC._val == 0 else 0) - # --- compiled pseudocode --- - if EXECZ.u1 == 1: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_EXECNZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); EXECZ=Reg(1 if EXEC._val == 0 else 0) - # --- compiled pseudocode --- - if EXECZ.u1 == 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_CDBGSYS(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if WAVE_STATUS.COND_DBG_SYS.u32 != 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_CDBGUSER(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if WAVE_STATUS.COND_DBG_USER.u32 != 0: - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_CDBGSYS_OR_USER(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if (WAVE_STATUS.COND_DBG_SYS or WAVE_STATUS.COND_DBG_USER): - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -def _SOPPOp_S_CBRANCH_CDBGSYS_AND_USER(s0, s1, s2, d0, scc, vcc, laneId, 
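# An illustrative model (assumed) of the taken-branch update shared by S_BRANCH and the
# S_CBRANCH_* ops above: SIMM16 counts dwords, so it is sign-extended, scaled by 4, and
# added past the 4-byte branch instruction itself.
def branch_target(pc: int, simm16: int) -> int:
    off = (((simm16 & 0xffff) ^ 0x8000) - 0x8000) * 4
    return pc + off + 4

assert branch_target(0x1000, 2) == 0x100c
assert branch_target(0x1000, 0xffff) == 0x1000  # SIMM16 = -1 branches to itself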
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal) - # --- compiled pseudocode --- - if (WAVE_STATUS.COND_DBG_SYS and WAVE_STATUS.COND_DBG_USER): - PC = Reg(PC + signext(SIMM16.i16 * 4) + 4) - else: - PC = Reg(PC + 4) - return {'PC': PC._val} - -SOPPOp_FUNCTIONS = { - SOPPOp.S_NOP: _SOPPOp_S_NOP, - SOPPOp.S_DELAY_ALU: _SOPPOp_S_DELAY_ALU, - SOPPOp.S_TRAP: _SOPPOp_S_TRAP, - SOPPOp.S_BRANCH: _SOPPOp_S_BRANCH, - SOPPOp.S_CBRANCH_SCC0: _SOPPOp_S_CBRANCH_SCC0, - SOPPOp.S_CBRANCH_SCC1: _SOPPOp_S_CBRANCH_SCC1, - SOPPOp.S_CBRANCH_VCCZ: _SOPPOp_S_CBRANCH_VCCZ, - SOPPOp.S_CBRANCH_VCCNZ: _SOPPOp_S_CBRANCH_VCCNZ, - SOPPOp.S_CBRANCH_EXECZ: _SOPPOp_S_CBRANCH_EXECZ, - SOPPOp.S_CBRANCH_EXECNZ: _SOPPOp_S_CBRANCH_EXECNZ, - SOPPOp.S_CBRANCH_CDBGSYS: _SOPPOp_S_CBRANCH_CDBGSYS, - SOPPOp.S_CBRANCH_CDBGUSER: _SOPPOp_S_CBRANCH_CDBGUSER, - SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: _SOPPOp_S_CBRANCH_CDBGSYS_OR_USER, - SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: _SOPPOp_S_CBRANCH_CDBGSYS_AND_USER, -} - -def _SMEMOp_S_LOAD_B32(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_LOAD_B64(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - SDATA[63 : 32] = MEM[ADDR + 4].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_LOAD_B128(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - SDATA[63 : 32] = MEM[ADDR + 4].b32 - SDATA[95 : 64] = MEM[ADDR + 8].b32 - SDATA[127 : 96] = MEM[ADDR + 12].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_LOAD_B256(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - SDATA[63 : 32] = MEM[ADDR + 4].b32 - SDATA[95 : 64] = MEM[ADDR + 8].b32 - SDATA[127 : 96] = MEM[ADDR + 12].b32 - SDATA[159 : 128] = MEM[ADDR + 16].b32 - SDATA[191 : 160] = MEM[ADDR + 20].b32 - SDATA[223 : 192] = MEM[ADDR + 24].b32 - SDATA[255 : 224] = MEM[ADDR + 28].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_LOAD_B512(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - SDATA[63 : 32] = MEM[ADDR + 4].b32 - SDATA[95 : 64] = MEM[ADDR + 8].b32 - SDATA[127 : 96] = MEM[ADDR + 12].b32 - SDATA[159 : 128] = MEM[ADDR + 16].b32 - SDATA[191 : 160] = MEM[ADDR + 20].b32 - SDATA[223 : 192] = MEM[ADDR + 24].b32 - SDATA[255 : 224] = MEM[ADDR + 28].b32 - SDATA[287 : 256] = MEM[ADDR + 32].b32 - SDATA[319 : 288] = MEM[ADDR + 36].b32 - SDATA[351 : 320] = MEM[ADDR + 40].b32 - SDATA[383 : 352] = MEM[ADDR + 44].b32 - SDATA[415 : 384] = MEM[ADDR + 48].b32 - SDATA[447 : 416] = MEM[ADDR + 52].b32 - SDATA[479 : 448] = MEM[ADDR + 56].b32 - SDATA[511 : 480] = MEM[ADDR + 60].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_B32(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_B64(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - SDATA[63 : 32] = MEM[ADDR + 4].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_B128(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - SDATA[63 : 32] = MEM[ADDR + 4].b32 - SDATA[95 : 64] = MEM[ADDR + 8].b32 - SDATA[127 : 96] = MEM[ADDR + 12].b32 - return {'SDATA': SDATA._val} - -def 
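# A minimal sketch (assumed) of the S_LOAD_B*/S_BUFFER_LOAD_B* pattern above: N
# consecutive little-endian dwords are read from memory and packed into one wide
# integer, with dword i landing at bits [32*i + 31 : 32*i] of SDATA.
import struct
def s_load(mem: bytes, addr: int, ndwords: int) -> int:
    sdata = 0
    for i in range(ndwords):
        (dw,) = struct.unpack_from("<I", mem, addr + 4 * i)
        sdata |= dw << (32 * i)
    return sdata

assert s_load(struct.pack("<4I", 1, 2, 3, 4), 0, 2) == (2 << 32) | 1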
_SMEMOp_S_BUFFER_LOAD_B256(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - SDATA[63 : 32] = MEM[ADDR + 4].b32 - SDATA[95 : 64] = MEM[ADDR + 8].b32 - SDATA[127 : 96] = MEM[ADDR + 12].b32 - SDATA[159 : 128] = MEM[ADDR + 16].b32 - SDATA[191 : 160] = MEM[ADDR + 20].b32 - SDATA[223 : 192] = MEM[ADDR + 24].b32 - SDATA[255 : 224] = MEM[ADDR + 28].b32 - return {'SDATA': SDATA._val} - -def _SMEMOp_S_BUFFER_LOAD_B512(MEM, addr): - ADDR=Reg(addr); SDATA=Reg(0) - # --- compiled pseudocode --- - SDATA[31 : 0] = MEM[ADDR].b32 - SDATA[63 : 32] = MEM[ADDR + 4].b32 - SDATA[95 : 64] = MEM[ADDR + 8].b32 - SDATA[127 : 96] = MEM[ADDR + 12].b32 - SDATA[159 : 128] = MEM[ADDR + 16].b32 - SDATA[191 : 160] = MEM[ADDR + 20].b32 - SDATA[223 : 192] = MEM[ADDR + 24].b32 - SDATA[255 : 224] = MEM[ADDR + 28].b32 - SDATA[287 : 256] = MEM[ADDR + 32].b32 - SDATA[319 : 288] = MEM[ADDR + 36].b32 - SDATA[351 : 320] = MEM[ADDR + 40].b32 - SDATA[383 : 352] = MEM[ADDR + 44].b32 - SDATA[415 : 384] = MEM[ADDR + 48].b32 - SDATA[447 : 416] = MEM[ADDR + 52].b32 - SDATA[479 : 448] = MEM[ADDR + 56].b32 - SDATA[511 : 480] = MEM[ADDR + 60].b32 - return {'SDATA': SDATA._val} - -SMEMOp_FUNCTIONS = { - SMEMOp.S_LOAD_B32: _SMEMOp_S_LOAD_B32, - SMEMOp.S_LOAD_B64: _SMEMOp_S_LOAD_B64, - SMEMOp.S_LOAD_B128: _SMEMOp_S_LOAD_B128, - SMEMOp.S_LOAD_B256: _SMEMOp_S_LOAD_B256, - SMEMOp.S_LOAD_B512: _SMEMOp_S_LOAD_B512, - SMEMOp.S_BUFFER_LOAD_B32: _SMEMOp_S_BUFFER_LOAD_B32, - SMEMOp.S_BUFFER_LOAD_B64: _SMEMOp_S_BUFFER_LOAD_B64, - SMEMOp.S_BUFFER_LOAD_B128: _SMEMOp_S_BUFFER_LOAD_B128, - SMEMOp.S_BUFFER_LOAD_B256: _SMEMOp_S_BUFFER_LOAD_B256, - SMEMOp.S_BUFFER_LOAD_B512: _SMEMOp_S_BUFFER_LOAD_B512, -} - -def _VOP1Op_V_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _VOP1Op_V_READFIRSTLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); EXEC=Reg(exec_mask); SRC0=Reg(src0_idx); EXEC_LO=SliceProxy(EXEC, 31, 0) - # --- compiled pseudocode --- - if WAVE64: - if EXEC == 0x0: - lane = 0 - else: - lane = (s_ff1_i32_b64(EXEC)) - else: - if EXEC_LO.i32 == 0: - lane = 0 - else: - lane = (s_ff1_i32_b32(EXEC_LO)) - D0.b32 = VGPR[lane][SRC0.u32] - return {'D0': D0._val} - -def _VOP1Op_V_CVT_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f64_to_i32(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = i32_to_f64(S0.i32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = i32_to_f32(S0.i32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0.u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_U32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = 
f32_to_u32(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = f32_to_f16(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f16_to_f32(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_NEAREST_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(floor(S0.f32 + 0.5)) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_FLOOR_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(floor(S0.f32)) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f64_to_f32(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F64_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = f32_to_f64(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_UBYTE0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[7 : 0].u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_UBYTE1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[15 : 8].u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_UBYTE2(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[23 : 16].u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F32_UBYTE3(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[31 : 24].u32) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_U32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f64_to_u32(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = u32_to_f64(S0.u32) - return {'D0': D0._val} - -def _VOP1Op_V_TRUNC_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_CEIL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # 
--- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - if ((S0.f64 > 0.0) and (S0.f64 != D0.f64)): - D0.f64 += 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_RNDNE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = floor(S0.f64 + 0.5) - if (isEven(floor(S0.f64)) and (fract(S0.f64) == 0.5)): - D0.f64 -= 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_FLOOR_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - if ((S0.f64 < 0.0) and (S0.f64 != D0.f64)): - D0.f64 += -1.0 - return {'D0': D0._val} - -def _VOP1Op_V_MOV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b16 = S0.b16 - return {'D0': D0._val} - -def _VOP1Op_V_FRACT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + -floor(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_TRUNC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_CEIL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 > 0.0) and (S0.f32 != D0.f32)): - D0.f32 += 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_RNDNE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = floor(S0.f32 + 0.5) - if (isEven(F(floor(S0.f32))) and (fract(S0.f32) == 0.5)): - D0.f32 -= 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_FLOOR_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 < 0.0) and (S0.f32 != D0.f32)): - D0.f32 += -1.0 - return {'D0': D0._val} - -def _VOP1Op_V_EXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = pow(2.0, S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_LOG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = log2(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_RCP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / S0.f32 - return {'D0': D0._val} - -def _VOP1Op_V_RCP_IFLAG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / S0.f32 - return {'D0': D0._val} - -def _VOP1Op_V_RSQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / sqrt(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_RCP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, 
vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = 1.0 / S0.f64 - return {'D0': D0._val} - -def _VOP1Op_V_RSQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = 1.0 / sqrt(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_SQRT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = sqrt(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_SQRT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = sqrt(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_SIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = sin(S0.f32 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP1Op_V_COS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = cos(S0.f32 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP1Op_V_NOT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~S0.u32 - return {'D0': D0._val} - -def _VOP1Op_V_BFREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[31 : 0] = S0.u32[0 : 31] - return {'D0': D0._val} - -def _VOP1Op_V_CLZ_I32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(0, int(31)+1): - if S0.u32[31 - i] == 1: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP1Op_V_CTZ_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(0, int(31)+1): - if S0.u32[i] == 1: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP1Op_V_CLS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(1, int(31)+1): - if S0.i32[31 - i] != S0.i32[31]: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_EXP_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)): - D0.i32 = 0 - else: - D0.i32 = exponent(S0.f64) - 1023 + 1 - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_MANT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)): - D0.f64 = S0.f64 - else: - D0.f64 = mantissa(S0.f64) - return {'D0': D0._val} - -def _VOP1Op_V_FRACT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 + -floor(S0.f64) - return {'D0': D0._val} - -def 
_VOP1Op_V_FREXP_EXP_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))): - D0.i32 = 0 - else: - D0.i32 = exponent(S0.f32) - 127 + 1 - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_MANT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))): - D0.f32 = S0.f32 - else: - D0.f32 = mantissa(S0.f32) - return {'D0': D0._val} - -def _VOP1Op_V_MOVRELS_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - addr = SRC0.u32 - D0.b32 = VGPR[laneId][addr].b32 - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F16_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = u16_to_f16(S0.u16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_F16_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = i16_to_f16(S0.i16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = f16_to_u16(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = f16_to_i16(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_RCP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 1.0 / S0.f16 - return {'D0': D0._val} - -def _VOP1Op_V_SQRT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = sqrt(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_RSQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 1.0 / sqrt(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_LOG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = log2(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_EXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = pow(2.0, S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_MANT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))): - D0.f16 = S0.f16 - else: - D0.f16 = mantissa(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_FREXP_EXP_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f16) == INF) 
or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))): - D0.i16 = 0 - else: - D0.i16 = (exponent(S0.f16) - 15 + 1) - return {'D0': D0._val} - -def _VOP1Op_V_FLOOR_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 < 0.0) and (S0.f16 != D0.f16)): - D0.f16 += -1.0 - return {'D0': D0._val} - -def _VOP1Op_V_CEIL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 > 0.0) and (S0.f16 != D0.f16)): - D0.f16 += 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_TRUNC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_RNDNE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = floor(S0.f16 + 0.5) - if (isEven(F(floor(S0.f16))) and (fract(S0.f16) == 0.5)): - D0.f16 -= 1.0 - return {'D0': D0._val} - -def _VOP1Op_V_FRACT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + -floor(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_SIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = sin(S0.f16 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP1Op_V_COS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = cos(S0.f16 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP1Op_V_SAT_PK_U8_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b16 = _pack(SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16)) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_NORM_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = f16_to_snorm(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_NORM_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = f16_to_unorm(S0.f16) - return {'D0': D0._val} - -def _VOP1Op_V_SWAP_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.b32) - D0.b32 = S0.b32 - S0.b32 = tmp - return {'D0': D0._val, 'S0': S0._val}  # swap writes both operands; 'S0' write-back key assumed supported by the emulator - -def _VOP1Op_V_SWAP_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.b16) - D0.b16 = S0.b16 - S0.b16 = tmp - return {'D0': D0._val, 'S0': S0._val}  # same assumption as V_SWAP_B32 - -def _VOP1Op_V_NOT_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = ~S0.u16 - return {'D0': D0._val} - -def _VOP1Op_V_CVT_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, 
vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i16)) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_U32_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(0, S0.u16)) - return {'D0': D0._val} - -VOP1Op_FUNCTIONS = { - VOP1Op.V_MOV_B32: _VOP1Op_V_MOV_B32, - VOP1Op.V_READFIRSTLANE_B32: _VOP1Op_V_READFIRSTLANE_B32, - VOP1Op.V_CVT_I32_F64: _VOP1Op_V_CVT_I32_F64, - VOP1Op.V_CVT_F64_I32: _VOP1Op_V_CVT_F64_I32, - VOP1Op.V_CVT_F32_I32: _VOP1Op_V_CVT_F32_I32, - VOP1Op.V_CVT_F32_U32: _VOP1Op_V_CVT_F32_U32, - VOP1Op.V_CVT_U32_F32: _VOP1Op_V_CVT_U32_F32, - VOP1Op.V_CVT_I32_F32: _VOP1Op_V_CVT_I32_F32, - VOP1Op.V_CVT_F16_F32: _VOP1Op_V_CVT_F16_F32, - VOP1Op.V_CVT_F32_F16: _VOP1Op_V_CVT_F32_F16, - VOP1Op.V_CVT_NEAREST_I32_F32: _VOP1Op_V_CVT_NEAREST_I32_F32, - VOP1Op.V_CVT_FLOOR_I32_F32: _VOP1Op_V_CVT_FLOOR_I32_F32, - VOP1Op.V_CVT_F32_F64: _VOP1Op_V_CVT_F32_F64, - VOP1Op.V_CVT_F64_F32: _VOP1Op_V_CVT_F64_F32, - VOP1Op.V_CVT_F32_UBYTE0: _VOP1Op_V_CVT_F32_UBYTE0, - VOP1Op.V_CVT_F32_UBYTE1: _VOP1Op_V_CVT_F32_UBYTE1, - VOP1Op.V_CVT_F32_UBYTE2: _VOP1Op_V_CVT_F32_UBYTE2, - VOP1Op.V_CVT_F32_UBYTE3: _VOP1Op_V_CVT_F32_UBYTE3, - VOP1Op.V_CVT_U32_F64: _VOP1Op_V_CVT_U32_F64, - VOP1Op.V_CVT_F64_U32: _VOP1Op_V_CVT_F64_U32, - VOP1Op.V_TRUNC_F64: _VOP1Op_V_TRUNC_F64, - VOP1Op.V_CEIL_F64: _VOP1Op_V_CEIL_F64, - VOP1Op.V_RNDNE_F64: _VOP1Op_V_RNDNE_F64, - VOP1Op.V_FLOOR_F64: _VOP1Op_V_FLOOR_F64, - VOP1Op.V_MOV_B16: _VOP1Op_V_MOV_B16, - VOP1Op.V_FRACT_F32: _VOP1Op_V_FRACT_F32, - VOP1Op.V_TRUNC_F32: _VOP1Op_V_TRUNC_F32, - VOP1Op.V_CEIL_F32: _VOP1Op_V_CEIL_F32, - VOP1Op.V_RNDNE_F32: _VOP1Op_V_RNDNE_F32, - VOP1Op.V_FLOOR_F32: _VOP1Op_V_FLOOR_F32, - VOP1Op.V_EXP_F32: _VOP1Op_V_EXP_F32, - VOP1Op.V_LOG_F32: _VOP1Op_V_LOG_F32, - VOP1Op.V_RCP_F32: _VOP1Op_V_RCP_F32, - VOP1Op.V_RCP_IFLAG_F32: _VOP1Op_V_RCP_IFLAG_F32, - VOP1Op.V_RSQ_F32: _VOP1Op_V_RSQ_F32, - VOP1Op.V_RCP_F64: _VOP1Op_V_RCP_F64, - VOP1Op.V_RSQ_F64: _VOP1Op_V_RSQ_F64, - VOP1Op.V_SQRT_F32: _VOP1Op_V_SQRT_F32, - VOP1Op.V_SQRT_F64: _VOP1Op_V_SQRT_F64, - VOP1Op.V_SIN_F32: _VOP1Op_V_SIN_F32, - VOP1Op.V_COS_F32: _VOP1Op_V_COS_F32, - VOP1Op.V_NOT_B32: _VOP1Op_V_NOT_B32, - VOP1Op.V_BFREV_B32: _VOP1Op_V_BFREV_B32, - VOP1Op.V_CLZ_I32_U32: _VOP1Op_V_CLZ_I32_U32, - VOP1Op.V_CTZ_I32_B32: _VOP1Op_V_CTZ_I32_B32, - VOP1Op.V_CLS_I32: _VOP1Op_V_CLS_I32, - VOP1Op.V_FREXP_EXP_I32_F64: _VOP1Op_V_FREXP_EXP_I32_F64, - VOP1Op.V_FREXP_MANT_F64: _VOP1Op_V_FREXP_MANT_F64, - VOP1Op.V_FRACT_F64: _VOP1Op_V_FRACT_F64, - VOP1Op.V_FREXP_EXP_I32_F32: _VOP1Op_V_FREXP_EXP_I32_F32, - VOP1Op.V_FREXP_MANT_F32: _VOP1Op_V_FREXP_MANT_F32, - VOP1Op.V_MOVRELS_B32: _VOP1Op_V_MOVRELS_B32, - VOP1Op.V_CVT_F16_U16: _VOP1Op_V_CVT_F16_U16, - VOP1Op.V_CVT_F16_I16: _VOP1Op_V_CVT_F16_I16, - VOP1Op.V_CVT_U16_F16: _VOP1Op_V_CVT_U16_F16, - VOP1Op.V_CVT_I16_F16: _VOP1Op_V_CVT_I16_F16, - VOP1Op.V_RCP_F16: _VOP1Op_V_RCP_F16, - VOP1Op.V_SQRT_F16: _VOP1Op_V_SQRT_F16, - VOP1Op.V_RSQ_F16: _VOP1Op_V_RSQ_F16, - VOP1Op.V_LOG_F16: _VOP1Op_V_LOG_F16, - VOP1Op.V_EXP_F16: _VOP1Op_V_EXP_F16, - VOP1Op.V_FREXP_MANT_F16: _VOP1Op_V_FREXP_MANT_F16, - VOP1Op.V_FREXP_EXP_I16_F16: _VOP1Op_V_FREXP_EXP_I16_F16, - VOP1Op.V_FLOOR_F16: _VOP1Op_V_FLOOR_F16, - VOP1Op.V_CEIL_F16: _VOP1Op_V_CEIL_F16, - VOP1Op.V_TRUNC_F16: _VOP1Op_V_TRUNC_F16, - VOP1Op.V_RNDNE_F16: _VOP1Op_V_RNDNE_F16, - VOP1Op.V_FRACT_F16: _VOP1Op_V_FRACT_F16, - VOP1Op.V_SIN_F16: _VOP1Op_V_SIN_F16, - VOP1Op.V_COS_F16: 
_VOP1Op_V_COS_F16, - VOP1Op.V_SAT_PK_U8_I16: _VOP1Op_V_SAT_PK_U8_I16, - VOP1Op.V_CVT_NORM_I16_F16: _VOP1Op_V_CVT_NORM_I16_F16, - VOP1Op.V_CVT_NORM_U16_F16: _VOP1Op_V_CVT_NORM_U16_F16, - VOP1Op.V_SWAP_B32: _VOP1Op_V_SWAP_B32, - VOP1Op.V_SWAP_B16: _VOP1Op_V_SWAP_B16, - VOP1Op.V_NOT_B16: _VOP1Op_V_NOT_B16, - VOP1Op.V_CVT_I32_I16: _VOP1Op_V_CVT_I32_I16, - VOP1Op.V_CVT_U32_U16: _VOP1Op_V_CVT_U32_U16, -} - -def _VOP2Op_V_CNDMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - D0.u32 = ((S1.u32) if (VCC.u64[laneId]) else (S0.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_DOT2ACC_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(D0.f32) - tmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16) - tmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16) - D0.f32 = tmp - return {'D0': D0._val} - -def _VOP2Op_V_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 - S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S1.f32 - S0.f32 - return {'D0': D0._val} - -def _VOP2Op_V_FMAC_DX9_ZERO_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)): - D0.f32 = S2.f32 - else: - D0.f32 = fma(S0.f32, S1.f32, D0.f32) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_DX9_ZERO_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)): - D0.f32 = 0.0 - else: - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i24) * (S1.i24) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_HI_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i24) * (S1.i24)) >> 32) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u24) * (S1.u24) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_HI_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, 
VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u24) * (S1.u24)) >> 32) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f32)): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isSignalNAN(F(S1.f32)): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isQuietNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isQuietNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif LT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - else: - if isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif LT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MAX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f32)): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isSignalNAN(F(S1.f32)): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isQuietNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isQuietNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif GT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - else: - if isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif GT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 < S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 >= S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 < S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 >= S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_LSHLREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 << S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_LSHRREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_ASHRREV_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S1.i32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_ADD_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32) + VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_SUB_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32 - VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if ((S1.u32) + VCC.u64[laneId] > (S0.u32)) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_SUBREV_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg(S1.u32 - S0.u32 - VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if ((S0.u32) + VCC.u64[laneId] > (S1.u32)) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_ADD_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 + S1.u32 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 - S1.u32 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S1.u32 - S0.u32 - return {'D0': D0._val} - -def _VOP2Op_V_FMAC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, D0.f32) - return {'D0': D0._val} - -def _VOP2Op_V_FMAMK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32) - return {'D0': D0._val} - -def _VOP2Op_V_FMAAK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, 
S1.f32, SIMM32.f32) - return {'D0': D0._val} - -def _VOP2Op_V_CVT_PK_RTZ_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - prev_mode = ROUND_MODE  # the RTZ round-mode switch/restore did not survive pseudocode compilation; conversions use the current mode - tmp[15 : 0].f16 = f32_to_f16(S0.f32) - tmp[31 : 16].f16 = f32_to_f16(S1.f32) - return {'D0': tmp._val}  # packed halves are the instruction result; assumed written back as D0 - -def _VOP2Op_V_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 - S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S1.f16 - S0.f16 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, S1.f16, D0.f16) - return {'D0': D0._val} - -def _VOP2Op_V_FMAMK_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, SIMM32.f16, S1.f16) - return {'D0': D0._val} - -def _VOP2Op_V_FMAAK_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f16)): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif isSignalNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isQuietNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isQuietNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif GT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - else: - if isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif GT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_MIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f16)): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif isSignalNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isQuietNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isQuietNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif LT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - else: - if isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif LT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - 
return {'D0': D0._val} - -def _VOP2Op_V_LDEXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * F(2.0 ** (S1.i16)) - return {'D0': D0._val} - -def _VOP2Op_V_PK_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16) - D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16) - return {'D0': D0._val} - -VOP2Op_FUNCTIONS = { - VOP2Op.V_CNDMASK_B32: _VOP2Op_V_CNDMASK_B32, - VOP2Op.V_DOT2ACC_F32_F16: _VOP2Op_V_DOT2ACC_F32_F16, - VOP2Op.V_ADD_F32: _VOP2Op_V_ADD_F32, - VOP2Op.V_SUB_F32: _VOP2Op_V_SUB_F32, - VOP2Op.V_SUBREV_F32: _VOP2Op_V_SUBREV_F32, - VOP2Op.V_FMAC_DX9_ZERO_F32: _VOP2Op_V_FMAC_DX9_ZERO_F32, - VOP2Op.V_MUL_DX9_ZERO_F32: _VOP2Op_V_MUL_DX9_ZERO_F32, - VOP2Op.V_MUL_F32: _VOP2Op_V_MUL_F32, - VOP2Op.V_MUL_I32_I24: _VOP2Op_V_MUL_I32_I24, - VOP2Op.V_MUL_HI_I32_I24: _VOP2Op_V_MUL_HI_I32_I24, - VOP2Op.V_MUL_U32_U24: _VOP2Op_V_MUL_U32_U24, - VOP2Op.V_MUL_HI_U32_U24: _VOP2Op_V_MUL_HI_U32_U24, - VOP2Op.V_MIN_F32: _VOP2Op_V_MIN_F32, - VOP2Op.V_MAX_F32: _VOP2Op_V_MAX_F32, - VOP2Op.V_MIN_I32: _VOP2Op_V_MIN_I32, - VOP2Op.V_MAX_I32: _VOP2Op_V_MAX_I32, - VOP2Op.V_MIN_U32: _VOP2Op_V_MIN_U32, - VOP2Op.V_MAX_U32: _VOP2Op_V_MAX_U32, - VOP2Op.V_LSHLREV_B32: _VOP2Op_V_LSHLREV_B32, - VOP2Op.V_LSHRREV_B32: _VOP2Op_V_LSHRREV_B32, - VOP2Op.V_ASHRREV_I32: _VOP2Op_V_ASHRREV_I32, - VOP2Op.V_AND_B32: _VOP2Op_V_AND_B32, - VOP2Op.V_OR_B32: _VOP2Op_V_OR_B32, - VOP2Op.V_XOR_B32: _VOP2Op_V_XOR_B32, - VOP2Op.V_XNOR_B32: _VOP2Op_V_XNOR_B32, - VOP2Op.V_ADD_CO_CI_U32: _VOP2Op_V_ADD_CO_CI_U32, - VOP2Op.V_SUB_CO_CI_U32: _VOP2Op_V_SUB_CO_CI_U32, - VOP2Op.V_SUBREV_CO_CI_U32: _VOP2Op_V_SUBREV_CO_CI_U32, - VOP2Op.V_ADD_NC_U32: _VOP2Op_V_ADD_NC_U32, - VOP2Op.V_SUB_NC_U32: _VOP2Op_V_SUB_NC_U32, - VOP2Op.V_SUBREV_NC_U32: _VOP2Op_V_SUBREV_NC_U32, - VOP2Op.V_FMAC_F32: _VOP2Op_V_FMAC_F32, - VOP2Op.V_FMAMK_F32: _VOP2Op_V_FMAMK_F32, - VOP2Op.V_FMAAK_F32: _VOP2Op_V_FMAAK_F32, - VOP2Op.V_CVT_PK_RTZ_F16_F32: _VOP2Op_V_CVT_PK_RTZ_F16_F32, - VOP2Op.V_ADD_F16: _VOP2Op_V_ADD_F16, - VOP2Op.V_SUB_F16: _VOP2Op_V_SUB_F16, - VOP2Op.V_SUBREV_F16: _VOP2Op_V_SUBREV_F16, - VOP2Op.V_MUL_F16: _VOP2Op_V_MUL_F16, - VOP2Op.V_FMAC_F16: _VOP2Op_V_FMAC_F16, - VOP2Op.V_FMAMK_F16: _VOP2Op_V_FMAMK_F16, - VOP2Op.V_FMAAK_F16: _VOP2Op_V_FMAAK_F16, - VOP2Op.V_MAX_F16: _VOP2Op_V_MAX_F16, - VOP2Op.V_MIN_F16: _VOP2Op_V_MIN_F16, - VOP2Op.V_LDEXP_F16: _VOP2Op_V_LDEXP_F16, - VOP2Op.V_PK_FMAC_F16: _VOP2Op_V_PK_FMAC_F16, -} - -def _VOP3Op_V_CMP_F_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 < S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - 
D0.u64[laneId] = S0.f16 == S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 <= S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 > S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 != S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 >= S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 >= S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 != S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 > S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 <= S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 == S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLT_F16(s0, s1, s2, d0, 
scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 < S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_T_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_F_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 < S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 == S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 <= S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 > S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 != S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 >= S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - 
D0.u64[laneId] = not (S0.f32 >= S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 != S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 > S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 <= S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 == S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 < S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_T_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_F_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 < S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 == S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 <= S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 > S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); 
VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 != S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 >= S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 >= S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 != S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 > S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 <= S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 == S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 < S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_T_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 < S1.i16 - return {'D0': D0._val} - 
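# The V_CMP handlers above each evaluate one lane and set a single bit of the
# result mask (D0.u64[laneId] = <compare>). A minimal sketch of how a wave-wide
# compare mask could be accumulated from them; _run_vcmp_wave is a hypothetical
# helper, not part of this file (the real dispatch lives in emu.py), and VGPR
# is passed as None because the compare handlers never read it.
def _run_vcmp_wave(fn, s0_vals, s1_vals, exec_mask, wave_size=32):
    d0 = 0  # destination mask, one bit per lane
    for lane in range(wave_size):
        if not (exec_mask >> lane) & 1:
            continue  # inactive lanes leave their result bit unchanged
        d0 = fn(s0_vals[lane], s1_vals[lane], 0, d0, 0, 0, lane, exec_mask, 0, None)['D0']
    return d0
# e.g. _run_vcmp_wave(_VOP3Op_V_CMP_LT_I16, a, b, 0xFFFFFFFF) builds a full wave32 LT mask.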
-def _VOP3Op_V_CMP_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 == S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 <= S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 > S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 != S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 >= S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 < S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 == S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 <= S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 > S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 != S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 >= S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_F_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # 
--- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 < S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 == S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 <= S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 > S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 != S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 >= S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_T_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_F_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 < S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 == S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 <= S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not 
None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 > S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 != S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 >= S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_T_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_F_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 < S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 == S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 <= S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 > S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 != S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 >= S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_T_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_F_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- 
compiled pseudocode --- - D0.u64[laneId] = 0 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 < S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 == S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 <= S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 > S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 != S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 >= S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_T_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = 1 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3Op_V_CMP_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - D0.u64[laneId] = 
result - return {'D0': D0._val} - -def _VOP3Op_V_CMP_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3Op_V_CMPX_F_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 < S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 == S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 <= S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 > S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 != S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 >= S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 >= S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - 
S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 != S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 > S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 <= S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 == S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 < S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_T_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_F_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 < S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 == S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 <= S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 > S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 != S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 >= S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = ( not isNAN(F(S0.f32)) 
and not isNAN(F(S1.f32))) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 >= S1.f32) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 != S1.f32) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 > S1.f32) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 <= S1.f32) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 == S1.f32) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 < S1.f32) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_T_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_F_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 < S1.f64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 == S1.f64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 <= S1.f64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 > S1.f64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LG_F64(s0, s1, 
s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 != S1.f64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 >= S1.f64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 >= S1.f64) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 != S1.f64) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 > S1.f64) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 <= S1.f64) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 == S1.f64) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 < S1.f64) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_T_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 < S1.i16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 == S1.i16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, 
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 <= S1.i16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 > S1.i16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 != S1.i16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 >= S1.i16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 < S1.u16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 == S1.u16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 <= S1.u16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 > S1.u16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 != S1.u16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 >= S1.u16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_F_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 < S1.i32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 == S1.i32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- 
compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 <= S1.i32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 > S1.i32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 != S1.i32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 >= S1.i32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_T_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_F_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 < S1.u32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 == S1.u32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 <= S1.u32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 > S1.u32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 != S1.u32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 >= S1.u32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_T_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_F_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); 
S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 < S1.i64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 == S1.i64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 <= S1.i64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 > S1.i64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 != S1.i64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 >= S1.i64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_T_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_F_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 < S1.u64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 == S1.u64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 <= S1.u64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 > S1.u64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 != S1.u64 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 >= S1.u64 - return {'EXEC': EXEC._val} - -def 
_VOP3Op_V_CMPX_T_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - EXEC.u64[laneId] = result - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - EXEC.u64[laneId] = result - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - EXEC.u64[laneId] = result - return {'EXEC': EXEC._val} - -def _VOP3Op_V_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _VOP3Op_V_READFIRSTLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); EXEC=Reg(exec_mask); SRC0=Reg(src0_idx); EXEC_LO=SliceProxy(EXEC, 31, 0) - # --- compiled pseudocode --- - if WAVE64: - if EXEC == 0x0: - lane = 0 - else: - lane = (s_ff1_i32_b64(EXEC)) - else: - if EXEC_LO.i32 == 0: - lane = 0 - else: - lane = (s_ff1_i32_b32(EXEC_LO)) - D0.b32 = VGPR[lane][SRC0.u32] - return {'D0': D0._val} - -def _VOP3Op_V_CVT_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f64_to_i32(S0.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = i32_to_f64(S0.i32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F32_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- 
- D0.f32 = i32_to_f32(S0.i32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0.u32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_U32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f32_to_u32(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = f32_to_f16(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f16_to_f32(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_NEAREST_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(floor(S0.f32 + 0.5)) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_FLOOR_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(floor(S0.f32)) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f64_to_f32(S0.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F64_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = f32_to_f64(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F32_UBYTE0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[7 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F32_UBYTE1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[15 : 8].u32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F32_UBYTE2(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[23 : 16].u32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F32_UBYTE3(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0[31 : 24].u32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_U32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f64_to_u32(S0.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - 
S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = u32_to_f64(S0.u32) - return {'D0': D0._val} - -def _VOP3Op_V_TRUNC_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CEIL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - if ((S0.f64 > 0.0) and (S0.f64 != D0.f64)): - D0.f64 += 1.0 - return {'D0': D0._val} - -def _VOP3Op_V_RNDNE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = floor(S0.f64 + 0.5) - if (isEven(floor(S0.f64)) and (fract(S0.f64) == 0.5)): - D0.f64 -= 1.0 - return {'D0': D0._val} - -def _VOP3Op_V_FLOOR_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = trunc(S0.f64) - if ((S0.f64 < 0.0) and (S0.f64 != D0.f64)): - D0.f64 += -1.0 - return {'D0': D0._val} - -def _VOP3Op_V_MOV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b16 = S0.b16 - return {'D0': D0._val} - -def _VOP3Op_V_FRACT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + -floor(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_TRUNC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CEIL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 > 0.0) and (S0.f32 != D0.f32)): - D0.f32 += 1.0 - return {'D0': D0._val} - -def _VOP3Op_V_RNDNE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = floor(S0.f32 + 0.5) - if (isEven(F(floor(S0.f32))) and (fract(S0.f32) == 0.5)): - D0.f32 -= 1.0 - return {'D0': D0._val} - -def _VOP3Op_V_FLOOR_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 < 0.0) and (S0.f32 != D0.f32)): - D0.f32 += -1.0 - return {'D0': D0._val} - -def _VOP3Op_V_EXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = pow(2.0, S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_LOG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = log2(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_RCP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / S0.f32 - return {'D0': D0._val} - -def _VOP3Op_V_RCP_IFLAG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / S0.f32 - return {'D0': D0._val} - -def _VOP3Op_V_RSQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = 1.0 / sqrt(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_RCP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = 1.0 / S0.f64 - return {'D0': D0._val} - -def _VOP3Op_V_RSQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = 1.0 / sqrt(S0.f64) - return {'D0': D0._val} - -def _VOP3Op_V_SQRT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = sqrt(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_SQRT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = sqrt(S0.f64) - return {'D0': D0._val} - -def _VOP3Op_V_SIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = sin(S0.f32 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP3Op_V_COS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = cos(S0.f32 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP3Op_V_NOT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~S0.u32 - return {'D0': D0._val} - -def _VOP3Op_V_BFREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[31 : 0] = S0.u32[0 : 31] - return {'D0': D0._val} - -def _VOP3Op_V_CLZ_I32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(0, int(31)+1): - if S0.u32[31 - i] == 1: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP3Op_V_CTZ_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(0, int(31)+1): - if S0.u32[i] == 1: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP3Op_V_CLS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = -1 - for i in range(1, int(31)+1): - if S0.i32[31 - i] != S0.i32[31]: - D0.i32 = i; break - return {'D0': D0._val} - -def _VOP3Op_V_FREXP_EXP_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)): - D0.i32 = 0 - else: - D0.i32 = exponent(S0.f64) - 1023 + 1 - return {'D0': D0._val} - -def _VOP3Op_V_FREXP_MANT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)): - D0.f64 = S0.f64 - else: - D0.f64 = mantissa(S0.f64) - return {'D0': D0._val} - -def _VOP3Op_V_FRACT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 + -floor(S0.f64) - return {'D0': D0._val} - -def _VOP3Op_V_FREXP_EXP_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))): - D0.i32 = 0 - else: - D0.i32 = exponent(S0.f32) - 127 + 1 - return {'D0': D0._val} - -def _VOP3Op_V_FREXP_MANT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))): - D0.f32 = S0.f32 - else: - D0.f32 = mantissa(S0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MOVRELS_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - addr = SRC0.u32 - D0.b32 = VGPR[laneId][addr].b32 - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F16_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = u16_to_f16(S0.u16) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_F16_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = i16_to_f16(S0.i16) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = f16_to_u16(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = f16_to_i16(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_RCP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 1.0 / S0.f16 - return {'D0': D0._val} - -def _VOP3Op_V_SQRT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = sqrt(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_RSQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 1.0 / sqrt(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_LOG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = log2(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_EXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = pow(2.0, S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_FREXP_MANT_F16(s0, s1, s2, d0, scc, vcc, laneId, 
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))): - D0.f16 = S0.f16 - else: - D0.f16 = mantissa(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_FREXP_EXP_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))): - D0.i16 = 0 - else: - D0.i16 = (exponent(S0.f16) - 15 + 1) - return {'D0': D0._val} - -def _VOP3Op_V_FLOOR_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 < 0.0) and (S0.f16 != D0.f16)): - D0.f16 += -1.0 - return {'D0': D0._val} - -def _VOP3Op_V_CEIL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 > 0.0) and (S0.f16 != D0.f16)): - D0.f16 += 1.0 - return {'D0': D0._val} - -def _VOP3Op_V_TRUNC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_RNDNE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = floor(S0.f16 + 0.5) - if (isEven(F(floor(S0.f16))) and (fract(S0.f16) == 0.5)): - D0.f16 -= 1.0 - return {'D0': D0._val} - -def _VOP3Op_V_FRACT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + -floor(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_SIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = sin(S0.f16 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP3Op_V_COS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = cos(S0.f16 * F(PI * 2.0)) - return {'D0': D0._val} - -def _VOP3Op_V_SAT_PK_U8_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b16 = _pack(SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16)) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_NORM_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = f16_to_snorm(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_NORM_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = f16_to_unorm(S0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_NOT_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = ~S0.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CVT_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); 
D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i16)) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_U32_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(0, S0.u16)) - return {} - -def _VOP3Op_V_CNDMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - D0.u32 = ((S1.u32) if (VCC.u64[laneId]) else (S0.u32)) - return {'D0': D0._val} - -def _VOP3Op_V_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_SUB_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 - S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_SUBREV_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S1.f32 - S0.f32 - return {'D0': D0._val} - -def _VOP3Op_V_FMAC_DX9_ZERO_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)): - D0.f32 = S2.f32 - else: - D0.f32 = fma(S0.f32, S1.f32, D0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MUL_DX9_ZERO_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)): - D0.f32 = 0.0 - else: - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_MUL_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i24) * (S1.i24) - return {'D0': D0._val} - -def _VOP3Op_V_MUL_HI_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i24) * (S1.i24)) >> 32) - return {'D0': D0._val} - -def _VOP3Op_V_MUL_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u24) * (S1.u24) - return {'D0': D0._val} - -def _VOP3Op_V_MUL_HI_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u24) * (S1.u24)) >> 32) - return {'D0': D0._val} - -def _VOP3Op_V_MIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f32)): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif 
isSignalNAN(F(S1.f32)): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isQuietNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isQuietNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif LT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - else: - if isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif LT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_MAX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f32)): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isSignalNAN(F(S1.f32)): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif isQuietNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isQuietNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif GT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - else: - if isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif GT_NEG_ZERO(S0.f32, S1.f32): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 < S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP3Op_V_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 >= S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP3Op_V_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 < S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP3Op_V_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 >= S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP3Op_V_LSHLREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 << S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_LSHRREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_ASHRREV_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S1.i32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & S1.u32) - return {'D0': D0._val} - -def _VOP3Op_V_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32) - return {'D0': D0._val} - -def _VOP3Op_V_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP3Op_V_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP3Op_V_ADD_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 + S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_SUB_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 - S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_SUBREV_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S1.u32 - S0.u32 - return {'D0': D0._val} - -def _VOP3Op_V_FMAC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, D0.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CVT_PK_RTZ_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - prev_mode = ROUND_MODE - tmp[15 : 0].f16 = f32_to_f16(S0.f32) - tmp[31 : 16].f16 = f32_to_f16(S1.f32) - return {} - -def _VOP3Op_V_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_SUB_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 - S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_SUBREV_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S1.f16 - S0.f16 - return {'D0': D0._val} - -def _VOP3Op_V_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, S1.f16, D0.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MAX_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f16)): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif isSignalNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isQuietNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isQuietNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif GT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - else: - if isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif 
GT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_MIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(F(S0.f16)): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif isSignalNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif isQuietNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isQuietNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif LT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - else: - if isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif LT_NEG_ZERO(S0.f16, S1.f16): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_LDEXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * F(2.0 ** (S1.i16)) - return {'D0': D0._val} - -def _VOP3Op_V_FMA_DX9_ZERO_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)): - D0.f32 = S2.f32 - else: - D0.f32 = fma(S0.f32, S1.f32, S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MAD_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i24) * (S1.i24) + S2.i32 - return {'D0': D0._val} - -def _VOP3Op_V_MAD_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u24) * (S1.u24) + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CUBEID_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))): - if S2.f32 < 0.0: - D0.f32 = 5.0 - else: - D0.f32 = 4.0 - elif abs(S1.f32) >= abs(S0.f32): - if S1.f32 < 0.0: - D0.f32 = 3.0 - else: - D0.f32 = 2.0 - else: - if S0.f32 < 0.0: - D0.f32 = 1.0 - else: - D0.f32 = 0.0 - return {'D0': D0._val} - -def _VOP3Op_V_CUBESC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))): - if S2.f32 < 0.0: - D0.f32 = -S0.f32 - else: - D0.f32 = S0.f32 - elif abs(S1.f32) >= abs(S0.f32): - D0.f32 = S0.f32 - else: - if S0.f32 < 0.0: - D0.f32 = S2.f32 - else: - D0.f32 = -S2.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CUBETC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))): - D0.f32 = -S1.f32 - elif abs(S1.f32) >= abs(S0.f32): - if S1.f32 < 0.0: - D0.f32 = -S2.f32 - else: - D0.f32 = S2.f32 - else: - D0.f32 = -S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CUBEMA_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); 
S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))): - D0.f32 = S2.f32 * 2.0 - elif abs(S1.f32) >= abs(S0.f32): - D0.f32 = S1.f32 * 2.0 - else: - D0.f32 = S0.f32 * 2.0 - return {'D0': D0._val} - -def _VOP3Op_V_BFE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1)) - return {'D0': D0._val} - -def _VOP3Op_V_BFE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1)) - D0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_BFI_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32)) - return {'D0': D0._val} - -def _VOP3Op_V_FMA_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_FMA_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = fma(S0.f64, S1.f64, S2.f64) - return {'D0': D0._val} - -def _VOP3Op_V_LERP_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1 << 24)) - tmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1 << 16) - tmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1 << 8) - tmp += ((S0.u32[7 : 0] + S1.u32[7 : 0] + S2.u32[0].u8) >> 1) - D0.u32 = tmp.u32 - return {'D0': D0._val} - -def _VOP3Op_V_ALIGNBIT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((_pack32(S0.u32, S1.u32) >> S2.u32[4 : 0].u32) & 0xffffffff) - return {'D0': D0._val} - -def _VOP3Op_V_ALIGNBYTE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((_pack32(S0.u32, S1.u32) >> (S2.u32[1 : 0].u32 * 8)) & 0xffffffff) - return {'D0': D0._val} - -def _VOP3Op_V_MULLIT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if ((S1.f32 == -MAX_FLOAT_F32) or (F(S1.f32) == (-INF)) or isNAN(F(S1.f32)) or (S2.f32 <= 0.0) or isNAN(F(S2.f32))): - D0.f32 = -MAX_FLOAT_F32 - else: - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_MIN3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_min_f32(v_min_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - 
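# --- illustrative sketch, not part of the generated file: the V_BFE_U32 /
# V_BFE_I32 handlers above extract a bitfield whose offset (S1) and width
# (S2) are each masked to their low 5 bits, with the signed variant
# sign-extending from the top bit of the extracted field. A minimal
# plain-int model of that behavior (hypothetical helper names):
def _sketch_bfe_u32(src: int, offset: int, width: int) -> int:
    off, w = offset & 0x1f, width & 0x1f   # only S1[4:0] / S2[4:0] participate
    return (src >> off) & ((1 << w) - 1)   # width 0 extracts an empty field
def _sketch_bfe_i32(src: int, offset: int, width: int) -> int:
    tmp, w = _sketch_bfe_u32(src, offset, width), width & 0x1f
    if w and tmp & (1 << (w - 1)):         # sign bit of the w-bit field set?
        tmp -= 1 << w                      # sign-extend to a negative value
    return tmp
assert _sketch_bfe_u32(0xdeadbeef, 8, 8) == 0xbe
assert _sketch_bfe_i32(0xdeadbeef, 8, 8) == 0xbe - 0x100  # bit 7 set -> negative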
-def _VOP3Op_V_MIN3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32) - return {'D0': D0._val} - -def _VOP3Op_V_MIN3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MAX3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_max_f32(v_max_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MAX3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32) - return {'D0': D0._val} - -def _VOP3Op_V_MAX3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MED3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if (isNAN(F(S0.f32)) or isNAN(F(S1.f32)) or isNAN(F(S2.f32))): - D0.f32 = v_min3_f32(S0.f32, S1.f32, S2.f32) - elif v_max3_f32(S0.f32, S1.f32, S2.f32) == S0.f32: - D0.f32 = v_max_f32(S1.f32, S2.f32) - elif v_max3_f32(S0.f32, S1.f32, S2.f32) == S1.f32: - D0.f32 = v_max_f32(S0.f32, S2.f32) - else: - D0.f32 = v_max_f32(S0.f32, S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MED3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32: - D0.i32 = v_max_i32(S1.i32, S2.i32) - elif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32: - D0.i32 = v_max_i32(S0.i32, S2.i32) - else: - D0.i32 = v_max_i32(S0.i32, S1.i32) - return {'D0': D0._val} - -def _VOP3Op_V_MED3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32: - D0.u32 = v_max_u32(S1.u32, S2.u32) - elif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32: - D0.u32 = v_max_u32(S0.u32, S2.u32) - else: - D0.u32 = v_max_u32(S0.u32, S1.u32) - return {'D0': D0._val} - -def _VOP3Op_V_SAD_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += (ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0])) - tmp += (ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8])) - tmp += (ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16])) - tmp += (ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24])) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_SAD_HI_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = 
((v_sad_u8(S0, S1, 0)) << 16) + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_SAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16) - tmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_SAD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CVT_PK_U8_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg((S2.u32 & (~(0xff << (S1.u32[1 : 0].u32 * 8))))) - tmp = Reg((tmp | (((f32_to_u8(S0.f32)) & 255) << (S1.u32[1 : 0].u32 * 8)))) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_DIV_FIXUP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - sign_out = (sign(S1.f32) ^ sign(S2.f32)) - if isNAN(F(S2.f32)): - D0.f32 = F(cvtToQuietNAN(F(S2.f32))) - elif isNAN(F(S1.f32)): - D0.f32 = F(cvtToQuietNAN(F(S1.f32))) - elif ((F(S1.f32) == 0.0) and (F(S2.f32) == 0.0)): - D0.f32 = F(0xffc00000) - elif ((F(abs(S1.f32)) == INF) and (F(abs(S2.f32)) == INF)): - D0.f32 = F(0xffc00000) - elif ((F(S1.f32) == 0.0) or (F(abs(S2.f32)) == INF)): - D0.f32 = (((-INF).f32) if (sign_out) else (INF.f32)) - elif ((F(abs(S1.f32)) == INF) or (F(S2.f32) == 0.0)): - D0.f32 = ((-0.0) if (sign_out) else (0.0)) - elif exponent(S2.f32) - exponent(S1.f32) < -150: - D0.f32 = ((-UNDERFLOW_F32) if (sign_out) else (UNDERFLOW_F32)) - elif exponent(S1.f32) == 255: - D0.f32 = ((-OVERFLOW_F32) if (sign_out) else (OVERFLOW_F32)) - else: - D0.f32 = ((-OVERFLOW_F32) if (sign_out) else (OVERFLOW_F32)) if isNAN(S0.f32) else ((-abs(S0.f32)) if (sign_out) else (abs(S0.f32))) - return {'D0': D0._val} - -def _VOP3Op_V_DIV_FIXUP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - sign_out = (sign(S1.f64) ^ sign(S2.f64)) - if isNAN(S2.f64): - D0.f64 = cvtToQuietNAN(S2.f64) - elif isNAN(S1.f64): - D0.f64 = cvtToQuietNAN(S1.f64) - elif ((S1.f64 == 0.0) and (S2.f64 == 0.0)): - D0.f64 = F(0xfff8000000000000) - elif ((abs(S1.f64) == INF) and (abs(S2.f64) == INF)): - D0.f64 = F(0xfff8000000000000) - elif ((S1.f64 == 0.0) or (abs(S2.f64) == INF)): - D0.f64 = (((-INF)) if (sign_out) else (INF)) - elif ((abs(S1.f64) == INF) or (S2.f64 == 0.0)): - D0.f64 = ((-0.0) if (sign_out) else (0.0)) - elif exponent(S2.f64) - exponent(S1.f64) < -1075: - D0.f64 = ((-UNDERFLOW_F64) if (sign_out) else (UNDERFLOW_F64)) - elif exponent(S1.f64) == 2047: - D0.f64 = ((-OVERFLOW_F64) if (sign_out) else (OVERFLOW_F64)) - else: - D0.f64 = ((-OVERFLOW_F64) if (sign_out) else (OVERFLOW_F64)) if isNAN(S0.f64) else ((-abs(S0.f64)) if (sign_out) else (abs(S0.f64))) - return {'D0': D0._val} - -def _VOP3Op_V_DIV_FMAS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - if VCC.u64[laneId]: - 
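# NOTE (illustrative, not from the generated file): V_DIV_FMAS_* is the final
# fma of the software divide sequence; the per-lane VCC bit is produced
# earlier by V_DIV_SCALE, and when set the fma result is rescaled by 2^+/-64
# (2^+/-128 in the f64 variant below) to undo the scaling applied there.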
D0.f32 = (2.0 ** 64 if exponent(S2.f32) > 127 else 2.0 ** -64) * fma(S0.f32, S1.f32, S2.f32) - else: - D0.f32 = fma(S0.f32, S1.f32, S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_DIV_FMAS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - if VCC.u64[laneId]: - D0.f64 = (2.0 ** 128 if exponent(S2.f64) > 1023 else 2.0 ** -128) * fma(S0.f64, S1.f64, S2.f64) - else: - D0.f64 = fma(S0.f64, S1.f64, S2.f64) - return {'D0': D0._val} - -def _VOP3Op_V_MSAD_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.u32) - tmp += ((0) if (S1.u32[7 : 0] == 0) else ((ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0])))) - tmp += ((0) if (S1.u32[15 : 8] == 0) else ((ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8])))) - tmp += ((0) if (S1.u32[23 : 16] == 0) else ((ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16])))) - tmp += ((0) if (S1.u32[31 : 24] == 0) else ((ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24])))) - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_QSAD_PK_U16_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[63 : 48] = (v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32)) - tmp[47 : 32] = (v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32)) - tmp[31 : 16] = (v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32)) - tmp[15 : 0] = (v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32)) - D0.b64 = tmp.b64 - return {'D0': D0._val} - -def _VOP3Op_V_MQSAD_PK_U16_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[63 : 48] = (v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32)) - tmp[47 : 32] = (v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32)) - tmp[31 : 16] = (v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32)) - tmp[15 : 0] = (v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32)) - D0.b64 = tmp.b64 - return {'D0': D0._val} - -def _VOP3Op_V_MQSAD_U32_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[127 : 96] = (v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32)) - tmp[95 : 64] = (v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32)) - tmp[63 : 32] = (v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32)) - tmp[31 : 0] = (v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32)) - D0.b128 = tmp.b128 - return {'D0': D0._val} - -def _VOP3Op_V_XOR3_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32 ^ S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 * S1.u16 + S2.u16 - return {'D0': D0._val} - -def _VOP3Op_V_PERM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - 
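# NOTE (illustrative, not from the generated file): each selector byte of S2
# picks one byte of the 64-bit concatenation _pack32(S0, S1) via
# BYTE_PERMUTE; selector values beyond the eight data bytes yield the
# constant/sign fills defined by the ISA's byte-permute table.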
D0[31 : 24] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[31 : 24]) - D0[23 : 16] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[23 : 16]) - D0[15 : 8] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[15 : 8]) - D0[7 : 0] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[7 : 0]) - return {'D0': D0._val} - -def _VOP3Op_V_XAD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_LSHL_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_ADD_LSHL_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_FMA_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, S1.f16, S2.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MIN3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_min_f16(v_min_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MIN3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16) - return {'D0': D0._val} - -def _VOP3Op_V_MIN3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16) - return {'D0': D0._val} - -def _VOP3Op_V_MAX3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_max_f16(v_max_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MAX3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16) - return {'D0': D0._val} - -def _VOP3Op_V_MAX3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16) - return {'D0': D0._val} - -def _VOP3Op_V_MED3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if (isNAN(F(S0.f16)) or isNAN(F(S1.f16)) or isNAN(F(S2.f16))): - D0.f16 = v_min3_f16(S0.f16, S1.f16, S2.f16) - elif v_max3_f16(S0.f16, S1.f16, S2.f16) == S0.f16: - D0.f16 = v_max_f16(S1.f16, S2.f16) - elif v_max3_f16(S0.f16, S1.f16, S2.f16) == S1.f16: - D0.f16 = 
v_max_f16(S0.f16, S2.f16) - else: - D0.f16 = v_max_f16(S0.f16, S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MED3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16: - D0.i16 = v_max_i16(S1.i16, S2.i16) - elif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16: - D0.i16 = v_max_i16(S0.i16, S2.i16) - else: - D0.i16 = v_max_i16(S0.i16, S1.i16) - return {'D0': D0._val} - -def _VOP3Op_V_MED3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16: - D0.u16 = v_max_u16(S1.u16, S2.u16) - elif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16: - D0.u16 = v_max_u16(S0.u16, S2.u16) - else: - D0.u16 = v_max_u16(S0.u16, S1.u16) - return {'D0': D0._val} - -def _VOP3Op_V_MAD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = S0.i16 * S1.i16 + S2.i16 - return {'D0': D0._val} - -def _VOP3Op_V_DIV_FIXUP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - sign_out = (sign(S1.f16) ^ sign(S2.f16)) - if isNAN(F(S2.f16)): - D0.f16 = F(cvtToQuietNAN(F(S2.f16))) - elif isNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif ((F(S1.f16) == 0.0) and (F(S2.f16) == 0.0)): - D0.f16 = F(0xfe00) - elif ((F(abs(S1.f16)) == INF) and (F(abs(S2.f16)) == INF)): - D0.f16 = F(0xfe00) - elif ((F(S1.f16) == 0.0) or (F(abs(S2.f16)) == INF)): - D0.f16 = (((-INF).f16) if (sign_out) else (INF.f16)) - elif ((F(abs(S1.f16)) == INF) or (F(S2.f16) == 0.0)): - D0.f16 = ((-0.0) if (sign_out) else (0.0)) - else: - D0.f16 = ((-abs(S0.f16)) if (sign_out) else (abs(S0.f16))) - return {'D0': D0._val} - -def _VOP3Op_V_ADD3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 + S1.u32 + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_LSHL_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_AND_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 & S1.u32) | S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_OR3_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32 | S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MAD_U32_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u16) * (S1.u16) + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_MAD_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, 
pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i16) * (S1.i16) + S2.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CNDMASK_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - D0.u16 = ((S1.u16) if (VCC.u64[laneId]) else (S0.u16)) - return {'D0': D0._val} - -def _VOP3Op_V_MAXMIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_min_f32(v_max_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MINMAX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_max_f32(v_min_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MAXMIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_min_f16(v_max_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MINMAX_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_max_f16(v_min_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MAXMIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = v_min_u32(v_max_u32(S0.u32, S1.u32), S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MINMAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = v_max_u32(v_min_u32(S0.u32, S1.u32), S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MAXMIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = v_min_i32(v_max_i32(S0.i32, S1.i32), S2.i32) - return {'D0': D0._val} - -def _VOP3Op_V_MINMAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = v_max_i32(v_min_i32(S0.i32, S1.i32), S2.i32) - return {'D0': D0._val} - -def _VOP3Op_V_DOT2_F16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.f16) - tmp += S0[15 : 0].f16 * S1[15 : 0].f16 - tmp += S0[31 : 16].f16 * S1[31 : 16].f16 - D0.f16 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_DOT2_BF16_BF16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.bf16) - tmp += S0[15 : 0].bf16 * S1[15 : 0].bf16 - tmp += S0[31 : 16].bf16 * S1[31 : 16].bf16 - D0.bf16 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_ADD_NC_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - 
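# NOTE (illustrative, not from the generated file): the "_NC" ("no carry")
# integer ops in this stretch wrap modulo 2^16 or 2^32 and produce no
# carry-out, unlike the VOP3SD V_*_CO_CI_U32 variants near the end of this
# file, which thread a carry bit through VCC.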
S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 + S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_SUB_NC_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 - S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_MUL_LO_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = S0.u16 * S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CVT_PK_I16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 16] = (v_cvt_i16_f32(S1.f32)) - tmp[15 : 0] = (v_cvt_i16_f32(S0.f32)) - return {} - -def _VOP3Op_V_CVT_PK_U16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[31 : 16] = (v_cvt_u16_f32(S1.f32)) - tmp[15 : 0] = (v_cvt_u16_f32(S0.f32)) - return {} - -def _VOP3Op_V_MAX_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = ((S0.u16) if (S0.u16 >= S1.u16) else (S1.u16)) - return {'D0': D0._val} - -def _VOP3Op_V_MAX_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = ((S0.i16) if (S0.i16 >= S1.i16) else (S1.i16)) - return {'D0': D0._val} - -def _VOP3Op_V_MIN_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = ((S0.u16) if (S0.u16 < S1.u16) else (S1.u16)) - return {'D0': D0._val} - -def _VOP3Op_V_MIN_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = ((S0.i16) if (S0.i16 < S1.i16) else (S1.i16)) - return {'D0': D0._val} - -def _VOP3Op_V_ADD_NC_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = S0.i16 + S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_SUB_NC_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = S0.i16 - S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_PACK_B32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0[31 : 16].f16 = S1.f16 - D0[15 : 0].f16 = S0.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CVT_PK_NORM_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = f16_to_snorm(S0.f16) - tmp[31 : 16].i16 = f16_to_snorm(S1.f16) - return {} - -def _VOP3Op_V_CVT_PK_NORM_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = 
f16_to_unorm(S0.f16) - tmp[31 : 16].u16 = f16_to_unorm(S1.f16) - return {} - -def _VOP3Op_V_LDEXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 * 2.0 ** S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_BFM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((1 << S0[4 : 0].u32) - 1) << S1[4 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_BCNT_U32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S1.u32) - for i in range(0, int(31)+1): - tmp += S0[i].u32 - D0.u32 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_CVT_PK_NORM_I16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = f32_to_snorm(S0.f32) - tmp[31 : 16].i16 = f32_to_snorm(S1.f32) - return {} - -def _VOP3Op_V_CVT_PK_NORM_U16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = f32_to_unorm(S0.f32) - tmp[31 : 16].u16 = f32_to_unorm(S1.f32) - return {} - -def _VOP3Op_V_CVT_PK_U16_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].u16 = u32_to_u16(S0.u32) - tmp[31 : 16].u16 = u32_to_u16(S1.u32) - return {} - -def _VOP3Op_V_CVT_PK_I16_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].i16 = i32_to_i16(S0.i32) - tmp[31 : 16].i16 = i32_to_i16(S1.i32) - return {} - -def _VOP3Op_V_SUB_NC_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = S0.i32 - S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_ADD_NC_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = S0.i32 + S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_ADD_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 + S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_MUL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 * S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_MIN_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(S0.f64): - D0.f64 = cvtToQuietNAN(S0.f64) - elif isSignalNAN(S1.f64): - D0.f64 = cvtToQuietNAN(S1.f64) - elif isQuietNAN(S1.f64): - D0.f64 = S0.f64 - elif isQuietNAN(S0.f64): - D0.f64 = S1.f64 - elif LT_NEG_ZERO(S0.f64, S1.f64): - D0.f64 = S0.f64 - else: - D0.f64 = S1.f64 - else: - if isNAN(S1.f64): - 
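# NOTE (illustrative, not from the generated file): in both IEEE and
# non-IEEE modes min/max quietly prefer the non-NaN operand (here S0 when
# S1 is NaN); LT_NEG_ZERO / GT_NEG_ZERO denote comparisons that also order
# -0.0 before +0.0, so min(-0.0, +0.0) returns -0.0.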
D0.f64 = S0.f64 - elif isNAN(S0.f64): - D0.f64 = S1.f64 - elif LT_NEG_ZERO(S0.f64, S1.f64): - D0.f64 = S0.f64 - else: - D0.f64 = S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_MAX_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if WAVE_MODE.IEEE: - if isSignalNAN(S0.f64): - D0.f64 = cvtToQuietNAN(S0.f64) - elif isSignalNAN(S1.f64): - D0.f64 = cvtToQuietNAN(S1.f64) - elif isQuietNAN(S1.f64): - D0.f64 = S0.f64 - elif isQuietNAN(S0.f64): - D0.f64 = S1.f64 - elif GT_NEG_ZERO(S0.f64, S1.f64): - D0.f64 = S0.f64 - else: - D0.f64 = S1.f64 - else: - if isNAN(S1.f64): - D0.f64 = S0.f64 - elif isNAN(S0.f64): - D0.f64 = S1.f64 - elif GT_NEG_ZERO(S0.f64, S1.f64): - D0.f64 = S0.f64 - else: - D0.f64 = S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_LDEXP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 * 2.0 ** S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_MUL_LO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 * S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_MUL_HI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u32) * (S1.u32)) >> 32) - return {'D0': D0._val} - -def _VOP3Op_V_MUL_HI_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i32) * (S1.i32)) >> 32) - return {'D0': D0._val} - -def _VOP3Op_V_TRIG_PREOP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - shift = (S1[4 : 0].u32) * 53 - if exponent(S0.f64) > 1077: - shift += exponent(S0.f64) - 1077 - result = float(((TWO_OVER_PI_1201[1200 : 0] << int(shift)) >> (1201 - 53)) & 0x1fffffffffffff) - scale = -53 - shift - if exponent(S0.f64) >= 1968: - scale += 128 - D0.f64 = ldexp(result, scale) - return {'D0': D0._val} - -def _VOP3Op_V_LSHLREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S1.u16 << S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_LSHRREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S1.u16 >> S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_ASHRREV_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = (S1.i16 >> S0[3 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_LSHLREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64 = (S1.u64 << S0[5 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_LSHRREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- 
compiled pseudocode --- - D0.u64 = (S1.u64 >> S0[5 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_ASHRREV_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i64 = (S1.i64 >> S0[5 : 0].u32) - return {'D0': D0._val} - -def _VOP3Op_V_READLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S1=Reg(s1); D0=Reg(d0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - if WAVE32: - lane = S1.u32[4 : 0].u32 - else: - lane = S1.u32[5 : 0].u32 - D0.b32 = VGPR[lane][SRC0.u32] - return {'D0': D0._val} - -def _VOP3Op_V_AND_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S0.u16 & S1.u16) - return {'D0': D0._val} - -def _VOP3Op_V_OR_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S0.u16 | S1.u16) - return {'D0': D0._val} - -def _VOP3Op_V_XOR_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u16 = (S0.u16 ^ S1.u16) - return {'D0': D0._val} - -VOP3Op_FUNCTIONS = { - VOP3Op.V_CMP_F_F16: _VOP3Op_V_CMP_F_F16, - VOP3Op.V_CMP_LT_F16: _VOP3Op_V_CMP_LT_F16, - VOP3Op.V_CMP_EQ_F16: _VOP3Op_V_CMP_EQ_F16, - VOP3Op.V_CMP_LE_F16: _VOP3Op_V_CMP_LE_F16, - VOP3Op.V_CMP_GT_F16: _VOP3Op_V_CMP_GT_F16, - VOP3Op.V_CMP_LG_F16: _VOP3Op_V_CMP_LG_F16, - VOP3Op.V_CMP_GE_F16: _VOP3Op_V_CMP_GE_F16, - VOP3Op.V_CMP_O_F16: _VOP3Op_V_CMP_O_F16, - VOP3Op.V_CMP_U_F16: _VOP3Op_V_CMP_U_F16, - VOP3Op.V_CMP_NGE_F16: _VOP3Op_V_CMP_NGE_F16, - VOP3Op.V_CMP_NLG_F16: _VOP3Op_V_CMP_NLG_F16, - VOP3Op.V_CMP_NGT_F16: _VOP3Op_V_CMP_NGT_F16, - VOP3Op.V_CMP_NLE_F16: _VOP3Op_V_CMP_NLE_F16, - VOP3Op.V_CMP_NEQ_F16: _VOP3Op_V_CMP_NEQ_F16, - VOP3Op.V_CMP_NLT_F16: _VOP3Op_V_CMP_NLT_F16, - VOP3Op.V_CMP_T_F16: _VOP3Op_V_CMP_T_F16, - VOP3Op.V_CMP_F_F32: _VOP3Op_V_CMP_F_F32, - VOP3Op.V_CMP_LT_F32: _VOP3Op_V_CMP_LT_F32, - VOP3Op.V_CMP_EQ_F32: _VOP3Op_V_CMP_EQ_F32, - VOP3Op.V_CMP_LE_F32: _VOP3Op_V_CMP_LE_F32, - VOP3Op.V_CMP_GT_F32: _VOP3Op_V_CMP_GT_F32, - VOP3Op.V_CMP_LG_F32: _VOP3Op_V_CMP_LG_F32, - VOP3Op.V_CMP_GE_F32: _VOP3Op_V_CMP_GE_F32, - VOP3Op.V_CMP_O_F32: _VOP3Op_V_CMP_O_F32, - VOP3Op.V_CMP_U_F32: _VOP3Op_V_CMP_U_F32, - VOP3Op.V_CMP_NGE_F32: _VOP3Op_V_CMP_NGE_F32, - VOP3Op.V_CMP_NLG_F32: _VOP3Op_V_CMP_NLG_F32, - VOP3Op.V_CMP_NGT_F32: _VOP3Op_V_CMP_NGT_F32, - VOP3Op.V_CMP_NLE_F32: _VOP3Op_V_CMP_NLE_F32, - VOP3Op.V_CMP_NEQ_F32: _VOP3Op_V_CMP_NEQ_F32, - VOP3Op.V_CMP_NLT_F32: _VOP3Op_V_CMP_NLT_F32, - VOP3Op.V_CMP_T_F32: _VOP3Op_V_CMP_T_F32, - VOP3Op.V_CMP_F_F64: _VOP3Op_V_CMP_F_F64, - VOP3Op.V_CMP_LT_F64: _VOP3Op_V_CMP_LT_F64, - VOP3Op.V_CMP_EQ_F64: _VOP3Op_V_CMP_EQ_F64, - VOP3Op.V_CMP_LE_F64: _VOP3Op_V_CMP_LE_F64, - VOP3Op.V_CMP_GT_F64: _VOP3Op_V_CMP_GT_F64, - VOP3Op.V_CMP_LG_F64: _VOP3Op_V_CMP_LG_F64, - VOP3Op.V_CMP_GE_F64: _VOP3Op_V_CMP_GE_F64, - VOP3Op.V_CMP_O_F64: _VOP3Op_V_CMP_O_F64, - VOP3Op.V_CMP_U_F64: _VOP3Op_V_CMP_U_F64, - VOP3Op.V_CMP_NGE_F64: _VOP3Op_V_CMP_NGE_F64, - VOP3Op.V_CMP_NLG_F64: _VOP3Op_V_CMP_NLG_F64, - VOP3Op.V_CMP_NGT_F64: _VOP3Op_V_CMP_NGT_F64, - VOP3Op.V_CMP_NLE_F64: _VOP3Op_V_CMP_NLE_F64, - VOP3Op.V_CMP_NEQ_F64: _VOP3Op_V_CMP_NEQ_F64, - VOP3Op.V_CMP_NLT_F64: _VOP3Op_V_CMP_NLT_F64, - 
VOP3Op.V_CMP_T_F64: _VOP3Op_V_CMP_T_F64, - VOP3Op.V_CMP_LT_I16: _VOP3Op_V_CMP_LT_I16, - VOP3Op.V_CMP_EQ_I16: _VOP3Op_V_CMP_EQ_I16, - VOP3Op.V_CMP_LE_I16: _VOP3Op_V_CMP_LE_I16, - VOP3Op.V_CMP_GT_I16: _VOP3Op_V_CMP_GT_I16, - VOP3Op.V_CMP_NE_I16: _VOP3Op_V_CMP_NE_I16, - VOP3Op.V_CMP_GE_I16: _VOP3Op_V_CMP_GE_I16, - VOP3Op.V_CMP_LT_U16: _VOP3Op_V_CMP_LT_U16, - VOP3Op.V_CMP_EQ_U16: _VOP3Op_V_CMP_EQ_U16, - VOP3Op.V_CMP_LE_U16: _VOP3Op_V_CMP_LE_U16, - VOP3Op.V_CMP_GT_U16: _VOP3Op_V_CMP_GT_U16, - VOP3Op.V_CMP_NE_U16: _VOP3Op_V_CMP_NE_U16, - VOP3Op.V_CMP_GE_U16: _VOP3Op_V_CMP_GE_U16, - VOP3Op.V_CMP_F_I32: _VOP3Op_V_CMP_F_I32, - VOP3Op.V_CMP_LT_I32: _VOP3Op_V_CMP_LT_I32, - VOP3Op.V_CMP_EQ_I32: _VOP3Op_V_CMP_EQ_I32, - VOP3Op.V_CMP_LE_I32: _VOP3Op_V_CMP_LE_I32, - VOP3Op.V_CMP_GT_I32: _VOP3Op_V_CMP_GT_I32, - VOP3Op.V_CMP_NE_I32: _VOP3Op_V_CMP_NE_I32, - VOP3Op.V_CMP_GE_I32: _VOP3Op_V_CMP_GE_I32, - VOP3Op.V_CMP_T_I32: _VOP3Op_V_CMP_T_I32, - VOP3Op.V_CMP_F_U32: _VOP3Op_V_CMP_F_U32, - VOP3Op.V_CMP_LT_U32: _VOP3Op_V_CMP_LT_U32, - VOP3Op.V_CMP_EQ_U32: _VOP3Op_V_CMP_EQ_U32, - VOP3Op.V_CMP_LE_U32: _VOP3Op_V_CMP_LE_U32, - VOP3Op.V_CMP_GT_U32: _VOP3Op_V_CMP_GT_U32, - VOP3Op.V_CMP_NE_U32: _VOP3Op_V_CMP_NE_U32, - VOP3Op.V_CMP_GE_U32: _VOP3Op_V_CMP_GE_U32, - VOP3Op.V_CMP_T_U32: _VOP3Op_V_CMP_T_U32, - VOP3Op.V_CMP_F_I64: _VOP3Op_V_CMP_F_I64, - VOP3Op.V_CMP_LT_I64: _VOP3Op_V_CMP_LT_I64, - VOP3Op.V_CMP_EQ_I64: _VOP3Op_V_CMP_EQ_I64, - VOP3Op.V_CMP_LE_I64: _VOP3Op_V_CMP_LE_I64, - VOP3Op.V_CMP_GT_I64: _VOP3Op_V_CMP_GT_I64, - VOP3Op.V_CMP_NE_I64: _VOP3Op_V_CMP_NE_I64, - VOP3Op.V_CMP_GE_I64: _VOP3Op_V_CMP_GE_I64, - VOP3Op.V_CMP_T_I64: _VOP3Op_V_CMP_T_I64, - VOP3Op.V_CMP_F_U64: _VOP3Op_V_CMP_F_U64, - VOP3Op.V_CMP_LT_U64: _VOP3Op_V_CMP_LT_U64, - VOP3Op.V_CMP_EQ_U64: _VOP3Op_V_CMP_EQ_U64, - VOP3Op.V_CMP_LE_U64: _VOP3Op_V_CMP_LE_U64, - VOP3Op.V_CMP_GT_U64: _VOP3Op_V_CMP_GT_U64, - VOP3Op.V_CMP_NE_U64: _VOP3Op_V_CMP_NE_U64, - VOP3Op.V_CMP_GE_U64: _VOP3Op_V_CMP_GE_U64, - VOP3Op.V_CMP_T_U64: _VOP3Op_V_CMP_T_U64, - VOP3Op.V_CMP_CLASS_F16: _VOP3Op_V_CMP_CLASS_F16, - VOP3Op.V_CMP_CLASS_F32: _VOP3Op_V_CMP_CLASS_F32, - VOP3Op.V_CMP_CLASS_F64: _VOP3Op_V_CMP_CLASS_F64, - VOP3Op.V_CMPX_F_F16: _VOP3Op_V_CMPX_F_F16, - VOP3Op.V_CMPX_LT_F16: _VOP3Op_V_CMPX_LT_F16, - VOP3Op.V_CMPX_EQ_F16: _VOP3Op_V_CMPX_EQ_F16, - VOP3Op.V_CMPX_LE_F16: _VOP3Op_V_CMPX_LE_F16, - VOP3Op.V_CMPX_GT_F16: _VOP3Op_V_CMPX_GT_F16, - VOP3Op.V_CMPX_LG_F16: _VOP3Op_V_CMPX_LG_F16, - VOP3Op.V_CMPX_GE_F16: _VOP3Op_V_CMPX_GE_F16, - VOP3Op.V_CMPX_O_F16: _VOP3Op_V_CMPX_O_F16, - VOP3Op.V_CMPX_U_F16: _VOP3Op_V_CMPX_U_F16, - VOP3Op.V_CMPX_NGE_F16: _VOP3Op_V_CMPX_NGE_F16, - VOP3Op.V_CMPX_NLG_F16: _VOP3Op_V_CMPX_NLG_F16, - VOP3Op.V_CMPX_NGT_F16: _VOP3Op_V_CMPX_NGT_F16, - VOP3Op.V_CMPX_NLE_F16: _VOP3Op_V_CMPX_NLE_F16, - VOP3Op.V_CMPX_NEQ_F16: _VOP3Op_V_CMPX_NEQ_F16, - VOP3Op.V_CMPX_NLT_F16: _VOP3Op_V_CMPX_NLT_F16, - VOP3Op.V_CMPX_T_F16: _VOP3Op_V_CMPX_T_F16, - VOP3Op.V_CMPX_F_F32: _VOP3Op_V_CMPX_F_F32, - VOP3Op.V_CMPX_LT_F32: _VOP3Op_V_CMPX_LT_F32, - VOP3Op.V_CMPX_EQ_F32: _VOP3Op_V_CMPX_EQ_F32, - VOP3Op.V_CMPX_LE_F32: _VOP3Op_V_CMPX_LE_F32, - VOP3Op.V_CMPX_GT_F32: _VOP3Op_V_CMPX_GT_F32, - VOP3Op.V_CMPX_LG_F32: _VOP3Op_V_CMPX_LG_F32, - VOP3Op.V_CMPX_GE_F32: _VOP3Op_V_CMPX_GE_F32, - VOP3Op.V_CMPX_O_F32: _VOP3Op_V_CMPX_O_F32, - VOP3Op.V_CMPX_U_F32: _VOP3Op_V_CMPX_U_F32, - VOP3Op.V_CMPX_NGE_F32: _VOP3Op_V_CMPX_NGE_F32, - VOP3Op.V_CMPX_NLG_F32: _VOP3Op_V_CMPX_NLG_F32, - VOP3Op.V_CMPX_NGT_F32: _VOP3Op_V_CMPX_NGT_F32, - VOP3Op.V_CMPX_NLE_F32: _VOP3Op_V_CMPX_NLE_F32, - VOP3Op.V_CMPX_NEQ_F32: 
_VOP3Op_V_CMPX_NEQ_F32, - VOP3Op.V_CMPX_NLT_F32: _VOP3Op_V_CMPX_NLT_F32, - VOP3Op.V_CMPX_T_F32: _VOP3Op_V_CMPX_T_F32, - VOP3Op.V_CMPX_F_F64: _VOP3Op_V_CMPX_F_F64, - VOP3Op.V_CMPX_LT_F64: _VOP3Op_V_CMPX_LT_F64, - VOP3Op.V_CMPX_EQ_F64: _VOP3Op_V_CMPX_EQ_F64, - VOP3Op.V_CMPX_LE_F64: _VOP3Op_V_CMPX_LE_F64, - VOP3Op.V_CMPX_GT_F64: _VOP3Op_V_CMPX_GT_F64, - VOP3Op.V_CMPX_LG_F64: _VOP3Op_V_CMPX_LG_F64, - VOP3Op.V_CMPX_GE_F64: _VOP3Op_V_CMPX_GE_F64, - VOP3Op.V_CMPX_O_F64: _VOP3Op_V_CMPX_O_F64, - VOP3Op.V_CMPX_U_F64: _VOP3Op_V_CMPX_U_F64, - VOP3Op.V_CMPX_NGE_F64: _VOP3Op_V_CMPX_NGE_F64, - VOP3Op.V_CMPX_NLG_F64: _VOP3Op_V_CMPX_NLG_F64, - VOP3Op.V_CMPX_NGT_F64: _VOP3Op_V_CMPX_NGT_F64, - VOP3Op.V_CMPX_NLE_F64: _VOP3Op_V_CMPX_NLE_F64, - VOP3Op.V_CMPX_NEQ_F64: _VOP3Op_V_CMPX_NEQ_F64, - VOP3Op.V_CMPX_NLT_F64: _VOP3Op_V_CMPX_NLT_F64, - VOP3Op.V_CMPX_T_F64: _VOP3Op_V_CMPX_T_F64, - VOP3Op.V_CMPX_LT_I16: _VOP3Op_V_CMPX_LT_I16, - VOP3Op.V_CMPX_EQ_I16: _VOP3Op_V_CMPX_EQ_I16, - VOP3Op.V_CMPX_LE_I16: _VOP3Op_V_CMPX_LE_I16, - VOP3Op.V_CMPX_GT_I16: _VOP3Op_V_CMPX_GT_I16, - VOP3Op.V_CMPX_NE_I16: _VOP3Op_V_CMPX_NE_I16, - VOP3Op.V_CMPX_GE_I16: _VOP3Op_V_CMPX_GE_I16, - VOP3Op.V_CMPX_LT_U16: _VOP3Op_V_CMPX_LT_U16, - VOP3Op.V_CMPX_EQ_U16: _VOP3Op_V_CMPX_EQ_U16, - VOP3Op.V_CMPX_LE_U16: _VOP3Op_V_CMPX_LE_U16, - VOP3Op.V_CMPX_GT_U16: _VOP3Op_V_CMPX_GT_U16, - VOP3Op.V_CMPX_NE_U16: _VOP3Op_V_CMPX_NE_U16, - VOP3Op.V_CMPX_GE_U16: _VOP3Op_V_CMPX_GE_U16, - VOP3Op.V_CMPX_F_I32: _VOP3Op_V_CMPX_F_I32, - VOP3Op.V_CMPX_LT_I32: _VOP3Op_V_CMPX_LT_I32, - VOP3Op.V_CMPX_EQ_I32: _VOP3Op_V_CMPX_EQ_I32, - VOP3Op.V_CMPX_LE_I32: _VOP3Op_V_CMPX_LE_I32, - VOP3Op.V_CMPX_GT_I32: _VOP3Op_V_CMPX_GT_I32, - VOP3Op.V_CMPX_NE_I32: _VOP3Op_V_CMPX_NE_I32, - VOP3Op.V_CMPX_GE_I32: _VOP3Op_V_CMPX_GE_I32, - VOP3Op.V_CMPX_T_I32: _VOP3Op_V_CMPX_T_I32, - VOP3Op.V_CMPX_F_U32: _VOP3Op_V_CMPX_F_U32, - VOP3Op.V_CMPX_LT_U32: _VOP3Op_V_CMPX_LT_U32, - VOP3Op.V_CMPX_EQ_U32: _VOP3Op_V_CMPX_EQ_U32, - VOP3Op.V_CMPX_LE_U32: _VOP3Op_V_CMPX_LE_U32, - VOP3Op.V_CMPX_GT_U32: _VOP3Op_V_CMPX_GT_U32, - VOP3Op.V_CMPX_NE_U32: _VOP3Op_V_CMPX_NE_U32, - VOP3Op.V_CMPX_GE_U32: _VOP3Op_V_CMPX_GE_U32, - VOP3Op.V_CMPX_T_U32: _VOP3Op_V_CMPX_T_U32, - VOP3Op.V_CMPX_F_I64: _VOP3Op_V_CMPX_F_I64, - VOP3Op.V_CMPX_LT_I64: _VOP3Op_V_CMPX_LT_I64, - VOP3Op.V_CMPX_EQ_I64: _VOP3Op_V_CMPX_EQ_I64, - VOP3Op.V_CMPX_LE_I64: _VOP3Op_V_CMPX_LE_I64, - VOP3Op.V_CMPX_GT_I64: _VOP3Op_V_CMPX_GT_I64, - VOP3Op.V_CMPX_NE_I64: _VOP3Op_V_CMPX_NE_I64, - VOP3Op.V_CMPX_GE_I64: _VOP3Op_V_CMPX_GE_I64, - VOP3Op.V_CMPX_T_I64: _VOP3Op_V_CMPX_T_I64, - VOP3Op.V_CMPX_F_U64: _VOP3Op_V_CMPX_F_U64, - VOP3Op.V_CMPX_LT_U64: _VOP3Op_V_CMPX_LT_U64, - VOP3Op.V_CMPX_EQ_U64: _VOP3Op_V_CMPX_EQ_U64, - VOP3Op.V_CMPX_LE_U64: _VOP3Op_V_CMPX_LE_U64, - VOP3Op.V_CMPX_GT_U64: _VOP3Op_V_CMPX_GT_U64, - VOP3Op.V_CMPX_NE_U64: _VOP3Op_V_CMPX_NE_U64, - VOP3Op.V_CMPX_GE_U64: _VOP3Op_V_CMPX_GE_U64, - VOP3Op.V_CMPX_T_U64: _VOP3Op_V_CMPX_T_U64, - VOP3Op.V_CMPX_CLASS_F16: _VOP3Op_V_CMPX_CLASS_F16, - VOP3Op.V_CMPX_CLASS_F32: _VOP3Op_V_CMPX_CLASS_F32, - VOP3Op.V_CMPX_CLASS_F64: _VOP3Op_V_CMPX_CLASS_F64, - VOP3Op.V_MOV_B32: _VOP3Op_V_MOV_B32, - VOP3Op.V_READFIRSTLANE_B32: _VOP3Op_V_READFIRSTLANE_B32, - VOP3Op.V_CVT_I32_F64: _VOP3Op_V_CVT_I32_F64, - VOP3Op.V_CVT_F64_I32: _VOP3Op_V_CVT_F64_I32, - VOP3Op.V_CVT_F32_I32: _VOP3Op_V_CVT_F32_I32, - VOP3Op.V_CVT_F32_U32: _VOP3Op_V_CVT_F32_U32, - VOP3Op.V_CVT_U32_F32: _VOP3Op_V_CVT_U32_F32, - VOP3Op.V_CVT_I32_F32: _VOP3Op_V_CVT_I32_F32, - VOP3Op.V_CVT_F16_F32: _VOP3Op_V_CVT_F16_F32, - VOP3Op.V_CVT_F32_F16: 
_VOP3Op_V_CVT_F32_F16,
-  VOP3Op.V_CVT_NEAREST_I32_F32: _VOP3Op_V_CVT_NEAREST_I32_F32,
-  VOP3Op.V_CVT_FLOOR_I32_F32: _VOP3Op_V_CVT_FLOOR_I32_F32,
-  VOP3Op.V_CVT_F32_F64: _VOP3Op_V_CVT_F32_F64,
-  VOP3Op.V_CVT_F64_F32: _VOP3Op_V_CVT_F64_F32,
-  VOP3Op.V_CVT_F32_UBYTE0: _VOP3Op_V_CVT_F32_UBYTE0,
-  VOP3Op.V_CVT_F32_UBYTE1: _VOP3Op_V_CVT_F32_UBYTE1,
-  VOP3Op.V_CVT_F32_UBYTE2: _VOP3Op_V_CVT_F32_UBYTE2,
-  VOP3Op.V_CVT_F32_UBYTE3: _VOP3Op_V_CVT_F32_UBYTE3,
-  VOP3Op.V_CVT_U32_F64: _VOP3Op_V_CVT_U32_F64,
-  VOP3Op.V_CVT_F64_U32: _VOP3Op_V_CVT_F64_U32,
-  VOP3Op.V_TRUNC_F64: _VOP3Op_V_TRUNC_F64,
-  VOP3Op.V_CEIL_F64: _VOP3Op_V_CEIL_F64,
-  VOP3Op.V_RNDNE_F64: _VOP3Op_V_RNDNE_F64,
-  VOP3Op.V_FLOOR_F64: _VOP3Op_V_FLOOR_F64,
-  VOP3Op.V_MOV_B16: _VOP3Op_V_MOV_B16,
-  VOP3Op.V_FRACT_F32: _VOP3Op_V_FRACT_F32,
-  VOP3Op.V_TRUNC_F32: _VOP3Op_V_TRUNC_F32,
-  VOP3Op.V_CEIL_F32: _VOP3Op_V_CEIL_F32,
-  VOP3Op.V_RNDNE_F32: _VOP3Op_V_RNDNE_F32,
-  VOP3Op.V_FLOOR_F32: _VOP3Op_V_FLOOR_F32,
-  VOP3Op.V_EXP_F32: _VOP3Op_V_EXP_F32,
-  VOP3Op.V_LOG_F32: _VOP3Op_V_LOG_F32,
-  VOP3Op.V_RCP_F32: _VOP3Op_V_RCP_F32,
-  VOP3Op.V_RCP_IFLAG_F32: _VOP3Op_V_RCP_IFLAG_F32,
-  VOP3Op.V_RSQ_F32: _VOP3Op_V_RSQ_F32,
-  VOP3Op.V_RCP_F64: _VOP3Op_V_RCP_F64,
-  VOP3Op.V_RSQ_F64: _VOP3Op_V_RSQ_F64,
-  VOP3Op.V_SQRT_F32: _VOP3Op_V_SQRT_F32,
-  VOP3Op.V_SQRT_F64: _VOP3Op_V_SQRT_F64,
-  VOP3Op.V_SIN_F32: _VOP3Op_V_SIN_F32,
-  VOP3Op.V_COS_F32: _VOP3Op_V_COS_F32,
-  VOP3Op.V_NOT_B32: _VOP3Op_V_NOT_B32,
-  VOP3Op.V_BFREV_B32: _VOP3Op_V_BFREV_B32,
-  VOP3Op.V_CLZ_I32_U32: _VOP3Op_V_CLZ_I32_U32,
-  VOP3Op.V_CTZ_I32_B32: _VOP3Op_V_CTZ_I32_B32,
-  VOP3Op.V_CLS_I32: _VOP3Op_V_CLS_I32,
-  VOP3Op.V_FREXP_EXP_I32_F64: _VOP3Op_V_FREXP_EXP_I32_F64,
-  VOP3Op.V_FREXP_MANT_F64: _VOP3Op_V_FREXP_MANT_F64,
-  VOP3Op.V_FRACT_F64: _VOP3Op_V_FRACT_F64,
-  VOP3Op.V_FREXP_EXP_I32_F32: _VOP3Op_V_FREXP_EXP_I32_F32,
-  VOP3Op.V_FREXP_MANT_F32: _VOP3Op_V_FREXP_MANT_F32,
-  VOP3Op.V_MOVRELS_B32: _VOP3Op_V_MOVRELS_B32,
-  VOP3Op.V_CVT_F16_U16: _VOP3Op_V_CVT_F16_U16,
-  VOP3Op.V_CVT_F16_I16: _VOP3Op_V_CVT_F16_I16,
-  VOP3Op.V_CVT_U16_F16: _VOP3Op_V_CVT_U16_F16,
-  VOP3Op.V_CVT_I16_F16: _VOP3Op_V_CVT_I16_F16,
-  VOP3Op.V_RCP_F16: _VOP3Op_V_RCP_F16,
-  VOP3Op.V_SQRT_F16: _VOP3Op_V_SQRT_F16,
-  VOP3Op.V_RSQ_F16: _VOP3Op_V_RSQ_F16,
-  VOP3Op.V_LOG_F16: _VOP3Op_V_LOG_F16,
-  VOP3Op.V_EXP_F16: _VOP3Op_V_EXP_F16,
-  VOP3Op.V_FREXP_MANT_F16: _VOP3Op_V_FREXP_MANT_F16,
-  VOP3Op.V_FREXP_EXP_I16_F16: _VOP3Op_V_FREXP_EXP_I16_F16,
-  VOP3Op.V_FLOOR_F16: _VOP3Op_V_FLOOR_F16,
-  VOP3Op.V_CEIL_F16: _VOP3Op_V_CEIL_F16,
-  VOP3Op.V_TRUNC_F16: _VOP3Op_V_TRUNC_F16,
-  VOP3Op.V_RNDNE_F16: _VOP3Op_V_RNDNE_F16,
-  VOP3Op.V_FRACT_F16: _VOP3Op_V_FRACT_F16,
-  VOP3Op.V_SIN_F16: _VOP3Op_V_SIN_F16,
-  VOP3Op.V_COS_F16: _VOP3Op_V_COS_F16,
-  VOP3Op.V_SAT_PK_U8_I16: _VOP3Op_V_SAT_PK_U8_I16,
-  VOP3Op.V_CVT_NORM_I16_F16: _VOP3Op_V_CVT_NORM_I16_F16,
-  VOP3Op.V_CVT_NORM_U16_F16: _VOP3Op_V_CVT_NORM_U16_F16,
-  VOP3Op.V_NOT_B16: _VOP3Op_V_NOT_B16,
-  VOP3Op.V_CVT_I32_I16: _VOP3Op_V_CVT_I32_I16,
-  VOP3Op.V_CVT_U32_U16: _VOP3Op_V_CVT_U32_U16,
-  VOP3Op.V_CNDMASK_B32: _VOP3Op_V_CNDMASK_B32,
-  VOP3Op.V_ADD_F32: _VOP3Op_V_ADD_F32,
-  VOP3Op.V_SUB_F32: _VOP3Op_V_SUB_F32,
-  VOP3Op.V_SUBREV_F32: _VOP3Op_V_SUBREV_F32,
-  VOP3Op.V_FMAC_DX9_ZERO_F32: _VOP3Op_V_FMAC_DX9_ZERO_F32,
-  VOP3Op.V_MUL_DX9_ZERO_F32: _VOP3Op_V_MUL_DX9_ZERO_F32,
-  VOP3Op.V_MUL_F32: _VOP3Op_V_MUL_F32,
-  VOP3Op.V_MUL_I32_I24: _VOP3Op_V_MUL_I32_I24,
-  VOP3Op.V_MUL_HI_I32_I24: _VOP3Op_V_MUL_HI_I32_I24,
-  VOP3Op.V_MUL_U32_U24: _VOP3Op_V_MUL_U32_U24,
-  VOP3Op.V_MUL_HI_U32_U24: _VOP3Op_V_MUL_HI_U32_U24,
-  VOP3Op.V_MIN_F32: _VOP3Op_V_MIN_F32,
-  VOP3Op.V_MAX_F32: _VOP3Op_V_MAX_F32,
-  VOP3Op.V_MIN_I32: _VOP3Op_V_MIN_I32,
-  VOP3Op.V_MAX_I32: _VOP3Op_V_MAX_I32,
-  VOP3Op.V_MIN_U32: _VOP3Op_V_MIN_U32,
-  VOP3Op.V_MAX_U32: _VOP3Op_V_MAX_U32,
-  VOP3Op.V_LSHLREV_B32: _VOP3Op_V_LSHLREV_B32,
-  VOP3Op.V_LSHRREV_B32: _VOP3Op_V_LSHRREV_B32,
-  VOP3Op.V_ASHRREV_I32: _VOP3Op_V_ASHRREV_I32,
-  VOP3Op.V_AND_B32: _VOP3Op_V_AND_B32,
-  VOP3Op.V_OR_B32: _VOP3Op_V_OR_B32,
-  VOP3Op.V_XOR_B32: _VOP3Op_V_XOR_B32,
-  VOP3Op.V_XNOR_B32: _VOP3Op_V_XNOR_B32,
-  VOP3Op.V_ADD_NC_U32: _VOP3Op_V_ADD_NC_U32,
-  VOP3Op.V_SUB_NC_U32: _VOP3Op_V_SUB_NC_U32,
-  VOP3Op.V_SUBREV_NC_U32: _VOP3Op_V_SUBREV_NC_U32,
-  VOP3Op.V_FMAC_F32: _VOP3Op_V_FMAC_F32,
-  VOP3Op.V_CVT_PK_RTZ_F16_F32: _VOP3Op_V_CVT_PK_RTZ_F16_F32,
-  VOP3Op.V_ADD_F16: _VOP3Op_V_ADD_F16,
-  VOP3Op.V_SUB_F16: _VOP3Op_V_SUB_F16,
-  VOP3Op.V_SUBREV_F16: _VOP3Op_V_SUBREV_F16,
-  VOP3Op.V_MUL_F16: _VOP3Op_V_MUL_F16,
-  VOP3Op.V_FMAC_F16: _VOP3Op_V_FMAC_F16,
-  VOP3Op.V_MAX_F16: _VOP3Op_V_MAX_F16,
-  VOP3Op.V_MIN_F16: _VOP3Op_V_MIN_F16,
-  VOP3Op.V_LDEXP_F16: _VOP3Op_V_LDEXP_F16,
-  VOP3Op.V_FMA_DX9_ZERO_F32: _VOP3Op_V_FMA_DX9_ZERO_F32,
-  VOP3Op.V_MAD_I32_I24: _VOP3Op_V_MAD_I32_I24,
-  VOP3Op.V_MAD_U32_U24: _VOP3Op_V_MAD_U32_U24,
-  VOP3Op.V_CUBEID_F32: _VOP3Op_V_CUBEID_F32,
-  VOP3Op.V_CUBESC_F32: _VOP3Op_V_CUBESC_F32,
-  VOP3Op.V_CUBETC_F32: _VOP3Op_V_CUBETC_F32,
-  VOP3Op.V_CUBEMA_F32: _VOP3Op_V_CUBEMA_F32,
-  VOP3Op.V_BFE_U32: _VOP3Op_V_BFE_U32,
-  VOP3Op.V_BFE_I32: _VOP3Op_V_BFE_I32,
-  VOP3Op.V_BFI_B32: _VOP3Op_V_BFI_B32,
-  VOP3Op.V_FMA_F32: _VOP3Op_V_FMA_F32,
-  VOP3Op.V_FMA_F64: _VOP3Op_V_FMA_F64,
-  VOP3Op.V_LERP_U8: _VOP3Op_V_LERP_U8,
-  VOP3Op.V_ALIGNBIT_B32: _VOP3Op_V_ALIGNBIT_B32,
-  VOP3Op.V_ALIGNBYTE_B32: _VOP3Op_V_ALIGNBYTE_B32,
-  VOP3Op.V_MULLIT_F32: _VOP3Op_V_MULLIT_F32,
-  VOP3Op.V_MIN3_F32: _VOP3Op_V_MIN3_F32,
-  VOP3Op.V_MIN3_I32: _VOP3Op_V_MIN3_I32,
-  VOP3Op.V_MIN3_U32: _VOP3Op_V_MIN3_U32,
-  VOP3Op.V_MAX3_F32: _VOP3Op_V_MAX3_F32,
-  VOP3Op.V_MAX3_I32: _VOP3Op_V_MAX3_I32,
-  VOP3Op.V_MAX3_U32: _VOP3Op_V_MAX3_U32,
-  VOP3Op.V_MED3_F32: _VOP3Op_V_MED3_F32,
-  VOP3Op.V_MED3_I32: _VOP3Op_V_MED3_I32,
-  VOP3Op.V_MED3_U32: _VOP3Op_V_MED3_U32,
-  VOP3Op.V_SAD_U8: _VOP3Op_V_SAD_U8,
-  VOP3Op.V_SAD_HI_U8: _VOP3Op_V_SAD_HI_U8,
-  VOP3Op.V_SAD_U16: _VOP3Op_V_SAD_U16,
-  VOP3Op.V_SAD_U32: _VOP3Op_V_SAD_U32,
-  VOP3Op.V_CVT_PK_U8_F32: _VOP3Op_V_CVT_PK_U8_F32,
-  VOP3Op.V_DIV_FIXUP_F32: _VOP3Op_V_DIV_FIXUP_F32,
-  VOP3Op.V_DIV_FIXUP_F64: _VOP3Op_V_DIV_FIXUP_F64,
-  VOP3Op.V_DIV_FMAS_F32: _VOP3Op_V_DIV_FMAS_F32,
-  VOP3Op.V_DIV_FMAS_F64: _VOP3Op_V_DIV_FMAS_F64,
-  VOP3Op.V_MSAD_U8: _VOP3Op_V_MSAD_U8,
-  VOP3Op.V_QSAD_PK_U16_U8: _VOP3Op_V_QSAD_PK_U16_U8,
-  VOP3Op.V_MQSAD_PK_U16_U8: _VOP3Op_V_MQSAD_PK_U16_U8,
-  VOP3Op.V_MQSAD_U32_U8: _VOP3Op_V_MQSAD_U32_U8,
-  VOP3Op.V_XOR3_B32: _VOP3Op_V_XOR3_B32,
-  VOP3Op.V_MAD_U16: _VOP3Op_V_MAD_U16,
-  VOP3Op.V_PERM_B32: _VOP3Op_V_PERM_B32,
-  VOP3Op.V_XAD_U32: _VOP3Op_V_XAD_U32,
-  VOP3Op.V_LSHL_ADD_U32: _VOP3Op_V_LSHL_ADD_U32,
-  VOP3Op.V_ADD_LSHL_U32: _VOP3Op_V_ADD_LSHL_U32,
-  VOP3Op.V_FMA_F16: _VOP3Op_V_FMA_F16,
-  VOP3Op.V_MIN3_F16: _VOP3Op_V_MIN3_F16,
-  VOP3Op.V_MIN3_I16: _VOP3Op_V_MIN3_I16,
-  VOP3Op.V_MIN3_U16: _VOP3Op_V_MIN3_U16,
-  VOP3Op.V_MAX3_F16: _VOP3Op_V_MAX3_F16,
-  VOP3Op.V_MAX3_I16: _VOP3Op_V_MAX3_I16,
-  VOP3Op.V_MAX3_U16: _VOP3Op_V_MAX3_U16,
-  VOP3Op.V_MED3_F16: _VOP3Op_V_MED3_F16,
-  VOP3Op.V_MED3_I16: _VOP3Op_V_MED3_I16,
-  VOP3Op.V_MED3_U16: _VOP3Op_V_MED3_U16,
-  VOP3Op.V_MAD_I16: _VOP3Op_V_MAD_I16,
-  VOP3Op.V_DIV_FIXUP_F16: _VOP3Op_V_DIV_FIXUP_F16,
-  VOP3Op.V_ADD3_U32: _VOP3Op_V_ADD3_U32,
-  VOP3Op.V_LSHL_OR_B32: _VOP3Op_V_LSHL_OR_B32,
-  VOP3Op.V_AND_OR_B32: _VOP3Op_V_AND_OR_B32,
-  VOP3Op.V_OR3_B32: _VOP3Op_V_OR3_B32,
-  VOP3Op.V_MAD_U32_U16: _VOP3Op_V_MAD_U32_U16,
-  VOP3Op.V_MAD_I32_I16: _VOP3Op_V_MAD_I32_I16,
-  VOP3Op.V_CNDMASK_B16: _VOP3Op_V_CNDMASK_B16,
-  VOP3Op.V_MAXMIN_F32: _VOP3Op_V_MAXMIN_F32,
-  VOP3Op.V_MINMAX_F32: _VOP3Op_V_MINMAX_F32,
-  VOP3Op.V_MAXMIN_F16: _VOP3Op_V_MAXMIN_F16,
-  VOP3Op.V_MINMAX_F16: _VOP3Op_V_MINMAX_F16,
-  VOP3Op.V_MAXMIN_U32: _VOP3Op_V_MAXMIN_U32,
-  VOP3Op.V_MINMAX_U32: _VOP3Op_V_MINMAX_U32,
-  VOP3Op.V_MAXMIN_I32: _VOP3Op_V_MAXMIN_I32,
-  VOP3Op.V_MINMAX_I32: _VOP3Op_V_MINMAX_I32,
-  VOP3Op.V_DOT2_F16_F16: _VOP3Op_V_DOT2_F16_F16,
-  VOP3Op.V_DOT2_BF16_BF16: _VOP3Op_V_DOT2_BF16_BF16,
-  VOP3Op.V_ADD_NC_U16: _VOP3Op_V_ADD_NC_U16,
-  VOP3Op.V_SUB_NC_U16: _VOP3Op_V_SUB_NC_U16,
-  VOP3Op.V_MUL_LO_U16: _VOP3Op_V_MUL_LO_U16,
-  VOP3Op.V_CVT_PK_I16_F32: _VOP3Op_V_CVT_PK_I16_F32,
-  VOP3Op.V_CVT_PK_U16_F32: _VOP3Op_V_CVT_PK_U16_F32,
-  VOP3Op.V_MAX_U16: _VOP3Op_V_MAX_U16,
-  VOP3Op.V_MAX_I16: _VOP3Op_V_MAX_I16,
-  VOP3Op.V_MIN_U16: _VOP3Op_V_MIN_U16,
-  VOP3Op.V_MIN_I16: _VOP3Op_V_MIN_I16,
-  VOP3Op.V_ADD_NC_I16: _VOP3Op_V_ADD_NC_I16,
-  VOP3Op.V_SUB_NC_I16: _VOP3Op_V_SUB_NC_I16,
-  VOP3Op.V_PACK_B32_F16: _VOP3Op_V_PACK_B32_F16,
-  VOP3Op.V_CVT_PK_NORM_I16_F16: _VOP3Op_V_CVT_PK_NORM_I16_F16,
-  VOP3Op.V_CVT_PK_NORM_U16_F16: _VOP3Op_V_CVT_PK_NORM_U16_F16,
-  VOP3Op.V_LDEXP_F32: _VOP3Op_V_LDEXP_F32,
-  VOP3Op.V_BFM_B32: _VOP3Op_V_BFM_B32,
-  VOP3Op.V_BCNT_U32_B32: _VOP3Op_V_BCNT_U32_B32,
-  VOP3Op.V_CVT_PK_NORM_I16_F32: _VOP3Op_V_CVT_PK_NORM_I16_F32,
-  VOP3Op.V_CVT_PK_NORM_U16_F32: _VOP3Op_V_CVT_PK_NORM_U16_F32,
-  VOP3Op.V_CVT_PK_U16_U32: _VOP3Op_V_CVT_PK_U16_U32,
-  VOP3Op.V_CVT_PK_I16_I32: _VOP3Op_V_CVT_PK_I16_I32,
-  VOP3Op.V_SUB_NC_I32: _VOP3Op_V_SUB_NC_I32,
-  VOP3Op.V_ADD_NC_I32: _VOP3Op_V_ADD_NC_I32,
-  VOP3Op.V_ADD_F64: _VOP3Op_V_ADD_F64,
-  VOP3Op.V_MUL_F64: _VOP3Op_V_MUL_F64,
-  VOP3Op.V_MIN_F64: _VOP3Op_V_MIN_F64,
-  VOP3Op.V_MAX_F64: _VOP3Op_V_MAX_F64,
-  VOP3Op.V_LDEXP_F64: _VOP3Op_V_LDEXP_F64,
-  VOP3Op.V_MUL_LO_U32: _VOP3Op_V_MUL_LO_U32,
-  VOP3Op.V_MUL_HI_U32: _VOP3Op_V_MUL_HI_U32,
-  VOP3Op.V_MUL_HI_I32: _VOP3Op_V_MUL_HI_I32,
-  VOP3Op.V_TRIG_PREOP_F64: _VOP3Op_V_TRIG_PREOP_F64,
-  VOP3Op.V_LSHLREV_B16: _VOP3Op_V_LSHLREV_B16,
-  VOP3Op.V_LSHRREV_B16: _VOP3Op_V_LSHRREV_B16,
-  VOP3Op.V_ASHRREV_I16: _VOP3Op_V_ASHRREV_I16,
-  VOP3Op.V_LSHLREV_B64: _VOP3Op_V_LSHLREV_B64,
-  VOP3Op.V_LSHRREV_B64: _VOP3Op_V_LSHRREV_B64,
-  VOP3Op.V_ASHRREV_I64: _VOP3Op_V_ASHRREV_I64,
-  VOP3Op.V_READLANE_B32: _VOP3Op_V_READLANE_B32,
-  VOP3Op.V_AND_B16: _VOP3Op_V_AND_B16,
-  VOP3Op.V_OR_B16: _VOP3Op_V_OR_B16,
-  VOP3Op.V_XOR_B16: _VOP3Op_V_XOR_B16,
-}
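Each generated handler is a pure function from operand values to a dict of the registers it wrote, and the *_FUNCTIONS tables key those handlers by opcode enum. A minimal dispatch sketch, assuming a hypothetical wave-state object `st` (the real dispatch lives in extra/assembly/amd/emu.py and differs):

# Hypothetical consumer of the table above; `st` and its fields are assumptions.
def exec_vop3(op, st, s0, s1, s2, lane):
  out = VOP3Op_FUNCTIONS[op](s0, s1, s2, st.d0, st.scc, st.vcc, lane, st.exec_mask, st.literal, st.vgpr)
  for name, val in out.items():      # handlers return only the registers they touched,
    setattr(st, name.lower(), val)   # e.g. {'D0': ..., 'SCC': ...} -> st.d0, st.scc
  return out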
-
-def _VOP3SDOp_V_ADD_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg((S0.u32) + (S1.u32) + VCC.u64[laneId])
-  VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_SUB_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg(S0.u32 - S1.u32 - VCC.u64[laneId])
-  VCC.u64[laneId] = ((1) if ((S1.u32) + VCC.u64[laneId] > (S0.u32)) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_SUBREV_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg(S1.u32 - S0.u32 - VCC.u64[laneId])
-  VCC.u64[laneId] = ((1) if ((S0.u32) + VCC.u64[laneId] > (S1.u32)) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_DIV_SCALE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(s0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  VCC = Reg(0x0)
-  if ((F(S2.f32) == 0.0) or (F(S1.f32) == 0.0)):
-    VCC = Reg(0x1); D0.f32 = float("nan")
-  elif exponent(S2.f32) - exponent(S1.f32) >= 96:
-    VCC = Reg(0x1)
-    if S0.f32 == S1.f32:
-      D0.f32 = ldexp(S0.f32, 64)
-  elif False:
-    pass
-  elif ((1.0 / F(S1.f32) == DENORM.f64) and (S2.f32 / S1.f32 == DENORM.f32)):
-    VCC = Reg(0x1)
-    if S0.f32 == S1.f32:
-      D0.f32 = ldexp(S0.f32, 64)
-  elif 1.0 / F(S1.f32) == DENORM.f64:
-    D0.f32 = ldexp(S0.f32, -64)
-  elif S2.f32 / S1.f32 == DENORM.f32:
-    VCC = Reg(0x1)
-  elif exponent(S2.f32) <= 23:
-    VCC = Reg(0x1); D0.f32 = ldexp(S0.f32, 64)
-  if S1.f32 == DENORM.f32:
-    D0.f32 = float("nan")
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_DIV_SCALE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(s0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  VCC = Reg(0x0)
-  if ((S2.f64 == 0.0) or (S1.f64 == 0.0)):
-    VCC = Reg(0x1); D0.f64 = float("nan")
-  elif exponent(S2.f64) - exponent(S1.f64) >= 768:
-    VCC = Reg(0x1)
-    if S0.f64 == S1.f64:
-      D0.f64 = ldexp(S0.f64, 128)
-  elif False:
-    pass
-  elif ((1.0 / S1.f64 == DENORM.f64) and (S2.f64 / S1.f64 == DENORM.f64)):
-    VCC = Reg(0x1)
-    if S0.f64 == S1.f64:
-      D0.f64 = ldexp(S0.f64, 128)
-  elif 1.0 / S1.f64 == DENORM.f64:
-    D0.f64 = ldexp(S0.f64, -128)
-  elif S2.f64 / S1.f64 == DENORM.f64:
-    VCC = Reg(0x1)
-  elif exponent(S2.f64) <= 53:
-    D0.f64 = ldexp(S0.f64, 128)
-  if S1.f64 == DENORM.f64:
-    D0.f64 = float("nan")
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_MAD_U64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); D1=Reg(0)
-  # --- compiled pseudocode ---
-  _full = ((S0.u32) * (S1.u32) + (S2.u64))
-  D0.u64 = int(_full) & 0xffffffffffffffff
-  D1 = Reg((int(_full) >> 64) & 1)
-  return {'D0': D0._val, 'D1': D1._val}
-
-def _VOP3SDOp_V_MAD_I64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); D1=Reg(0)
-  # --- compiled pseudocode ---
-  _full = ((S0.i32) * (S1.i32) + (S2.i64))
-  D0.u64 = int(_full) & 0xffffffffffffffff
-  D1 = Reg((int(_full) >> 64) & 1)
-  return {'D0': D0._val, 'D1': D1._val}
-
-def _VOP3SDOp_V_ADD_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg((S0.u32) + (S1.u32))
-  VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_SUB_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg(S0.u32 - S1.u32)
-  VCC.u64[laneId] = ((1) if (S1.u32 > S0.u32) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_SUBREV_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg(S1.u32 - S0.u32)
-  VCC.u64[laneId] = ((1) if (S0.u32 > S1.u32) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-VOP3SDOp_FUNCTIONS = {
-  VOP3SDOp.V_ADD_CO_CI_U32: _VOP3SDOp_V_ADD_CO_CI_U32,
-  VOP3SDOp.V_SUB_CO_CI_U32: _VOP3SDOp_V_SUB_CO_CI_U32,
-  VOP3SDOp.V_SUBREV_CO_CI_U32: _VOP3SDOp_V_SUBREV_CO_CI_U32,
-  VOP3SDOp.V_DIV_SCALE_F32: _VOP3SDOp_V_DIV_SCALE_F32,
-  VOP3SDOp.V_DIV_SCALE_F64: _VOP3SDOp_V_DIV_SCALE_F64,
-  VOP3SDOp.V_MAD_U64_U32: _VOP3SDOp_V_MAD_U64_U32,
-  VOP3SDOp.V_MAD_I64_I32: _VOP3SDOp_V_MAD_I64_I32,
-  VOP3SDOp.V_ADD_CO_U32: _VOP3SDOp_V_ADD_CO_U32,
-  VOP3SDOp.V_SUB_CO_U32: _VOP3SDOp_V_SUB_CO_U32,
-  VOP3SDOp.V_SUBREV_CO_U32: _VOP3SDOp_V_SUBREV_CO_U32,
-}
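The CO/CI handlers above encode the carry convention: the 32-bit result wraps, and the carry-out of lane N is written to bit N of the 64-bit VCC mask. A plain-int model of V_ADD_CO_U32 (a sketch; no Reg class needed):

import struct  # not needed here, shown standalone

# Plain-int model of the carry handling in _VOP3SDOp_V_ADD_CO_U32.
def add_co_u32(a, b, vcc, lane):
  full = (a & 0xffffffff) + (b & 0xffffffff)
  carry = 1 if full >= 0x100000000 else 0            # same test as the handler
  return full & 0xffffffff, (vcc & ~(1 << lane)) | (carry << lane)

assert add_co_u32(0xffffffff, 1, 0, 3) == (0, 0b1000)  # lane 3 carries out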
-
-def _VOP3POp_V_PK_MAD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16
-  tmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MUL_LO_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16
-  tmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_ADD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16
-  tmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_SUB_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16
-  tmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_LSHLREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32)
-  tmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_LSHRREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32)
-  tmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_ASHRREV_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32)
-  tmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MAX_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].i16 = ((S0[31 : 16].i16) if (S0[31 : 16].i16 >= S1[31 : 16].i16) else (S1[31 : 16].i16))
-  tmp[15 : 0].i16 = ((S0[15 : 0].i16) if (S0[15 : 0].i16 >= S1[15 : 0].i16) else (S1[15 : 0].i16))
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MIN_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].i16 = ((S0[31 : 16].i16) if (S0[31 : 16].i16 < S1[31 : 16].i16) else (S1[31 : 16].i16))
-  tmp[15 : 0].i16 = ((S0[15 : 0].i16) if (S0[15 : 0].i16 < S1[15 : 0].i16) else (S1[15 : 0].i16))
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16
-  tmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_ADD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16
-  tmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_SUB_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16
-  tmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MAX_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = ((S0[31 : 16].u16) if (S0[31 : 16].u16 >= S1[31 : 16].u16) else (S1[31 : 16].u16))
-  tmp[15 : 0].u16 = ((S0[15 : 0].u16) if (S0[15 : 0].u16 >= S1[15 : 0].u16) else (S1[15 : 0].u16))
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MIN_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = ((S0[31 : 16].u16) if (S0[31 : 16].u16 < S1[31 : 16].u16) else (S1[31 : 16].u16))
-  tmp[15 : 0].u16 = ((S0[15 : 0].u16) if (S0[15 : 0].u16 < S1[15 : 0].u16) else (S1[15 : 0].u16))
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_FMA_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16)
-  tmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16)
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16
-  tmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16
-  tmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16)
-  tmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MAX_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16)
-  tmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT2_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.f32)
-  tmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16)
-  tmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16)
-  D0.f32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT4_U32_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.u32)
-  tmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8)
-  tmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8)
-  tmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8)
-  tmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8)
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT8_U32_U4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.u32)
-  tmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4)
-  tmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4)
-  tmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4)
-  tmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4)
-  tmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4)
-  tmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4)
-  tmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4)
-  tmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4)
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT2_F32_BF16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.f32)
-  tmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16)
-  tmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16)
-  D0.f32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_FMA_MIX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)]
-  # --- compiled pseudocode ---
-  for i in range(0, int(2)+1):
-    if not OPSEL_HI.u3[i]:
-      ins[i] = S[i].f32
-    elif OPSEL.u3[i]:
-      ins[i] = f16_to_f32(S[i][31 : 16].f16)
-    else:
-      ins[i] = f16_to_f32(S[i][15 : 0].f16)
-  D0[31 : 0].f32 = fma(ins[0], ins[1], ins[2])
-  return {'D0': D0._val}
-
-def _VOP3POp_V_FMA_MIXLO_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)]
-  # --- compiled pseudocode ---
-  for i in range(0, int(2)+1):
-    if not OPSEL_HI.u3[i]:
-      ins[i] = S[i].f32
-    elif OPSEL.u3[i]:
-      ins[i] = f16_to_f32(S[i][31 : 16].f16)
-    else:
-      ins[i] = f16_to_f32(S[i][15 : 0].f16)
-  D0[15 : 0].f16 = f32_to_f16(fma(ins[0], ins[1], ins[2]))
-  return {'D0': D0._val}
-
-def _VOP3POp_V_FMA_MIXHI_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)]
-  # --- compiled pseudocode ---
-  for i in range(0, int(2)+1):
-    if not OPSEL_HI.u3[i]:
-      ins[i] = S[i].f32
-    elif OPSEL.u3[i]:
-      ins[i] = f16_to_f32(S[i][31 : 16].f16)
-    else:
-      ins[i] = f16_to_f32(S[i][15 : 0].f16)
-  D0[31 : 16].f16 = f32_to_f16(fma(ins[0], ins[1], ins[2]))
-  return {'D0': D0._val}
-
-VOP3POp_FUNCTIONS = {
-  VOP3POp.V_PK_MAD_I16: _VOP3POp_V_PK_MAD_I16,
-  VOP3POp.V_PK_MUL_LO_U16: _VOP3POp_V_PK_MUL_LO_U16,
-  VOP3POp.V_PK_ADD_I16: _VOP3POp_V_PK_ADD_I16,
-  VOP3POp.V_PK_SUB_I16: _VOP3POp_V_PK_SUB_I16,
-  VOP3POp.V_PK_LSHLREV_B16: _VOP3POp_V_PK_LSHLREV_B16,
-  VOP3POp.V_PK_LSHRREV_B16: _VOP3POp_V_PK_LSHRREV_B16,
-  VOP3POp.V_PK_ASHRREV_I16: _VOP3POp_V_PK_ASHRREV_I16,
-  VOP3POp.V_PK_MAX_I16: _VOP3POp_V_PK_MAX_I16,
-  VOP3POp.V_PK_MIN_I16: _VOP3POp_V_PK_MIN_I16,
-  VOP3POp.V_PK_MAD_U16: _VOP3POp_V_PK_MAD_U16,
-  VOP3POp.V_PK_ADD_U16: _VOP3POp_V_PK_ADD_U16,
-  VOP3POp.V_PK_SUB_U16: _VOP3POp_V_PK_SUB_U16,
-  VOP3POp.V_PK_MAX_U16: _VOP3POp_V_PK_MAX_U16,
-  VOP3POp.V_PK_MIN_U16: _VOP3POp_V_PK_MIN_U16,
-  VOP3POp.V_PK_FMA_F16: _VOP3POp_V_PK_FMA_F16,
-  VOP3POp.V_PK_ADD_F16: _VOP3POp_V_PK_ADD_F16,
-  VOP3POp.V_PK_MUL_F16: _VOP3POp_V_PK_MUL_F16,
-  VOP3POp.V_PK_MIN_F16: _VOP3POp_V_PK_MIN_F16,
-  VOP3POp.V_PK_MAX_F16: _VOP3POp_V_PK_MAX_F16,
-  VOP3POp.V_DOT2_F32_F16: _VOP3POp_V_DOT2_F32_F16,
-  VOP3POp.V_DOT4_U32_U8: _VOP3POp_V_DOT4_U32_U8,
-  VOP3POp.V_DOT8_U32_U4: _VOP3POp_V_DOT8_U32_U4,
-  VOP3POp.V_DOT2_F32_BF16: _VOP3POp_V_DOT2_F32_BF16,
-  VOP3POp.V_FMA_MIX_F32: _VOP3POp_V_FMA_MIX_F32,
-  VOP3POp.V_FMA_MIXLO_F16: _VOP3POp_V_FMA_MIXLO_F16,
-  VOP3POp.V_FMA_MIXHI_F16: _VOP3POp_V_FMA_MIXHI_F16,
-}
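The V_FMA_MIX_* handlers pick each of the three sources as either the full f32 register (OPSEL_HI bit clear) or one of its f16 halves (OPSEL selects high vs. low). A standalone sketch of that per-source select using struct's half-float codec (`mix_src` is a hypothetical helper, not part of the generated file):

import struct

# Mirrors the per-source select loop in _VOP3POp_V_FMA_MIX_F32 (sketch).
def mix_src(reg32, opsel, opsel_hi, i):
  if not (opsel_hi >> i) & 1:
    return struct.unpack('<f', struct.pack('<I', reg32 & 0xffffffff))[0]  # whole reg as f32
  half = (reg32 >> 16) & 0xffff if (opsel >> i) & 1 else reg32 & 0xffff   # pick f16 half
  return struct.unpack('<e', struct.pack('<H', half))[0]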
-
-def _VOPCOp_V_CMP_F_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 0
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f16 < S1.f16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f16 == S1.f16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f16 <= S1.f16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f16 > S1.f16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f16 != S1.f16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f16 >= S1.f16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16)))
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16)))
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f16 >= S1.f16)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f16 != S1.f16)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f16 > S1.f16)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f16 <= S1.f16)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f16 == S1.f16)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f16 < S1.f16)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_T_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 1
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_F_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 0
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f32 < S1.f32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f32 == S1.f32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f32 <= S1.f32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f32 > S1.f32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f32 != S1.f32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f32 >= S1.f32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32)))
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32)))
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f32 >= S1.f32)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f32 != S1.f32)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f32 > S1.f32)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f32 <= S1.f32)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f32 == S1.f32)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f32 < S1.f32)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_T_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 1
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_F_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 0
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f64 < S1.f64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f64 == S1.f64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f64 <= S1.f64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f64 > S1.f64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f64 != S1.f64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.f64 >= S1.f64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64))
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64))
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f64 >= S1.f64)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f64 != S1.f64)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f64 > S1.f64)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f64 <= S1.f64)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f64 == S1.f64)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = not (S0.f64 < S1.f64)
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_T_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 1
-  return {'D0': D0._val}
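Note the NaN behavior encoded above: the plain ordered compares (LT, GT, ...) are false when either input is NaN, while the N*-prefixed forms negate them and so come out true on NaN. A quick check in plain Python:

import math
# Python float compares follow the same IEEE unordered rules the handlers rely on.
s0, s1 = math.nan, 1.0
assert (s0 < s1) is False           # V_CMP_LT_F32-style: unordered -> false
assert (not (s0 >= s1)) is True     # V_CMP_NGE_F32-style: unordered -> true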
-
-def _VOPCOp_V_CMP_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i16 < S1.i16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i16 == S1.i16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i16 <= S1.i16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i16 > S1.i16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i16 != S1.i16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i16 >= S1.i16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u16 < S1.u16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u16 == S1.u16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u16 <= S1.u16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u16 > S1.u16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u16 != S1.u16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u16 >= S1.u16
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_F_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 0
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i32 < S1.i32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i32 == S1.i32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i32 <= S1.i32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i32 > S1.i32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i32 != S1.i32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i32 >= S1.i32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_T_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 1
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_F_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 0
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u32 < S1.u32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u32 == S1.u32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u32 <= S1.u32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u32 > S1.u32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u32 != S1.u32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u32 >= S1.u32
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_T_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 1
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_F_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 0
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i64 < S1.i64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i64 == S1.i64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i64 <= S1.i64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i64 > S1.i64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i64 != S1.i64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.i64 >= S1.i64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_T_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 1
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_F_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 0
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u64 < S1.u64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u64 == S1.u64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u64 <= S1.u64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u64 > S1.u64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u64 != S1.u64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = S0.u64 >= S1.u64
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_T_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  D0.u64[laneId] = 1
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  if isSignalNAN(F(S0.f16)):
-    result = S1.u32[0]
-  elif isQuietNAN(F(S0.f16)):
-    result = S1.u32[1]
-  elif exponent(S0.f16) == 31:
-    result = S1.u32[((2) if (sign(S0.f16)) else (9))]
-  elif exponent(S0.f16) > 0:
-    result = S1.u32[((3) if (sign(S0.f16)) else (8))]
-  elif F(abs(S0.f16)) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f16)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f16)) else (6))]
-  D0.u64[laneId] = result
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  if isSignalNAN(F(S0.f32)):
-    result = S1.u32[0]
-  elif isQuietNAN(F(S0.f32)):
-    result = S1.u32[1]
-  elif exponent(S0.f32) == 255:
-    result = S1.u32[((2) if (sign(S0.f32)) else (9))]
-  elif exponent(S0.f32) > 0:
-    result = S1.u32[((3) if (sign(S0.f32)) else (8))]
-  elif F(abs(S0.f32)) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f32)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f32)) else (6))]
-  D0.u64[laneId] = result
-  return {'D0': D0._val}
-
-def _VOPCOp_V_CMP_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None
-  # --- compiled pseudocode ---
-  if isSignalNAN(S0.f64):
-    result = S1.u32[0]
-  elif isQuietNAN(S0.f64):
-    result = S1.u32[1]
-  elif exponent(S0.f64) == 2047:
-    result = S1.u32[((2) if (sign(S0.f64)) else (9))]
-  elif exponent(S0.f64) > 0:
-    result = S1.u32[((3) if (sign(S0.f64)) else (8))]
-  elif abs(S0.f64) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f64)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f64)) else (6))]
-  D0.u64[laneId] = result
-  return {'D0': D0._val}
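The V_CMPX_* variants that follow write the per-lane result into EXEC rather than a destination SGPR pair, so lanes that fail the compare are masked off for subsequent vector instructions. A wave-level sketch (hypothetical helper; lane count of 64 is an assumption matching the u64 masks used above, and the emulator applies this one lane at a time):

# Sketch of the EXEC-writing behavior of a V_CMPX-style compare.
def cmpx_lt_f32(src0, src1, exec_mask):
  new_exec = 0
  for lane in range(64):                        # u64 mask -> up to 64 lanes
    if not (exec_mask >> lane) & 1: continue    # inactive lanes stay masked off
    if src0[lane] < src1[lane]: new_exec |= 1 << lane
  return new_exec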
-
-def _VOPCOp_V_CMPX_F_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = 0
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f16 < S1.f16
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f16 == S1.f16
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f16 <= S1.f16
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f16 > S1.f16
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f16 != S1.f16
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f16 >= S1.f16
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16)))
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16)))
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f16 >= S1.f16)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f16 != S1.f16)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f16 > S1.f16)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f16 <= S1.f16)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f16 == S1.f16)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f16 < S1.f16)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_T_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = 1
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_F_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = 0
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f32 < S1.f32
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f32 == S1.f32
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f32 <= S1.f32
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f32 > S1.f32
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f32 != S1.f32
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f32 >= S1.f32
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32)))
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32)))
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 >= S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 != S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 > S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 <= S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 == S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 < S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_T_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = 1
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_F_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = 0
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 < S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 == S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 <= S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 > S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 != S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 >= S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64))
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64))
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 >= S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 != S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 > S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 <= S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 == S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 < S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_T_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = 1
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i16 < S1.i16
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i16 ==
S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 <= S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 > S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 != S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 >= S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 < S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 == S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 <= S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 > S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 != S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 >= S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_F_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 < S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 == S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 <= S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 > S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 != S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 >= S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_F_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 < S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 == S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 <= S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 > S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 != S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 >= S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_F_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_I64(s0, s1, 
s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 < S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 == S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 <= S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 > S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 != S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 >= S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_F_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 0 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 < S1.u64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 == S1.u64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 <= S1.u64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 > S1.u64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 != S1.u64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled 
pseudocode --- - EXEC.u64[laneId] = S0.u64 >= S1.u64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_T_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = 1 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - EXEC.u64[laneId] = result - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - EXEC.u64[laneId] = result - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - EXEC.u64[laneId] = result - return {'EXEC': EXEC._val} - -VOPCOp_FUNCTIONS = { - VOPCOp.V_CMP_F_F16: _VOPCOp_V_CMP_F_F16, - VOPCOp.V_CMP_LT_F16: _VOPCOp_V_CMP_LT_F16, - VOPCOp.V_CMP_EQ_F16: _VOPCOp_V_CMP_EQ_F16, - VOPCOp.V_CMP_LE_F16: _VOPCOp_V_CMP_LE_F16, - VOPCOp.V_CMP_GT_F16: _VOPCOp_V_CMP_GT_F16, - VOPCOp.V_CMP_LG_F16: _VOPCOp_V_CMP_LG_F16, - VOPCOp.V_CMP_GE_F16: _VOPCOp_V_CMP_GE_F16, - VOPCOp.V_CMP_O_F16: _VOPCOp_V_CMP_O_F16, - VOPCOp.V_CMP_U_F16: _VOPCOp_V_CMP_U_F16, - VOPCOp.V_CMP_NGE_F16: _VOPCOp_V_CMP_NGE_F16, - VOPCOp.V_CMP_NLG_F16: _VOPCOp_V_CMP_NLG_F16, - VOPCOp.V_CMP_NGT_F16: _VOPCOp_V_CMP_NGT_F16, - VOPCOp.V_CMP_NLE_F16: _VOPCOp_V_CMP_NLE_F16, - VOPCOp.V_CMP_NEQ_F16: _VOPCOp_V_CMP_NEQ_F16, - VOPCOp.V_CMP_NLT_F16: _VOPCOp_V_CMP_NLT_F16, - VOPCOp.V_CMP_T_F16: _VOPCOp_V_CMP_T_F16, - VOPCOp.V_CMP_F_F32: _VOPCOp_V_CMP_F_F32, - VOPCOp.V_CMP_LT_F32: _VOPCOp_V_CMP_LT_F32, - VOPCOp.V_CMP_EQ_F32: _VOPCOp_V_CMP_EQ_F32, - VOPCOp.V_CMP_LE_F32: _VOPCOp_V_CMP_LE_F32, - VOPCOp.V_CMP_GT_F32: _VOPCOp_V_CMP_GT_F32, - VOPCOp.V_CMP_LG_F32: _VOPCOp_V_CMP_LG_F32, - VOPCOp.V_CMP_GE_F32: _VOPCOp_V_CMP_GE_F32, - VOPCOp.V_CMP_O_F32: _VOPCOp_V_CMP_O_F32, - VOPCOp.V_CMP_U_F32: _VOPCOp_V_CMP_U_F32, - VOPCOp.V_CMP_NGE_F32: _VOPCOp_V_CMP_NGE_F32, - VOPCOp.V_CMP_NLG_F32: _VOPCOp_V_CMP_NLG_F32, - 
VOPCOp.V_CMP_NGT_F32: _VOPCOp_V_CMP_NGT_F32, - VOPCOp.V_CMP_NLE_F32: _VOPCOp_V_CMP_NLE_F32, - VOPCOp.V_CMP_NEQ_F32: _VOPCOp_V_CMP_NEQ_F32, - VOPCOp.V_CMP_NLT_F32: _VOPCOp_V_CMP_NLT_F32, - VOPCOp.V_CMP_T_F32: _VOPCOp_V_CMP_T_F32, - VOPCOp.V_CMP_F_F64: _VOPCOp_V_CMP_F_F64, - VOPCOp.V_CMP_LT_F64: _VOPCOp_V_CMP_LT_F64, - VOPCOp.V_CMP_EQ_F64: _VOPCOp_V_CMP_EQ_F64, - VOPCOp.V_CMP_LE_F64: _VOPCOp_V_CMP_LE_F64, - VOPCOp.V_CMP_GT_F64: _VOPCOp_V_CMP_GT_F64, - VOPCOp.V_CMP_LG_F64: _VOPCOp_V_CMP_LG_F64, - VOPCOp.V_CMP_GE_F64: _VOPCOp_V_CMP_GE_F64, - VOPCOp.V_CMP_O_F64: _VOPCOp_V_CMP_O_F64, - VOPCOp.V_CMP_U_F64: _VOPCOp_V_CMP_U_F64, - VOPCOp.V_CMP_NGE_F64: _VOPCOp_V_CMP_NGE_F64, - VOPCOp.V_CMP_NLG_F64: _VOPCOp_V_CMP_NLG_F64, - VOPCOp.V_CMP_NGT_F64: _VOPCOp_V_CMP_NGT_F64, - VOPCOp.V_CMP_NLE_F64: _VOPCOp_V_CMP_NLE_F64, - VOPCOp.V_CMP_NEQ_F64: _VOPCOp_V_CMP_NEQ_F64, - VOPCOp.V_CMP_NLT_F64: _VOPCOp_V_CMP_NLT_F64, - VOPCOp.V_CMP_T_F64: _VOPCOp_V_CMP_T_F64, - VOPCOp.V_CMP_LT_I16: _VOPCOp_V_CMP_LT_I16, - VOPCOp.V_CMP_EQ_I16: _VOPCOp_V_CMP_EQ_I16, - VOPCOp.V_CMP_LE_I16: _VOPCOp_V_CMP_LE_I16, - VOPCOp.V_CMP_GT_I16: _VOPCOp_V_CMP_GT_I16, - VOPCOp.V_CMP_NE_I16: _VOPCOp_V_CMP_NE_I16, - VOPCOp.V_CMP_GE_I16: _VOPCOp_V_CMP_GE_I16, - VOPCOp.V_CMP_LT_U16: _VOPCOp_V_CMP_LT_U16, - VOPCOp.V_CMP_EQ_U16: _VOPCOp_V_CMP_EQ_U16, - VOPCOp.V_CMP_LE_U16: _VOPCOp_V_CMP_LE_U16, - VOPCOp.V_CMP_GT_U16: _VOPCOp_V_CMP_GT_U16, - VOPCOp.V_CMP_NE_U16: _VOPCOp_V_CMP_NE_U16, - VOPCOp.V_CMP_GE_U16: _VOPCOp_V_CMP_GE_U16, - VOPCOp.V_CMP_F_I32: _VOPCOp_V_CMP_F_I32, - VOPCOp.V_CMP_LT_I32: _VOPCOp_V_CMP_LT_I32, - VOPCOp.V_CMP_EQ_I32: _VOPCOp_V_CMP_EQ_I32, - VOPCOp.V_CMP_LE_I32: _VOPCOp_V_CMP_LE_I32, - VOPCOp.V_CMP_GT_I32: _VOPCOp_V_CMP_GT_I32, - VOPCOp.V_CMP_NE_I32: _VOPCOp_V_CMP_NE_I32, - VOPCOp.V_CMP_GE_I32: _VOPCOp_V_CMP_GE_I32, - VOPCOp.V_CMP_T_I32: _VOPCOp_V_CMP_T_I32, - VOPCOp.V_CMP_F_U32: _VOPCOp_V_CMP_F_U32, - VOPCOp.V_CMP_LT_U32: _VOPCOp_V_CMP_LT_U32, - VOPCOp.V_CMP_EQ_U32: _VOPCOp_V_CMP_EQ_U32, - VOPCOp.V_CMP_LE_U32: _VOPCOp_V_CMP_LE_U32, - VOPCOp.V_CMP_GT_U32: _VOPCOp_V_CMP_GT_U32, - VOPCOp.V_CMP_NE_U32: _VOPCOp_V_CMP_NE_U32, - VOPCOp.V_CMP_GE_U32: _VOPCOp_V_CMP_GE_U32, - VOPCOp.V_CMP_T_U32: _VOPCOp_V_CMP_T_U32, - VOPCOp.V_CMP_F_I64: _VOPCOp_V_CMP_F_I64, - VOPCOp.V_CMP_LT_I64: _VOPCOp_V_CMP_LT_I64, - VOPCOp.V_CMP_EQ_I64: _VOPCOp_V_CMP_EQ_I64, - VOPCOp.V_CMP_LE_I64: _VOPCOp_V_CMP_LE_I64, - VOPCOp.V_CMP_GT_I64: _VOPCOp_V_CMP_GT_I64, - VOPCOp.V_CMP_NE_I64: _VOPCOp_V_CMP_NE_I64, - VOPCOp.V_CMP_GE_I64: _VOPCOp_V_CMP_GE_I64, - VOPCOp.V_CMP_T_I64: _VOPCOp_V_CMP_T_I64, - VOPCOp.V_CMP_F_U64: _VOPCOp_V_CMP_F_U64, - VOPCOp.V_CMP_LT_U64: _VOPCOp_V_CMP_LT_U64, - VOPCOp.V_CMP_EQ_U64: _VOPCOp_V_CMP_EQ_U64, - VOPCOp.V_CMP_LE_U64: _VOPCOp_V_CMP_LE_U64, - VOPCOp.V_CMP_GT_U64: _VOPCOp_V_CMP_GT_U64, - VOPCOp.V_CMP_NE_U64: _VOPCOp_V_CMP_NE_U64, - VOPCOp.V_CMP_GE_U64: _VOPCOp_V_CMP_GE_U64, - VOPCOp.V_CMP_T_U64: _VOPCOp_V_CMP_T_U64, - VOPCOp.V_CMP_CLASS_F16: _VOPCOp_V_CMP_CLASS_F16, - VOPCOp.V_CMP_CLASS_F32: _VOPCOp_V_CMP_CLASS_F32, - VOPCOp.V_CMP_CLASS_F64: _VOPCOp_V_CMP_CLASS_F64, - VOPCOp.V_CMPX_F_F16: _VOPCOp_V_CMPX_F_F16, - VOPCOp.V_CMPX_LT_F16: _VOPCOp_V_CMPX_LT_F16, - VOPCOp.V_CMPX_EQ_F16: _VOPCOp_V_CMPX_EQ_F16, - VOPCOp.V_CMPX_LE_F16: _VOPCOp_V_CMPX_LE_F16, - VOPCOp.V_CMPX_GT_F16: _VOPCOp_V_CMPX_GT_F16, - VOPCOp.V_CMPX_LG_F16: _VOPCOp_V_CMPX_LG_F16, - VOPCOp.V_CMPX_GE_F16: _VOPCOp_V_CMPX_GE_F16, - VOPCOp.V_CMPX_O_F16: _VOPCOp_V_CMPX_O_F16, - VOPCOp.V_CMPX_U_F16: _VOPCOp_V_CMPX_U_F16, - VOPCOp.V_CMPX_NGE_F16: _VOPCOp_V_CMPX_NGE_F16, - 
VOPCOp.V_CMPX_NLG_F16: _VOPCOp_V_CMPX_NLG_F16, - VOPCOp.V_CMPX_NGT_F16: _VOPCOp_V_CMPX_NGT_F16, - VOPCOp.V_CMPX_NLE_F16: _VOPCOp_V_CMPX_NLE_F16, - VOPCOp.V_CMPX_NEQ_F16: _VOPCOp_V_CMPX_NEQ_F16, - VOPCOp.V_CMPX_NLT_F16: _VOPCOp_V_CMPX_NLT_F16, - VOPCOp.V_CMPX_T_F16: _VOPCOp_V_CMPX_T_F16, - VOPCOp.V_CMPX_F_F32: _VOPCOp_V_CMPX_F_F32, - VOPCOp.V_CMPX_LT_F32: _VOPCOp_V_CMPX_LT_F32, - VOPCOp.V_CMPX_EQ_F32: _VOPCOp_V_CMPX_EQ_F32, - VOPCOp.V_CMPX_LE_F32: _VOPCOp_V_CMPX_LE_F32, - VOPCOp.V_CMPX_GT_F32: _VOPCOp_V_CMPX_GT_F32, - VOPCOp.V_CMPX_LG_F32: _VOPCOp_V_CMPX_LG_F32, - VOPCOp.V_CMPX_GE_F32: _VOPCOp_V_CMPX_GE_F32, - VOPCOp.V_CMPX_O_F32: _VOPCOp_V_CMPX_O_F32, - VOPCOp.V_CMPX_U_F32: _VOPCOp_V_CMPX_U_F32, - VOPCOp.V_CMPX_NGE_F32: _VOPCOp_V_CMPX_NGE_F32, - VOPCOp.V_CMPX_NLG_F32: _VOPCOp_V_CMPX_NLG_F32, - VOPCOp.V_CMPX_NGT_F32: _VOPCOp_V_CMPX_NGT_F32, - VOPCOp.V_CMPX_NLE_F32: _VOPCOp_V_CMPX_NLE_F32, - VOPCOp.V_CMPX_NEQ_F32: _VOPCOp_V_CMPX_NEQ_F32, - VOPCOp.V_CMPX_NLT_F32: _VOPCOp_V_CMPX_NLT_F32, - VOPCOp.V_CMPX_T_F32: _VOPCOp_V_CMPX_T_F32, - VOPCOp.V_CMPX_F_F64: _VOPCOp_V_CMPX_F_F64, - VOPCOp.V_CMPX_LT_F64: _VOPCOp_V_CMPX_LT_F64, - VOPCOp.V_CMPX_EQ_F64: _VOPCOp_V_CMPX_EQ_F64, - VOPCOp.V_CMPX_LE_F64: _VOPCOp_V_CMPX_LE_F64, - VOPCOp.V_CMPX_GT_F64: _VOPCOp_V_CMPX_GT_F64, - VOPCOp.V_CMPX_LG_F64: _VOPCOp_V_CMPX_LG_F64, - VOPCOp.V_CMPX_GE_F64: _VOPCOp_V_CMPX_GE_F64, - VOPCOp.V_CMPX_O_F64: _VOPCOp_V_CMPX_O_F64, - VOPCOp.V_CMPX_U_F64: _VOPCOp_V_CMPX_U_F64, - VOPCOp.V_CMPX_NGE_F64: _VOPCOp_V_CMPX_NGE_F64, - VOPCOp.V_CMPX_NLG_F64: _VOPCOp_V_CMPX_NLG_F64, - VOPCOp.V_CMPX_NGT_F64: _VOPCOp_V_CMPX_NGT_F64, - VOPCOp.V_CMPX_NLE_F64: _VOPCOp_V_CMPX_NLE_F64, - VOPCOp.V_CMPX_NEQ_F64: _VOPCOp_V_CMPX_NEQ_F64, - VOPCOp.V_CMPX_NLT_F64: _VOPCOp_V_CMPX_NLT_F64, - VOPCOp.V_CMPX_T_F64: _VOPCOp_V_CMPX_T_F64, - VOPCOp.V_CMPX_LT_I16: _VOPCOp_V_CMPX_LT_I16, - VOPCOp.V_CMPX_EQ_I16: _VOPCOp_V_CMPX_EQ_I16, - VOPCOp.V_CMPX_LE_I16: _VOPCOp_V_CMPX_LE_I16, - VOPCOp.V_CMPX_GT_I16: _VOPCOp_V_CMPX_GT_I16, - VOPCOp.V_CMPX_NE_I16: _VOPCOp_V_CMPX_NE_I16, - VOPCOp.V_CMPX_GE_I16: _VOPCOp_V_CMPX_GE_I16, - VOPCOp.V_CMPX_LT_U16: _VOPCOp_V_CMPX_LT_U16, - VOPCOp.V_CMPX_EQ_U16: _VOPCOp_V_CMPX_EQ_U16, - VOPCOp.V_CMPX_LE_U16: _VOPCOp_V_CMPX_LE_U16, - VOPCOp.V_CMPX_GT_U16: _VOPCOp_V_CMPX_GT_U16, - VOPCOp.V_CMPX_NE_U16: _VOPCOp_V_CMPX_NE_U16, - VOPCOp.V_CMPX_GE_U16: _VOPCOp_V_CMPX_GE_U16, - VOPCOp.V_CMPX_F_I32: _VOPCOp_V_CMPX_F_I32, - VOPCOp.V_CMPX_LT_I32: _VOPCOp_V_CMPX_LT_I32, - VOPCOp.V_CMPX_EQ_I32: _VOPCOp_V_CMPX_EQ_I32, - VOPCOp.V_CMPX_LE_I32: _VOPCOp_V_CMPX_LE_I32, - VOPCOp.V_CMPX_GT_I32: _VOPCOp_V_CMPX_GT_I32, - VOPCOp.V_CMPX_NE_I32: _VOPCOp_V_CMPX_NE_I32, - VOPCOp.V_CMPX_GE_I32: _VOPCOp_V_CMPX_GE_I32, - VOPCOp.V_CMPX_T_I32: _VOPCOp_V_CMPX_T_I32, - VOPCOp.V_CMPX_F_U32: _VOPCOp_V_CMPX_F_U32, - VOPCOp.V_CMPX_LT_U32: _VOPCOp_V_CMPX_LT_U32, - VOPCOp.V_CMPX_EQ_U32: _VOPCOp_V_CMPX_EQ_U32, - VOPCOp.V_CMPX_LE_U32: _VOPCOp_V_CMPX_LE_U32, - VOPCOp.V_CMPX_GT_U32: _VOPCOp_V_CMPX_GT_U32, - VOPCOp.V_CMPX_NE_U32: _VOPCOp_V_CMPX_NE_U32, - VOPCOp.V_CMPX_GE_U32: _VOPCOp_V_CMPX_GE_U32, - VOPCOp.V_CMPX_T_U32: _VOPCOp_V_CMPX_T_U32, - VOPCOp.V_CMPX_F_I64: _VOPCOp_V_CMPX_F_I64, - VOPCOp.V_CMPX_LT_I64: _VOPCOp_V_CMPX_LT_I64, - VOPCOp.V_CMPX_EQ_I64: _VOPCOp_V_CMPX_EQ_I64, - VOPCOp.V_CMPX_LE_I64: _VOPCOp_V_CMPX_LE_I64, - VOPCOp.V_CMPX_GT_I64: _VOPCOp_V_CMPX_GT_I64, - VOPCOp.V_CMPX_NE_I64: _VOPCOp_V_CMPX_NE_I64, - VOPCOp.V_CMPX_GE_I64: _VOPCOp_V_CMPX_GE_I64, - VOPCOp.V_CMPX_T_I64: _VOPCOp_V_CMPX_T_I64, - VOPCOp.V_CMPX_F_U64: _VOPCOp_V_CMPX_F_U64, - VOPCOp.V_CMPX_LT_U64: 
_VOPCOp_V_CMPX_LT_U64, - VOPCOp.V_CMPX_EQ_U64: _VOPCOp_V_CMPX_EQ_U64, - VOPCOp.V_CMPX_LE_U64: _VOPCOp_V_CMPX_LE_U64, - VOPCOp.V_CMPX_GT_U64: _VOPCOp_V_CMPX_GT_U64, - VOPCOp.V_CMPX_NE_U64: _VOPCOp_V_CMPX_NE_U64, - VOPCOp.V_CMPX_GE_U64: _VOPCOp_V_CMPX_GE_U64, - VOPCOp.V_CMPX_T_U64: _VOPCOp_V_CMPX_T_U64, - VOPCOp.V_CMPX_CLASS_F16: _VOPCOp_V_CMPX_CLASS_F16, - VOPCOp.V_CMPX_CLASS_F32: _VOPCOp_V_CMPX_CLASS_F32, - VOPCOp.V_CMPX_CLASS_F64: _VOPCOp_V_CMPX_CLASS_F64, -} - -def _DSOp_DS_ADD_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 = DATA.u32 - MEM[ADDR].u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_DEC_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_I32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i32) - src = DATA.i32 - MEM[ADDR].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_I32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i32) - src = DATA.i32 - MEM[ADDR].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_U32(MEM, addr, 
data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = ((tmp & ~DATA.b32) | DATA2.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STORE_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0] - return {} - -def _DSOp_DS_STORE_2ADDR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - MEM[ADDR + OFFSET0.u32 * 4].b32 = DATA[31 : 0] - MEM[ADDR + OFFSET1.u32 * 4].b32 = DATA2[31 : 0] - return {} - -def _DSOp_DS_STORE_2ADDR_STRIDE64_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - MEM[ADDR + OFFSET0.u32 * 256].b32 = DATA[31 : 0] - MEM[ADDR + OFFSET1.u32 * 256].b32 = DATA2[31 : 0] - return {} - -def _DSOp_DS_CMPSTORE_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - src = DATA.b32 - cmp = DATA2.b32 - MEM[ADDR].b32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPSTORE_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled 
pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - cmp = DATA2.f32 - MEM[ADDR].f32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - MEM[ADDR].f32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - MEM[ADDR].f32 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - MEM[ADDR].f32 += DATA.f32 - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STORE_B8(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b8 = DATA[7 : 0] - return {} - -def _DSOp_DS_STORE_B16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b16 = DATA[15 : 0] - return {} - -def _DSOp_DS_ADD_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 = DATA.u32 - MEM[ADDR].u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_DEC_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); 
DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_I32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i32) - src = DATA.i32 - MEM[ADDR].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_I32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i32) - src = DATA.i32 - MEM[ADDR].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = ((tmp & ~DATA.b32) | DATA2.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = 
Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = DATA.b32 - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_2ADDR_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4 - tmp1 = MEM[addr1].b32 - tmp2 = MEM[addr2].b32 - MEM[addr1].b32 = DATA.b32 - MEM[addr2].b32 = DATA2.b32 - RETURN_DATA[31 : 0] = tmp1 - RETURN_DATA[63 : 32] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256 - tmp1 = MEM[addr1].b32 - tmp2 = MEM[addr2].b32 - MEM[addr1].b32 = DATA.b32 - MEM[addr2].b32 = DATA2.b32 - RETURN_DATA[31 : 0] = tmp1 - RETURN_DATA[63 : 32] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPSTORE_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - src = DATA.b32 - cmp = DATA2.b32 - MEM[ADDR].b32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPSTORE_RTN_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - cmp = DATA2.f32 - MEM[ADDR].f32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - MEM[ADDR].f32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - MEM[ADDR].f32 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_WRAP_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 = ((tmp - DATA.u32) if (tmp >= DATA.u32) else (tmp + DATA2.u32)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # ---
compiled pseudocode --- - RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_2ADDR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 4].b32 - RETURN_DATA[63 : 32] = MEM[ADDR + OFFSET1.u32 * 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_2ADDR_STRIDE64_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 256].b32 - RETURN_DATA[63 : 32] = MEM[ADDR + OFFSET1.u32 * 256].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_I8(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA.i32 = (signext(MEM[ADDR].i8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U8(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA.u32 = (_pack(0, MEM[ADDR].u8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_I16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA.i32 = (signext(MEM[ADDR].i16)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA.u32 = (_pack(0, MEM[ADDR].u16)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 = DATA.u64 - MEM[ADDR].u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = 
DATA.u64 - MEM[ADDR].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_DEC_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i64) - src = DATA.i64 - MEM[ADDR].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i64) - src = DATA.i64 - MEM[ADDR].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = ((tmp & ~DATA.b64) | DATA2.b64) - RETURN_DATA.b64 = tmp - return 
{'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STORE_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0] - MEM[ADDR + OFFSET.u32 + 4].b32 = DATA[63 : 32] - return {} - -def _DSOp_DS_STORE_2ADDR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - MEM[ADDR + OFFSET0.u32 * 8].b32 = DATA[31 : 0] - MEM[ADDR + OFFSET0.u32 * 8 + 4].b32 = DATA[63 : 32] - MEM[ADDR + OFFSET1.u32 * 8].b32 = DATA2[31 : 0] - MEM[ADDR + OFFSET1.u32 * 8 + 4].b32 = DATA2[63 : 32] - return {} - -def _DSOp_DS_STORE_2ADDR_STRIDE64_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - MEM[ADDR + OFFSET0.u32 * 512].b32 = DATA[31 : 0] - MEM[ADDR + OFFSET0.u32 * 512 + 4].b32 = DATA[63 : 32] - MEM[ADDR + OFFSET1.u32 * 512].b32 = DATA2[31 : 0] - MEM[ADDR + OFFSET1.u32 * 512 + 4].b32 = DATA2[63 : 32] - return {} - -def _DSOp_DS_CMPSTORE_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - src = DATA.b64 - cmp = DATA2.b64 - MEM[ADDR].b64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPSTORE_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - src = DATA.f64 - cmp = DATA2.f64 - MEM[ADDR].f64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - src = DATA.f64 - MEM[ADDR].f64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - src = DATA.f64 - MEM[ADDR].f64 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); 
RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 = DATA.u64 - MEM[ADDR].u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_DEC_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i64) - src = DATA.i64 - MEM[ADDR].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i64) - src = DATA.i64 - MEM[ADDR].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - 
tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = ((tmp & ~DATA.b64) | DATA2.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_2ADDR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8 - tmp1 = MEM[addr1].b64 - tmp2 = MEM[addr2].b64 - MEM[addr1].b64 = DATA.b64 - MEM[addr2].b64 = DATA2.b64 - RETURN_DATA[63 : 0] = tmp1 - RETURN_DATA[127 : 64] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512 - tmp1 = MEM[addr1].b64 - tmp2 = MEM[addr2].b64 - MEM[addr1].b64 = DATA.b64 - MEM[addr2].b64 = DATA2.b64 - RETURN_DATA[63 : 0] = tmp1 - RETURN_DATA[127 : 64] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPSTORE_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - src = DATA.b64 - cmp = DATA2.b64 - MEM[ADDR].b64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPSTORE_RTN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - src = DATA.f64 - cmp = DATA2.f64 - MEM[ADDR].f64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - src = 
DATA.f64 - MEM[ADDR].f64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_F64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f64) - src = DATA.f64 - MEM[ADDR].f64 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32 - RETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_2ADDR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 8].b32 - RETURN_DATA[63 : 32] = MEM[ADDR + OFFSET0.u32 * 8 + 4].b32 - RETURN_DATA[95 : 64] = MEM[ADDR + OFFSET1.u32 * 8].b32 - RETURN_DATA[127 : 96] = MEM[ADDR + OFFSET1.u32 * 8 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_2ADDR_STRIDE64_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 512].b32 - RETURN_DATA[63 : 32] = MEM[ADDR + OFFSET0.u32 * 512 + 4].b32 - RETURN_DATA[95 : 64] = MEM[ADDR + OFFSET1.u32 * 512].b32 - RETURN_DATA[127 : 96] = MEM[ADDR + OFFSET1.u32 * 512 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_RTN_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - MEM[ADDR].f32 += DATA.f32 - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CONDXCHG32_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - ADDR = S0.u32 - DATA = S1.u64 - offset = _pack(OFFSET1, OFFSET0) - RETURN_DATA[0] = LDS[ADDR0].u32 - if DATA[31]: - LDS[ADDR0] = _pack(0, DATA[30 : 0]) - RETURN_DATA[1] = LDS[ADDR1].u32 - if DATA[63]: - LDS[ADDR1] = _pack(0, DATA[62 : 32]) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STORE_B8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b8 = DATA[23 : 16] - return {} - -def _DSOp_DS_STORE_B16_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b16 = DATA[31 : 16] - return {} - -def _DSOp_DS_LOAD_U8_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); 
DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].u16 = (_pack(0, MEM[ADDR].u8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].u16 = (_pack(0, MEM[ADDR].u8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_I8_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].i16 = (signext(MEM[ADDR].i8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_I8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].i16 = (signext(MEM[ADDR].i8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U16_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U16_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_PERMUTE_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - for i in range(0, int(((63) if (WAVE64) else (31)))+1): - tmp[i] = 0x0 - for i in range(0, int(((63) if (WAVE64) else (31)))+1): - if EXEC[i].u1: - dst_lane = (VGPR[i][ADDR] + OFFSET.b32) / 4 % 32 - tmp[dst_lane] = VGPR[i][DATA0] - for i in range(0, int(((63) if (WAVE64) else (31)))+1): - if EXEC[i].u1: - VGPR[i][VDST] = tmp[i] - return {} - -def _DSOp_DS_BPERMUTE_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - for i in range(0, int(((63) if (WAVE64) else (31)))+1): - tmp[i] = 0x0 - for i in range(0, int(((63) if (WAVE64) else (31)))+1): - src_lane = (VGPR[i][ADDR] + OFFSET.b32) / 4 % 32 - if EXEC[src_lane].u1: - tmp[i] = VGPR[src_lane][DATA0] - for i in range(0, int(((63) if (WAVE64) else (31)))+1): - if EXEC[i].u1: - VGPR[i][VDST] = tmp[i] - return {} - -def _DSOp_DS_STORE_B96(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0] - MEM[ADDR + OFFSET.u32 + 4].b32 = DATA[63 : 32] - MEM[ADDR + OFFSET.u32 + 8].b32 = DATA[95 : 64] - return {} - -def _DSOp_DS_STORE_B128(MEM, addr, data0, data1, offset0, offset1): - 
ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0] - MEM[ADDR + OFFSET.u32 + 4].b32 = DATA[63 : 32] - MEM[ADDR + OFFSET.u32 + 8].b32 = DATA[95 : 64] - MEM[ADDR + OFFSET.u32 + 12].b32 = DATA[127 : 96] - return {} - -def _DSOp_DS_LOAD_B96(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32 - RETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4].b32 - RETURN_DATA[95 : 64] = MEM[ADDR + OFFSET.u32 + 8].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_B128(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32 - RETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4].b32 - RETURN_DATA[95 : 64] = MEM[ADDR + OFFSET.u32 + 8].b32 - RETURN_DATA[127 : 96] = MEM[ADDR + OFFSET.u32 + 12].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -DSOp_FUNCTIONS = { - DSOp.DS_ADD_U32: _DSOp_DS_ADD_U32, - DSOp.DS_SUB_U32: _DSOp_DS_SUB_U32, - DSOp.DS_RSUB_U32: _DSOp_DS_RSUB_U32, - DSOp.DS_INC_U32: _DSOp_DS_INC_U32, - DSOp.DS_DEC_U32: _DSOp_DS_DEC_U32, - DSOp.DS_MIN_I32: _DSOp_DS_MIN_I32, - DSOp.DS_MAX_I32: _DSOp_DS_MAX_I32, - DSOp.DS_MIN_U32: _DSOp_DS_MIN_U32, - DSOp.DS_MAX_U32: _DSOp_DS_MAX_U32, - DSOp.DS_AND_B32: _DSOp_DS_AND_B32, - DSOp.DS_OR_B32: _DSOp_DS_OR_B32, - DSOp.DS_XOR_B32: _DSOp_DS_XOR_B32, - DSOp.DS_MSKOR_B32: _DSOp_DS_MSKOR_B32, - DSOp.DS_STORE_B32: _DSOp_DS_STORE_B32, - DSOp.DS_STORE_2ADDR_B32: _DSOp_DS_STORE_2ADDR_B32, - DSOp.DS_STORE_2ADDR_STRIDE64_B32: _DSOp_DS_STORE_2ADDR_STRIDE64_B32, - DSOp.DS_CMPSTORE_B32: _DSOp_DS_CMPSTORE_B32, - DSOp.DS_CMPSTORE_F32: _DSOp_DS_CMPSTORE_F32, - DSOp.DS_MIN_F32: _DSOp_DS_MIN_F32, - DSOp.DS_MAX_F32: _DSOp_DS_MAX_F32, - DSOp.DS_ADD_F32: _DSOp_DS_ADD_F32, - DSOp.DS_STORE_B8: _DSOp_DS_STORE_B8, - DSOp.DS_STORE_B16: _DSOp_DS_STORE_B16, - DSOp.DS_ADD_RTN_U32: _DSOp_DS_ADD_RTN_U32, - DSOp.DS_SUB_RTN_U32: _DSOp_DS_SUB_RTN_U32, - DSOp.DS_RSUB_RTN_U32: _DSOp_DS_RSUB_RTN_U32, - DSOp.DS_INC_RTN_U32: _DSOp_DS_INC_RTN_U32, - DSOp.DS_DEC_RTN_U32: _DSOp_DS_DEC_RTN_U32, - DSOp.DS_MIN_RTN_I32: _DSOp_DS_MIN_RTN_I32, - DSOp.DS_MAX_RTN_I32: _DSOp_DS_MAX_RTN_I32, - DSOp.DS_MIN_RTN_U32: _DSOp_DS_MIN_RTN_U32, - DSOp.DS_MAX_RTN_U32: _DSOp_DS_MAX_RTN_U32, - DSOp.DS_AND_RTN_B32: _DSOp_DS_AND_RTN_B32, - DSOp.DS_OR_RTN_B32: _DSOp_DS_OR_RTN_B32, - DSOp.DS_XOR_RTN_B32: _DSOp_DS_XOR_RTN_B32, - DSOp.DS_MSKOR_RTN_B32: _DSOp_DS_MSKOR_RTN_B32, - DSOp.DS_STOREXCHG_RTN_B32: _DSOp_DS_STOREXCHG_RTN_B32, - DSOp.DS_STOREXCHG_2ADDR_RTN_B32: _DSOp_DS_STOREXCHG_2ADDR_RTN_B32, - DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: _DSOp_DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32, - DSOp.DS_CMPSTORE_RTN_B32: _DSOp_DS_CMPSTORE_RTN_B32, - DSOp.DS_CMPSTORE_RTN_F32: _DSOp_DS_CMPSTORE_RTN_F32, - DSOp.DS_MIN_RTN_F32: _DSOp_DS_MIN_RTN_F32, - DSOp.DS_MAX_RTN_F32: _DSOp_DS_MAX_RTN_F32, - DSOp.DS_WRAP_RTN_B32: _DSOp_DS_WRAP_RTN_B32, - DSOp.DS_LOAD_B32: _DSOp_DS_LOAD_B32, - DSOp.DS_LOAD_2ADDR_B32: _DSOp_DS_LOAD_2ADDR_B32, - DSOp.DS_LOAD_2ADDR_STRIDE64_B32: _DSOp_DS_LOAD_2ADDR_STRIDE64_B32, - DSOp.DS_LOAD_I8: _DSOp_DS_LOAD_I8, - 
DSOp.DS_LOAD_U8: _DSOp_DS_LOAD_U8, - DSOp.DS_LOAD_I16: _DSOp_DS_LOAD_I16, - DSOp.DS_LOAD_U16: _DSOp_DS_LOAD_U16, - DSOp.DS_ADD_U64: _DSOp_DS_ADD_U64, - DSOp.DS_SUB_U64: _DSOp_DS_SUB_U64, - DSOp.DS_RSUB_U64: _DSOp_DS_RSUB_U64, - DSOp.DS_INC_U64: _DSOp_DS_INC_U64, - DSOp.DS_DEC_U64: _DSOp_DS_DEC_U64, - DSOp.DS_MIN_I64: _DSOp_DS_MIN_I64, - DSOp.DS_MAX_I64: _DSOp_DS_MAX_I64, - DSOp.DS_MIN_U64: _DSOp_DS_MIN_U64, - DSOp.DS_MAX_U64: _DSOp_DS_MAX_U64, - DSOp.DS_AND_B64: _DSOp_DS_AND_B64, - DSOp.DS_OR_B64: _DSOp_DS_OR_B64, - DSOp.DS_XOR_B64: _DSOp_DS_XOR_B64, - DSOp.DS_MSKOR_B64: _DSOp_DS_MSKOR_B64, - DSOp.DS_STORE_B64: _DSOp_DS_STORE_B64, - DSOp.DS_STORE_2ADDR_B64: _DSOp_DS_STORE_2ADDR_B64, - DSOp.DS_STORE_2ADDR_STRIDE64_B64: _DSOp_DS_STORE_2ADDR_STRIDE64_B64, - DSOp.DS_CMPSTORE_B64: _DSOp_DS_CMPSTORE_B64, - DSOp.DS_CMPSTORE_F64: _DSOp_DS_CMPSTORE_F64, - DSOp.DS_MIN_F64: _DSOp_DS_MIN_F64, - DSOp.DS_MAX_F64: _DSOp_DS_MAX_F64, - DSOp.DS_ADD_RTN_U64: _DSOp_DS_ADD_RTN_U64, - DSOp.DS_SUB_RTN_U64: _DSOp_DS_SUB_RTN_U64, - DSOp.DS_RSUB_RTN_U64: _DSOp_DS_RSUB_RTN_U64, - DSOp.DS_INC_RTN_U64: _DSOp_DS_INC_RTN_U64, - DSOp.DS_DEC_RTN_U64: _DSOp_DS_DEC_RTN_U64, - DSOp.DS_MIN_RTN_I64: _DSOp_DS_MIN_RTN_I64, - DSOp.DS_MAX_RTN_I64: _DSOp_DS_MAX_RTN_I64, - DSOp.DS_MIN_RTN_U64: _DSOp_DS_MIN_RTN_U64, - DSOp.DS_MAX_RTN_U64: _DSOp_DS_MAX_RTN_U64, - DSOp.DS_AND_RTN_B64: _DSOp_DS_AND_RTN_B64, - DSOp.DS_OR_RTN_B64: _DSOp_DS_OR_RTN_B64, - DSOp.DS_XOR_RTN_B64: _DSOp_DS_XOR_RTN_B64, - DSOp.DS_MSKOR_RTN_B64: _DSOp_DS_MSKOR_RTN_B64, - DSOp.DS_STOREXCHG_RTN_B64: _DSOp_DS_STOREXCHG_RTN_B64, - DSOp.DS_STOREXCHG_2ADDR_RTN_B64: _DSOp_DS_STOREXCHG_2ADDR_RTN_B64, - DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: _DSOp_DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64, - DSOp.DS_CMPSTORE_RTN_B64: _DSOp_DS_CMPSTORE_RTN_B64, - DSOp.DS_CMPSTORE_RTN_F64: _DSOp_DS_CMPSTORE_RTN_F64, - DSOp.DS_MIN_RTN_F64: _DSOp_DS_MIN_RTN_F64, - DSOp.DS_MAX_RTN_F64: _DSOp_DS_MAX_RTN_F64, - DSOp.DS_LOAD_B64: _DSOp_DS_LOAD_B64, - DSOp.DS_LOAD_2ADDR_B64: _DSOp_DS_LOAD_2ADDR_B64, - DSOp.DS_LOAD_2ADDR_STRIDE64_B64: _DSOp_DS_LOAD_2ADDR_STRIDE64_B64, - DSOp.DS_ADD_RTN_F32: _DSOp_DS_ADD_RTN_F32, - DSOp.DS_CONDXCHG32_RTN_B64: _DSOp_DS_CONDXCHG32_RTN_B64, - DSOp.DS_STORE_B8_D16_HI: _DSOp_DS_STORE_B8_D16_HI, - DSOp.DS_STORE_B16_D16_HI: _DSOp_DS_STORE_B16_D16_HI, - DSOp.DS_LOAD_U8_D16: _DSOp_DS_LOAD_U8_D16, - DSOp.DS_LOAD_U8_D16_HI: _DSOp_DS_LOAD_U8_D16_HI, - DSOp.DS_LOAD_I8_D16: _DSOp_DS_LOAD_I8_D16, - DSOp.DS_LOAD_I8_D16_HI: _DSOp_DS_LOAD_I8_D16_HI, - DSOp.DS_LOAD_U16_D16: _DSOp_DS_LOAD_U16_D16, - DSOp.DS_LOAD_U16_D16_HI: _DSOp_DS_LOAD_U16_D16_HI, - DSOp.DS_PERMUTE_B32: _DSOp_DS_PERMUTE_B32, - DSOp.DS_BPERMUTE_B32: _DSOp_DS_BPERMUTE_B32, - DSOp.DS_STORE_B96: _DSOp_DS_STORE_B96, - DSOp.DS_STORE_B128: _DSOp_DS_STORE_B128, - DSOp.DS_LOAD_B96: _DSOp_DS_LOAD_B96, - DSOp.DS_LOAD_B128: _DSOp_DS_LOAD_B128, -} - -def _FLATOp_FLAT_LOAD_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.u32 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.i32 = (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_U16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.u32 = (_pack(0, MEM[ADDR].u16)) - return {'VDATA': VDATA._val} 
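# The DSOp table that closes above and the FLAT handlers that follow share one
# shape: each op compiles to a pure function over its operands that returns a
# dict of only the outputs it changed, and a per-enum dict maps the opcode to
# that function. A minimal sketch of driving such a table; `Mem`,
# `flat_atomic_add_u32` and `TABLE` are illustrative stand-ins here, not the
# real extra/assembly/amd/emu.py API.
class Mem:
  def __init__(self): self.data = bytearray(1 << 16)
  def load_u32(self, addr): return int.from_bytes(self.data[addr:addr+4], 'little')
  def store_u32(self, addr, val): self.data[addr:addr+4] = (val & 0xffffffff).to_bytes(4, 'little')

def flat_atomic_add_u32(mem, addr, data):
  # mirrors the read-modify-write pattern of _FLATOp_FLAT_ATOMIC_ADD_U32:
  # the memory cell is updated and the pre-op value is returned
  tmp = mem.load_u32(addr)
  mem.store_u32(addr, tmp + data)
  return {'RETURN_DATA': tmp}

TABLE = {'FLAT_ATOMIC_ADD_U32': flat_atomic_add_u32}

mem = Mem()
mem.store_u32(0x100, 7)
out = TABLE['FLAT_ATOMIC_ADD_U32'](mem, 0x100, 5)
assert out['RETURN_DATA'] == 7 and mem.load_u32(0x100) == 12
# Returning a dict of changed outputs (rather than mutating registers in place)
# is what lets the emulator apply EXEC masking and write-back uniformly across
# all op families.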
- -def _FLATOp_FLAT_LOAD_I16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.i32 = (signext(MEM[ADDR].i16)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_B96(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - VDATA[95 : 64] = MEM[ADDR + 8].b32 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_B128(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - VDATA[95 : 64] = MEM[ADDR + 8].b32 - VDATA[127 : 96] = MEM[ADDR + 12].b32 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_STORE_B8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b8 = VDATA[7 : 0] - return {} - -def _FLATOp_FLAT_STORE_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b16 = VDATA[15 : 0] - return {} - -def _FLATOp_FLAT_STORE_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - return {} - -def _FLATOp_FLAT_STORE_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - return {} - -def _FLATOp_FLAT_STORE_B96(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - MEM[ADDR + 8].b32 = VDATA[95 : 64] - return {} - -def _FLATOp_FLAT_STORE_B128(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - MEM[ADDR + 8].b32 = VDATA[95 : 64] - MEM[ADDR + 12].b32 = VDATA[127 : 96] - return {} - -def _FLATOp_FLAT_LOAD_D16_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].u16 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_D16_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].i16 = (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_D16_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].b16 = MEM[ADDR].b16 - return {'VDATA': 
VDATA._val} - -def _FLATOp_FLAT_LOAD_D16_HI_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].u16 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_D16_HI_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].i16 = (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_LOAD_D16_HI_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].b16 = MEM[ADDR].b16 - return {'VDATA': VDATA._val} - -def _FLATOp_FLAT_STORE_D16_HI_B8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b8 = VDATA[23 : 16] - return {} - -def _FLATOp_FLAT_STORE_D16_HI_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b16 = VDATA[31 : 16] - return {} - -def _FLATOp_FLAT_ATOMIC_SWAP_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = DATA.b32 - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_CMPSWAP_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA[31 : 0].u32 - cmp = DATA[63 : 32].u32 - MEM[ADDR].u32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_ADD_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SUB_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MIN_I32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i32) - src = DATA.i32 - MEM[ADDR].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MIN_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MAX_I32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i32) - src = DATA.i32 - MEM[ADDR].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MAX_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); 
DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_AND_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_OR_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_XOR_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_INC_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_DEC_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SWAP_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_CMPSWAP_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA[63 : 0].u64 - cmp = DATA[127 : 64].u64 - MEM[ADDR].u64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_ADD_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_SUB_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MIN_I64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i64) - src = DATA.i64 - MEM[ADDR].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MIN_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = 
Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MAX_I64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i64) - src = DATA.i64 - MEM[ADDR].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MAX_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_AND_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_OR_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_XOR_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_INC_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_DEC_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_CMPSWAP_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA[31 : 0].f32 - cmp = DATA[63 : 32].f32 - MEM[ADDR].f32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MIN_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - MEM[ADDR].f32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_MAX_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - MEM[ADDR].f32 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _FLATOp_FLAT_ATOMIC_ADD_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); 
RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - MEM[ADDR].f32 += DATA.f32 - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -FLATOp_FUNCTIONS = { - FLATOp.FLAT_LOAD_U8: _FLATOp_FLAT_LOAD_U8, - FLATOp.FLAT_LOAD_I8: _FLATOp_FLAT_LOAD_I8, - FLATOp.FLAT_LOAD_U16: _FLATOp_FLAT_LOAD_U16, - FLATOp.FLAT_LOAD_I16: _FLATOp_FLAT_LOAD_I16, - FLATOp.FLAT_LOAD_B32: _FLATOp_FLAT_LOAD_B32, - FLATOp.FLAT_LOAD_B64: _FLATOp_FLAT_LOAD_B64, - FLATOp.FLAT_LOAD_B96: _FLATOp_FLAT_LOAD_B96, - FLATOp.FLAT_LOAD_B128: _FLATOp_FLAT_LOAD_B128, - FLATOp.FLAT_STORE_B8: _FLATOp_FLAT_STORE_B8, - FLATOp.FLAT_STORE_B16: _FLATOp_FLAT_STORE_B16, - FLATOp.FLAT_STORE_B32: _FLATOp_FLAT_STORE_B32, - FLATOp.FLAT_STORE_B64: _FLATOp_FLAT_STORE_B64, - FLATOp.FLAT_STORE_B96: _FLATOp_FLAT_STORE_B96, - FLATOp.FLAT_STORE_B128: _FLATOp_FLAT_STORE_B128, - FLATOp.FLAT_LOAD_D16_U8: _FLATOp_FLAT_LOAD_D16_U8, - FLATOp.FLAT_LOAD_D16_I8: _FLATOp_FLAT_LOAD_D16_I8, - FLATOp.FLAT_LOAD_D16_B16: _FLATOp_FLAT_LOAD_D16_B16, - FLATOp.FLAT_LOAD_D16_HI_U8: _FLATOp_FLAT_LOAD_D16_HI_U8, - FLATOp.FLAT_LOAD_D16_HI_I8: _FLATOp_FLAT_LOAD_D16_HI_I8, - FLATOp.FLAT_LOAD_D16_HI_B16: _FLATOp_FLAT_LOAD_D16_HI_B16, - FLATOp.FLAT_STORE_D16_HI_B8: _FLATOp_FLAT_STORE_D16_HI_B8, - FLATOp.FLAT_STORE_D16_HI_B16: _FLATOp_FLAT_STORE_D16_HI_B16, - FLATOp.FLAT_ATOMIC_SWAP_B32: _FLATOp_FLAT_ATOMIC_SWAP_B32, - FLATOp.FLAT_ATOMIC_CMPSWAP_B32: _FLATOp_FLAT_ATOMIC_CMPSWAP_B32, - FLATOp.FLAT_ATOMIC_ADD_U32: _FLATOp_FLAT_ATOMIC_ADD_U32, - FLATOp.FLAT_ATOMIC_SUB_U32: _FLATOp_FLAT_ATOMIC_SUB_U32, - FLATOp.FLAT_ATOMIC_MIN_I32: _FLATOp_FLAT_ATOMIC_MIN_I32, - FLATOp.FLAT_ATOMIC_MIN_U32: _FLATOp_FLAT_ATOMIC_MIN_U32, - FLATOp.FLAT_ATOMIC_MAX_I32: _FLATOp_FLAT_ATOMIC_MAX_I32, - FLATOp.FLAT_ATOMIC_MAX_U32: _FLATOp_FLAT_ATOMIC_MAX_U32, - FLATOp.FLAT_ATOMIC_AND_B32: _FLATOp_FLAT_ATOMIC_AND_B32, - FLATOp.FLAT_ATOMIC_OR_B32: _FLATOp_FLAT_ATOMIC_OR_B32, - FLATOp.FLAT_ATOMIC_XOR_B32: _FLATOp_FLAT_ATOMIC_XOR_B32, - FLATOp.FLAT_ATOMIC_INC_U32: _FLATOp_FLAT_ATOMIC_INC_U32, - FLATOp.FLAT_ATOMIC_DEC_U32: _FLATOp_FLAT_ATOMIC_DEC_U32, - FLATOp.FLAT_ATOMIC_SWAP_B64: _FLATOp_FLAT_ATOMIC_SWAP_B64, - FLATOp.FLAT_ATOMIC_CMPSWAP_B64: _FLATOp_FLAT_ATOMIC_CMPSWAP_B64, - FLATOp.FLAT_ATOMIC_ADD_U64: _FLATOp_FLAT_ATOMIC_ADD_U64, - FLATOp.FLAT_ATOMIC_SUB_U64: _FLATOp_FLAT_ATOMIC_SUB_U64, - FLATOp.FLAT_ATOMIC_MIN_I64: _FLATOp_FLAT_ATOMIC_MIN_I64, - FLATOp.FLAT_ATOMIC_MIN_U64: _FLATOp_FLAT_ATOMIC_MIN_U64, - FLATOp.FLAT_ATOMIC_MAX_I64: _FLATOp_FLAT_ATOMIC_MAX_I64, - FLATOp.FLAT_ATOMIC_MAX_U64: _FLATOp_FLAT_ATOMIC_MAX_U64, - FLATOp.FLAT_ATOMIC_AND_B64: _FLATOp_FLAT_ATOMIC_AND_B64, - FLATOp.FLAT_ATOMIC_OR_B64: _FLATOp_FLAT_ATOMIC_OR_B64, - FLATOp.FLAT_ATOMIC_XOR_B64: _FLATOp_FLAT_ATOMIC_XOR_B64, - FLATOp.FLAT_ATOMIC_INC_U64: _FLATOp_FLAT_ATOMIC_INC_U64, - FLATOp.FLAT_ATOMIC_DEC_U64: _FLATOp_FLAT_ATOMIC_DEC_U64, - FLATOp.FLAT_ATOMIC_CMPSWAP_F32: _FLATOp_FLAT_ATOMIC_CMPSWAP_F32, - FLATOp.FLAT_ATOMIC_MIN_F32: _FLATOp_FLAT_ATOMIC_MIN_F32, - FLATOp.FLAT_ATOMIC_MAX_F32: _FLATOp_FLAT_ATOMIC_MAX_F32, - FLATOp.FLAT_ATOMIC_ADD_F32: _FLATOp_FLAT_ATOMIC_ADD_F32, -} - -def _GLOBALOp_GLOBAL_LOAD_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.u32 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.i32 
= (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_U16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.u32 = (_pack(0, MEM[ADDR].u16)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_I16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.i32 = (signext(MEM[ADDR].i16)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_B96(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - VDATA[95 : 64] = MEM[ADDR + 8].b32 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_B128(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - VDATA[95 : 64] = MEM[ADDR + 8].b32 - VDATA[127 : 96] = MEM[ADDR + 12].b32 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_STORE_B8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b8 = VDATA[7 : 0] - return {} - -def _GLOBALOp_GLOBAL_STORE_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b16 = VDATA[15 : 0] - return {} - -def _GLOBALOp_GLOBAL_STORE_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - return {} - -def _GLOBALOp_GLOBAL_STORE_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - return {} - -def _GLOBALOp_GLOBAL_STORE_B96(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - MEM[ADDR + 8].b32 = VDATA[95 : 64] - return {} - -def _GLOBALOp_GLOBAL_STORE_B128(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - MEM[ADDR + 8].b32 = VDATA[95 : 64] - MEM[ADDR + 12].b32 = VDATA[127 : 96] - return {} - -def _GLOBALOp_GLOBAL_LOAD_D16_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].u16 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_D16_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); 
RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].i16 = (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_D16_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].b16 = MEM[ADDR].b16 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_D16_HI_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].u16 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_D16_HI_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].i16 = (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_LOAD_D16_HI_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].b16 = MEM[ADDR].b16 - return {'VDATA': VDATA._val} - -def _GLOBALOp_GLOBAL_STORE_D16_HI_B8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b8 = VDATA[23 : 16] - return {} - -def _GLOBALOp_GLOBAL_STORE_D16_HI_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b16 = VDATA[31 : 16] - return {} - -def _GLOBALOp_GLOBAL_ATOMIC_SWAP_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = DATA.b32 - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA[31 : 0].u32 - cmp = DATA[63 : 32].u32 - MEM[ADDR].u32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_ADD_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SUB_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - MEM[ADDR].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_CSUB_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - old_value = MEM[ADDR].u32 - if old_value < DATA.u32: - new_value = 0 - else: - new_value = old_value - DATA.u32 - MEM[ADDR].u32 = new_value - RETURN_DATA.u32 = old_value - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MIN_I32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i32) - src = DATA.i32 - MEM[ADDR].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': 
RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MIN_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MAX_I32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i32) - src = DATA.i32 - MEM[ADDR].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MAX_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_AND_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_OR_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_XOR_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - MEM[ADDR].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_INC_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_DEC_U32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SWAP_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA[63 : 0].u64 - cmp = DATA[127 : 64].u64 - MEM[ADDR].u64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_ADD_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 += DATA.u64 - 
RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_SUB_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - MEM[ADDR].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MIN_I64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i64) - src = DATA.i64 - MEM[ADDR].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MIN_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MAX_I64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].i64) - src = DATA.i64 - MEM[ADDR].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MAX_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_AND_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_OR_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_XOR_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b64) - MEM[ADDR].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_INC_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_DEC_U64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].u64) - src = DATA.u64 - MEM[ADDR].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA[31 : 0].f32 - cmp 
= DATA[63 : 32].f32 - MEM[ADDR].f32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MIN_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - MEM[ADDR].f32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_MAX_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - src = DATA.f32 - MEM[ADDR].f32 = ((src) if (src > tmp) else (tmp)) - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _GLOBALOp_GLOBAL_ATOMIC_ADD_F32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].f32) - MEM[ADDR].f32 += DATA.f32 - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -GLOBALOp_FUNCTIONS = { - GLOBALOp.GLOBAL_LOAD_U8: _GLOBALOp_GLOBAL_LOAD_U8, - GLOBALOp.GLOBAL_LOAD_I8: _GLOBALOp_GLOBAL_LOAD_I8, - GLOBALOp.GLOBAL_LOAD_U16: _GLOBALOp_GLOBAL_LOAD_U16, - GLOBALOp.GLOBAL_LOAD_I16: _GLOBALOp_GLOBAL_LOAD_I16, - GLOBALOp.GLOBAL_LOAD_B32: _GLOBALOp_GLOBAL_LOAD_B32, - GLOBALOp.GLOBAL_LOAD_B64: _GLOBALOp_GLOBAL_LOAD_B64, - GLOBALOp.GLOBAL_LOAD_B96: _GLOBALOp_GLOBAL_LOAD_B96, - GLOBALOp.GLOBAL_LOAD_B128: _GLOBALOp_GLOBAL_LOAD_B128, - GLOBALOp.GLOBAL_STORE_B8: _GLOBALOp_GLOBAL_STORE_B8, - GLOBALOp.GLOBAL_STORE_B16: _GLOBALOp_GLOBAL_STORE_B16, - GLOBALOp.GLOBAL_STORE_B32: _GLOBALOp_GLOBAL_STORE_B32, - GLOBALOp.GLOBAL_STORE_B64: _GLOBALOp_GLOBAL_STORE_B64, - GLOBALOp.GLOBAL_STORE_B96: _GLOBALOp_GLOBAL_STORE_B96, - GLOBALOp.GLOBAL_STORE_B128: _GLOBALOp_GLOBAL_STORE_B128, - GLOBALOp.GLOBAL_LOAD_D16_U8: _GLOBALOp_GLOBAL_LOAD_D16_U8, - GLOBALOp.GLOBAL_LOAD_D16_I8: _GLOBALOp_GLOBAL_LOAD_D16_I8, - GLOBALOp.GLOBAL_LOAD_D16_B16: _GLOBALOp_GLOBAL_LOAD_D16_B16, - GLOBALOp.GLOBAL_LOAD_D16_HI_U8: _GLOBALOp_GLOBAL_LOAD_D16_HI_U8, - GLOBALOp.GLOBAL_LOAD_D16_HI_I8: _GLOBALOp_GLOBAL_LOAD_D16_HI_I8, - GLOBALOp.GLOBAL_LOAD_D16_HI_B16: _GLOBALOp_GLOBAL_LOAD_D16_HI_B16, - GLOBALOp.GLOBAL_STORE_D16_HI_B8: _GLOBALOp_GLOBAL_STORE_D16_HI_B8, - GLOBALOp.GLOBAL_STORE_D16_HI_B16: _GLOBALOp_GLOBAL_STORE_D16_HI_B16, - GLOBALOp.GLOBAL_ATOMIC_SWAP_B32: _GLOBALOp_GLOBAL_ATOMIC_SWAP_B32, - GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_B32: _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP_B32, - GLOBALOp.GLOBAL_ATOMIC_ADD_U32: _GLOBALOp_GLOBAL_ATOMIC_ADD_U32, - GLOBALOp.GLOBAL_ATOMIC_SUB_U32: _GLOBALOp_GLOBAL_ATOMIC_SUB_U32, - GLOBALOp.GLOBAL_ATOMIC_CSUB_U32: _GLOBALOp_GLOBAL_ATOMIC_CSUB_U32, - GLOBALOp.GLOBAL_ATOMIC_MIN_I32: _GLOBALOp_GLOBAL_ATOMIC_MIN_I32, - GLOBALOp.GLOBAL_ATOMIC_MIN_U32: _GLOBALOp_GLOBAL_ATOMIC_MIN_U32, - GLOBALOp.GLOBAL_ATOMIC_MAX_I32: _GLOBALOp_GLOBAL_ATOMIC_MAX_I32, - GLOBALOp.GLOBAL_ATOMIC_MAX_U32: _GLOBALOp_GLOBAL_ATOMIC_MAX_U32, - GLOBALOp.GLOBAL_ATOMIC_AND_B32: _GLOBALOp_GLOBAL_ATOMIC_AND_B32, - GLOBALOp.GLOBAL_ATOMIC_OR_B32: _GLOBALOp_GLOBAL_ATOMIC_OR_B32, - GLOBALOp.GLOBAL_ATOMIC_XOR_B32: _GLOBALOp_GLOBAL_ATOMIC_XOR_B32, - GLOBALOp.GLOBAL_ATOMIC_INC_U32: _GLOBALOp_GLOBAL_ATOMIC_INC_U32, - GLOBALOp.GLOBAL_ATOMIC_DEC_U32: _GLOBALOp_GLOBAL_ATOMIC_DEC_U32, - GLOBALOp.GLOBAL_ATOMIC_SWAP_B64: _GLOBALOp_GLOBAL_ATOMIC_SWAP_B64, - GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_B64: _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP_B64, - 
GLOBALOp.GLOBAL_ATOMIC_ADD_U64: _GLOBALOp_GLOBAL_ATOMIC_ADD_U64, - GLOBALOp.GLOBAL_ATOMIC_SUB_U64: _GLOBALOp_GLOBAL_ATOMIC_SUB_U64, - GLOBALOp.GLOBAL_ATOMIC_MIN_I64: _GLOBALOp_GLOBAL_ATOMIC_MIN_I64, - GLOBALOp.GLOBAL_ATOMIC_MIN_U64: _GLOBALOp_GLOBAL_ATOMIC_MIN_U64, - GLOBALOp.GLOBAL_ATOMIC_MAX_I64: _GLOBALOp_GLOBAL_ATOMIC_MAX_I64, - GLOBALOp.GLOBAL_ATOMIC_MAX_U64: _GLOBALOp_GLOBAL_ATOMIC_MAX_U64, - GLOBALOp.GLOBAL_ATOMIC_AND_B64: _GLOBALOp_GLOBAL_ATOMIC_AND_B64, - GLOBALOp.GLOBAL_ATOMIC_OR_B64: _GLOBALOp_GLOBAL_ATOMIC_OR_B64, - GLOBALOp.GLOBAL_ATOMIC_XOR_B64: _GLOBALOp_GLOBAL_ATOMIC_XOR_B64, - GLOBALOp.GLOBAL_ATOMIC_INC_U64: _GLOBALOp_GLOBAL_ATOMIC_INC_U64, - GLOBALOp.GLOBAL_ATOMIC_DEC_U64: _GLOBALOp_GLOBAL_ATOMIC_DEC_U64, - GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_F32: _GLOBALOp_GLOBAL_ATOMIC_CMPSWAP_F32, - GLOBALOp.GLOBAL_ATOMIC_MIN_F32: _GLOBALOp_GLOBAL_ATOMIC_MIN_F32, - GLOBALOp.GLOBAL_ATOMIC_MAX_F32: _GLOBALOp_GLOBAL_ATOMIC_MAX_F32, - GLOBALOp.GLOBAL_ATOMIC_ADD_F32: _GLOBALOp_GLOBAL_ATOMIC_ADD_F32, -} - -def _SCRATCHOp_SCRATCH_LOAD_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.u32 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.i32 = (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_U16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.u32 = (_pack(0, MEM[ADDR].u16)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_I16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA.i32 = (signext(MEM[ADDR].i16)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_B96(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - VDATA[95 : 64] = MEM[ADDR + 8].b32 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_B128(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 0] = MEM[ADDR].b32 - VDATA[63 : 32] = MEM[ADDR + 4].b32 - VDATA[95 : 64] = MEM[ADDR + 8].b32 - VDATA[127 : 96] = MEM[ADDR + 12].b32 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_STORE_B8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b8 = VDATA[7 : 0] - return {} - -def _SCRATCHOp_SCRATCH_STORE_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b16 = VDATA[15 : 0] - return {} 
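# The handlers on both sides of this point lean on the Reg wrapper for typed
# views (.u32, .i64, .f32, ...) and for hi:lo bit-slice reads and writes such
# as VDATA[31 : 16] = ... in the *_D16_HI ops. A rough sketch of just the
# slice-assignment behavior, assuming an integer-backed register; MiniReg is
# illustrative only, not the real Reg from extra/assembly/amd/pcode.py.
class MiniReg:
  def __init__(self, val=0): self._val = val
  def __getitem__(self, key):
    hi, lo = (key.start, key.stop) if isinstance(key, slice) else (key, key)  # [hi : lo]
    return (self._val >> lo) & ((1 << (hi - lo + 1)) - 1)
  def __setitem__(self, key, value):
    hi, lo = (key.start, key.stop) if isinstance(key, slice) else (key, key)
    mask = ((1 << (hi - lo + 1)) - 1) << lo
    self._val = (self._val & ~mask) | ((int(value) << lo) & mask)

r = MiniReg()
r[15:0] = 0xBEEF    # low-half write, like the SCRATCH_LOAD_D16_* ops above
r[31:16] = 0xDEAD   # high-half write, like the *_D16_HI variants
assert r._val == 0xDEADBEEF and r[23:16] == 0xAD
# Note the [hi : lo] convention matches the pseudocode's bit ranges, with the
# high bit first, so partial (D16) loads can update one half of a 32-bit VGPR
# while leaving the other half untouched.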
- -def _SCRATCHOp_SCRATCH_STORE_B32(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - return {} - -def _SCRATCHOp_SCRATCH_STORE_B64(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - return {} - -def _SCRATCHOp_SCRATCH_STORE_B96(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - MEM[ADDR + 8].b32 = VDATA[95 : 64] - return {} - -def _SCRATCHOp_SCRATCH_STORE_B128(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b32 = VDATA[31 : 0] - MEM[ADDR + 4].b32 = VDATA[63 : 32] - MEM[ADDR + 8].b32 = VDATA[95 : 64] - MEM[ADDR + 12].b32 = VDATA[127 : 96] - return {} - -def _SCRATCHOp_SCRATCH_LOAD_D16_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].u16 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_D16_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].i16 = (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_D16_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[15 : 0].b16 = MEM[ADDR].b16 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_D16_HI_U8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].u16 = (_pack(0, MEM[ADDR].u8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_D16_HI_I8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].i16 = (signext(MEM[ADDR].i8)) - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_LOAD_D16_HI_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - VDATA[31 : 16].b16 = MEM[ADDR].b16 - return {'VDATA': VDATA._val} - -def _SCRATCHOp_SCRATCH_STORE_D16_HI_B8(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b8 = VDATA[23 : 16] - return {} - -def _SCRATCHOp_SCRATCH_STORE_D16_HI_B16(MEM, addr, vdata, vdst): - ADDR=addr; VDATA=Reg(vdata); VDST=Reg(vdst); RETURN_DATA=Reg(0); DATA=VDATA - # --- compiled pseudocode --- - MEM[ADDR].b16 = VDATA[31 : 16] - return {} - -SCRATCHOp_FUNCTIONS = { - SCRATCHOp.SCRATCH_LOAD_U8: _SCRATCHOp_SCRATCH_LOAD_U8, - SCRATCHOp.SCRATCH_LOAD_I8: _SCRATCHOp_SCRATCH_LOAD_I8, - SCRATCHOp.SCRATCH_LOAD_U16: _SCRATCHOp_SCRATCH_LOAD_U16, - SCRATCHOp.SCRATCH_LOAD_I16: _SCRATCHOp_SCRATCH_LOAD_I16, - SCRATCHOp.SCRATCH_LOAD_B32: _SCRATCHOp_SCRATCH_LOAD_B32, - SCRATCHOp.SCRATCH_LOAD_B64: _SCRATCHOp_SCRATCH_LOAD_B64, - SCRATCHOp.SCRATCH_LOAD_B96: _SCRATCHOp_SCRATCH_LOAD_B96, - SCRATCHOp.SCRATCH_LOAD_B128: _SCRATCHOp_SCRATCH_LOAD_B128, - SCRATCHOp.SCRATCH_STORE_B8: 
_SCRATCHOp_SCRATCH_STORE_B8, - SCRATCHOp.SCRATCH_STORE_B16: _SCRATCHOp_SCRATCH_STORE_B16, - SCRATCHOp.SCRATCH_STORE_B32: _SCRATCHOp_SCRATCH_STORE_B32, - SCRATCHOp.SCRATCH_STORE_B64: _SCRATCHOp_SCRATCH_STORE_B64, - SCRATCHOp.SCRATCH_STORE_B96: _SCRATCHOp_SCRATCH_STORE_B96, - SCRATCHOp.SCRATCH_STORE_B128: _SCRATCHOp_SCRATCH_STORE_B128, - SCRATCHOp.SCRATCH_LOAD_D16_U8: _SCRATCHOp_SCRATCH_LOAD_D16_U8, - SCRATCHOp.SCRATCH_LOAD_D16_I8: _SCRATCHOp_SCRATCH_LOAD_D16_I8, - SCRATCHOp.SCRATCH_LOAD_D16_B16: _SCRATCHOp_SCRATCH_LOAD_D16_B16, - SCRATCHOp.SCRATCH_LOAD_D16_HI_U8: _SCRATCHOp_SCRATCH_LOAD_D16_HI_U8, - SCRATCHOp.SCRATCH_LOAD_D16_HI_I8: _SCRATCHOp_SCRATCH_LOAD_D16_HI_I8, - SCRATCHOp.SCRATCH_LOAD_D16_HI_B16: _SCRATCHOp_SCRATCH_LOAD_D16_HI_B16, - SCRATCHOp.SCRATCH_STORE_D16_HI_B8: _SCRATCHOp_SCRATCH_STORE_D16_HI_B8, - SCRATCHOp.SCRATCH_STORE_D16_HI_B16: _SCRATCHOp_SCRATCH_STORE_D16_HI_B16, -} - -COMPILED_FUNCTIONS = { - SOP1Op: SOP1Op_FUNCTIONS, - SOP2Op: SOP2Op_FUNCTIONS, - SOPCOp: SOPCOp_FUNCTIONS, - SOPKOp: SOPKOp_FUNCTIONS, - SOPPOp: SOPPOp_FUNCTIONS, - SMEMOp: SMEMOp_FUNCTIONS, - VOP1Op: VOP1Op_FUNCTIONS, - VOP2Op: VOP2Op_FUNCTIONS, - VOP3Op: VOP3Op_FUNCTIONS, - VOP3SDOp: VOP3SDOp_FUNCTIONS, - VOP3POp: VOP3POp_FUNCTIONS, - VOPCOp: VOPCOp_FUNCTIONS, - DSOp: DSOp_FUNCTIONS, - FLATOp: FLATOp_FUNCTIONS, - GLOBALOp: GLOBALOp_FUNCTIONS, - SCRATCHOp: SCRATCHOp_FUNCTIONS, -} \ No newline at end of file diff --git a/extra/assembly/amd/autogen/rdna3/str_pcode.py b/extra/assembly/amd/autogen/rdna3/str_pcode.py new file mode 100644 index 0000000000..ef93a8cae7 --- /dev/null +++ b/extra/assembly/amd/autogen/rdna3/str_pcode.py @@ -0,0 +1,1354 @@ +# autogenerated by pdf.py - do not edit +# to regenerate: python -m extra.assembly.amd.pdf --arch rdna3 +# ruff: noqa: E501 +from extra.assembly.amd.autogen.rdna3.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp + +SOP1Op_PCODE = { + SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32', + SOP1Op.S_MOV_B64: 'D0.b64 = S0.b64', + SOP1Op.S_CMOV_B32: 'if SCC then\nD0.b32 = S0.b32\nendif', + SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif', + SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]', + SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CTZ_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CLZ_I32_U64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_CLS_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))", + SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U", + 
SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U", + SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U", + SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U", + SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor', + SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0', + SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U', + SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_WQM_B32: "tmp = 0U;\ndeclare i : 6'U;\nfor i in 6'0U : 6'31U do\ntmp[i] = S0.u32[i & 6'60U +: 6'4U] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_NOT_B32: 'D0.u32 = ~S0.u32;\nSCC = D0.u32 != 0U', + SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_AND_SAVEEXEC_B32: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_SAVEEXEC_B32: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XOR_SAVEEXEC_B32: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_XOR_SAVEEXEC_B64: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result 
into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NAND_SAVEEXEC_B32: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_NAND_SAVEEXEC_B64: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NOR_SAVEEXEC_B32: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_NOR_SAVEEXEC_B64: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XNOR_SAVEEXEC_B32: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_XNOR_SAVEEXEC_B64: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT0_SAVEEXEC_B32: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT0_SAVEEXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_NOT0_SAVEEXEC_B32: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_NOT0_SAVEEXEC_B64: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, 
store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT1_SAVEEXEC_B32: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT1_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_NOT1_SAVEEXEC_B32: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_NOT1_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT0_WREXEC_B32: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT0_WREXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT1_WREXEC_B32: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT1_WREXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. 
This instruction is\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = SGPR[addr].b32', + SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b64 = SGPR[addr].b64', + SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b32 = S0.b32', + SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b64 = S0.b64', + SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL', + SOP1Op.S_SETPC_B64: 'PC = S0.i64', + SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64', + SOP1Op.S_RFE_B64: 'PC = S0.i64', + SOP1Op.S_SENDMSG_RTN_B32: 'If SDST is VCC then VCCZ is undefined.', + SOP1Op.S_SENDMSG_RTN_B64: 'If SDST is VCC then VCCZ is undefined.', + SOP1Op.S_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + SOP1Op.S_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + SOP1Op.S_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + SOP1Op.S_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + SOP1Op.S_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + SOP1Op.S_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + SOP1Op.S_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + SOP1Op.S_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + SOP1Op.S_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + SOP1Op.S_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + SOP1Op.S_CVT_HI_F32_F16: 'D0.f32 = f16_to_f32(S0[31 : 16].f16)', + SOP1Op.S_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + SOP1Op.S_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + SOP1Op.S_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + SOP1Op.S_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", +} + +SOP2Op_PCODE = { + SOP2Op.S_ADD_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_SUB_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_ADD_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', + SOP2Op.S_SUB_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', + SOP2Op.S_ADDC_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_SUBB_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 
1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0', + SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0", + SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MAX_U32: 'SCC = S0.u32 >= S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_AND_B32: 'D0.u32 = (S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_AND_B64: 'D0.u64 = (S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_OR_B32: 'D0.u32 = (S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_OR_B64: 'D0.u64 = (S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XOR_B64: 'D0.u64 = (S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NAND_B32: 'D0.u32 = ~(S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NAND_B64: 'D0.u64 = ~(S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NOR_B32: 'D0.u32 = ~(S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NOR_B64: 'D0.u64 = ~(S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XNOR_B64: 'D0.u64 = ~(S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_AND_NOT1_B32: 'D0.u32 = (S0.u32 & ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_AND_NOT1_B64: 'D0.u64 = (S0.u64 & ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_OR_NOT1_B32: 'D0.u32 = (S0.u32 | ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_OR_NOT1_B64: 'D0.u64 = (S0.u64 | ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S1[22 : 16].u32) - 1U));\nSCC = D0.u32 != 0U', + SOP2Op.S_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32);\nSCC = D0.i32 != 0', + SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL', + SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + SOP2Op.S_BFM_B64: 'D0.u64 = (((1ULL << S0[5 : 0].u32) - 1ULL) << S1[5 : 0].u32)', + SOP2Op.S_MUL_I32: 'D0.i32 = S0.i32 * S1.i32', + SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + 
SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + SOP2Op.S_CSELECT_B32: 'D0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_CSELECT_B64: 'D0.u64 = SCC ? S0.u64 : S1.u64', + SOP2Op.S_PACK_LL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_LH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_HH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[31 : 16].u16 }', + SOP2Op.S_PACK_HL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[31 : 16].u16 }', + SOP2Op.S_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + SOP2Op.S_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + SOP2Op.S_MIN_F32: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + SOP2Op.S_MAX_F32: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + SOP2Op.S_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + SOP2Op.S_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', + SOP2Op.S_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', + SOP2Op.S_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + SOP2Op.S_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + SOP2Op.S_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + SOP2Op.S_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + SOP2Op.S_MIN_F16: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + SOP2Op.S_MAX_F16: "// Version of comparison where 
+0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + SOP2Op.S_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + SOP2Op.S_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', +} + +SOPCOp_PCODE = { + SOPCOp.S_CMP_EQ_I32: 'SCC = S0.i32 == S1.i32', + SOPCOp.S_CMP_LG_I32: 'SCC = S0.i32 <> S1.i32', + SOPCOp.S_CMP_GT_I32: 'SCC = S0.i32 > S1.i32', + SOPCOp.S_CMP_GE_I32: 'SCC = S0.i32 >= S1.i32', + SOPCOp.S_CMP_LT_I32: 'SCC = S0.i32 < S1.i32', + SOPCOp.S_CMP_LE_I32: 'SCC = S0.i32 <= S1.i32', + SOPCOp.S_CMP_EQ_U32: 'SCC = S0.u32 == S1.u32', + SOPCOp.S_CMP_LG_U32: 'SCC = S0.u32 <> S1.u32', + SOPCOp.S_CMP_GT_U32: 'SCC = S0.u32 > S1.u32', + SOPCOp.S_CMP_GE_U32: 'SCC = S0.u32 >= S1.u32', + SOPCOp.S_CMP_LT_U32: 'SCC = S0.u32 < S1.u32', + SOPCOp.S_CMP_LE_U32: 'SCC = S0.u32 <= S1.u32', + SOPCOp.S_BITCMP0_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U", + SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U", + SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64', + SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64', + SOPCOp.S_CMP_LT_F32: 'SCC = S0.f32 < S1.f32', + SOPCOp.S_CMP_LT_F16: 'SCC = S0.f16 < S1.f16', + SOPCOp.S_CMP_EQ_F32: 'SCC = S0.f32 == S1.f32', + SOPCOp.S_CMP_EQ_F16: 'SCC = S0.f16 == S1.f16', + SOPCOp.S_CMP_LE_F32: 'SCC = S0.f32 <= S1.f32', + SOPCOp.S_CMP_LE_F16: 'SCC = S0.f16 <= S1.f16', + SOPCOp.S_CMP_GT_F32: 'SCC = S0.f32 > S1.f32', + SOPCOp.S_CMP_GT_F16: 'SCC = S0.f16 > S1.f16', + SOPCOp.S_CMP_LG_F32: 'SCC = S0.f32 <> S1.f32', + SOPCOp.S_CMP_LG_F16: 'SCC = S0.f16 <> S1.f16', + SOPCOp.S_CMP_GE_F32: 'SCC = S0.f32 >= S1.f32', + SOPCOp.S_CMP_GE_F16: 'SCC = S0.f16 >= S1.f16', + SOPCOp.S_CMP_O_F32: "SCC = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + SOPCOp.S_CMP_O_F16: "SCC = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + SOPCOp.S_CMP_U_F32: "SCC = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + SOPCOp.S_CMP_U_F16: "SCC = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + SOPCOp.S_CMP_NGE_F32: 'SCC = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', + SOPCOp.S_CMP_NGE_F16: 'SCC = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + SOPCOp.S_CMP_NLG_F32: 'SCC = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + SOPCOp.S_CMP_NLG_F16: 'SCC = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + SOPCOp.S_CMP_NGT_F32: 'SCC = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + SOPCOp.S_CMP_NGT_F16: 'SCC = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + SOPCOp.S_CMP_NLE_F32: 'SCC = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + SOPCOp.S_CMP_NLE_F16: 'SCC = !(S0.f16 <= S1.f16);\n// 
With NAN inputs this is not the same operation as >', + SOPCOp.S_CMP_NEQ_F32: 'SCC = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + SOPCOp.S_CMP_NEQ_F16: 'SCC = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + SOPCOp.S_CMP_NLT_F32: 'SCC = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + SOPCOp.S_CMP_NLT_F16: 'SCC = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', +} + +SOPKOp_PCODE = { + SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(SIMM16.i16))", + SOPKOp.S_VERSION: '// Do nothing - for use by tools only', + SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(SIMM16.i16))\nendif", + SOPKOp.S_CMPK_EQ_I32: "SCC = 64'I(S0.i32) == signext(SIMM16.i16)", + SOPKOp.S_CMPK_LG_I32: "SCC = 64'I(S0.i32) != signext(SIMM16.i16)", + SOPKOp.S_CMPK_GT_I32: "SCC = 64'I(S0.i32) > signext(SIMM16.i16)", + SOPKOp.S_CMPK_GE_I32: "SCC = 64'I(S0.i32) >= signext(SIMM16.i16)", + SOPKOp.S_CMPK_LT_I32: "SCC = 64'I(S0.i32) < signext(SIMM16.i16)", + SOPKOp.S_CMPK_LE_I32: "SCC = 64'I(S0.i32) <= signext(SIMM16.i16)", + SOPKOp.S_CMPK_EQ_U32: "SCC = S0.u32 == 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_LG_U32: "SCC = S0.u32 != 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_GT_U32: "SCC = S0.u32 > 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_GE_U32: "SCC = S0.u32 >= 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_LT_U32: "SCC = S0.u32 < 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_LE_U32: "SCC = S0.u32 <= 32'U(SIMM16.u16)", + SOPKOp.S_ADDK_I32: "tmp = D0.i32;\nD0.i32 = 32'I(64'I(D0.i32) + signext(SIMM16.i16));\nSCC = ((tmp[31] == SIMM16.i16[15]) && (tmp[31] != D0.i32[31]));", + SOPKOp.S_MULK_I32: "D0.i32 = 32'I(64'I(D0.i32) * signext(SIMM16.i16))", + SOPKOp.S_GETREG_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", + SOPKOp.S_SETREG_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask & 32'I(writeableBitMask(hwRegId.u32, WAVE_STATUS.PRIV)));\n// Mask of bits we are allowed to modify\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_SETREG_IMM32_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask & 32'I(writeableBitMask(hwRegId.u32, WAVE_STATUS.PRIV)));\n// Mask of bits we are allowed to modify\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL", + SOPKOp.S_WAITCNT_VSCNT: 'vscnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', + SOPKOp.S_WAITCNT_VMCNT: 'vmcnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', + SOPKOp.S_WAITCNT_EXPCNT: 'expcnt <= S0.u[2:0] + S1.u[2:0].\n// Comparison is 3 bits, no clamping is applied for add overflow', + SOPKOp.S_WAITCNT_LGKMCNT: 'lgkmcnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', +} + +SOPPOp_PCODE = { + SOPPOp.S_NOP: 'for i in 0U : 
SIMM16.u16[3 : 0].u32 do\nendfor', + SOPPOp.S_SETHALT: 'When halt type control is set to 1 (FATAL HALT bit select): Set FATAL_HALT bit to value of SIMM16[0]; 1 =\nfatal_halt, 0 = clear FATAL_HALT bit. Setting the fatal_halt flag halts the shader in or outside of the trap', + SOPPOp.S_DELAY_ALU: 'instruction may be omitted. For wave64 the compiler may not know the status of the EXEC mask and hence\n// 1 cycle delay here\n// 2 cycles delay here', + SOPPOp.S_TRAP: '// PC passed into trap handler points to S_TRAP itself,\nPC = TBA.i64;\n// trap base address', + SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;", + SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCZ: "If VCCZ is 1 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCNZ: "If VCCZ is 0 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS: "if WAVE_STATUS.COND_DBG_SYS.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGUSER: "if WAVE_STATUS.COND_DBG_USER.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: "if (WAVE_STATUS.COND_DBG_SYS || WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: "if (WAVE_STATUS.COND_DBG_SYS && WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", +} + +SMEMOp_PCODE = { + SMEMOp.S_LOAD_B32: 'SDATA[31 : 0] = MEM[ADDR].b32', + SMEMOp.S_LOAD_B64: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32', + SMEMOp.S_LOAD_B128: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32', + SMEMOp.S_LOAD_B256: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32', + SMEMOp.S_LOAD_B512: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32;\nSDATA[287 : 256] = MEM[ADDR + 32U].b32;\nSDATA[319 : 288] = MEM[ADDR + 36U].b32;\nSDATA[351 : 320] = MEM[ADDR + 40U].b32;\nSDATA[383 : 352] = MEM[ADDR + 44U].b32;\nSDATA[415 : 384] = MEM[ADDR + 48U].b32;\nSDATA[447 : 416] = MEM[ADDR + 52U].b32;\nSDATA[479 : 448] = MEM[ADDR + 56U].b32;\nSDATA[511 : 480] = MEM[ADDR + 60U].b32', + SMEMOp.S_BUFFER_LOAD_B32: 'SDATA[31 : 0] = 
MEM[ADDR].b32', + SMEMOp.S_BUFFER_LOAD_B64: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32', + SMEMOp.S_BUFFER_LOAD_B128: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32', + SMEMOp.S_BUFFER_LOAD_B256: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32', + SMEMOp.S_BUFFER_LOAD_B512: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32;\nSDATA[287 : 256] = MEM[ADDR + 32U].b32;\nSDATA[319 : 288] = MEM[ADDR + 36U].b32;\nSDATA[351 : 320] = MEM[ADDR + 40U].b32;\nSDATA[383 : 352] = MEM[ADDR + 44U].b32;\nSDATA[415 : 384] = MEM[ADDR + 48U].b32;\nSDATA[447 : 416] = MEM[ADDR + 52U].b32;\nSDATA[479 : 448] = MEM[ADDR + 56U].b32;\nSDATA[511 : 480] = MEM[ADDR + 60U].b32', +} + +VOP1Op_PCODE = { + VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32', + VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', + VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', + VOP1Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + VOP1Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + VOP1Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + VOP1Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + VOP1Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + VOP1Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + VOP1Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', + VOP1Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', + VOP1Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. 
Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP1Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', + VOP1Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', + VOP1Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', + VOP1Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', + VOP1Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', + VOP1Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', + VOP1Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', + VOP1Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', + VOP1Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', + VOP1Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', + VOP1Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP1Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP1Op.V_MOV_B16: 'D0.b16 = S0.b16', + VOP1Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP1Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', + VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', + VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", + VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", + VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', + VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', + VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', + VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", + VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", + VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from 
instruction\nVGPR[laneId][addr].b32 = S0.b32', + VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = VGPR[laneId][addr].b32', + VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', + VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', + VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", + VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', + VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", + VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', + VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", + VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", + VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", + VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", + VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', + VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", + VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", + VOP1Op.V_SAT_PK_U8_I16: 'D0.b16 = { SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16) }', + VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', + VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', + VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp', + VOP1Op.V_SWAP_B16: 'tmp = D0.b16;\nD0.b16 = S0.b16;\nS0.b16 = tmp', + VOP1Op.V_PERMLANE64_B32: "declare tmp : 32'B[64];\ndeclare lane : 32'U;\nif WAVE32 then\n// Supported in wave64 ONLY; treated as scalar NOP in wave32\nelse\nfor lane in 0U : 63U do\n// Copy original S0 in case D==S0\ntmp[lane] = VGPR[lane][SRC0.u32]\nendfor;\nfor lane in 0U : 63U do\naltlane = { ~lane[5], lane[4 : 0] };\n// 0<->32, ..., 31<->63\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altlane]\nendif\nendfor\nendif", + VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\ntmp = VGPR[laneId][addrd].b32;', + VOP1Op.V_NOT_B16: 'D0.u16 = ~S0.u16', + VOP1Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + VOP1Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", +} + +VOP2Op_PCODE = { + VOP2Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? 
S1.u32 : S0.u32', + VOP2Op.V_DOT2ACC_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOP2Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + VOP2Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + VOP2Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', + VOP2Op.V_FMAC_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, D0.f32)\nendif", + VOP2Op.V_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP2Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + VOP2Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", + VOP2Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", + VOP2Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", + VOP2Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", + VOP2Op.V_MIN_F32: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + VOP2Op.V_MAX_F32: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + VOP2Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', + VOP2Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', + VOP2Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', + VOP2Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? S0.u32 : S1.u32', + VOP2Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOP2Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', + VOP2Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', + VOP2Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', + VOP2Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', + VOP2Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP2Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', + VOP2Op.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', + VOP2Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', + VOP2Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', + VOP2Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOP2Op.V_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', + VOP2Op.V_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', + VOP2Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + VOP2Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + VOP2Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + VOP2Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', + VOP2Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + VOP2Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', + VOP2Op.V_FMAMK_F16: 'D0.f16 = fma(S0.f16, SIMM32.f16, S1.f16)', + VOP2Op.V_FMAAK_F16: 'D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16)', + VOP2Op.V_MAX_F16: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + VOP2Op.V_MIN_F16: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP2Op.V_PK_FMAC_F16: 'D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16);\nD0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16)', +} + +VOP3Op_PCODE = { + VOP3Op.V_CMP_F_F16: "Set the per-lane condition code to 0. 
Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_F16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_F32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_F32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_F64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. 
Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_F64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
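# A minimal Python sketch (illustrative, not part of the autogenerated
# table) of why the V_CMP_NGE/NLG/NGT/NLE/NEQ/NLT entries above note that
# "With NAN inputs this is not the same operation as ...": every ordered
# IEEE comparison against NaN is false, so the negated compare flips to true.
nan = float("nan")
assert (nan < 1.0) is False          # V_CMP_LT_F32 with a NaN input -> 0
assert (not (nan >= 1.0)) is True    # V_CMP_NGE_F32 with a NaN input -> 1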
Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_F_I32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_I32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_U32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_U32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_I64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_I64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_U64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_U64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. 
Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMPX_F_F16: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', + VOP3Op.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
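# A self-contained Python sketch of the classification the V_CMP_CLASS_*
# pcode above performs, decoded from raw f32 bits. f32_class_bit is a
# hypothetical helper (not in this file); it returns which S1 mask bit
# (0..9) the input selects, and V_CMP_CLASS then tests that bit of S1.
import struct

def f32_class_bit(x: float) -> int:
    bits = struct.unpack("<I", struct.pack("<f", x))[0]
    neg, exp, man = bits >> 31, (bits >> 23) & 0xff, bits & 0x7fffff
    if exp == 255 and man != 0: return 1 if man >> 22 else 0  # quiet / signaling NaN
    if exp == 255: return 2 if neg else 9                     # -INF / +INF
    if exp > 0: return 3 if neg else 8                        # +-normal
    if man != 0: return 4 if neg else 7                       # +-denormal
    return 5 if neg else 6                                    # -0.0 / +0.0

assert f32_class_bit(float("-inf")) == 2 and f32_class_bit(-0.0) == 5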
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f16 == S1.f16', + VOP3Op.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', + VOP3Op.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', + VOP3Op.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', + VOP3Op.V_CMPX_GE_F16: 'EXEC.u64[laneId] = S0.f16 >= S1.f16', + VOP3Op.V_CMPX_O_F16: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + VOP3Op.V_CMPX_U_F16: "EXEC.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + VOP3Op.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + VOP3Op.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_T_F16: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_F32: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', + VOP3Op.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f32 == S1.f32', + VOP3Op.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', + VOP3Op.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', + VOP3Op.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', + VOP3Op.V_CMPX_GE_F32: 'EXEC.u64[laneId] = S0.f32 >= S1.f32', + VOP3Op.V_CMPX_O_F32: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + VOP3Op.V_CMPX_U_F32: "EXEC.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + VOP3Op.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', + VOP3Op.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_T_F32: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_F64: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', + VOP3Op.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f64 == S1.f64', + VOP3Op.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', + VOP3Op.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', + VOP3Op.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', + VOP3Op.V_CMPX_GE_F64: 'EXEC.u64[laneId] = S0.f64 >= S1.f64', + VOP3Op.V_CMPX_O_F64: 'EXEC.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64))', + VOP3Op.V_CMPX_U_F64: 'EXEC.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64))', + VOP3Op.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <', + VOP3Op.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_T_F64: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', + VOP3Op.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i16 == S1.i16', + VOP3Op.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', + VOP3Op.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', + VOP3Op.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', + VOP3Op.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', + VOP3Op.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', + VOP3Op.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u16 == S1.u16', + VOP3Op.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', + VOP3Op.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', + VOP3Op.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', + VOP3Op.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', + VOP3Op.V_CMPX_F_I32: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', + VOP3Op.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i32 == S1.i32', + VOP3Op.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', + VOP3Op.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', + VOP3Op.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', + VOP3Op.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', + VOP3Op.V_CMPX_T_I32: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_U32: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', + VOP3Op.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
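# The V_CMPX_* entries above differ from V_CMP_* only in the destination:
# the per-lane result is written to the EXEC mask, turning off lanes that
# fail the test. A tiny sketch over a plain int standing in for EXEC
# (incoming mask and wave size ignored for brevity; illustrative only):
s0, s1 = [1.0, 2.0, 3.0, 4.0], [2.0, 2.0, 2.0, 2.0]
exec_mask = 0
for lane in range(4):
    exec_mask |= int(s0[lane] == s1[lane]) << lane   # V_CMPX_EQ semantics
assert exec_mask == 0b0010                           # only lane 1 stays active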
Store the result into the EXEC\nEXEC.u64[laneId] = S0.u32 == S1.u32', + VOP3Op.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', + VOP3Op.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', + VOP3Op.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', + VOP3Op.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', + VOP3Op.V_CMPX_T_U32: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_I64: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', + VOP3Op.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i64 == S1.i64', + VOP3Op.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', + VOP3Op.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', + VOP3Op.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', + VOP3Op.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', + VOP3Op.V_CMPX_T_I64: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_U64: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64', + VOP3Op.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u64 == S1.u64', + VOP3Op.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', + VOP3Op.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', + VOP3Op.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', + VOP3Op.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', + VOP3Op.V_CMPX_T_U64: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_CLASS_F16: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CMPX_CLASS_F32: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 
5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CMPX_CLASS_F64: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32', + VOP3Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP3Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', + VOP3Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', + VOP3Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + VOP3Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + VOP3Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + VOP3Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + VOP3Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + VOP3Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + VOP3Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', + VOP3Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', + VOP3Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. 
Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP3Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', + VOP3Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', + VOP3Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', + VOP3Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', + VOP3Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', + VOP3Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', + VOP3Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', + VOP3Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', + VOP3Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', + VOP3Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', + VOP3Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP3Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP3Op.V_MOV_B16: 'D0.b16 = S0.b16', + VOP3Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP3Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP3Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP3Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP3Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', + VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', + VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP3Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP3Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP3Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP3Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", + VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", + VOP3Op.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP3Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', + VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', + VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', + VOP3Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", + VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", + VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from 
instruction\nVGPR[laneId][addr].b32 = S0.b32', + VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = VGPR[laneId][addr].b32', + VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP3Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', + VOP3Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', + VOP3Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP3Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", + VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', + VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", + VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', + VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", + VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", + VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", + VOP3Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + VOP3Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + VOP3Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + VOP3Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", + VOP3Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', + VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", + VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", + VOP3Op.V_SAT_PK_U8_I16: 'D0.b16 = { SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16) }', + VOP3Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', + VOP3Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', + VOP3Op.V_NOT_B16: 'D0.u16 = ~S0.u16', + VOP3Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + VOP3Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", + VOP3Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? 
S1.u32 : S0.u32', + VOP3Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + VOP3Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + VOP3Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', + VOP3Op.V_FMAC_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, D0.f32)\nendif", + VOP3Op.V_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP3Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + VOP3Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", + VOP3Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", + VOP3Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", + VOP3Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", + VOP3Op.V_MIN_F32: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + VOP3Op.V_MAX_F32: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + VOP3Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', + VOP3Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', + VOP3Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', + VOP3Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? 
S0.u32 : S1.u32', + VOP3Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOP3Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', + VOP3Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', + VOP3Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', + VOP3Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', + VOP3Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP3Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', + VOP3Op.V_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', + VOP3Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', + VOP3Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', + VOP3Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOP3Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + VOP3Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + VOP3Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + VOP3Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', + VOP3Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + VOP3Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', + VOP3Op.V_MAX_F16: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + VOP3Op.V_MIN_F16: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", + VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP3Op.V_FMA_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif", + VOP3Op.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32", + VOP3Op.V_MAD_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24) + S2.u32", + VOP3Op.V_CUBEID_F32: '// Set D0.f = cubemap face ID ({0.0, 1.0, ..., 5.0}).\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = 5.0F\nelse\nD0.f32 = 4.0F\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = 3.0F\nelse\nD0.f32 = 2.0F\nendif\nelse\nif S0.f32 < 0.0F then\nD0.f32 = 1.0F\nelse\nD0.f32 = 0.0F\nendif\nendif', + 
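# A minimal sketch of the non-IEEE branch of the V_MIN_F32 pcode above:
# NaN inputs are dropped in favor of the other operand, and -0.0 orders
# before +0.0 (the LT_NEG_ZERO comparison). Names are illustrative; this
# is not the emulator's implementation.
import math

def lt_neg_zero(a: float, b: float) -> bool:
    if a == 0.0 and b == 0.0:   # only here does -0.0 vs +0.0 matter
        return math.copysign(1.0, a) < math.copysign(1.0, b)
    return a < b

def v_min_f32_sketch(s0: float, s1: float) -> float:
    if math.isnan(s1): return s0   # S1 NaN: keep S0 (pcode checks S1 first)
    if math.isnan(s0): return s1
    return s0 if lt_neg_zero(s0, s1) else s1

assert math.copysign(1.0, v_min_f32_sketch(0.0, -0.0)) == -1.0  # -0 < +0 here
assert v_min_f32_sketch(float("nan"), 5.0) == 5.0  # NaN dropped, unlike min()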
VOP3Op.V_CUBESC_F32: '// D0.f = cubemap S coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = -S0.f32\nelse\nD0.f32 = S0.f32\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S0.f32\nelse\nif S0.f32 < 0.0F then\nD0.f32 = S2.f32\nelse\nD0.f32 = -S2.f32\nendif\nendif', + VOP3Op.V_CUBETC_F32: '// D0.f = cubemap T coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = -S1.f32\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = -S2.f32\nelse\nD0.f32 = S2.f32\nendif\nelse\nD0.f32 = -S1.f32\nendif', + VOP3Op.V_CUBEMA_F32: '// D0.f = 2.0 * cubemap major axis.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = S2.f32 * 2.0F\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S1.f32 * 2.0F\nelse\nD0.f32 = S0.f32 * 2.0F\nendif', + VOP3Op.V_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S2[4 : 0].u32) - 1U))', + VOP3Op.V_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32)', + VOP3Op.V_BFI_B32: 'D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32))', + VOP3Op.V_FMA_F32: 'D0.f32 = fma(S0.f32, S1.f32, S2.f32)', + VOP3Op.V_FMA_F64: 'D0.f64 = fma(S0.f64, S1.f64, S2.f64)', + VOP3Op.V_LERP_U8: 'tmp = ((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1U << 24U);\ntmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1U << 16U);\ntmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1U << 8U);\ntmp += ((S0.u32[7 : 0] + S1.u32[7 : 0] + S2.u32[0].u8) >> 1U);\nD0.u32 = tmp.u32', + VOP3Op.V_ALIGNBIT_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> S2.u32[4 : 0].u32) & 0xffffffffLL)", + VOP3Op.V_ALIGNBYTE_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> (S2.u32[1 : 0].u32 * 8U)) & 0xffffffffLL)", + VOP3Op.V_MULLIT_F32: "if ((S1.f32 == -MAX_FLOAT_F32) || (64'F(S1.f32) == -INF) || isNAN(64'F(S1.f32)) || (S2.f32 <= 0.0F) ||\nisNAN(64'F(S2.f32))) then\nD0.f32 = -MAX_FLOAT_F32\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP3Op.V_MIN3_F32: 'D0.f32 = v_min_f32(v_min_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MIN3_I32: 'D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MIN3_U32: 'D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MAX3_F32: 'D0.f32 = v_max_f32(v_max_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MAX3_I32: 'D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MAX3_U32: 'D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MED3_F32: "if (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)) || isNAN(64'F(S2.f32))) then\nD0.f32 = v_min3_f32(S0.f32, S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S0.f32 then\nD0.f32 = v_max_f32(S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S1.f32 then\nD0.f32 = v_max_f32(S0.f32, S2.f32)\nelse\nD0.f32 = v_max_f32(S0.f32, S1.f32)\nendif", + VOP3Op.V_MED3_I32: 'if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32 then\nD0.i32 = v_max_i32(S1.i32, S2.i32)\nelsif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32 then\nD0.i32 = v_max_i32(S0.i32, S2.i32)\nelse\nD0.i32 = v_max_i32(S0.i32, S1.i32)\nendif', + VOP3Op.V_MED3_U32: 'if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32 then\nD0.u32 = v_max_u32(S1.u32, 
S2.u32)\nelsif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32 then\nD0.u32 = v_max_u32(S0.u32, S2.u32)\nelse\nD0.u32 = v_max_u32(S0.u32, S1.u32)\nendif', + VOP3Op.V_SAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3Op.V_SAD_HI_U8: "D0.u32 = (32'U(v_sad_u8(S0, S1, 0U)) << 16U) + S2.u32", + VOP3Op.V_SAD_U16: '// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp', + VOP3Op.V_SAD_U32: '// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32', + VOP3Op.V_CVT_PK_U8_F32: "tmp = (S2.u32 & 32'U(~(0xff << (S1.u32[1 : 0].u32 * 8U))));\ntmp = (tmp | ((32'U(f32_to_u8(S0.f32)) & 255U) << (S1.u32[1 : 0].u32 * 8U)));\nD0.u32 = tmp", + VOP3Op.V_DIV_FIXUP_F32: "sign_out = (sign(S1.f32) ^ sign(S2.f32));\nif isNAN(64'F(S2.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S2.f32)))\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif ((64'F(S1.f32) == 0.0) && (64'F(S2.f32) == 0.0)) then\n// 0/0\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(abs(S1.f32)) == +INF) && (64'F(abs(S2.f32)) == +INF)) then\n// inf/inf\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(S1.f32) == 0.0) || (64'F(abs(S2.f32)) == +INF)) then\n// x/0, or inf/y\nD0.f32 = sign_out ? -INF.f32 : +INF.f32\nelsif ((64'F(abs(S1.f32)) == +INF) || (64'F(S2.f32) == 0.0)) then\n// x/inf, 0/y\nD0.f32 = sign_out ? -0.0F : 0.0F\nelsif exponent(S2.f32) - exponent(S1.f32) < -150 then\nD0.f32 = sign_out ? -UNDERFLOW_F32 : UNDERFLOW_F32\nelsif exponent(S1.f32) == 255 then\nD0.f32 = sign_out ? -OVERFLOW_F32 : OVERFLOW_F32\nelse\nD0.f32 = sign_out ? -abs(S0.f32) : abs(S0.f32)\nendif", + VOP3Op.V_DIV_FIXUP_F64: "sign_out = (sign(S1.f64) ^ sign(S2.f64));\nif isNAN(S2.f64) then\nD0.f64 = cvtToQuietNAN(S2.f64)\nelsif isNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif ((S1.f64 == 0.0) && (S2.f64 == 0.0)) then\n// 0/0\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((abs(S1.f64) == +INF) && (abs(S2.f64) == +INF)) then\n// inf/inf\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((S1.f64 == 0.0) || (abs(S2.f64) == +INF)) then\n// x/0, or inf/y\nD0.f64 = sign_out ? -INF : +INF\nelsif ((abs(S1.f64) == +INF) || (S2.f64 == 0.0)) then\n// x/inf, 0/y\nD0.f64 = sign_out ? -0.0 : 0.0\nelsif exponent(S2.f64) - exponent(S1.f64) < -1075 then\nD0.f64 = sign_out ? -UNDERFLOW_F64 : UNDERFLOW_F64\nelsif exponent(S1.f64) == 2047 then\nD0.f64 = sign_out ? -OVERFLOW_F64 : OVERFLOW_F64\nelse\nD0.f64 = sign_out ? -abs(S0.f64) : abs(S0.f64)\nendif", + VOP3Op.V_DIV_FMAS_F32: 'if VCC.u64[laneId] then\nD0.f32 = 2.0F ** 32 * fma(S0.f32, S1.f32, S2.f32)\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif', + VOP3Op.V_DIV_FMAS_F64: 'if VCC.u64[laneId] then\nD0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)\nelse\nD0.f64 = fma(S0.f64, S1.f64, S2.f64)\nendif', + VOP3Op.V_MSAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 
0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3Op.V_QSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", + VOP3Op.V_MQSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", + VOP3Op.V_MQSAD_U32_U8: "tmp[127 : 96] = 32'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32));\ntmp[95 : 64] = 32'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32));\ntmp[63 : 32] = 32'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32));\ntmp[31 : 0] = 32'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32));\nD0.b128 = tmp.b128", + VOP3Op.V_XOR3_B32: 'D0.u32 = (S0.u32 ^ S1.u32 ^ S2.u32)', + VOP3Op.V_MAD_U16: 'D0.u16 = S0.u16 * S1.u16 + S2.u16', + VOP3Op.V_PERM_B32: 'D0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])', + VOP3Op.V_XAD_U32: 'D0.u32 = (S0.u32 ^ S1.u32) + S2.u32', + VOP3Op.V_LSHL_ADD_U32: 'D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32', + VOP3Op.V_ADD_LSHL_U32: 'D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32)', + VOP3Op.V_FMA_F16: 'D0.f16 = fma(S0.f16, S1.f16, S2.f16)', + VOP3Op.V_MIN3_F16: 'D0.f16 = v_min_f16(v_min_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MIN3_I16: 'D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16)', + VOP3Op.V_MIN3_U16: 'D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16)', + VOP3Op.V_MAX3_F16: 'D0.f16 = v_max_f16(v_max_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MAX3_I16: 'D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16)', + VOP3Op.V_MAX3_U16: 'D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16)', + VOP3Op.V_MED3_F16: "if (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)) || isNAN(64'F(S2.f16))) then\nD0.f16 = v_min3_f16(S0.f16, S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S0.f16 then\nD0.f16 = v_max_f16(S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S1.f16 then\nD0.f16 = v_max_f16(S0.f16, S2.f16)\nelse\nD0.f16 = v_max_f16(S0.f16, S1.f16)\nendif", + VOP3Op.V_MED3_I16: 'if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16 then\nD0.i16 = v_max_i16(S1.i16, S2.i16)\nelsif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16 then\nD0.i16 = v_max_i16(S0.i16, S2.i16)\nelse\nD0.i16 = v_max_i16(S0.i16, S1.i16)\nendif', + VOP3Op.V_MED3_U16: 'if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16 then\nD0.u16 = v_max_u16(S1.u16, S2.u16)\nelsif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16 then\nD0.u16 = v_max_u16(S0.u16, S2.u16)\nelse\nD0.u16 = v_max_u16(S0.u16, S1.u16)\nendif', + VOP3Op.V_MAD_I16: 'D0.i16 = S0.i16 * S1.i16 + S2.i16', + VOP3Op.V_DIV_FIXUP_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S2.f16)))\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == 
+INF)) then\n// inf/inf\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\nD0.f16 = sign_out ? -INF.f16 : +INF.f16\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\nD0.f16 = sign_out ? -16'0.0 : 16'0.0\nelse\nD0.f16 = sign_out ? -abs(S0.f16) : abs(S0.f16)\nendif", + VOP3Op.V_ADD3_U32: 'D0.u32 = S0.u32 + S1.u32 + S2.u32', + VOP3Op.V_LSHL_OR_B32: 'D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32)', + VOP3Op.V_AND_OR_B32: 'D0.u32 = ((S0.u32 & S1.u32) | S2.u32)', + VOP3Op.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)', + VOP3Op.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32", + VOP3Op.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32", + VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor", + VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor", + VOP3Op.V_CNDMASK_B16: 'D0.u16 = VCC.u64[laneId] ? S1.u16 : S0.u16', + VOP3Op.V_MAXMIN_F32: 'D0.f32 = v_min_f32(v_max_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MINMAX_F32: 'D0.f32 = v_max_f32(v_min_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MAXMIN_F16: 'D0.f16 = v_min_f16(v_max_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MINMAX_F16: 'D0.f16 = v_max_f16(v_min_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MAXMIN_U32: 'D0.u32 = v_min_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MINMAX_U32: 'D0.u32 = v_max_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MAXMIN_I32: 'D0.i32 = v_min_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MINMAX_I32: 'D0.i32 = v_max_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_DOT2_F16_F16: 'tmp = S2.f16;\ntmp += S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp += S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.f16 = tmp', + VOP3Op.V_DOT2_BF16_BF16: 'tmp = S2.bf16;\ntmp += S0[15 : 0].bf16 * S1[15 : 0].bf16;\ntmp += S0[31 : 16].bf16 * S1[31 : 16].bf16;\nD0.bf16 = tmp', + VOP3Op.V_ADD_NC_U16: 'D0.u16 = S0.u16 + S1.u16', + VOP3Op.V_SUB_NC_U16: 'D0.u16 = S0.u16 - S1.u16', + VOP3Op.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16', + VOP3Op.V_CVT_PK_I16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_i16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_i16_f32(S0.f32));", + VOP3Op.V_CVT_PK_U16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_u16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_u16_f32(S0.f32));", + VOP3Op.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16', + VOP3Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16', + VOP3Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16', + VOP3Op.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? 
S0.i16 : S1.i16', + VOP3Op.V_ADD_NC_I16: 'D0.i16 = S0.i16 + S1.i16', + VOP3Op.V_SUB_NC_I16: 'D0.i16 = S0.i16 - S1.i16', + VOP3Op.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16', + VOP3Op.V_CVT_PK_NORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);", + VOP3Op.V_CVT_PK_NORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);", + VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32', + VOP3Op.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + VOP3Op.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp", + VOP3Op.V_MBCNT_LO_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', + VOP3Op.V_MBCNT_HI_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', + VOP3Op.V_CVT_PK_NORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);", + VOP3Op.V_CVT_PK_NORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);", + VOP3Op.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);", + VOP3Op.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);", + VOP3Op.V_SUB_NC_I32: 'D0.i32 = S0.i32 - S1.i32', + VOP3Op.V_ADD_NC_I32: 'D0.i32 = S0.i32 + S1.i32', + VOP3Op.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64', + VOP3Op.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64', + VOP3Op.V_MIN_F64: '// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif LT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nelse\nif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif LT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE', + VOP3Op.V_MAX_F64: '// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif GT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nelse\nif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif GT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE', + VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32', + VOP3Op.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32', + VOP3Op.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + VOP3Op.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + VOP3Op.V_TRIG_PREOP_F64: 
"shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)", + VOP3Op.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)', + VOP3Op.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)', + VOP3Op.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)', + VOP3Op.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)', + VOP3Op.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)', + VOP3Op.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)', + VOP3Op.V_READLANE_B32: "declare lane : 32'U;\nif WAVE32 then\nlane = S1.u32[4 : 0].u32;\n// Lane select for wave32\nelse\nlane = S1.u32[5 : 0].u32;\n// Lane select for wave64\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP3Op.V_WRITELANE_B32: "declare lane : 32'U;\nif WAVE32 then\nlane = S1.u32[4 : 0].u32;\n// Lane select for wave32\nelse\nlane = S1.u32[5 : 0].u32;\n// Lane select for wave64\nendif;\nVGPR[lane][VDST.u32] = S0.b32", + VOP3Op.V_AND_B16: 'D0.u16 = (S0.u16 & S1.u16)', + VOP3Op.V_OR_B16: 'D0.u16 = (S0.u16 | S1.u16)', + VOP3Op.V_XOR_B16: 'D0.u16 = (S0.u16 ^ S1.u16)', +} + +VOP3SDOp_PCODE = { + VOP3SDOp.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_DIV_SCALE_F32: "VCC = 0x0LL;\nif ((64'F(S2.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\nD0.f32 = NAN.f32\nelsif exponent(S2.f32) - exponent(S1.f32) >= 96 then\n// N/D near MAX_FLOAT_F32\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif S1.f32 == DENORM.f32 then\nD0.f32 = ldexp(S0.f32, 64)\nelsif ((1.0 / 64'F(S1.f32) == DENORM.f64) && (S2.f32 / S1.f32 == DENORM.f32)) then\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif 1.0 / 64'F(S1.f32) == DENORM.f64 then\nD0.f32 = ldexp(S0.f32, -64)\nelsif S2.f32 / S1.f32 == DENORM.f32 then\nVCC = 0x1LL;\nif S0.f32 == S2.f32 then\n// Only scale the numerator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif exponent(S2.f32) <= 23 then\n// Numerator is tiny\nD0.f32 = ldexp(S0.f32, 64)\nendif", + VOP3SDOp.V_DIV_SCALE_F64: 'VCC = 0x0LL;\nif ((S2.f64 == 0.0) || (S1.f64 == 0.0)) then\nD0.f64 = NAN.f64\nelsif exponent(S2.f64) - exponent(S1.f64) >= 768 then\n// N/D near MAX_FLOAT_F64\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, 128)\nelsif ((1.0 / S1.f64 == DENORM.f64) && (S2.f64 / S1.f64 == DENORM.f64)) then\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif 1.0 / S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, -128)\nelsif S2.f64 / S1.f64 == DENORM.f64 then\nVCC = 0x1LL;\nif S0.f64 == S2.f64 then\n// Only scale the numerator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif exponent(S2.f64) <= 53 then\n// Numerator is tiny\nD0.f64 = ldexp(S0.f64, 128)\nendif', + VOP3SDOp.V_MAD_U64_U32: "{ D1.u1, D0.u64 } = 65'B(65'U(S0.u32) * 65'U(S1.u32) + 65'U(S2.u64))", + VOP3SDOp.V_MAD_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))", + VOP3SDOp.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", +} + +VOP3POp_PCODE = { + VOP3POp.V_PK_MAD_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MUL_LO_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ADD_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_SUB_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_LSHLREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_LSHRREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ASHRREV_I16: 'tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 >= S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 >= S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MIN_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 < S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 < S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAD_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ADD_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_SUB_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 >= S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 >= S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MIN_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 < S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 < S1[15 : 0].u16 ? 
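Every V_PK_* entry operates independently on the two 16-bit halves of a 32-bit register, as the repeated [31:16]/[15:0] slices show. A sketch of V_PK_ADD_I16 under that reading (helper names are assumptions, not the emulator's):

def _i16(x: int) -> int:
    """Interpret the low 16 bits of x as a signed 16-bit value."""
    x &= 0xFFFF
    return x - 0x10000 if x & 0x8000 else x

def v_pk_add_i16(s0: int, s1: int) -> int:
    lo = (_i16(s0) + _i16(s1)) & 0xFFFF               # tmp[15:0]
    hi = (_i16(s0 >> 16) + _i16(s1 >> 16)) & 0xFFFF   # tmp[31:16]
    return (hi << 16) | lo

assert v_pk_add_i16(0x0001_FFFF, 0x0001_0001) == 0x0002_0000  # hi: 1+1, lo: -1+1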
S0[15 : 0].u16 : S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_FMA_F16: "declare tmp : 32'B;\ntmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16);\ntmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_ADD_F16: 'tmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MUL_F16: 'tmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MIN_F16: 'tmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16);\ntmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_F16: 'tmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16);\ntmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16);\nD0.b32 = tmp.b32', + VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOP3POp.V_DOT4_I32_IU8: "declare A : 32'I[4];\ndeclare B : 32'I[4];\nfor i in 0 : 3 do\nA8 = S0[i * 8 + 7 : i * 8];\nB8 = S1[i * 8 + 7 : i * 8];\nendfor;\nC = S2.i32;\ntmp = C.i32;\nD0.i32 = tmp", + VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp', + VOP3POp.V_DOT8_I32_IU4: "declare A : 32'I[8];\ndeclare B : 32'I[8];\nfor i in 0 : 7 do\nA4 = S0[i * 4 + 3 : i * 4];\nB4 = S1[i * 4 + 3 : i * 4];\nendfor;\nC = S2.i32;\ntmp = C.i32;\nD0.i32 = tmp", + VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp', + VOP3POp.V_DOT2_F32_BF16: 'tmp = S2.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', + VOP3POp.V_FMA_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])", + VOP3POp.V_FMA_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))", + VOP3POp.V_FMA_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))", + VOP3POp.V_WMMA_F32_16X16X16_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval 
"D0.f32(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F32_16X16X16_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F16_16X16X16_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f16(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.bf16(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_I32_16X16X16_IU8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(16x16) + S2.i32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_I32_16X16X16_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(16x16) + S2.i32(16x16)";\nEXEC = saved_exec', +} + +VOPCOp_PCODE = { + VOPCOp.V_CMP_F_F16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. 
Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_F16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_F32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_F32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_F64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_F64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_F_I32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_I64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. 
Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_F16: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', + VOPCOp.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
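All three V_CMP_CLASS entries share one decision tree over the ten S1 class bits. A sketch of the f32 case using host floats (the bit layout follows the pcode above; the helper itself is illustrative):

import math, struct

def v_cmp_class_f32(s0: float, s1_mask: int) -> bool:
    """Return S1[bit] for the class bit that s0 falls into (pcode bit order)."""
    bits = struct.unpack("<I", struct.pack("<f", s0))[0]
    sign, exp, frac = bits >> 31, (bits >> 23) & 0xFF, bits & 0x7FFFFF
    if math.isnan(s0):
        bit = 1 if frac & 0x400000 else 0   # quiet vs signaling NaN
    elif exp == 255:  bit = 2 if sign else 9  # +-INF
    elif exp > 0:     bit = 3 if sign else 8  # +-normal
    elif frac != 0:   bit = 4 if sign else 7  # +-denormal
    else:             bit = 5 if sign else 6  # +-0.0
    return bool((s1_mask >> bit) & 1)

assert v_cmp_class_f32(float("inf"), 1 << 9)   # positive infinity
assert v_cmp_class_f32(-0.0, 1 << 5)           # negative zero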
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f16 == S1.f16', + VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', + VOPCOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', + VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', + VOPCOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = S0.f16 >= S1.f16', + VOPCOp.V_CMPX_O_F16: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + VOPCOp.V_CMPX_U_F16: "EXEC.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + VOPCOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_T_F16: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_F32: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', + VOPCOp.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f32 == S1.f32', + VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', + VOPCOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', + VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', + VOPCOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = S0.f32 >= S1.f32', + VOPCOp.V_CMPX_O_F32: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + VOPCOp.V_CMPX_U_F32: "EXEC.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + VOPCOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_T_F32: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_F64: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', + VOPCOp.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f64 == S1.f64', + VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', + VOPCOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', + VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', + VOPCOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = S0.f64 >= S1.f64', + VOPCOp.V_CMPX_O_F64: 'EXEC.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64))', + VOPCOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64))', + VOPCOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_T_F64: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', + VOPCOp.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i16 == S1.i16', + VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', + VOPCOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', + VOPCOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', + VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', + VOPCOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', + VOPCOp.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u16 == S1.u16', + VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', + VOPCOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', + VOPCOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', + VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', + VOPCOp.V_CMPX_F_I32: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', + VOPCOp.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i32 == S1.i32', + VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', + VOPCOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', + VOPCOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', + VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', + VOPCOp.V_CMPX_T_I32: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_U32: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', + VOPCOp.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.u32 == S1.u32', + VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', + VOPCOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', + VOPCOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', + VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', + VOPCOp.V_CMPX_T_U32: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_I64: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', + VOPCOp.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i64 == S1.i64', + VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', + VOPCOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', + VOPCOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', + VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', + VOPCOp.V_CMPX_T_I64: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_U64: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64', + VOPCOp.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u64 == S1.u64', + VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', + VOPCOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', + VOPCOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', + VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', + VOPCOp.V_CMPX_T_U64: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_CLASS_F16: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOPCOp.V_CMPX_CLASS_F32: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 
5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOPCOp.V_CMPX_CLASS_F64: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", +} + +DSOp_PCODE = { + DSOp.DS_ADD_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = DATA.u32 - MEM[ADDR].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_STORE_B32: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0]', + DSOp.DS_STORE_2ADDR_B32: 'MEM[ADDR + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]', + DSOp.DS_STORE_2ADDR_STRIDE64_B32: 'MEM[ADDR + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]', + DSOp.DS_CMPSTORE_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', + DSOp.DS_CMPSTORE_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MIN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MAX_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? 
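DS_ADD/SUB/MIN/MAX are plain read-modify-writes, but DS_INC_U32 and DS_DEC_U32 are wrapping counters bounded by the operand rather than add/sub by one. A one-address sketch of both (names illustrative):

def ds_inc_u32(mem: int, data: int) -> tuple[int, int]:
    """Returns (new_mem, return_data): increment, wrapping to 0 past data."""
    return (0 if mem >= data else mem + 1), mem

def ds_dec_u32(mem: int, data: int) -> tuple[int, int]:
    """Decrement, reloading data when mem is 0 or above the bound."""
    return (data if mem == 0 or mem > data else mem - 1), mem

assert ds_inc_u32(3, 3) == (0, 3)   # at the limit: wrap to 0
assert ds_dec_u32(0, 7) == (7, 0)   # at zero: reload the limit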
src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_STORE_B8: 'MEM[ADDR].b8 = DATA[7 : 0]', + DSOp.DS_STORE_B16: 'MEM[ADDR].b16 = DATA[15 : 0]', + DSOp.DS_ADD_RTN_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_RTN_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_RTN_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = DATA.u32 - MEM[ADDR].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_RTN_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_RTN_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_RTN_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_RTN_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_RTN_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_RTN_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_RTN_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_RTN_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_RTN_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_RTN_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_STOREXCHG_RTN_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + DSOp.DS_STOREXCHG_2ADDR_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_CMPSTORE_RTN_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', + DSOp.DS_CMPSTORE_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MIN_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MAX_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_WRAP_RTN_B32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = tmp >= DATA.u32 ? tmp - DATA.u32 : tmp + DATA2.u32;\nRETURN_DATA = tmp', + DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? 
thread_in[j] : 0;\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;', + DSOp.DS_LOAD_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32', + DSOp.DS_LOAD_2ADDR_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 4U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET1.u32 * 4U].b32', + DSOp.DS_LOAD_2ADDR_STRIDE64_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 256U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET1.u32 * 256U].b32', + DSOp.DS_LOAD_I8: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i8))", + DSOp.DS_LOAD_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + DSOp.DS_LOAD_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))", + DSOp.DS_LOAD_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + DSOp.DS_CONSUME: 'addr = M0.base + offset; // offset by LDS HWBASE, limit to M.size\nrtnval = LDS(addr);\nGPR[VDST] = rtnval; // return to all valid threads', + DSOp.DS_APPEND: 'addr = M0.base + offset; // offset by LDS HWBASE, limit to M.size\nrtnval = LDS(addr);\nGPR[VDST] = rtnval; // return to all valid threads', + DSOp.DS_ADD_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 = DATA.u64 - MEM[ADDR].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src >= tmp ? 
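The final DS_SWIZZLE mode (offset[15] == 0) is a pure lane-index remap built from three 5-bit masks. A sketch of that mapping; the extracted pcode above lost some braces, so this follows the arithmetic rather than the literal text:

def swizzle_lane(i: int, offset: int) -> int:
    """Source lane for lane i in DS_SWIZZLE's offset[15]==0 mode."""
    and_mask =  offset        & 0x1F
    or_mask  = (offset >> 5)  & 0x1F
    xor_mask = (offset >> 10) & 0x1F
    j = (((i & 0x1F) & and_mask) | or_mask) ^ xor_mask
    return j | (i & 0x20)   # stay within the same group of 32 lanes

# offset 0x041F: and=0x1F, or=0, xor=1 -> swap adjacent even/odd lanes
assert [swizzle_lane(i, 0x041F) for i in range(4)] == [1, 0, 3, 2]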
src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_STORE_B64: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32]', + DSOp.DS_STORE_2ADDR_B64: 'MEM[ADDR + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_STORE_2ADDR_STRIDE64_B64: 'MEM[ADDR + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_CMPSTORE_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', + DSOp.DS_CMPSTORE_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MIN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_ADD_RTN_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_RTN_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_RTN_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 = DATA.u64 - MEM[ADDR].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_RTN_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_RTN_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_RTN_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_RTN_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_RTN_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_RTN_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_RTN_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_RTN_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_RTN_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_RTN_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_STOREXCHG_RTN_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + DSOp.DS_STOREXCHG_2ADDR_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_CMPSTORE_RTN_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', + DSOp.DS_CMPSTORE_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MIN_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_LOAD_B64: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4U].b32', + DSOp.DS_LOAD_2ADDR_B64: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET0.u32 * 8U + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[ADDR + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[ADDR + OFFSET1.u32 * 8U + 4U].b32', + DSOp.DS_LOAD_2ADDR_STRIDE64_B64: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET0.u32 * 512U + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[ADDR + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[ADDR + OFFSET1.u32 * 512U + 4U].b32', + DSOp.DS_ADD_RTN_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_ADD_GS_REG_RTN: 'if OFFSET0[5:2] > 7\n// 64-bit GS register access\naddr = (OFFSET0[5:2] - 8) * 2 + 8;\nVDST[0] = GS_REGS(addr + 0);\nVDST[1] = GS_REGS(addr + 1);\n{GS_REGS(addr + 1), GS_REGS(addr)} += DATA0[0]; // source is 32 bit\nelse\naddr = OFFSET0[5:2];\nVDST[0] = GS_REGS(addr);\nGS_REGS(addr) += DATA0[0];\noffset[5:2] Register\noffset[5:2] Register', + DSOp.DS_SUB_GS_REG_RTN: 'if OFFSET0[5:2] > 7\n// 64-bit GS register access\naddr = (OFFSET0[5:2] - 8) * 2 + 8;\nVDST[0] = GS_REGS(addr + 0);\nVDST[1] = GS_REGS(addr + 1);\n{GS_REGS(addr + 1), GS_REGS(addr)} -= DATA0[0]; // source is 32 bit\nelse\naddr = OFFSET0[5:2];\nVDST[0] = GS_REGS(addr);\nGS_REGS(addr) -= DATA0[0];\noffset[5:2] Register\noffset[5:2] Register', + DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] 
then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", + DSOp.DS_STORE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]', + DSOp.DS_STORE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]', + DSOp.DS_LOAD_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + DSOp.DS_LOAD_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + DSOp.DS_LOAD_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", + DSOp.DS_LOAD_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", + DSOp.DS_LOAD_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;', + DSOp.DS_LOAD_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;', + DSOp.DS_BVH_STACK_RTN_B32: 'The LDS stack address is computed using values packed into ADDR and part of OFFSET1. ADDR carries the\nstack address for the lane. OFFSET1[5:4] contains stack_size[1:0] -- this value is constant for all lanes and is\n(stack_base, stack_index) = DECODE_ADDR(ADDR, OFFSET1);\nlast_node_ptr = DATA0;\n// First 3 passes: push data onto stack\nfor i = 0..2 do\nif DATA_VALID(DATA1[i])\nMEM[stack_base + stack_index] = DATA1[i];\nelsif DATA1[i] == last_node_ptr\nendif\nendfor\n// Fourth pass: return data or pop\nif DATA_VALID(DATA1[3])\nVGPR_RTN = DATA1[3]\nelse\nVGPR_RTN = MEM[stack_base + stack_index];\nMEM[stack_base + stack_index] = INVALID_NODE;\nendif\nif data == INVALID_NODE\nelsif last_node_ptr != INVALID_NODE && data == last_node_ptr\n// Match last_node_ptr\nelse\nendif', + DSOp.DS_STORE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32", + DSOp.DS_LOAD_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32", + DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_BPERMUTE_B32: "Note that EXEC mask is applied to both VGPR read and write. If src_lane selects a disabled thread then zero is\n// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\nfor i in 0 : WAVE64 ? 
63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_STORE_B96: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET.u32 + 8U].b32 = DATA[95 : 64]', + DSOp.DS_STORE_B128: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[ADDR + OFFSET.u32 + 12U].b32 = DATA[127 : 96]', + DSOp.DS_LOAD_B96: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[ADDR + OFFSET.u32 + 8U].b32', + DSOp.DS_LOAD_B128: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[ADDR + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[ADDR + OFFSET.u32 + 12U].b32', +} + +FLATOp_PCODE = { + FLATOp.FLAT_LOAD_U8: "VDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + FLATOp.FLAT_LOAD_I8: "VDATA.i32 = 32'I(signext(MEM[ADDR].i8))", + FLATOp.FLAT_LOAD_U16: "VDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + FLATOp.FLAT_LOAD_I16: "VDATA.i32 = 32'I(signext(MEM[ADDR].i16))", + FLATOp.FLAT_LOAD_B32: 'VDATA[31 : 0] = MEM[ADDR].b32', + FLATOp.FLAT_LOAD_B64: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32', + FLATOp.FLAT_LOAD_B96: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32;\nVDATA[95 : 64] = MEM[ADDR + 8U].b32', + FLATOp.FLAT_LOAD_B128: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32;\nVDATA[95 : 64] = MEM[ADDR + 8U].b32;\nVDATA[127 : 96] = MEM[ADDR + 12U].b32', + FLATOp.FLAT_STORE_B8: 'MEM[ADDR].b8 = VDATA[7 : 0]', + FLATOp.FLAT_STORE_B16: 'MEM[ADDR].b16 = VDATA[15 : 0]', + FLATOp.FLAT_STORE_B32: 'MEM[ADDR].b32 = VDATA[31 : 0]', + FLATOp.FLAT_STORE_B64: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32]', + FLATOp.FLAT_STORE_B96: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64]', + FLATOp.FLAT_STORE_B128: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64];\nMEM[ADDR + 12U].b32 = VDATA[127 : 96]', + FLATOp.FLAT_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + FLATOp.FLAT_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", + FLATOp.FLAT_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;', + FLATOp.FLAT_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + FLATOp.FLAT_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", + FLATOp.FLAT_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;', + FLATOp.FLAT_STORE_D16_HI_B8: 'MEM[ADDR].b8 = VDATA[23 : 16]', + FLATOp.FLAT_STORE_D16_HI_B16: 'MEM[ADDR].b16 = VDATA[31 : 16]', + FLATOp.FLAT_ATOMIC_SWAP_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_CMPSWAP_B32: 'tmp = MEM[ADDR].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[ADDR].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_ADD_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_SUB_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_MIN_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + FLATOp.FLAT_ATOMIC_MIN_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src < tmp ? 
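DS_PERMUTE_B32 scatters ("push my data to the lane my address names") while DS_BPERMUTE_B32 gathers ("pull data from the lane my address names"); both divide the byte address by 4 to get a lane index. A 32-lane sketch that ignores EXEC masking:

def ds_permute(addr, data, offset=0):
    """Forward permute: lane i writes data[i] to lane (addr[i]+offset)//4 % 32."""
    out = [0] * 32
    for i in range(32):
        out[(addr[i] + offset) // 4 % 32] = data[i]  # highest lane wins ties
    return out

def ds_bpermute(addr, data, offset=0):
    """Backward permute: lane i reads data from lane (addr[i]+offset)//4 % 32."""
    return [data[(addr[i] + offset) // 4 % 32] for i in range(32)]

data = list(range(32))
rot  = [(i + 1) * 4 for i in range(32)]   # byte addresses, one lane apart
assert ds_bpermute(rot, data) == data[1:] + data[:1]  # rotate left by one lane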
src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_MAX_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + FLATOp.FLAT_ATOMIC_MAX_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_AND_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_OR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_XOR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_INC_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_DEC_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_SWAP_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_CMPSWAP_B64: 'tmp = MEM[ADDR].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[ADDR].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_ADD_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_SUB_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_MIN_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + FLATOp.FLAT_ATOMIC_MIN_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_MAX_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + FLATOp.FLAT_ATOMIC_MAX_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_AND_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_OR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_XOR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_INC_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_DEC_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_CMPSWAP_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA[31 : 0].f32;\ncmp = DATA[63 : 32].f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + FLATOp.FLAT_ATOMIC_MIN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + FLATOp.FLAT_ATOMIC_MAX_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? 
src : tmp;\nRETURN_DATA.f32 = tmp', + FLATOp.FLAT_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + FLATOp.GLOBAL_LOAD_ADDTID_B32: "RETURN_DATA.u32 = MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32", + FLATOp.GLOBAL_STORE_ADDTID_B32: "MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32 = DATA.u32", +}
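The wrapping INC/DEC atomics above are easy to misread as plain add/sub: INC wraps back to 0 once the old value reaches the threshold in DATA, DEC reloads the threshold on 0 or overshoot, and GLOBAL_ATOMIC_CSUB_U32 (below) clamps the subtraction at 0. A small standalone sketch of those three pcode strings, with a dict standing in for MEM; like all of these atomics, each returns the pre-op value:

def atomic_inc_u32(mem, addr, src):
    tmp = mem[addr]
    mem[addr] = 0 if tmp >= src else tmp + 1   # wrap to 0 past the threshold
    return tmp                                 # atomics return the old value

def atomic_dec_u32(mem, addr, src):
    tmp = mem[addr]
    mem[addr] = src if tmp == 0 or tmp > src else tmp - 1
    return tmp

def atomic_csub_u32(mem, addr, src):
    old = mem[addr]
    mem[addr] = 0 if old < src else old - src  # subtract, clamped at 0
    return old

mem = {0x100: 3}
assert atomic_inc_u32(mem, 0x100, 3) == 3 and mem[0x100] == 0

+ +GLOBALOp_PCODE = { + GLOBALOp.GLOBAL_LOAD_U8: "VDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + GLOBALOp.GLOBAL_LOAD_I8: "VDATA.i32 = 32'I(signext(MEM[ADDR].i8))", + GLOBALOp.GLOBAL_LOAD_U16: "VDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + GLOBALOp.GLOBAL_LOAD_I16: "VDATA.i32 = 32'I(signext(MEM[ADDR].i16))", + GLOBALOp.GLOBAL_LOAD_B32: 'VDATA[31 : 0] = MEM[ADDR].b32', + GLOBALOp.GLOBAL_LOAD_B64: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32', + GLOBALOp.GLOBAL_LOAD_B96: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32;\nVDATA[95 : 64] = MEM[ADDR + 8U].b32', + GLOBALOp.GLOBAL_LOAD_B128: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32;\nVDATA[95 : 64] = MEM[ADDR + 8U].b32;\nVDATA[127 : 96] = MEM[ADDR + 12U].b32', + GLOBALOp.GLOBAL_STORE_B8: 'MEM[ADDR].b8 = VDATA[7 : 0]', + GLOBALOp.GLOBAL_STORE_B16: 'MEM[ADDR].b16 = VDATA[15 : 0]', + GLOBALOp.GLOBAL_STORE_B32: 'MEM[ADDR].b32 = VDATA[31 : 0]', + GLOBALOp.GLOBAL_STORE_B64: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32]', + GLOBALOp.GLOBAL_STORE_B96: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64]', + GLOBALOp.GLOBAL_STORE_B128: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64];\nMEM[ADDR + 12U].b32 = VDATA[127 : 96]', + GLOBALOp.GLOBAL_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + GLOBALOp.GLOBAL_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", + GLOBALOp.GLOBAL_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;', + GLOBALOp.GLOBAL_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + GLOBALOp.GLOBAL_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", + GLOBALOp.GLOBAL_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;', + GLOBALOp.GLOBAL_STORE_D16_HI_B8: 'MEM[ADDR].b8 = VDATA[23 : 16]', + GLOBALOp.GLOBAL_STORE_D16_HI_B16: 'MEM[ADDR].b16 = VDATA[31 : 16]', + GLOBALOp.GLOBAL_LOAD_ADDTID_B32: "RETURN_DATA.u32 = MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32", + GLOBALOp.GLOBAL_STORE_ADDTID_B32: "MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32 = DATA.u32", + GLOBALOp.GLOBAL_ATOMIC_SWAP_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_B32: 'tmp = MEM[ADDR].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[ADDR].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SUB_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_CSUB_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", + GLOBALOp.GLOBAL_ATOMIC_MIN_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src < tmp ? 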
src : tmp;\nRETURN_DATA.i32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MIN_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MAX_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MAX_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_AND_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_OR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_XOR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_INC_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_DEC_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SWAP_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_B64: 'tmp = MEM[ADDR].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[ADDR].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SUB_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MIN_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MIN_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MAX_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MAX_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_AND_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_OR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_XOR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_INC_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_DEC_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA[31 : 0].f32;\ncmp = DATA[63 : 32].f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MIN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MAX_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? 
src : tmp;\nRETURN_DATA.f32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', +} + +SCRATCHOp_PCODE = { + SCRATCHOp.SCRATCH_LOAD_U8: "VDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + SCRATCHOp.SCRATCH_LOAD_I8: "VDATA.i32 = 32'I(signext(MEM[ADDR].i8))", + SCRATCHOp.SCRATCH_LOAD_U16: "VDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + SCRATCHOp.SCRATCH_LOAD_I16: "VDATA.i32 = 32'I(signext(MEM[ADDR].i16))", + SCRATCHOp.SCRATCH_LOAD_B32: 'VDATA[31 : 0] = MEM[ADDR].b32', + SCRATCHOp.SCRATCH_LOAD_B64: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32', + SCRATCHOp.SCRATCH_LOAD_B96: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32;\nVDATA[95 : 64] = MEM[ADDR + 8U].b32', + SCRATCHOp.SCRATCH_LOAD_B128: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32;\nVDATA[95 : 64] = MEM[ADDR + 8U].b32;\nVDATA[127 : 96] = MEM[ADDR + 12U].b32', + SCRATCHOp.SCRATCH_STORE_B8: 'MEM[ADDR].b8 = VDATA[7 : 0]', + SCRATCHOp.SCRATCH_STORE_B16: 'MEM[ADDR].b16 = VDATA[15 : 0]', + SCRATCHOp.SCRATCH_STORE_B32: 'MEM[ADDR].b32 = VDATA[31 : 0]', + SCRATCHOp.SCRATCH_STORE_B64: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32]', + SCRATCHOp.SCRATCH_STORE_B96: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64]', + SCRATCHOp.SCRATCH_STORE_B128: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64];\nMEM[ADDR + 12U].b32 = VDATA[127 : 96]', + SCRATCHOp.SCRATCH_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + SCRATCHOp.SCRATCH_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", + SCRATCHOp.SCRATCH_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;', + SCRATCHOp.SCRATCH_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + SCRATCHOp.SCRATCH_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", + SCRATCHOp.SCRATCH_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;', + SCRATCHOp.SCRATCH_STORE_D16_HI_B8: 'MEM[ADDR].b8 = VDATA[23 : 16]', + SCRATCHOp.SCRATCH_STORE_D16_HI_B16: 'MEM[ADDR].b16 = VDATA[31 : 16]', +} + +PSEUDOCODE_STRINGS = { + SOP1Op: SOP1Op_PCODE, + SOP2Op: SOP2Op_PCODE, + SOPCOp: SOPCOp_PCODE, + SOPKOp: SOPKOp_PCODE, + SOPPOp: SOPPOp_PCODE, + SMEMOp: SMEMOp_PCODE, + VOP1Op: VOP1Op_PCODE, + VOP2Op: VOP2Op_PCODE, + VOP3Op: VOP3Op_PCODE, + VOP3SDOp: VOP3SDOp_PCODE, + VOP3POp: VOP3POp_PCODE, + VOPCOp: VOPCOp_PCODE, + DSOp: DSOp_PCODE, + FLATOp: FLATOp_PCODE, + GLOBALOp: GLOBALOp_PCODE, + SCRATCHOp: SCRATCHOp_PCODE, +} \ No newline at end of file
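With the PSEUDOCODE_STRINGS table above in place, the pregenerated gen_pcode.py files (the next deletions) are no longer shipped; the per-op functions are presumably compiled from these strings at run time instead. A hedged sketch of what such a compile-and-cache step could look like; the real translation lives in extra/assembly/amd/pcode.py, and pcode_to_python here is a hypothetical stand-in, not the repo's actual entry point:

import textwrap
from functools import lru_cache

@lru_cache(maxsize=None)
def jit_pcode(op, pcode_str):
    # pcode_to_python: hypothetical pcode-text -> Python-source translator
    body = pcode_to_python(pcode_str)
    src = "def _impl(S0, S1, S2, D0):\n" + textwrap.indent(body, "  ")
    ns = {}
    exec(compile(src, f"<pcode:{op}>", "exec"), ns)
    return ns["_impl"]   # compiled once per op, then served from the cache

diff --git a/extra/assembly/amd/autogen/rdna4/gen_pcode.py b/extra/assembly/amd/autogen/rdna4/gen_pcode.py deleted file mode 100644 index dd2c782347..0000000000 --- a/extra/assembly/amd/autogen/rdna4/gen_pcode.py +++ /dev/null @@ -1,9452 +0,0 @@ -# autogenerated by pdf.py - do not edit -# to regenerate: python -m extra.assembly.amd.pdf --arch rdna4 -# ruff: noqa: E501 -# mypy: ignore-errors -from extra.assembly.amd.autogen.rdna4.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp -from extra.assembly.amd.pcode import ABSDIFF, BYTE_PERMUTE, DENORM, F, INF, MAX_FLOAT_F32, OVERFLOW_F32, OVERFLOW_F64, PI, ROUND_MODE, Reg, SAT8, SliceProxy, TWO_OVER_PI_1201, UNDERFLOW_F32, UNDERFLOW_F64, WAVE32, WAVE64, _pack, _pack32, bf16_to_f32, cos, cvtToQuietNAN, exponent, f16_to_f32, f16_to_i16, f16_to_snorm, f16_to_u16, f16_to_unorm, 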
f32_to_f16, f32_to_f64, f32_to_i32, f32_to_snorm, f32_to_u32, f32_to_u8, f32_to_unorm, f64_to_f32, f64_to_i32, f64_to_u32, floor, fma, fract, i16_to_f16, i32_to_f32, i32_to_f64, i32_to_i16, isEven, isNAN, isQuietNAN, isSignalNAN, ldexp, log2, mantissa, pow, s_ff1_i32_b32, s_ff1_i32_b64, sign, signext, signext_from_bit, sin, sqrt, trunc, u16_to_f16, u32_to_f32, u32_to_f64, u32_to_u16, u4_to_u32, u8_to_u32, v_cvt_i16_f32, v_cvt_u16_f32, v_max3_i16, v_max3_i32, v_max3_u16, v_max3_u32, v_max_i16, v_max_i32, v_max_u16, v_max_u32, v_min_i16, v_min_i32, v_min_u16, v_min_u32, v_msad_u8, v_sad_u8 - -def _SOP1Op_S_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _SOP1Op_S_MOV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.b64 = S0.b64 - return {'D0': D0._val} - -def _SOP1Op_S_CMOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - if SCC: - D0.b32 = S0.b32 - return {'D0': D0._val} - -def _SOP1Op_S_CMOV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - if SCC: - D0.b64 = S0.b64 - return {'D0': D0._val} - -def _SOP1Op_S_BREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[31 : 0] = S0.u32[0 : 31] - return {'D0': D0._val} - -def _SOP1Op_S_BREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[63 : 0] = S0.u64[0 : 63] - return {'D0': D0._val} - -def _SOP1Op_S_CTZ_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(31)+1): - if S0.u32[i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CTZ_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(63)+1): - if S0.u64[i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CLZ_I32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(31)+1): - if S0.u32[31 - i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CLZ_I32_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(0, int(63)+1): - if S0.u64[63 - i] == 1: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_CLS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(1, int(31)+1): - if S0.u32[31 - i] != S0.u32[31]: - tmp = Reg(i); break - D0.i32 = tmp - return 
{'D0': D0._val} - -def _SOP1Op_S_CLS_I32_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(-1) - for i in range(1, int(63)+1): - if S0.u64[63 - i] != S0.u64[63]: - tmp = Reg(i); break - D0.i32 = tmp - return {'D0': D0._val} - -def _SOP1Op_S_SEXT_I32_I8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i8)) - return {'D0': D0._val} - -def _SOP1Op_S_SEXT_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i16)) - return {'D0': D0._val} - -def _SOP1Op_S_BITSET0_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[S0.u32[4 : 0]] = 0 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET0_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[S0.u32[5 : 0]] = 0 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32[S0.u32[4 : 0]] = 1 - return {'D0': D0._val} - -def _SOP1Op_S_BITSET1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64[S0.u32[5 : 0]] = 1 - return {'D0': D0._val} - -def _SOP1Op_S_BITREPLICATE_B64_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S0.u32) - for i in range(0, int(31)+1): - D0.u64[i * 2] = tmp[i] - D0.u64[i * 2 + 1] = tmp[i] - return {'D0': D0._val} - -def _SOP1Op_S_ABS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = ((-S0.i32) if (S0.i32 < 0) else (S0.i32)) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT0_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp += ((1) if (S0.u32[i] == 0) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT0_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp += ((1) if (S0.u64[i] == 0) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT1_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp += ((1) if (S0.u32[i] == 1) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_BCNT1_I32_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, 
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp += ((1) if (S0.u64[i] == 1) else (0)) - D0.i32 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_QUADMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(7)+1): - tmp[i] = S0.u32[(i * 4) + (4) - 1 : (i * 4)] != 0 - D0.u32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_QUADMASK_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(15)+1): - tmp[i] = S0.u64[(i * 4) + (4) - 1 : (i * 4)] != 0 - D0.u64 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_WQM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(31)+1): - tmp[i] = S0.u32[(i & 60) + (4) - 1 : (i & 60)] != 0 - D0.u32 = tmp - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_WQM_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(0) - for i in range(0, int(63)+1): - tmp[i] = S0.u64[(i & 60) + (4) - 1 : (i & 60)] != 0 - D0.u64 = tmp - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_NOT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~S0.u32 - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_NOT_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~S0.u64 - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP1Op_S_AND_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 & EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 | EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_SAVEEXEC_B64(s0, 
s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XOR_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 ^ EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 ^ EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NAND_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = ~(S0.u32 & EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NAND_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NOR_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = ~(S0.u32 | EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_NOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XNOR_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = ~(S0.u32 ^ EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_XNOR_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = ~(S0.u64 ^ EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT0_SAVEEXEC_B32(s0, s1, s2, d0, 
scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (~S0.u32 & EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT0_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (~S0.u64 & EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_NOT0_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (~S0.u32 | EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_NOT0_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (~S0.u64 | EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT1_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 & ~EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT1_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 & ~EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_NOT1_SAVEEXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u32) - EXEC.u32 = (S0.u32 | ~EXEC.u32) - D0.u32 = saveexec.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_OR_NOT1_SAVEEXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - saveexec = Reg(EXEC.u64) - EXEC.u64 = (S0.u64 | ~EXEC.u64) - D0.u64 = saveexec.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT0_WREXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u32 = (~S0.u32 & EXEC.u32) - D0.u32 = EXEC.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT0_WREXEC_B64(s0, s1, s2, d0, scc, vcc, 
laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64 = (~S0.u64 & EXEC.u64) - D0.u64 = EXEC.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT1_WREXEC_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u32 = (S0.u32 & ~EXEC.u32) - D0.u32 = EXEC.u32 - SCC = Reg(EXEC.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_AND_NOT1_WREXEC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64 = (S0.u64 & ~EXEC.u64) - D0.u64 = EXEC.u64 - SCC = Reg(EXEC.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val, 'EXEC': EXEC._val} - -def _SOP1Op_S_GETPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.i64 = PC + 4 - return {'D0': D0._val} - -def _SOP1Op_S_SETPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - PC = Reg(S0.i64) - return {'PC': PC._val} - -def _SOP1Op_S_SWAPPC_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - jump_addr = S0.i64 - D0.i64 = PC + 4 - PC = Reg(jump_addr.i64) - return {'D0': D0._val, 'PC': PC._val} -
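S_GETPC_B64/S_SETPC_B64/S_SWAPPC_B64 above are the call/return linkage: the saved value is PC + 4, the byte address of the instruction after the 4-byte SOP1. A tiny standalone model of a call and return under that convention (illustrative addresses only):

def s_swappc(pc, target):
    # save the return address and jump in one step, as in the pcode above
    return pc + 4, target

ret_addr, pc = s_swappc(pc=0x1000, target=0x2000)   # call
assert ret_addr == 0x1004
pc = ret_addr                                       # S_SETPC_B64 to return

-def _SOP1Op_S_RFE_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - PC = Reg(S0.i64) - return {'PC': PC._val} - -def _SOP1Op_S_SENDMSG_RTN_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - VCC=Reg(vcc) - # --- compiled pseudocode --- - return {} - -def _SOP1Op_S_SENDMSG_RTN_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - VCC=Reg(vcc) - # --- compiled pseudocode --- - return {} - -def _SOP1Op_S_SLEEP_VAR(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0) - # --- compiled pseudocode --- - return {} - -def _SOP1Op_S_CEIL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 > 0.0) and (S0.f32 != D0.f32)): - D0.f32 += 1.0 - return {'D0': D0._val} - -def _SOP1Op_S_FLOOR_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - if ((S0.f32 < 0.0) and (S0.f32 != D0.f32)): - D0.f32 += -1.0 - return {'D0': D0._val} - -def _SOP1Op_S_TRUNC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = trunc(S0.f32) - return {'D0': D0._val} - -def _SOP1Op_S_RNDNE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 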
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = floor(S0.f32 + 0.5) - if (isEven(F(floor(S0.f32))) and (fract(S0.f32) == 0.5)): - D0.f32 -= 1.0 - return {'D0': D0._val} - -def _SOP1Op_S_CVT_F32_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = i32_to_f32(S0.i32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_F32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = u32_to_f32(S0.u32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = f32_to_i32(S0.f32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_U32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = f32_to_u32(S0.f32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = f32_to_f16(S0.f32) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f16_to_f32(S0.f16) - return {'D0': D0._val} - -def _SOP1Op_S_CVT_HI_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = f16_to_f32(S0[31 : 16].f16) - return {'D0': D0._val} - -def _SOP1Op_S_CEIL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 > 0.0) and (S0.f16 != D0.f16)): - D0.f16 += 1.0 - return {'D0': D0._val} - -def _SOP1Op_S_FLOOR_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - if ((S0.f16 < 0.0) and (S0.f16 != D0.f16)): - D0.f16 += -1.0 - return {'D0': D0._val} - -def _SOP1Op_S_TRUNC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = trunc(S0.f16) - return {'D0': D0._val} - -def _SOP1Op_S_RNDNE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = floor(S0.f16 + 0.5) - if (isEven(F(floor(S0.f16))) and (fract(S0.f16) == 0.5)): - D0.f16 -= 1.0 - return {'D0': D0._val} - -SOP1Op_FUNCTIONS = { - SOP1Op.S_MOV_B32: _SOP1Op_S_MOV_B32, - SOP1Op.S_MOV_B64: _SOP1Op_S_MOV_B64, - SOP1Op.S_CMOV_B32: _SOP1Op_S_CMOV_B32, - SOP1Op.S_CMOV_B64: _SOP1Op_S_CMOV_B64, - SOP1Op.S_BREV_B32: _SOP1Op_S_BREV_B32, - SOP1Op.S_BREV_B64: _SOP1Op_S_BREV_B64, - SOP1Op.S_CTZ_I32_B32: _SOP1Op_S_CTZ_I32_B32, - SOP1Op.S_CTZ_I32_B64: _SOP1Op_S_CTZ_I32_B64, - SOP1Op.S_CLZ_I32_U32: _SOP1Op_S_CLZ_I32_U32, - SOP1Op.S_CLZ_I32_U64: _SOP1Op_S_CLZ_I32_U64, - SOP1Op.S_CLS_I32: _SOP1Op_S_CLS_I32, - SOP1Op.S_CLS_I32_I64: _SOP1Op_S_CLS_I32_I64, - 
SOP1Op.S_SEXT_I32_I8: _SOP1Op_S_SEXT_I32_I8, - SOP1Op.S_SEXT_I32_I16: _SOP1Op_S_SEXT_I32_I16, - SOP1Op.S_BITSET0_B32: _SOP1Op_S_BITSET0_B32, - SOP1Op.S_BITSET0_B64: _SOP1Op_S_BITSET0_B64, - SOP1Op.S_BITSET1_B32: _SOP1Op_S_BITSET1_B32, - SOP1Op.S_BITSET1_B64: _SOP1Op_S_BITSET1_B64, - SOP1Op.S_BITREPLICATE_B64_B32: _SOP1Op_S_BITREPLICATE_B64_B32, - SOP1Op.S_ABS_I32: _SOP1Op_S_ABS_I32, - SOP1Op.S_BCNT0_I32_B32: _SOP1Op_S_BCNT0_I32_B32, - SOP1Op.S_BCNT0_I32_B64: _SOP1Op_S_BCNT0_I32_B64, - SOP1Op.S_BCNT1_I32_B32: _SOP1Op_S_BCNT1_I32_B32, - SOP1Op.S_BCNT1_I32_B64: _SOP1Op_S_BCNT1_I32_B64, - SOP1Op.S_QUADMASK_B32: _SOP1Op_S_QUADMASK_B32, - SOP1Op.S_QUADMASK_B64: _SOP1Op_S_QUADMASK_B64, - SOP1Op.S_WQM_B32: _SOP1Op_S_WQM_B32, - SOP1Op.S_WQM_B64: _SOP1Op_S_WQM_B64, - SOP1Op.S_NOT_B32: _SOP1Op_S_NOT_B32, - SOP1Op.S_NOT_B64: _SOP1Op_S_NOT_B64, - SOP1Op.S_AND_SAVEEXEC_B32: _SOP1Op_S_AND_SAVEEXEC_B32, - SOP1Op.S_AND_SAVEEXEC_B64: _SOP1Op_S_AND_SAVEEXEC_B64, - SOP1Op.S_OR_SAVEEXEC_B32: _SOP1Op_S_OR_SAVEEXEC_B32, - SOP1Op.S_OR_SAVEEXEC_B64: _SOP1Op_S_OR_SAVEEXEC_B64, - SOP1Op.S_XOR_SAVEEXEC_B32: _SOP1Op_S_XOR_SAVEEXEC_B32, - SOP1Op.S_XOR_SAVEEXEC_B64: _SOP1Op_S_XOR_SAVEEXEC_B64, - SOP1Op.S_NAND_SAVEEXEC_B32: _SOP1Op_S_NAND_SAVEEXEC_B32, - SOP1Op.S_NAND_SAVEEXEC_B64: _SOP1Op_S_NAND_SAVEEXEC_B64, - SOP1Op.S_NOR_SAVEEXEC_B32: _SOP1Op_S_NOR_SAVEEXEC_B32, - SOP1Op.S_NOR_SAVEEXEC_B64: _SOP1Op_S_NOR_SAVEEXEC_B64, - SOP1Op.S_XNOR_SAVEEXEC_B32: _SOP1Op_S_XNOR_SAVEEXEC_B32, - SOP1Op.S_XNOR_SAVEEXEC_B64: _SOP1Op_S_XNOR_SAVEEXEC_B64, - SOP1Op.S_AND_NOT0_SAVEEXEC_B32: _SOP1Op_S_AND_NOT0_SAVEEXEC_B32, - SOP1Op.S_AND_NOT0_SAVEEXEC_B64: _SOP1Op_S_AND_NOT0_SAVEEXEC_B64, - SOP1Op.S_OR_NOT0_SAVEEXEC_B32: _SOP1Op_S_OR_NOT0_SAVEEXEC_B32, - SOP1Op.S_OR_NOT0_SAVEEXEC_B64: _SOP1Op_S_OR_NOT0_SAVEEXEC_B64, - SOP1Op.S_AND_NOT1_SAVEEXEC_B32: _SOP1Op_S_AND_NOT1_SAVEEXEC_B32, - SOP1Op.S_AND_NOT1_SAVEEXEC_B64: _SOP1Op_S_AND_NOT1_SAVEEXEC_B64, - SOP1Op.S_OR_NOT1_SAVEEXEC_B32: _SOP1Op_S_OR_NOT1_SAVEEXEC_B32, - SOP1Op.S_OR_NOT1_SAVEEXEC_B64: _SOP1Op_S_OR_NOT1_SAVEEXEC_B64, - SOP1Op.S_AND_NOT0_WREXEC_B32: _SOP1Op_S_AND_NOT0_WREXEC_B32, - SOP1Op.S_AND_NOT0_WREXEC_B64: _SOP1Op_S_AND_NOT0_WREXEC_B64, - SOP1Op.S_AND_NOT1_WREXEC_B32: _SOP1Op_S_AND_NOT1_WREXEC_B32, - SOP1Op.S_AND_NOT1_WREXEC_B64: _SOP1Op_S_AND_NOT1_WREXEC_B64, - SOP1Op.S_GETPC_B64: _SOP1Op_S_GETPC_B64, - SOP1Op.S_SETPC_B64: _SOP1Op_S_SETPC_B64, - SOP1Op.S_SWAPPC_B64: _SOP1Op_S_SWAPPC_B64, - SOP1Op.S_RFE_B64: _SOP1Op_S_RFE_B64, - SOP1Op.S_SENDMSG_RTN_B32: _SOP1Op_S_SENDMSG_RTN_B32, - SOP1Op.S_SENDMSG_RTN_B64: _SOP1Op_S_SENDMSG_RTN_B64, - SOP1Op.S_SLEEP_VAR: _SOP1Op_S_SLEEP_VAR, - SOP1Op.S_CEIL_F32: _SOP1Op_S_CEIL_F32, - SOP1Op.S_FLOOR_F32: _SOP1Op_S_FLOOR_F32, - SOP1Op.S_TRUNC_F32: _SOP1Op_S_TRUNC_F32, - SOP1Op.S_RNDNE_F32: _SOP1Op_S_RNDNE_F32, - SOP1Op.S_CVT_F32_I32: _SOP1Op_S_CVT_F32_I32, - SOP1Op.S_CVT_F32_U32: _SOP1Op_S_CVT_F32_U32, - SOP1Op.S_CVT_I32_F32: _SOP1Op_S_CVT_I32_F32, - SOP1Op.S_CVT_U32_F32: _SOP1Op_S_CVT_U32_F32, - SOP1Op.S_CVT_F16_F32: _SOP1Op_S_CVT_F16_F32, - SOP1Op.S_CVT_F32_F16: _SOP1Op_S_CVT_F32_F16, - SOP1Op.S_CVT_HI_F32_F16: _SOP1Op_S_CVT_HI_F32_F16, - SOP1Op.S_CEIL_F16: _SOP1Op_S_CEIL_F16, - SOP1Op.S_FLOOR_F16: _SOP1Op_S_FLOOR_F16, - SOP1Op.S_TRUNC_F16: _SOP1Op_S_TRUNC_F16, - SOP1Op.S_RNDNE_F16: _SOP1Op_S_RNDNE_F16, -} - -def _SOP2Op_S_ADD_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32)) - 
SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} -
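S_ADD_CO_U32 above keeps the exact 33-bit sum in tmp, so the carry-out lands in SCC as a simple threshold test against 0x100000000. The same check in plain Python (Python ints don't wrap, matching the Reg arithmetic here):

def s_add_co_u32(a, b):
    tmp = a + b                          # exact sum, no 32-bit wraparound
    scc = 1 if tmp >= 0x100000000 else 0 # carry-out of bit 31
    return tmp & 0xFFFFFFFF, scc

assert s_add_co_u32(0xFFFFFFFF, 1) == (0, 1)
assert s_add_co_u32(2, 3) == (5, 0)

-def _SOP2Op_S_SUB_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32) - SCC = Reg(((1) if (S1.u32 > S0.u32) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ADD_CO_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.i32 + S1.i32) - SCC = Reg(((S0.u32[31] == S1.u32[31]) and (S0.u32[31] != tmp.u32[31]))) - D0.i32 = tmp.i32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_SUB_CO_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.i32 - S1.i32) - SCC = Reg(((S0.u32[31] != S1.u32[31]) and (S0.u32[31] != tmp.u32[31]))) - D0.i32 = tmp.i32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ADD_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32) + SCC.u64) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_SUB_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32 - SCC.u32) - SCC = Reg(((1) if ((S1.u32) + SCC.u64 > (S0.u32)) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ABSDIFF_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = S0.i32 - S1.i32 - if D0.i32 < 0: - D0.i32 = -D0.i32 - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 << S1[4 : 0].u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 << S1[5 : 0].u32) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 >> S1[4 : 0].u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 >> S1[5 : 0].u32) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ASHR_I32(s0, s1, s2, d0, scc, vcc, laneId, 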
exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i32 = (signext(S0.i32) >> S1[4 : 0].u32) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_ASHR_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32) - SCC = Reg(D0.i64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL1_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 1) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL2_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 2) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL3_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 3) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_LSHL4_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - tmp = Reg(((S0.u32) << 4) + (S1.u32)) - SCC = Reg(((1) if (tmp >= 0x100000000) else (0))) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 < S1.i32) - D0.i32 = ((S0.i32) if (SCC) else (S1.i32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 < S1.u32) - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.i32 >= S1.i32) - D0.i32 = ((S0.i32) if (SCC) else (S1.i32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - SCC = Reg(S0.u32 >= S1.u32) - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & S1.u32) - SCC = Reg(D0.u32 != 0) - return 
{'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 & S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 | S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 ^ S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NAND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 & S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NAND_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 & S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 | S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_NOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 | S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 ^ S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_XNOR_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ~(S0.u64 ^ S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_NOT1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & 
~S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_AND_NOT1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 & ~S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_NOT1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | ~S1.u32) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_OR_NOT1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = (S0.u64 | ~S1.u64) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - SCC = Reg(D0.u32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc); tmp=Reg(0) - # --- compiled pseudocode --- - tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - D0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32) - SCC = Reg(D0.i32 != 0) - return {'D0': D0._val, 'SCC': SCC._val} -
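S_BFE_I32 above extracts an S1[22:16]-bit-wide field starting at bit S1[4:0] and then sign-extends it from the field's own top bit (that is what signext_from_bit does). A pure-Python equivalent for the 32-bit case:

def s_bfe_i32(val, offset, width):
    # take `width` bits of `val` starting at `offset`
    field = (val >> offset) & ((1 << width) - 1)
    if width and field & (1 << (width - 1)):    # field MSB set -> negative
        field -= 1 << width
    return field

assert s_bfe_i32(0xF0, 4, 4) == -1   # extracts 0b1111, sign-extended
assert s_bfe_i32(0x70, 4, 4) == 7

-def _SOP2Op_S_BFE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - SCC = Reg(D0.u64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc); tmp=Reg(0) - # --- compiled pseudocode --- - tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1 << S1[22 : 16].u32) - 1)) - D0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32) - SCC = Reg(D0.i64 != 0) - return {'D0': D0._val, 'SCC': SCC._val} - -def _SOP2Op_S_BFM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((1 << S0[4 : 0].u32) - 1) << S1[4 : 0].u32) - return {'D0': D0._val} - -def _SOP2Op_S_BFM_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64 = (((1 << S0[5 : 0].u32) - 1) << S1[5 : 0].u32) - return {'D0': D0._val} - -def _SOP2Op_S_MUL_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = S0.i32 * S1.i32 - return {'D0': D0._val} - -def _SOP2Op_S_MUL_HI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u32) * (S1.u32)) >> 32) - return {'D0': D0._val} - -def _SOP2Op_S_MUL_HI_I32(s0, s1, s2, 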
d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i32) * (S1.i32)) >> 32) - return {'D0': D0._val} - -def _SOP2Op_S_CSELECT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (SCC) else (S1.u32)) - return {'D0': D0._val} - -def _SOP2Op_S_CSELECT_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SCC=Reg(scc) - # --- compiled pseudocode --- - D0.u64 = ((S0.u64) if (SCC) else (S1.u64)) - return {'D0': D0._val} - -def _SOP2Op_S_PACK_LL_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[15 : 0].u16, S0[15 : 0].u16)) - return {} - -def _SOP2Op_S_PACK_LH_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[31 : 16].u16, S0[15 : 0].u16)) - return {} - -def _SOP2Op_S_PACK_HH_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[31 : 16].u16, S0[31 : 16].u16)) - return {} - -def _SOP2Op_S_PACK_HL_B32_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0 = Reg(_pack(S1[15 : 0].u16, S0[31 : 16].u16)) - return {} - -def _SOP2Op_S_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + S1.f32 - return {'D0': D0._val} - -def _SOP2Op_S_SUB_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 - S1.f32 - return {'D0': D0._val} - -def _SOP2Op_S_MIN_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))): - TRAPSTS.INVALID = 1 - if (isNAN(F(S0.f32)) and isNAN(F(S1.f32))): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif ((S0.f32 < S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and sign(S0.f32) and not sign(S1.f32))): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _SOP2Op_S_MAX_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))): - TRAPSTS.INVALID = 1 - if (isNAN(F(S0.f32)) and isNAN(F(S1.f32))): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif ((S0.f32 > S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and not sign(S0.f32) and sign(S1.f32))): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def 
-def _SOP2Op_S_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = S0.f32 * S1.f32
- return {'D0': D0._val}
-
-def _SOP2Op_S_FMAAK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal)
- # --- compiled pseudocode ---
- D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)
- return {'D0': D0._val}
-
-def _SOP2Op_S_FMAMK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal)
- # --- compiled pseudocode ---
- D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)
- return {'D0': D0._val}
-
-def _SOP2Op_S_FMAC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = fma(S0.f32, S1.f32, D0.f32)
- return {'D0': D0._val}
-
-def _SOP2Op_S_CVT_PK_RTZ_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); tmp=Reg(0)
- # --- compiled pseudocode ---
- prev_mode = ROUND_MODE
- tmp[15 : 0].f16 = f32_to_f16(S0.f32)
- tmp[31 : 16].f16 = f32_to_f16(S1.f32)
- return {}
-
-def _SOP2Op_S_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = S0.f16 + S1.f16
- return {'D0': D0._val}
-
-def _SOP2Op_S_SUB_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = S0.f16 - S1.f16
- return {'D0': D0._val}
-
-def _SOP2Op_S_MIN_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))):
- TRAPSTS.INVALID = 1
- if (isNAN(F(S0.f16)) and isNAN(F(S1.f16))):
- D0.f16 = F(cvtToQuietNAN(F(S0.f16)))
- elif isNAN(F(S0.f16)):
- D0.f16 = S1.f16
- elif isNAN(F(S1.f16)):
- D0.f16 = S0.f16
- elif ((S0.f16 < S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and sign(S0.f16) and not sign(S1.f16))):
- D0.f16 = S0.f16
- else:
- D0.f16 = S1.f16
- return {'D0': D0._val}
-
-def _SOP2Op_S_MAX_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))):
- TRAPSTS.INVALID = 1
- if (isNAN(F(S0.f16)) and isNAN(F(S1.f16))):
- D0.f16 = F(cvtToQuietNAN(F(S0.f16)))
- elif isNAN(F(S0.f16)):
- D0.f16 = S1.f16
- elif isNAN(F(S1.f16)):
- D0.f16 = S0.f16
- elif ((S0.f16 > S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and not sign(S0.f16) and sign(S1.f16))):
- D0.f16 = S0.f16
- else:
- D0.f16 = S1.f16
- return {'D0': D0._val}
-
-def _SOP2Op_S_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = S0.f16 * S1.f16
- return {'D0': D0._val}
-
-def _SOP2Op_S_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = fma(S0.f16, S1.f16, D0.f16)
- return {'D0': D0._val}
-
-def _SOP2Op_S_MINIMUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))):
- TRAPSTS.INVALID = 1
- if isSignalNAN(F(S0.f32)):
- D0.f32 = F(cvtToQuietNAN(F(S0.f32)))
- elif isSignalNAN(F(S1.f32)):
- D0.f32 = F(cvtToQuietNAN(F(S1.f32)))
- elif isQuietNAN(F(S0.f32)):
- D0.f32 = S0.f32
- elif isQuietNAN(F(S1.f32)):
- D0.f32 = S1.f32
- elif ((S0.f32 < S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and sign(S0.f32) and not sign(S1.f32))):
- D0.f32 = S0.f32
- else:
- D0.f32 = S1.f32
- return {'D0': D0._val}
-
-def _SOP2Op_S_MAXIMUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))):
- TRAPSTS.INVALID = 1
- if isSignalNAN(F(S0.f32)):
- D0.f32 = F(cvtToQuietNAN(F(S0.f32)))
- elif isSignalNAN(F(S1.f32)):
- D0.f32 = F(cvtToQuietNAN(F(S1.f32)))
- elif isQuietNAN(F(S0.f32)):
- D0.f32 = S0.f32
- elif isQuietNAN(F(S1.f32)):
- D0.f32 = S1.f32
- elif ((S0.f32 > S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and not sign(S0.f32) and sign(S1.f32))):
- D0.f32 = S0.f32
- else:
- D0.f32 = S1.f32
- return {'D0': D0._val}
-
-def _SOP2Op_S_MINIMUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))):
- TRAPSTS.INVALID = 1
- if isSignalNAN(F(S0.f16)):
- D0.f16 = F(cvtToQuietNAN(F(S0.f16)))
- elif isSignalNAN(F(S1.f16)):
- D0.f16 = F(cvtToQuietNAN(F(S1.f16)))
- elif isQuietNAN(F(S0.f16)):
- D0.f16 = S0.f16
- elif isQuietNAN(F(S1.f16)):
- D0.f16 = S1.f16
- elif ((S0.f16 < S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and sign(S0.f16) and not sign(S1.f16))):
- D0.f16 = S0.f16
- else:
- D0.f16 = S1.f16
- return {'D0': D0._val}
-
-def _SOP2Op_S_MAXIMUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))):
- TRAPSTS.INVALID = 1
- if isSignalNAN(F(S0.f16)):
- D0.f16 = F(cvtToQuietNAN(F(S0.f16)))
- elif isSignalNAN(F(S1.f16)):
- D0.f16 = F(cvtToQuietNAN(F(S1.f16)))
- elif isQuietNAN(F(S0.f16)):
- D0.f16 = S0.f16
- elif isQuietNAN(F(S1.f16)):
- D0.f16 = S1.f16
- elif ((S0.f16 > S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and not sign(S0.f16) and sign(S1.f16))):
- D0.f16 = S0.f16
- else:
- D0.f16 = S1.f16
- return {'D0': D0._val}
-
-def _SOP2Op_S_ADD_NC_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u64 = S0.u64 + S1.u64
- return {'D0': D0._val}
-
-def _SOP2Op_S_SUB_NC_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u64 = S0.u64 - S1.u64
- return {'D0': D0._val}
-
-def _SOP2Op_S_MUL_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u64 = S0.u64 * S1.u64
- return {'D0': D0._val}
-
-SOP2Op_FUNCTIONS = {
- SOP2Op.S_ADD_CO_U32: _SOP2Op_S_ADD_CO_U32,
- SOP2Op.S_SUB_CO_U32: _SOP2Op_S_SUB_CO_U32,
- SOP2Op.S_ADD_CO_I32: _SOP2Op_S_ADD_CO_I32,
- SOP2Op.S_SUB_CO_I32: _SOP2Op_S_SUB_CO_I32,
- SOP2Op.S_ADD_CO_CI_U32: _SOP2Op_S_ADD_CO_CI_U32,
- SOP2Op.S_SUB_CO_CI_U32: _SOP2Op_S_SUB_CO_CI_U32,
- SOP2Op.S_ABSDIFF_I32: _SOP2Op_S_ABSDIFF_I32,
- SOP2Op.S_LSHL_B32: _SOP2Op_S_LSHL_B32,
- SOP2Op.S_LSHL_B64: _SOP2Op_S_LSHL_B64,
- SOP2Op.S_LSHR_B32: _SOP2Op_S_LSHR_B32,
- SOP2Op.S_LSHR_B64: _SOP2Op_S_LSHR_B64,
- SOP2Op.S_ASHR_I32: _SOP2Op_S_ASHR_I32,
- SOP2Op.S_ASHR_I64: _SOP2Op_S_ASHR_I64,
- SOP2Op.S_LSHL1_ADD_U32: _SOP2Op_S_LSHL1_ADD_U32,
- SOP2Op.S_LSHL2_ADD_U32: _SOP2Op_S_LSHL2_ADD_U32,
- SOP2Op.S_LSHL3_ADD_U32: _SOP2Op_S_LSHL3_ADD_U32,
- SOP2Op.S_LSHL4_ADD_U32: _SOP2Op_S_LSHL4_ADD_U32,
- SOP2Op.S_MIN_I32: _SOP2Op_S_MIN_I32,
- SOP2Op.S_MIN_U32: _SOP2Op_S_MIN_U32,
- SOP2Op.S_MAX_I32: _SOP2Op_S_MAX_I32,
- SOP2Op.S_MAX_U32: _SOP2Op_S_MAX_U32,
- SOP2Op.S_AND_B32: _SOP2Op_S_AND_B32,
- SOP2Op.S_AND_B64: _SOP2Op_S_AND_B64,
- SOP2Op.S_OR_B32: _SOP2Op_S_OR_B32,
- SOP2Op.S_OR_B64: _SOP2Op_S_OR_B64,
- SOP2Op.S_XOR_B32: _SOP2Op_S_XOR_B32,
- SOP2Op.S_XOR_B64: _SOP2Op_S_XOR_B64,
- SOP2Op.S_NAND_B32: _SOP2Op_S_NAND_B32,
- SOP2Op.S_NAND_B64: _SOP2Op_S_NAND_B64,
- SOP2Op.S_NOR_B32: _SOP2Op_S_NOR_B32,
- SOP2Op.S_NOR_B64: _SOP2Op_S_NOR_B64,
- SOP2Op.S_XNOR_B32: _SOP2Op_S_XNOR_B32,
- SOP2Op.S_XNOR_B64: _SOP2Op_S_XNOR_B64,
- SOP2Op.S_AND_NOT1_B32: _SOP2Op_S_AND_NOT1_B32,
- SOP2Op.S_AND_NOT1_B64: _SOP2Op_S_AND_NOT1_B64,
- SOP2Op.S_OR_NOT1_B32: _SOP2Op_S_OR_NOT1_B32,
- SOP2Op.S_OR_NOT1_B64: _SOP2Op_S_OR_NOT1_B64,
- SOP2Op.S_BFE_U32: _SOP2Op_S_BFE_U32,
- SOP2Op.S_BFE_I32: _SOP2Op_S_BFE_I32,
- SOP2Op.S_BFE_U64: _SOP2Op_S_BFE_U64,
- SOP2Op.S_BFE_I64: _SOP2Op_S_BFE_I64,
- SOP2Op.S_BFM_B32: _SOP2Op_S_BFM_B32,
- SOP2Op.S_BFM_B64: _SOP2Op_S_BFM_B64,
- SOP2Op.S_MUL_I32: _SOP2Op_S_MUL_I32,
- SOP2Op.S_MUL_HI_U32: _SOP2Op_S_MUL_HI_U32,
- SOP2Op.S_MUL_HI_I32: _SOP2Op_S_MUL_HI_I32,
- SOP2Op.S_CSELECT_B32: _SOP2Op_S_CSELECT_B32,
- SOP2Op.S_CSELECT_B64: _SOP2Op_S_CSELECT_B64,
- SOP2Op.S_PACK_LL_B32_B16: _SOP2Op_S_PACK_LL_B32_B16,
- SOP2Op.S_PACK_LH_B32_B16: _SOP2Op_S_PACK_LH_B32_B16,
- SOP2Op.S_PACK_HH_B32_B16: _SOP2Op_S_PACK_HH_B32_B16,
- SOP2Op.S_PACK_HL_B32_B16: _SOP2Op_S_PACK_HL_B32_B16,
- SOP2Op.S_ADD_F32: _SOP2Op_S_ADD_F32,
- SOP2Op.S_SUB_F32: _SOP2Op_S_SUB_F32,
- SOP2Op.S_MIN_NUM_F32: _SOP2Op_S_MIN_NUM_F32,
- SOP2Op.S_MAX_NUM_F32: _SOP2Op_S_MAX_NUM_F32,
- SOP2Op.S_MUL_F32: _SOP2Op_S_MUL_F32,
- SOP2Op.S_FMAAK_F32: _SOP2Op_S_FMAAK_F32,
- SOP2Op.S_FMAMK_F32: _SOP2Op_S_FMAMK_F32,
- SOP2Op.S_FMAC_F32: _SOP2Op_S_FMAC_F32,
- SOP2Op.S_CVT_PK_RTZ_F16_F32: _SOP2Op_S_CVT_PK_RTZ_F16_F32,
- SOP2Op.S_ADD_F16: _SOP2Op_S_ADD_F16,
- SOP2Op.S_SUB_F16: _SOP2Op_S_SUB_F16,
- SOP2Op.S_MIN_NUM_F16: _SOP2Op_S_MIN_NUM_F16,
- SOP2Op.S_MAX_NUM_F16: _SOP2Op_S_MAX_NUM_F16,
- SOP2Op.S_MUL_F16: _SOP2Op_S_MUL_F16,
- SOP2Op.S_FMAC_F16: _SOP2Op_S_FMAC_F16,
- SOP2Op.S_MINIMUM_F32: _SOP2Op_S_MINIMUM_F32,
- SOP2Op.S_MAXIMUM_F32: _SOP2Op_S_MAXIMUM_F32,
- SOP2Op.S_MINIMUM_F16: _SOP2Op_S_MINIMUM_F16,
- SOP2Op.S_MAXIMUM_F16: _SOP2Op_S_MAXIMUM_F16,
- SOP2Op.S_ADD_NC_U64: _SOP2Op_S_ADD_NC_U64,
- SOP2Op.S_SUB_NC_U64: _SOP2Op_S_SUB_NC_U64,
- SOP2Op.S_MUL_U64: _SOP2Op_S_MUL_U64,
-}
-
-def _SOPCOp_S_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.i32 == S1.i32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LG_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.i32 != S1.i32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.i32 > S1.i32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.i32 >= S1.i32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.i32 < S1.i32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.i32 <= S1.i32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u32 == S1.u32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LG_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u32 != S1.u32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u32 > S1.u32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u32 >= S1.u32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u32 < S1.u32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u32 <= S1.u32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_BITCMP0_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u32[S1.u32[4 : 0]] == 0)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_BITCMP1_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u32[S1.u32[4 : 0]] == 1)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_BITCMP0_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u64[S1.u32[5 : 0]] == 0)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_BITCMP1_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u64[S1.u32[5 : 0]] == 1)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u64 == S1.u64)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LG_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.u64 != S1.u64)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f32 < S1.f32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f16 < S1.f16)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f32 == S1.f32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f16 == S1.f16)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f32 <= S1.f32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f16 <= S1.f16)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f32 > S1.f32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f16 > S1.f16)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f32 != S1.f32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f16 != S1.f16)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f32 >= S1.f32)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(S0.f16 >= S1.f16)
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg(( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg((isNAN(F(S0.f32)) or isNAN(F(S1.f32))))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg((isNAN(F(S0.f16)) or isNAN(F(S1.f16))))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f32 >= S1.f32))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f16 >= S1.f16))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f32 != S1.f32))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f16 != S1.f16))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f32 > S1.f32))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f16 > S1.f16))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f32 <= S1.f32))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f16 <= S1.f16))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f32 == S1.f32))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f16 == S1.f16))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f32 < S1.f32))
- return {'SCC': SCC._val}
-
-def _SOPCOp_S_CMP_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); S1=Reg(s1); SCC=Reg(scc)
- # --- compiled pseudocode ---
- SCC = Reg( not (S0.f16 < S1.f16))
- return {'SCC': SCC._val}
-
-SOPCOp_FUNCTIONS = {
- SOPCOp.S_CMP_EQ_I32: _SOPCOp_S_CMP_EQ_I32,
- SOPCOp.S_CMP_LG_I32: _SOPCOp_S_CMP_LG_I32,
- SOPCOp.S_CMP_GT_I32: _SOPCOp_S_CMP_GT_I32,
- SOPCOp.S_CMP_GE_I32: _SOPCOp_S_CMP_GE_I32,
- SOPCOp.S_CMP_LT_I32: _SOPCOp_S_CMP_LT_I32,
- SOPCOp.S_CMP_LE_I32: _SOPCOp_S_CMP_LE_I32,
- SOPCOp.S_CMP_EQ_U32: _SOPCOp_S_CMP_EQ_U32,
- SOPCOp.S_CMP_LG_U32: _SOPCOp_S_CMP_LG_U32,
- SOPCOp.S_CMP_GT_U32: _SOPCOp_S_CMP_GT_U32,
- SOPCOp.S_CMP_GE_U32: _SOPCOp_S_CMP_GE_U32,
- SOPCOp.S_CMP_LT_U32: _SOPCOp_S_CMP_LT_U32,
- SOPCOp.S_CMP_LE_U32: _SOPCOp_S_CMP_LE_U32,
- SOPCOp.S_BITCMP0_B32: _SOPCOp_S_BITCMP0_B32,
- SOPCOp.S_BITCMP1_B32: _SOPCOp_S_BITCMP1_B32,
- SOPCOp.S_BITCMP0_B64: _SOPCOp_S_BITCMP0_B64,
- SOPCOp.S_BITCMP1_B64: _SOPCOp_S_BITCMP1_B64,
- SOPCOp.S_CMP_EQ_U64: _SOPCOp_S_CMP_EQ_U64,
- SOPCOp.S_CMP_LG_U64: _SOPCOp_S_CMP_LG_U64,
- SOPCOp.S_CMP_LT_F32: _SOPCOp_S_CMP_LT_F32,
- SOPCOp.S_CMP_LT_F16: _SOPCOp_S_CMP_LT_F16,
- SOPCOp.S_CMP_EQ_F32: _SOPCOp_S_CMP_EQ_F32,
- SOPCOp.S_CMP_EQ_F16: _SOPCOp_S_CMP_EQ_F16,
- SOPCOp.S_CMP_LE_F32: _SOPCOp_S_CMP_LE_F32,
- SOPCOp.S_CMP_LE_F16: _SOPCOp_S_CMP_LE_F16,
- SOPCOp.S_CMP_GT_F32: _SOPCOp_S_CMP_GT_F32,
- SOPCOp.S_CMP_GT_F16: _SOPCOp_S_CMP_GT_F16,
- SOPCOp.S_CMP_LG_F32: _SOPCOp_S_CMP_LG_F32,
- SOPCOp.S_CMP_LG_F16: _SOPCOp_S_CMP_LG_F16,
- SOPCOp.S_CMP_GE_F32: _SOPCOp_S_CMP_GE_F32,
- SOPCOp.S_CMP_GE_F16: _SOPCOp_S_CMP_GE_F16,
- SOPCOp.S_CMP_O_F32: _SOPCOp_S_CMP_O_F32,
- SOPCOp.S_CMP_O_F16: _SOPCOp_S_CMP_O_F16,
- SOPCOp.S_CMP_U_F32: _SOPCOp_S_CMP_U_F32,
- SOPCOp.S_CMP_U_F16: _SOPCOp_S_CMP_U_F16,
- SOPCOp.S_CMP_NGE_F32: _SOPCOp_S_CMP_NGE_F32,
- SOPCOp.S_CMP_NGE_F16: _SOPCOp_S_CMP_NGE_F16,
- SOPCOp.S_CMP_NLG_F32: _SOPCOp_S_CMP_NLG_F32,
- SOPCOp.S_CMP_NLG_F16: _SOPCOp_S_CMP_NLG_F16,
- SOPCOp.S_CMP_NGT_F32: _SOPCOp_S_CMP_NGT_F32,
- SOPCOp.S_CMP_NGT_F16: _SOPCOp_S_CMP_NGT_F16,
- SOPCOp.S_CMP_NLE_F32: _SOPCOp_S_CMP_NLE_F32,
- SOPCOp.S_CMP_NLE_F16: _SOPCOp_S_CMP_NLE_F16,
- SOPCOp.S_CMP_NEQ_F32: _SOPCOp_S_CMP_NEQ_F32,
- SOPCOp.S_CMP_NEQ_F16: _SOPCOp_S_CMP_NEQ_F16,
- SOPCOp.S_CMP_NLT_F32: _SOPCOp_S_CMP_NLT_F32,
- SOPCOp.S_CMP_NLT_F16: _SOPCOp_S_CMP_NLT_F16,
-}
-
-def _SOPKOp_S_MOVK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = (signext(S0.i16))
- return {'D0': D0._val}
-
-def _SOPKOp_S_VERSION(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- # --- compiled pseudocode ---
- return {}
-
-def _SOPKOp_S_CMOVK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc)
- # --- compiled pseudocode ---
- if SCC:
- D0.i32 = (signext(S0.i16))
- return {'D0': D0._val}
-
-def _SOPKOp_S_ADDK_CO_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0); SCC=Reg(scc)
- # --- compiled pseudocode ---
- tmp = Reg(D0.i32)
- D0.i32 = D0.i32 + (signext(S0.i16))
- SCC = Reg(((tmp[31] == S0.i16[15]) and (tmp[31] != D0.i32[31])))
- return {'D0': D0._val, 'SCC': SCC._val}
-
-def _SOPKOp_S_MULK_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = D0.i32 * (signext(S0.i16))
- return {'D0': D0._val}
-
-def _SOPKOp_S_CALL_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- D0=Reg(d0); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal)
- # --- compiled pseudocode ---
- D0.i64 = PC + 4
- PC = Reg(PC + signext(SIMM16.i16 * 4) + 4)
- return {'D0': D0._val, 'PC': PC._val}
-
-SOPKOp_FUNCTIONS = {
- SOPKOp.S_MOVK_I32: _SOPKOp_S_MOVK_I32,
- SOPKOp.S_VERSION: _SOPKOp_S_VERSION,
- SOPKOp.S_CMOVK_I32: _SOPKOp_S_CMOVK_I32,
- SOPKOp.S_ADDK_CO_I32: _SOPKOp_S_ADDK_CO_I32,
- SOPKOp.S_MULK_I32: _SOPKOp_S_MULK_I32,
- SOPKOp.S_CALL_B64: _SOPKOp_S_CALL_B64,
-}
-
-def _SOPPOp_S_NOP(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- SIMM16=Reg(literal)
- # --- compiled pseudocode ---
- for i in range(0, int(SIMM16.u16[3 : 0].u32)+1):
- pass
- return {}
-
-def _SOPPOp_S_DELAY_ALU(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- EXEC=Reg(exec_mask)
- # --- compiled pseudocode ---
- return {}
-
-def _SOPPOp_S_TRAP(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- PC=Reg(pc) if pc is not None else None
- # --- compiled pseudocode ---
- return {'PC': PC._val}
-
-def _SOPPOp_S_BARRIER_WAIT(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- # --- compiled pseudocode ---
- return {}
-
-def _SOPPOp_S_BRANCH(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal)
- # --- compiled pseudocode ---
- PC = Reg(PC + signext(SIMM16.i16 * 4) + 4)
- return {'PC': PC._val}
-
-def _SOPPOp_S_CBRANCH_SCC0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- SCC=Reg(scc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal)
- # --- compiled pseudocode ---
- if SCC == 0:
- PC = Reg(PC + signext(SIMM16.i16 * 4) + 4)
- else:
- PC = Reg(PC + 4)
- return {'SCC': SCC._val, 'PC': PC._val}
-
-def _SOPPOp_S_CBRANCH_SCC1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- SCC=Reg(scc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal)
- # --- compiled pseudocode ---
- if SCC == 1:
- PC = Reg(PC + signext(SIMM16.i16 * 4) + 4)
- else:
- PC = Reg(PC + 4)
- return {'SCC': SCC._val, 'PC': PC._val}
-
-def _SOPPOp_S_CBRANCH_VCCZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); VCCZ=Reg(1 if VCC._val == 0 else 0)
- # --- compiled pseudocode ---
- if VCCZ.u1 == 1:
- PC = Reg(PC + signext(SIMM16.i16 * 4) + 4)
- else:
- PC = Reg(PC + 4)
- return {'PC': PC._val}
-
-def _SOPPOp_S_CBRANCH_VCCNZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); VCCZ=Reg(1 if VCC._val == 0 else 0)
- # --- compiled pseudocode ---
- if VCCZ.u1 == 0:
- PC = Reg(PC + signext(SIMM16.i16 * 4) + 4)
- else:
- PC = Reg(PC + 4)
- return {'PC': PC._val}
-
-def _SOPPOp_S_CBRANCH_EXECZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); EXECZ=Reg(1 if EXEC._val == 0 else 0)
- # --- compiled pseudocode ---
- if EXECZ.u1 == 1:
- PC = Reg(PC + signext(SIMM16.i16 * 4) + 4)
- else:
- PC = Reg(PC + 4)
- return {'PC': PC._val}
-
-def _SOPPOp_S_CBRANCH_EXECNZ(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- EXEC=Reg(exec_mask); PC=Reg(pc) if pc is not None else None; SIMM16=Reg(literal); EXECZ=Reg(1 if EXEC._val == 0 else 0)
- # --- compiled pseudocode ---
- if EXECZ.u1 == 0:
- PC = Reg(PC + signext(SIMM16.i16 * 4) + 4)
- else:
- PC = Reg(PC + 4)
- return {'PC': PC._val}
-
-SOPPOp_FUNCTIONS = {
- SOPPOp.S_NOP: _SOPPOp_S_NOP,
- SOPPOp.S_DELAY_ALU: _SOPPOp_S_DELAY_ALU,
- SOPPOp.S_TRAP: _SOPPOp_S_TRAP,
- SOPPOp.S_BARRIER_WAIT: _SOPPOp_S_BARRIER_WAIT,
- SOPPOp.S_BRANCH: _SOPPOp_S_BRANCH,
- SOPPOp.S_CBRANCH_SCC0: _SOPPOp_S_CBRANCH_SCC0,
- SOPPOp.S_CBRANCH_SCC1: _SOPPOp_S_CBRANCH_SCC1,
- SOPPOp.S_CBRANCH_VCCZ: _SOPPOp_S_CBRANCH_VCCZ,
- SOPPOp.S_CBRANCH_VCCNZ: _SOPPOp_S_CBRANCH_VCCNZ,
- SOPPOp.S_CBRANCH_EXECZ: _SOPPOp_S_CBRANCH_EXECZ,
- SOPPOp.S_CBRANCH_EXECNZ: _SOPPOp_S_CBRANCH_EXECNZ,
-}
-
-def _SMEMOp_S_LOAD_B32(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcGlobalAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_B64(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcGlobalAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_B128(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcGlobalAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- SDATA[95 : 64] = MEM[addr + 8].b32
- SDATA[127 : 96] = MEM[addr + 12].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_B256(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcGlobalAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- SDATA[95 : 64] = MEM[addr + 8].b32
- SDATA[127 : 96] = MEM[addr + 12].b32
- SDATA[159 : 128] = MEM[addr + 16].b32
- SDATA[191 : 160] = MEM[addr + 20].b32
- SDATA[223 : 192] = MEM[addr + 24].b32
- SDATA[255 : 224] = MEM[addr + 28].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_B512(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcGlobalAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- SDATA[95 : 64] = MEM[addr + 8].b32
- SDATA[127 : 96] = MEM[addr + 12].b32
- SDATA[159 : 128] = MEM[addr + 16].b32
- SDATA[191 : 160] = MEM[addr + 20].b32
- SDATA[223 : 192] = MEM[addr + 24].b32
- SDATA[255 : 224] = MEM[addr + 28].b32
- SDATA[287 : 256] = MEM[addr + 32].b32
- SDATA[319 : 288] = MEM[addr + 36].b32
- SDATA[351 : 320] = MEM[addr + 40].b32
- SDATA[383 : 352] = MEM[addr + 44].b32
- SDATA[415 : 384] = MEM[addr + 48].b32
- SDATA[447 : 416] = MEM[addr + 52].b32
- SDATA[479 : 448] = MEM[addr + 56].b32
- SDATA[511 : 480] = MEM[addr + 60].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_B96(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcGlobalAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- SDATA[95 : 64] = MEM[addr + 8].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_I8(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- SDATA.i32 = (signext(MEM[ADDR].i8))
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_U8(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- SDATA.u32 = (_pack(0, MEM[ADDR].u8))
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_I16(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- SDATA.i32 = (signext(MEM[ADDR].i16))
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_LOAD_U16(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- SDATA.u32 = (_pack(0, MEM[ADDR].u16))
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_B32(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcBufferAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_B64(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcBufferAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_B128(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcBufferAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- SDATA[95 : 64] = MEM[addr + 8].b32
- SDATA[127 : 96] = MEM[addr + 12].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_B256(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcBufferAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- SDATA[95 : 64] = MEM[addr + 8].b32
- SDATA[127 : 96] = MEM[addr + 12].b32
- SDATA[159 : 128] = MEM[addr + 16].b32
- SDATA[191 : 160] = MEM[addr + 20].b32
- SDATA[223 : 192] = MEM[addr + 24].b32
- SDATA[255 : 224] = MEM[addr + 28].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_B512(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcBufferAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- SDATA[95 : 64] = MEM[addr + 8].b32
- SDATA[127 : 96] = MEM[addr + 12].b32
- SDATA[159 : 128] = MEM[addr + 16].b32
- SDATA[191 : 160] = MEM[addr + 20].b32
- SDATA[223 : 192] = MEM[addr + 24].b32
- SDATA[255 : 224] = MEM[addr + 28].b32
- SDATA[287 : 256] = MEM[addr + 32].b32
- SDATA[319 : 288] = MEM[addr + 36].b32
- SDATA[351 : 320] = MEM[addr + 40].b32
- SDATA[383 : 352] = MEM[addr + 44].b32
- SDATA[415 : 384] = MEM[addr + 48].b32
- SDATA[447 : 416] = MEM[addr + 52].b32
- SDATA[479 : 448] = MEM[addr + 56].b32
- SDATA[511 : 480] = MEM[addr + 60].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_B96(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- addr = CalcBufferAddr(sgpr_base.b64, offset.b64)
- SDATA[31 : 0] = MEM[addr].b32
- SDATA[63 : 32] = MEM[addr + 4].b32
- SDATA[95 : 64] = MEM[addr + 8].b32
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_I8(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- SDATA.i32 = (signext(MEM[ADDR].i8))
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_U8(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- SDATA.u32 = (_pack(0, MEM[ADDR].u8))
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_I16(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- SDATA.i32 = (signext(MEM[ADDR].i16))
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_BUFFER_LOAD_U16(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- SDATA.u32 = (_pack(0, MEM[ADDR].u16))
- return {'SDATA': SDATA._val}
-
-def _SMEMOp_S_PREFETCH_INST(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- if MODE.SCALAR_PREFETCH_EN.u1:
- mem_addr = ((S0[63 : 0].i64 + (IOFFSET.i24)) & 0xffffffffffffff80)
- length = S2.u32
- length += SDATA.u32
- length = (length & 31)
- length = (length + 1) * 128
- return {}
-
-def _SMEMOp_S_PREFETCH_INST_PC_REL(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- if MODE.SCALAR_PREFETCH_EN.u1:
- mem_addr = ((PC[63 : 0].i64 + 8 + (IOFFSET.i24)) & 0xffffffffffffff80)
- length = S1.u32
- length += SDATA.u32
- length = (length & 31)
- length = (length + 1) * 128
- return {}
-
-def _SMEMOp_S_PREFETCH_DATA(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- if MODE.SCALAR_PREFETCH_EN.u1:
- mem_addr = ((S0[63 : 0].i64 + (IOFFSET.i24)) & 0xffffffffffffff80)
- length = S2.u32
- length += SDATA.u32
- length = (length & 31)
- length = (length + 1) * 128
- return {}
-
-def _SMEMOp_S_BUFFER_PREFETCH_DATA(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- if MODE.SCALAR_PREFETCH_EN.u1:
- mem_addr = ((S0[47 : 0].i64 + (IOFFSET.i24)) & 0xffffffffffffff80)
- length = S2.u32
- length += SDATA.u32
- length = (length & 31)
- length = (length + 1) * 128
- return {}
-
-def _SMEMOp_S_PREFETCH_DATA_PC_REL(MEM, addr):
- ADDR=Reg(addr); SDATA=Reg(0)
- # --- compiled pseudocode ---
- if MODE.SCALAR_PREFETCH_EN.u1:
- mem_addr = ((PC[63 : 0].i64 + 8 + (IOFFSET.i24)) & 0xffffffffffffff80)
- length = S1.u32
- length += SDATA.u32
- length = (length & 31)
- length = (length + 1) * 128
- return {}
-
-SMEMOp_FUNCTIONS = {
- SMEMOp.S_LOAD_B32: _SMEMOp_S_LOAD_B32,
- SMEMOp.S_LOAD_B64: _SMEMOp_S_LOAD_B64,
- SMEMOp.S_LOAD_B128: _SMEMOp_S_LOAD_B128,
- SMEMOp.S_LOAD_B256: _SMEMOp_S_LOAD_B256,
- SMEMOp.S_LOAD_B512: _SMEMOp_S_LOAD_B512,
- SMEMOp.S_LOAD_B96: _SMEMOp_S_LOAD_B96,
- SMEMOp.S_LOAD_I8: _SMEMOp_S_LOAD_I8,
- SMEMOp.S_LOAD_U8: _SMEMOp_S_LOAD_U8,
- SMEMOp.S_LOAD_I16: _SMEMOp_S_LOAD_I16,
- SMEMOp.S_LOAD_U16: _SMEMOp_S_LOAD_U16,
- SMEMOp.S_BUFFER_LOAD_B32: _SMEMOp_S_BUFFER_LOAD_B32,
- SMEMOp.S_BUFFER_LOAD_B64: _SMEMOp_S_BUFFER_LOAD_B64,
- SMEMOp.S_BUFFER_LOAD_B128: _SMEMOp_S_BUFFER_LOAD_B128,
- SMEMOp.S_BUFFER_LOAD_B256: _SMEMOp_S_BUFFER_LOAD_B256,
- SMEMOp.S_BUFFER_LOAD_B512: _SMEMOp_S_BUFFER_LOAD_B512,
- SMEMOp.S_BUFFER_LOAD_B96: _SMEMOp_S_BUFFER_LOAD_B96,
- SMEMOp.S_BUFFER_LOAD_I8: _SMEMOp_S_BUFFER_LOAD_I8,
- SMEMOp.S_BUFFER_LOAD_U8: _SMEMOp_S_BUFFER_LOAD_U8,
- SMEMOp.S_BUFFER_LOAD_I16: _SMEMOp_S_BUFFER_LOAD_I16,
- SMEMOp.S_BUFFER_LOAD_U16: _SMEMOp_S_BUFFER_LOAD_U16,
- SMEMOp.S_PREFETCH_INST: _SMEMOp_S_PREFETCH_INST,
- SMEMOp.S_PREFETCH_INST_PC_REL: _SMEMOp_S_PREFETCH_INST_PC_REL,
- SMEMOp.S_PREFETCH_DATA: _SMEMOp_S_PREFETCH_DATA,
- SMEMOp.S_BUFFER_PREFETCH_DATA: _SMEMOp_S_BUFFER_PREFETCH_DATA,
- SMEMOp.S_PREFETCH_DATA_PC_REL: _SMEMOp_S_PREFETCH_DATA_PC_REL,
-}
-
-def _VOP1Op_V_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.b32 = S0.b32
- return {'D0': D0._val}
-
-def _VOP1Op_V_READFIRSTLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- D0=Reg(d0); EXEC=Reg(exec_mask); SRC0=Reg(src0_idx); EXEC_LO=SliceProxy(EXEC, 31, 0)
- # --- compiled pseudocode ---
- if WAVE64:
- if EXEC == 0x0:
- lane = 0
- else:
- lane = (s_ff1_i32_b64(EXEC))
- else:
- if EXEC_LO.i32 == 0:
- lane = 0
- else:
- lane = (s_ff1_i32_b32(EXEC_LO))
- D0.b32 = VGPR[lane][SRC0.u32]
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = f64_to_i32(S0.f64)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = i32_to_f64(S0.i32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = i32_to_f32(S0.i32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = u32_to_f32(S0.u32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_U32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u32 = f32_to_u32(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = f32_to_i32(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = f32_to_f16(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = f16_to_f32(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_NEAREST_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = f32_to_i32(floor(S0.f32 + 0.5))
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_FLOOR_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = f32_to_i32(floor(S0.f32))
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = f64_to_f32(S0.f64)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F64_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = f32_to_f64(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_UBYTE0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = u32_to_f32(S0[7 : 0].u32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_UBYTE1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = u32_to_f32(S0[15 : 8].u32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_UBYTE2(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = u32_to_f32(S0[23 : 16].u32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_UBYTE3(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = u32_to_f32(S0[31 : 24].u32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_U32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u32 = f64_to_u32(S0.f64)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = u32_to_f64(S0.u32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_TRUNC_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = trunc(S0.f64)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CEIL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = trunc(S0.f64)
- if ((S0.f64 > 0.0) and (S0.f64 != D0.f64)):
- D0.f64 += 1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_RNDNE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = floor(S0.f64 + 0.5)
- if (isEven(floor(S0.f64)) and (fract(S0.f64) == 0.5)):
- D0.f64 -= 1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_FLOOR_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = trunc(S0.f64)
- if ((S0.f64 < 0.0) and (S0.f64 != D0.f64)):
- D0.f64 += -1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_MOV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.b16 = S0.b16
- return {'D0': D0._val}
-
-def _VOP1Op_V_FRACT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = S0.f32 + -floor(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_TRUNC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = trunc(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CEIL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = trunc(S0.f32)
- if ((S0.f32 > 0.0) and (S0.f32 != D0.f32)):
- D0.f32 += 1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_RNDNE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = floor(S0.f32 + 0.5)
- if (isEven(F(floor(S0.f32))) and (fract(S0.f32) == 0.5)):
- D0.f32 -= 1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_FLOOR_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = trunc(S0.f32)
- if ((S0.f32 < 0.0) and (S0.f32 != D0.f32)):
- D0.f32 += -1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_EXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = pow(2.0, S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_LOG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = log2(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_RCP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = 1.0 / S0.f32
- return {'D0': D0._val}
-
-def _VOP1Op_V_RCP_IFLAG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = 1.0 / S0.f32
- return {'D0': D0._val}
-
-def _VOP1Op_V_RSQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = 1.0 / sqrt(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_RCP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = 1.0 / S0.f64
- return {'D0': D0._val}
-
-def _VOP1Op_V_RSQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = 1.0 / sqrt(S0.f64)
- return {'D0': D0._val}
-
-def _VOP1Op_V_SQRT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = sqrt(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_SQRT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = sqrt(S0.f64)
- return {'D0': D0._val}
-
-def _VOP1Op_V_SIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = sin(S0.f32 * F(PI * 2.0))
- return {'D0': D0._val}
-
-def _VOP1Op_V_COS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f32 = cos(S0.f32 * F(PI * 2.0))
- return {'D0': D0._val}
-
-def _VOP1Op_V_NOT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u32 = ~S0.u32
- return {'D0': D0._val}
-
-def _VOP1Op_V_BFREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u32[31 : 0] = S0.u32[0 : 31]
- return {'D0': D0._val}
-
-def _VOP1Op_V_CLZ_I32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = -1
- for i in range(0, int(31)+1):
- if S0.u32[31 - i] == 1:
- D0.i32 = i; break
- return {'D0': D0._val}
-
-def _VOP1Op_V_CTZ_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = -1
- for i in range(0, int(31)+1):
- if S0.u32[i] == 1:
- D0.i32 = i; break
- return {'D0': D0._val}
-
-def _VOP1Op_V_CLS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = -1
- for i in range(1, int(31)+1):
- if S0.i32[31 - i] != S0.i32[31]:
- D0.i32 = i; break
- return {'D0': D0._val}
-
-def _VOP1Op_V_FREXP_EXP_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)):
- D0.i32 = 0
- else:
- D0.i32 = exponent(S0.f64) - 1023 + 1
- return {'D0': D0._val}
-
-def _VOP1Op_V_FREXP_MANT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)):
- D0.f64 = S0.f64
- else:
- D0.f64 = mantissa(S0.f64)
- return {'D0': D0._val}
-
-def _VOP1Op_V_FRACT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f64 = S0.f64 + -floor(S0.f64)
- return {'D0': D0._val}
-
-def _VOP1Op_V_FREXP_EXP_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))):
- D0.i32 = 0
- else:
- D0.i32 = exponent(S0.f32) - 127 + 1
- return {'D0': D0._val}
-
-def _VOP1Op_V_FREXP_MANT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))):
- D0.f32 = S0.f32
- else:
- D0.f32 = mantissa(S0.f32)
- return {'D0': D0._val}
-
-def _VOP1Op_V_MOVRELS_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- D0=Reg(d0); SRC0=Reg(src0_idx)
- # --- compiled pseudocode ---
- addr = SRC0.u32
- D0.b32 = VGPR[laneId][addr].b32
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F16_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = u16_to_f16(S0.u16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F16_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = i16_to_f16(S0.i16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u16 = f16_to_u16(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i16 = f16_to_i16(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_RCP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = 1.0 / S0.f16
- return {'D0': D0._val}
-
-def _VOP1Op_V_SQRT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = sqrt(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_RSQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = 1.0 / sqrt(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_LOG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = log2(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_EXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = pow(2.0, S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_FREXP_MANT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))):
- D0.f16 = S0.f16
- else:
- D0.f16 = mantissa(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_FREXP_EXP_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))):
- D0.i16 = 0
- else:
- D0.i16 = (exponent(S0.f16) - 15 + 1)
- return {'D0': D0._val}
-
-def _VOP1Op_V_FLOOR_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = trunc(S0.f16)
- if ((S0.f16 < 0.0) and (S0.f16 != D0.f16)):
- D0.f16 += -1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_CEIL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = trunc(S0.f16)
- if ((S0.f16 > 0.0) and (S0.f16 != D0.f16)):
- D0.f16 += 1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_TRUNC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = trunc(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_RNDNE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = floor(S0.f16 + 0.5)
- if (isEven(F(floor(S0.f16))) and (fract(S0.f16) == 0.5)):
- D0.f16 -= 1.0
- return {'D0': D0._val}
-
-def _VOP1Op_V_FRACT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = S0.f16 + -floor(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_SIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = sin(S0.f16 * F(PI * 2.0))
- return {'D0': D0._val}
-
-def _VOP1Op_V_COS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.f16 = cos(S0.f16 * F(PI * 2.0))
- return {'D0': D0._val}
-
-def _VOP1Op_V_SAT_PK_U8_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- tmp = Reg(0)
- tmp[7 : 0].u8 = SAT8(S0[15 : 0].i16)
- tmp[15 : 8].u8 = SAT8(S0[31 : 16].i16)
- D0.b16 = tmp.b16
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_NORM_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i16 = f16_to_snorm(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_NORM_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u16 = f16_to_unorm(S0.f16)
- return {'D0': D0._val}
-
-def _VOP1Op_V_SWAP_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- tmp = Reg(D0.b32)
- D0.b32 = S0.b32
- S0.b32 = tmp
- return {'D0': D0._val}
-
-def _VOP1Op_V_SWAP_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- tmp = Reg(D0.b16)
- D0.b16 = S0.b16
- S0.b16 = tmp
- return {'D0': D0._val}
-
-def _VOP1Op_V_NOT_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.u16 = ~S0.u16
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0.i32 = (signext(S0.i16))
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_U32_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- S0=Reg(s0); D0=Reg(d0)
- # --- compiled pseudocode ---
- D0 = Reg(_pack(0, S0.u16))
- return {}
-
-def _VOP1Op_V_CVT_F32_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- D0=Reg(d0); SRC0=Reg(src0_idx)
- # --- compiled pseudocode ---
- if OPSEL[1 : 0].u2 == 0:
- D0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].fp8)
- elif OPSEL[1 : 0].u2 == 2:
- D0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].fp8)
- elif OPSEL[1 : 0].u2 == 1:
- D0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].fp8)
- else:
- D0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].fp8)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_F32_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- D0=Reg(d0); SRC0=Reg(src0_idx)
- # --- compiled pseudocode ---
- if OPSEL[1 : 0].u2 == 0:
- D0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].bf8)
- elif OPSEL[1 : 0].u2 == 2:
- D0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].bf8)
- elif OPSEL[1 : 0].u2 == 1:
- D0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].bf8)
- else:
- D0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].bf8)
- return {'D0': D0._val}
-
-def _VOP1Op_V_CVT_PK_F32_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
- D0=Reg(d0); SRC0=Reg(src0_idx)
- # --- compiled pseudocode ---
(OPSEL[0].u1) else (VGPR[laneId][SRC0.u32][15 : 0]))) - D0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8) - D0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8) - return {'D0': D0._val} - -def _VOP1Op_V_CVT_PK_F32_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - D0=Reg(d0); SRC0=Reg(src0_idx) - # --- compiled pseudocode --- - tmp = Reg(((VGPR[laneId][SRC0.u32][31 : 16]) if (OPSEL[0].u1) else (VGPR[laneId][SRC0.u32][15 : 0]))) - D0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8) - D0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8) - return {'D0': D0._val} - -VOP1Op_FUNCTIONS = { - VOP1Op.V_MOV_B32: _VOP1Op_V_MOV_B32, - VOP1Op.V_READFIRSTLANE_B32: _VOP1Op_V_READFIRSTLANE_B32, - VOP1Op.V_CVT_I32_F64: _VOP1Op_V_CVT_I32_F64, - VOP1Op.V_CVT_F64_I32: _VOP1Op_V_CVT_F64_I32, - VOP1Op.V_CVT_F32_I32: _VOP1Op_V_CVT_F32_I32, - VOP1Op.V_CVT_F32_U32: _VOP1Op_V_CVT_F32_U32, - VOP1Op.V_CVT_U32_F32: _VOP1Op_V_CVT_U32_F32, - VOP1Op.V_CVT_I32_F32: _VOP1Op_V_CVT_I32_F32, - VOP1Op.V_CVT_F16_F32: _VOP1Op_V_CVT_F16_F32, - VOP1Op.V_CVT_F32_F16: _VOP1Op_V_CVT_F32_F16, - VOP1Op.V_CVT_NEAREST_I32_F32: _VOP1Op_V_CVT_NEAREST_I32_F32, - VOP1Op.V_CVT_FLOOR_I32_F32: _VOP1Op_V_CVT_FLOOR_I32_F32, - VOP1Op.V_CVT_F32_F64: _VOP1Op_V_CVT_F32_F64, - VOP1Op.V_CVT_F64_F32: _VOP1Op_V_CVT_F64_F32, - VOP1Op.V_CVT_F32_UBYTE0: _VOP1Op_V_CVT_F32_UBYTE0, - VOP1Op.V_CVT_F32_UBYTE1: _VOP1Op_V_CVT_F32_UBYTE1, - VOP1Op.V_CVT_F32_UBYTE2: _VOP1Op_V_CVT_F32_UBYTE2, - VOP1Op.V_CVT_F32_UBYTE3: _VOP1Op_V_CVT_F32_UBYTE3, - VOP1Op.V_CVT_U32_F64: _VOP1Op_V_CVT_U32_F64, - VOP1Op.V_CVT_F64_U32: _VOP1Op_V_CVT_F64_U32, - VOP1Op.V_TRUNC_F64: _VOP1Op_V_TRUNC_F64, - VOP1Op.V_CEIL_F64: _VOP1Op_V_CEIL_F64, - VOP1Op.V_RNDNE_F64: _VOP1Op_V_RNDNE_F64, - VOP1Op.V_FLOOR_F64: _VOP1Op_V_FLOOR_F64, - VOP1Op.V_MOV_B16: _VOP1Op_V_MOV_B16, - VOP1Op.V_FRACT_F32: _VOP1Op_V_FRACT_F32, - VOP1Op.V_TRUNC_F32: _VOP1Op_V_TRUNC_F32, - VOP1Op.V_CEIL_F32: _VOP1Op_V_CEIL_F32, - VOP1Op.V_RNDNE_F32: _VOP1Op_V_RNDNE_F32, - VOP1Op.V_FLOOR_F32: _VOP1Op_V_FLOOR_F32, - VOP1Op.V_EXP_F32: _VOP1Op_V_EXP_F32, - VOP1Op.V_LOG_F32: _VOP1Op_V_LOG_F32, - VOP1Op.V_RCP_F32: _VOP1Op_V_RCP_F32, - VOP1Op.V_RCP_IFLAG_F32: _VOP1Op_V_RCP_IFLAG_F32, - VOP1Op.V_RSQ_F32: _VOP1Op_V_RSQ_F32, - VOP1Op.V_RCP_F64: _VOP1Op_V_RCP_F64, - VOP1Op.V_RSQ_F64: _VOP1Op_V_RSQ_F64, - VOP1Op.V_SQRT_F32: _VOP1Op_V_SQRT_F32, - VOP1Op.V_SQRT_F64: _VOP1Op_V_SQRT_F64, - VOP1Op.V_SIN_F32: _VOP1Op_V_SIN_F32, - VOP1Op.V_COS_F32: _VOP1Op_V_COS_F32, - VOP1Op.V_NOT_B32: _VOP1Op_V_NOT_B32, - VOP1Op.V_BFREV_B32: _VOP1Op_V_BFREV_B32, - VOP1Op.V_CLZ_I32_U32: _VOP1Op_V_CLZ_I32_U32, - VOP1Op.V_CTZ_I32_B32: _VOP1Op_V_CTZ_I32_B32, - VOP1Op.V_CLS_I32: _VOP1Op_V_CLS_I32, - VOP1Op.V_FREXP_EXP_I32_F64: _VOP1Op_V_FREXP_EXP_I32_F64, - VOP1Op.V_FREXP_MANT_F64: _VOP1Op_V_FREXP_MANT_F64, - VOP1Op.V_FRACT_F64: _VOP1Op_V_FRACT_F64, - VOP1Op.V_FREXP_EXP_I32_F32: _VOP1Op_V_FREXP_EXP_I32_F32, - VOP1Op.V_FREXP_MANT_F32: _VOP1Op_V_FREXP_MANT_F32, - VOP1Op.V_MOVRELS_B32: _VOP1Op_V_MOVRELS_B32, - VOP1Op.V_CVT_F16_U16: _VOP1Op_V_CVT_F16_U16, - VOP1Op.V_CVT_F16_I16: _VOP1Op_V_CVT_F16_I16, - VOP1Op.V_CVT_U16_F16: _VOP1Op_V_CVT_U16_F16, - VOP1Op.V_CVT_I16_F16: _VOP1Op_V_CVT_I16_F16, - VOP1Op.V_RCP_F16: _VOP1Op_V_RCP_F16, - VOP1Op.V_SQRT_F16: _VOP1Op_V_SQRT_F16, - VOP1Op.V_RSQ_F16: _VOP1Op_V_RSQ_F16, - VOP1Op.V_LOG_F16: _VOP1Op_V_LOG_F16, - VOP1Op.V_EXP_F16: _VOP1Op_V_EXP_F16, - VOP1Op.V_FREXP_MANT_F16: _VOP1Op_V_FREXP_MANT_F16, - VOP1Op.V_FREXP_EXP_I16_F16: _VOP1Op_V_FREXP_EXP_I16_F16, - VOP1Op.V_FLOOR_F16: _VOP1Op_V_FLOOR_F16, - 
VOP1Op.V_CEIL_F16: _VOP1Op_V_CEIL_F16, - VOP1Op.V_TRUNC_F16: _VOP1Op_V_TRUNC_F16, - VOP1Op.V_RNDNE_F16: _VOP1Op_V_RNDNE_F16, - VOP1Op.V_FRACT_F16: _VOP1Op_V_FRACT_F16, - VOP1Op.V_SIN_F16: _VOP1Op_V_SIN_F16, - VOP1Op.V_COS_F16: _VOP1Op_V_COS_F16, - VOP1Op.V_SAT_PK_U8_I16: _VOP1Op_V_SAT_PK_U8_I16, - VOP1Op.V_CVT_NORM_I16_F16: _VOP1Op_V_CVT_NORM_I16_F16, - VOP1Op.V_CVT_NORM_U16_F16: _VOP1Op_V_CVT_NORM_U16_F16, - VOP1Op.V_SWAP_B32: _VOP1Op_V_SWAP_B32, - VOP1Op.V_SWAP_B16: _VOP1Op_V_SWAP_B16, - VOP1Op.V_NOT_B16: _VOP1Op_V_NOT_B16, - VOP1Op.V_CVT_I32_I16: _VOP1Op_V_CVT_I32_I16, - VOP1Op.V_CVT_U32_U16: _VOP1Op_V_CVT_U32_U16, - VOP1Op.V_CVT_F32_FP8: _VOP1Op_V_CVT_F32_FP8, - VOP1Op.V_CVT_F32_BF8: _VOP1Op_V_CVT_F32_BF8, - VOP1Op.V_CVT_PK_F32_FP8: _VOP1Op_V_CVT_PK_F32_FP8, - VOP1Op.V_CVT_PK_F32_BF8: _VOP1Op_V_CVT_PK_F32_BF8, -} - -def _VOP2Op_V_CNDMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - D0.u32 = ((S1.u32) if (VCC.u64[laneId]) else (S0.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_ADD_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 + S1.f64 - return {'D0': D0._val} - -def _VOP2Op_V_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 + S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 - S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S1.f32 - S0.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f64 = S0.f64 * S1.f64 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_DX9_ZERO_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)): - D0.f32 = 0.0 - else: - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = S0.f32 * S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i24) * (S1.i24) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_HI_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (((S0.i24) * (S1.i24)) >> 32) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # 
--- compiled pseudocode --- - D0.u32 = (S0.u24) * (S1.u24) - return {'D0': D0._val} - -def _VOP2Op_V_MUL_HI_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (((S0.u24) * (S1.u24)) >> 32) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_NUM_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (isSignalNAN(S0.f64) or isSignalNAN(S1.f64)): - TRAPSTS.INVALID = 1 - if (isNAN(S0.f64) and isNAN(S1.f64)): - D0.f64 = cvtToQuietNAN(S0.f64) - elif isNAN(S0.f64): - D0.f64 = S1.f64 - elif isNAN(S1.f64): - D0.f64 = S0.f64 - elif ((S0.f64 < S1.f64) or ((abs(S0.f64) == 0.0) and (abs(S1.f64) == 0.0) and sign(S0.f64) and not sign(S1.f64))): - D0.f64 = S0.f64 - else: - D0.f64 = S1.f64 - return {'D0': D0._val} - -def _VOP2Op_V_MAX_NUM_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (isSignalNAN(S0.f64) or isSignalNAN(S1.f64)): - TRAPSTS.INVALID = 1 - if (isNAN(S0.f64) and isNAN(S1.f64)): - D0.f64 = cvtToQuietNAN(S0.f64) - elif isNAN(S0.f64): - D0.f64 = S1.f64 - elif isNAN(S1.f64): - D0.f64 = S0.f64 - elif ((S0.f64 > S1.f64) or ((abs(S0.f64) == 0.0) and (abs(S1.f64) == 0.0) and not sign(S0.f64) and sign(S1.f64))): - D0.f64 = S0.f64 - else: - D0.f64 = S1.f64 - return {'D0': D0._val} - -def _VOP2Op_V_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 < S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = ((S0.i32) if (S0.i32 >= S1.i32) else (S1.i32)) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 < S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32) if (S0.u32 >= S1.u32) else (S1.u32)) - return {'D0': D0._val} - -def _VOP2Op_V_MIN_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))): - TRAPSTS.INVALID = 1 - if (isNAN(F(S0.f32)) and isNAN(F(S1.f32))): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif ((S0.f32 < S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and sign(S0.f32) and not sign(S1.f32))): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_MAX_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))): - TRAPSTS.INVALID = 1 - if 
(isNAN(F(S0.f32)) and isNAN(F(S1.f32))): - D0.f32 = F(cvtToQuietNAN(F(S0.f32))) - elif isNAN(F(S0.f32)): - D0.f32 = S1.f32 - elif isNAN(F(S1.f32)): - D0.f32 = S0.f32 - elif ((S0.f32 > S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and not sign(S0.f32) and sign(S1.f32))): - D0.f32 = S0.f32 - else: - D0.f32 = S1.f32 - return {'D0': D0._val} - -def _VOP2Op_V_LSHLREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 << S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_LSHRREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S1.u32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_ASHRREV_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S1.i32 >> S0[4 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 & S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ~(S0.u32 ^ S1.u32) - return {'D0': D0._val} - -def _VOP2Op_V_LSHLREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u64 = (S1.u64 << S0[5 : 0].u32) - return {'D0': D0._val} - -def _VOP2Op_V_ADD_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg((S0.u32) + (S1.u32) + VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_SUB_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg(S0.u32 - S1.u32 - VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if ((S1.u32) + VCC.u64[laneId] > (S0.u32)) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_SUBREV_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - tmp = Reg(S1.u32 - S0.u32 - VCC.u64[laneId]) - VCC.u64[laneId] = ((1) if ((S0.u32) + VCC.u64[laneId] > (S1.u32)) else (0)) - D0.u32 = tmp.u32 - return {'D0': D0._val, 'VCC': VCC._val} - -def _VOP2Op_V_ADD_NC_U32(s0, s1, s2, d0, 
scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 + S1.u32 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 - S1.u32 - return {'D0': D0._val} - -def _VOP2Op_V_SUBREV_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S1.u32 - S0.u32 - return {'D0': D0._val} - -def _VOP2Op_V_FMAC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, D0.f32) - return {'D0': D0._val} - -def _VOP2Op_V_FMAMK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32) - return {'D0': D0._val} - -def _VOP2Op_V_FMAAK_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32) - return {'D0': D0._val} - -def _VOP2Op_V_CVT_PK_RTZ_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); tmp=Reg(0) - # --- compiled pseudocode --- - prev_mode = ROUND_MODE - tmp[15 : 0].f16 = f32_to_f16(S0.f32) - tmp[31 : 16].f16 = f32_to_f16(S1.f32) - return {} - -def _VOP2Op_V_MIN_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))): - TRAPSTS.INVALID = 1 - if (isNAN(F(S0.f16)) and isNAN(F(S1.f16))): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif ((S0.f16 < S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and sign(S0.f16) and not sign(S1.f16))): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_MAX_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))): - TRAPSTS.INVALID = 1 - if (isNAN(F(S0.f16)) and isNAN(F(S1.f16))): - D0.f16 = F(cvtToQuietNAN(F(S0.f16))) - elif isNAN(F(S0.f16)): - D0.f16 = S1.f16 - elif isNAN(F(S1.f16)): - D0.f16 = S0.f16 - elif ((S0.f16 > S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and not sign(S0.f16) and sign(S1.f16))): - D0.f16 = S0.f16 - else: - D0.f16 = S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 + S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_SUB_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 - S1.f16 - return {'D0': D0._val} - 
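Note the deleted _VOP2Op_V_CVT_PK_RTZ_F16_F32 above: it saves ROUND_MODE and packs both conversions into tmp, but then returns {} without ever writing D0, so its result was silently dropped. A self-contained sketch of the round-toward-zero pack the pseudocode intends, with hypothetical helper names (round-toward-zero is mantissa truncation bit-wise, and on overflow it clamps to the largest finite f16 rather than infinity):

import struct

def f32_to_f16_rtz(x: float) -> int:
    # f32 -> f16 bits, rounding toward zero (truncate the mantissa)
    bits = struct.unpack('<I', struct.pack('<f', x))[0]
    sign = (bits >> 16) & 0x8000
    exp  = (bits >> 23) & 0xFF
    man  = bits & 0x7FFFFF
    if exp == 0xFF:                      # Inf / NaN pass through (quieted)
        return sign | 0x7C00 | (0x200 if man else 0)
    e = exp - 127 + 15                   # rebias f32 exponent to f16
    if e >= 0x1F:                        # overflow: RTZ clamps to max finite
        return sign | 0x7BFF
    if e <= 0:                           # f16 subnormal or zero
        if e < -10:
            return sign
        man |= 0x800000                  # restore the implicit leading 1
        return sign | (man >> (14 - e))  # shift out = truncate toward zero
    return sign | (e << 10) | (man >> 13)

def cvt_pk_rtz_f16_f32(s0: float, s1: float) -> int:
    # D0[15:0] = f16(S0), D0[31:16] = f16(S1), both rounded toward zero
    return f32_to_f16_rtz(s0) | (f32_to_f16_rtz(s1) << 16)

assert cvt_pk_rtz_f16_f32(1.0, -2.0) == (0xC000 << 16) | 0x3C00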
-def _VOP2Op_V_SUBREV_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S1.f16 - S0.f16 - return {'D0': D0._val} - -def _VOP2Op_V_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * S1.f16 - return {'D0': D0._val} - -def _VOP2Op_V_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, S1.f16, D0.f16) - return {'D0': D0._val} - -def _VOP2Op_V_FMAMK_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, SIMM32.f16, S1.f16) - return {'D0': D0._val} - -def _VOP2Op_V_FMAAK_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); SIMM32=Reg(literal) - # --- compiled pseudocode --- - D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16) - return {'D0': D0._val} - -def _VOP2Op_V_LDEXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = S0.f16 * F(2.0 ** (S1.i16)) - return {'D0': D0._val} - -def _VOP2Op_V_PK_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0) - # --- compiled pseudocode --- - D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16) - D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16) - return {'D0': D0._val} - -VOP2Op_FUNCTIONS = { - VOP2Op.V_CNDMASK_B32: _VOP2Op_V_CNDMASK_B32, - VOP2Op.V_ADD_F64: _VOP2Op_V_ADD_F64, - VOP2Op.V_ADD_F32: _VOP2Op_V_ADD_F32, - VOP2Op.V_SUB_F32: _VOP2Op_V_SUB_F32, - VOP2Op.V_SUBREV_F32: _VOP2Op_V_SUBREV_F32, - VOP2Op.V_MUL_F64: _VOP2Op_V_MUL_F64, - VOP2Op.V_MUL_DX9_ZERO_F32: _VOP2Op_V_MUL_DX9_ZERO_F32, - VOP2Op.V_MUL_F32: _VOP2Op_V_MUL_F32, - VOP2Op.V_MUL_I32_I24: _VOP2Op_V_MUL_I32_I24, - VOP2Op.V_MUL_HI_I32_I24: _VOP2Op_V_MUL_HI_I32_I24, - VOP2Op.V_MUL_U32_U24: _VOP2Op_V_MUL_U32_U24, - VOP2Op.V_MUL_HI_U32_U24: _VOP2Op_V_MUL_HI_U32_U24, - VOP2Op.V_MIN_NUM_F64: _VOP2Op_V_MIN_NUM_F64, - VOP2Op.V_MAX_NUM_F64: _VOP2Op_V_MAX_NUM_F64, - VOP2Op.V_MIN_I32: _VOP2Op_V_MIN_I32, - VOP2Op.V_MAX_I32: _VOP2Op_V_MAX_I32, - VOP2Op.V_MIN_U32: _VOP2Op_V_MIN_U32, - VOP2Op.V_MAX_U32: _VOP2Op_V_MAX_U32, - VOP2Op.V_MIN_NUM_F32: _VOP2Op_V_MIN_NUM_F32, - VOP2Op.V_MAX_NUM_F32: _VOP2Op_V_MAX_NUM_F32, - VOP2Op.V_LSHLREV_B32: _VOP2Op_V_LSHLREV_B32, - VOP2Op.V_LSHRREV_B32: _VOP2Op_V_LSHRREV_B32, - VOP2Op.V_ASHRREV_I32: _VOP2Op_V_ASHRREV_I32, - VOP2Op.V_AND_B32: _VOP2Op_V_AND_B32, - VOP2Op.V_OR_B32: _VOP2Op_V_OR_B32, - VOP2Op.V_XOR_B32: _VOP2Op_V_XOR_B32, - VOP2Op.V_XNOR_B32: _VOP2Op_V_XNOR_B32, - VOP2Op.V_LSHLREV_B64: _VOP2Op_V_LSHLREV_B64, - VOP2Op.V_ADD_CO_CI_U32: _VOP2Op_V_ADD_CO_CI_U32, - VOP2Op.V_SUB_CO_CI_U32: _VOP2Op_V_SUB_CO_CI_U32, - VOP2Op.V_SUBREV_CO_CI_U32: _VOP2Op_V_SUBREV_CO_CI_U32, - VOP2Op.V_ADD_NC_U32: _VOP2Op_V_ADD_NC_U32, - VOP2Op.V_SUB_NC_U32: _VOP2Op_V_SUB_NC_U32, - VOP2Op.V_SUBREV_NC_U32: _VOP2Op_V_SUBREV_NC_U32, - VOP2Op.V_FMAC_F32: _VOP2Op_V_FMAC_F32, - VOP2Op.V_FMAMK_F32: _VOP2Op_V_FMAMK_F32, - 
VOP2Op.V_FMAAK_F32: _VOP2Op_V_FMAAK_F32, - VOP2Op.V_CVT_PK_RTZ_F16_F32: _VOP2Op_V_CVT_PK_RTZ_F16_F32, - VOP2Op.V_MIN_NUM_F16: _VOP2Op_V_MIN_NUM_F16, - VOP2Op.V_MAX_NUM_F16: _VOP2Op_V_MAX_NUM_F16, - VOP2Op.V_ADD_F16: _VOP2Op_V_ADD_F16, - VOP2Op.V_SUB_F16: _VOP2Op_V_SUB_F16, - VOP2Op.V_SUBREV_F16: _VOP2Op_V_SUBREV_F16, - VOP2Op.V_MUL_F16: _VOP2Op_V_MUL_F16, - VOP2Op.V_FMAC_F16: _VOP2Op_V_FMAC_F16, - VOP2Op.V_FMAMK_F16: _VOP2Op_V_FMAMK_F16, - VOP2Op.V_FMAAK_F16: _VOP2Op_V_FMAAK_F16, - VOP2Op.V_LDEXP_F16: _VOP2Op_V_LDEXP_F16, - VOP2Op.V_PK_FMAC_F16: _VOP2Op_V_PK_FMAC_F16, -} - -def _VOP3Op_V_CMP_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 < S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 == S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 <= S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 > S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 != S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 >= S1.f16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 >= S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); 
VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 != S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 > S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 <= S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 == S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 < S1.f16) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 < S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 == S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 <= S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 > S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 != S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 >= S1.f32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))) - 
return {'D0': D0._val} - -def _VOP3Op_V_CMP_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 >= S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 != S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 > S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 <= S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 == S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 < S1.f32) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 < S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 == S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 <= S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 > S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, 
vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 != S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 >= S1.f64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 >= S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 != S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 > S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 <= S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 == S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 < S1.f64) - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 < S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else 
None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 == S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 <= S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 > S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 != S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 >= S1.i16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 < S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 == S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 <= S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 > S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 != S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 >= S1.u16 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 < S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, 
src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 == S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 <= S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 > S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 != S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 >= S1.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 < S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 == S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 <= S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 > S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 != S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 >= S1.u32 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 < S1.i64 
- return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 == S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 <= S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 > S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 != S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 >= S1.i64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 < S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 == S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 <= S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 > S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 != S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 >= S1.u64 - return {'D0': D0._val} - -def _VOP3Op_V_CMP_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); 
VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3Op_V_CMP_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3Op_V_CMP_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOP3Op_V_CMPX_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 < S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 == S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 <= S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 > S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 != S1.f16 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 >= S1.f16 - return {'EXEC': EXEC._val} - 
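Each V_CMP handler in this run writes one bit of the 64-bit D0 mask at laneId; the V_CMPX twins that begin here write the same bit of EXEC instead, so a failing compare masks the lane off for everything that follows. A minimal sketch of that per-lane update across a wave, with plain ints for the 64-bit masks and hypothetical names (it assumes, as on hardware, that only lanes active in EXEC execute the compare):

def set_lane(mask: int, lane: int, cond: bool) -> int:
    # EXEC.u64[laneId] = cond -- set or clear one bit of a 64-bit mask
    return (mask | (1 << lane)) if cond else (mask & ~(1 << lane))

def wave_cmpx_lt_f32(exec_mask: int, s0: list, s1: list) -> int:
    # V_CMPX_LT_F32 over a wave: each active lane writes its own EXEC bit
    for lane in range(64):
        if exec_mask >> lane & 1:
            exec_mask = set_lane(exec_mask, lane, s0[lane] < s1[lane])
    return exec_mask

# lanes whose s0 >= s1 are masked off for subsequent instructions
assert wave_cmpx_lt_f32(0b1111, [0.0, 2.0, 1.0, 3.0], [1.0, 1.0, 1.0, 1.0]) == 0b0001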
-def _VOP3Op_V_CMPX_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 >= S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 != S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 > S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 <= S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 == S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 < S1.f16) - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 < S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 == S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 <= S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 > S1.f32 - return {'EXEC': EXEC._val} - -def _VOP3Op_V_CMPX_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 != S1.f32 - return 
{'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f32 >= S1.f32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32)))
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32)))
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 >= S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 != S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 > S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 <= S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 == S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f32 < S1.f32)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 < S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 == S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 <= S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 > S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 != S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.f64 >= S1.f64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64))
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64))
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 >= S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 != S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 > S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 <= S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 == S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = not (S0.f64 < S1.f64)
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i16 < S1.i16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i16 == S1.i16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i16 <= S1.i16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i16 > S1.i16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i16 != S1.i16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i16 >= S1.i16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u16 < S1.u16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u16 == S1.u16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u16 <= S1.u16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u16 > S1.u16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u16 != S1.u16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u16 >= S1.u16
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i32 < S1.i32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i32 == S1.i32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i32 <= S1.i32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i32 > S1.i32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i32 != S1.i32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i32 >= S1.i32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u32 < S1.u32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u32 == S1.u32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u32 <= S1.u32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u32 > S1.u32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u32 != S1.u32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u32 >= S1.u32
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i64 < S1.i64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i64 == S1.i64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i64 <= S1.i64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i64 > S1.i64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i64 != S1.i64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.i64 >= S1.i64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 < S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 == S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 <= S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 > S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 != S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 >= S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  if isSignalNAN(F(S0.f16)):
-    result = S1.u32[0]
-  elif isQuietNAN(F(S0.f16)):
-    result = S1.u32[1]
-  elif exponent(S0.f16) == 31:
-    result = S1.u32[((2) if (sign(S0.f16)) else (9))]
-  elif exponent(S0.f16) > 0:
-    result = S1.u32[((3) if (sign(S0.f16)) else (8))]
-  elif F(abs(S0.f16)) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f16)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f16)) else (6))]
-  EXEC.u64[laneId] = result
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  if isSignalNAN(F(S0.f32)):
-    result = S1.u32[0]
-  elif isQuietNAN(F(S0.f32)):
-    result = S1.u32[1]
-  elif exponent(S0.f32) == 255:
-    result = S1.u32[((2) if (sign(S0.f32)) else (9))]
-  elif exponent(S0.f32) > 0:
-    result = S1.u32[((3) if (sign(S0.f32)) else (8))]
-  elif F(abs(S0.f32)) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f32)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f32)) else (6))]
-  EXEC.u64[laneId] = result
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_CMPX_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  if isSignalNAN(S0.f64):
-    result = S1.u32[0]
-  elif isQuietNAN(S0.f64):
-    result = S1.u32[1]
-  elif exponent(S0.f64) == 2047:
-    result = S1.u32[((2) if (sign(S0.f64)) else (9))]
-  elif exponent(S0.f64) > 0:
-    result = S1.u32[((3) if (sign(S0.f64)) else (8))]
-  elif abs(S0.f64) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f64)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f64)) else (6))]
-  EXEC.u64[laneId] = result
-  return {'EXEC': EXEC._val}
-
-def _VOP3Op_V_MOV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.b32 = S0.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_READFIRSTLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); EXEC=Reg(exec_mask); SRC0=Reg(src0_idx); EXEC_LO=SliceProxy(EXEC, 31, 0)
-  # --- compiled pseudocode ---
-  if WAVE64:
-    if EXEC == 0x0:
-      lane = 0
-    else:
-      lane = (s_ff1_i32_b64(EXEC))
-  else:
-    if EXEC_LO.i32 == 0:
-      lane = 0
-    else:
-      lane = (s_ff1_i32_b32(EXEC_LO))
-  D0.b32 = VGPR[lane][SRC0.u32]
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = f64_to_i32(S0.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = i32_to_f64(S0.i32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = i32_to_f32(S0.i32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = u32_to_f32(S0.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_U32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = f32_to_u32(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = f32_to_i32(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = f32_to_f16(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = f16_to_f32(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_NEAREST_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = f32_to_i32(floor(S0.f32 + 0.5))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_FLOOR_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = f32_to_i32(floor(S0.f32))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = f64_to_f32(S0.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F64_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = f32_to_f64(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_UBYTE0(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = u32_to_f32(S0[7 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_UBYTE1(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = u32_to_f32(S0[15 : 8].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_UBYTE2(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = u32_to_f32(S0[23 : 16].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_UBYTE3(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = u32_to_f32(S0[31 : 24].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_U32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = f64_to_u32(S0.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = u32_to_f64(S0.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_TRUNC_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = trunc(S0.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CEIL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = trunc(S0.f64)
-  if ((S0.f64 > 0.0) and (S0.f64 != D0.f64)):
-    D0.f64 += 1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RNDNE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = floor(S0.f64 + 0.5)
-  if (isEven(floor(S0.f64)) and (fract(S0.f64) == 0.5)):
-    D0.f64 -= 1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FLOOR_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = trunc(S0.f64)
-  if ((S0.f64 < 0.0) and (S0.f64 != D0.f64)):
-    D0.f64 += -1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MOV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.b16 = S0.b16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FRACT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = S0.f32 + -floor(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_TRUNC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = trunc(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CEIL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = trunc(S0.f32)
-  if ((S0.f32 > 0.0) and (S0.f32 != D0.f32)):
-    D0.f32 += 1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RNDNE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = floor(S0.f32 + 0.5)
-  if (isEven(F(floor(S0.f32))) and (fract(S0.f32) == 0.5)):
-    D0.f32 -= 1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FLOOR_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = trunc(S0.f32)
-  if ((S0.f32 < 0.0) and (S0.f32 != D0.f32)):
-    D0.f32 += -1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_EXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = pow(2.0, S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LOG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = log2(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RCP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = 1.0 / S0.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RCP_IFLAG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = 1.0 / S0.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RSQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = 1.0 / sqrt(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RCP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = 1.0 / S0.f64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RSQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = 1.0 / sqrt(S0.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SQRT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = sqrt(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SQRT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = sqrt(S0.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SIN_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = sin(S0.f32 * F(PI * 2.0))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_COS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = cos(S0.f32 * F(PI * 2.0))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_NOT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ~S0.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_BFREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32[31 : 0] = S0.u32[0 : 31]
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CLZ_I32_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = -1
-  for i in range(0, int(31)+1):
-    if S0.u32[31 - i] == 1:
-      D0.i32 = i; break
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CTZ_I32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = -1
-  for i in range(0, int(31)+1):
-    if S0.u32[i] == 1:
-      D0.i32 = i; break
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CLS_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = -1
-  for i in range(1, int(31)+1):
-    if S0.i32[31 - i] != S0.i32[31]:
-      D0.i32 = i; break
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FREXP_EXP_I32_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)):
-    D0.i32 = 0
-  else:
-    D0.i32 = exponent(S0.f64) - 1023 + 1
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FREXP_MANT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((S0.f64 == INF) or (S0.f64 == (-INF)) or isNAN(S0.f64)):
-    D0.f64 = S0.f64
-  else:
-    D0.f64 = mantissa(S0.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FRACT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = S0.f64 + -floor(S0.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FREXP_EXP_I32_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))):
-    D0.i32 = 0
-  else:
-    D0.i32 = exponent(S0.f32) - 127 + 1
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FREXP_MANT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((F(S0.f32) == INF) or (F(S0.f32) == (-INF)) or isNAN(F(S0.f32))):
-    D0.f32 = S0.f32
-  else:
-    D0.f32 = mantissa(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MOVRELS_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); SRC0=Reg(src0_idx)
-  # --- compiled pseudocode ---
-  addr = SRC0.u32
-  D0.b32 = VGPR[laneId][addr].b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F16_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = u16_to_f16(S0.u16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F16_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = i16_to_f16(S0.i16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = f16_to_u16(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = f16_to_i16(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RCP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = 1.0 / S0.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SQRT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = sqrt(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RSQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = 1.0 / sqrt(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LOG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = log2(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_EXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = pow(2.0, S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FREXP_MANT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))):
-    D0.f16 = S0.f16
-  else:
-    D0.f16 = mantissa(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FREXP_EXP_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((F(S0.f16) == INF) or (F(S0.f16) == (-INF)) or isNAN(F(S0.f16))):
-    D0.i16 = 0
-  else:
-    D0.i16 = (exponent(S0.f16) - 15 + 1)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FLOOR_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = trunc(S0.f16)
-  if ((S0.f16 < 0.0) and (S0.f16 != D0.f16)):
-    D0.f16 += -1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CEIL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = trunc(S0.f16)
-  if ((S0.f16 > 0.0) and (S0.f16 != D0.f16)):
-    D0.f16 += 1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_TRUNC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = trunc(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_RNDNE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = floor(S0.f16 + 0.5)
-  if (isEven(F(floor(S0.f16))) and (fract(S0.f16) == 0.5)):
-    D0.f16 -= 1.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FRACT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = S0.f16 + -floor(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SIN_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = sin(S0.f16 * F(PI * 2.0))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_COS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = cos(S0.f16 * F(PI * 2.0))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SAT_PK_U8_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(0)
-  tmp[7 : 0].u8 = SAT8(S0[15 : 0].i16)
-  tmp[15 : 8].u8 = SAT8(S0[31 : 16].i16)
-  D0.b16 = tmp.b16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_NORM_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = f16_to_snorm(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_NORM_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = f16_to_unorm(S0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_NOT_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = ~S0.u16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = (signext(S0.i16))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_U32_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0 = Reg(_pack(0, S0.u16))
-  return {}
-
-def _VOP3Op_V_CVT_F32_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); SRC0=Reg(src0_idx)
-  # --- compiled pseudocode ---
-  if OPSEL[1 : 0].u2 == 0:
-    D0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].fp8)
-  elif OPSEL[1 : 0].u2 == 2:
-    D0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].fp8)
-  elif OPSEL[1 : 0].u2 == 1:
-    D0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].fp8)
-  else:
-    D0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].fp8)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_F32_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); SRC0=Reg(src0_idx)
-  # --- compiled pseudocode ---
-  if OPSEL[1 : 0].u2 == 0:
-    D0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].bf8)
-  elif OPSEL[1 : 0].u2 == 2:
-    D0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].bf8)
-  elif OPSEL[1 : 0].u2 == 1:
-    D0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].bf8)
-  else:
-    D0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].bf8)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_F32_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); SRC0=Reg(src0_idx)
-  # --- compiled pseudocode ---
-  tmp = Reg(((VGPR[laneId][SRC0.u32][31 : 16]) if (OPSEL[0].u1) else (VGPR[laneId][SRC0.u32][15 : 0])))
-  D0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8)
-  D0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_F32_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  D0=Reg(d0); SRC0=Reg(src0_idx)
-  # --- compiled pseudocode ---
-  tmp = Reg(((VGPR[laneId][SRC0.u32][31 : 16]) if (OPSEL[0].u1) else (VGPR[laneId][SRC0.u32][15 : 0])))
-  D0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8)
-  D0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CNDMASK_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  D0.u32 = ((S1.u32) if (VCC.u64[laneId]) else (S0.u32))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ADD_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = S0.f64 + S1.f64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ADD_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = S0.f32 + S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUB_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = S0.f32 - S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUBREV_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = S1.f32 - S0.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = S0.f64 * S1.f64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_DX9_ZERO_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)):
-    D0.f32 = 0.0
-  else:
-    D0.f32 = S0.f32 * S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = S0.f32 * S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = (S0.i24) * (S1.i24)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_HI_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = (((S0.i24) * (S1.i24)) >> 32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S0.u24) * (S1.u24)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_HI_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (((S0.u24) * (S1.u24)) >> 32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN_NUM_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(S0.f64) or isSignalNAN(S1.f64)):
-    TRAPSTS.INVALID = 1
-  if (isNAN(S0.f64) and isNAN(S1.f64)):
-    D0.f64 = cvtToQuietNAN(S0.f64)
-  elif isNAN(S0.f64):
-    D0.f64 = S1.f64
-  elif isNAN(S1.f64):
-    D0.f64 = S0.f64
-  elif ((S0.f64 < S1.f64) or ((abs(S0.f64) == 0.0) and (abs(S1.f64) == 0.0) and sign(S0.f64) and not sign(S1.f64))):
-    D0.f64 = S0.f64
-  else:
-    D0.f64 = S1.f64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX_NUM_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(S0.f64) or isSignalNAN(S1.f64)):
-    TRAPSTS.INVALID = 1
-  if (isNAN(S0.f64) and isNAN(S1.f64)):
-    D0.f64 = cvtToQuietNAN(S0.f64)
-  elif isNAN(S0.f64):
-    D0.f64 = S1.f64
-  elif isNAN(S1.f64):
-    D0.f64 = S0.f64
-  elif ((S0.f64 > S1.f64) or ((abs(S0.f64) == 0.0) and (abs(S1.f64) == 0.0) and not sign(S0.f64) and sign(S1.f64))):
-    D0.f64 = S0.f64
-  else:
-    D0.f64 = S1.f64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = ((S0.i32) if (S0.i32 < S1.i32) else (S1.i32))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = ((S0.i32) if (S0.i32 >= S1.i32) else (S1.i32))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ((S0.u32) if (S0.u32 < S1.u32) else (S1.u32))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ((S0.u32) if (S0.u32 >= S1.u32) else (S1.u32))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))):
-    TRAPSTS.INVALID = 1
-  if (isNAN(F(S0.f32)) and isNAN(F(S1.f32))):
-    D0.f32 = F(cvtToQuietNAN(F(S0.f32)))
-  elif isNAN(F(S0.f32)):
-    D0.f32 = S1.f32
-  elif isNAN(F(S1.f32)):
-    D0.f32 = S0.f32
-  elif ((S0.f32 < S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and sign(S0.f32) and not sign(S1.f32))):
-    D0.f32 = S0.f32
-  else:
-    D0.f32 = S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))):
-    TRAPSTS.INVALID = 1
-  if (isNAN(F(S0.f32)) and isNAN(F(S1.f32))):
-    D0.f32 = F(cvtToQuietNAN(F(S0.f32)))
-  elif isNAN(F(S0.f32)):
-    D0.f32 = S1.f32
-  elif isNAN(F(S1.f32)):
-    D0.f32 = S0.f32
-  elif ((S0.f32 > S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and not sign(S0.f32) and sign(S1.f32))):
-    D0.f32 = S0.f32
-  else:
-    D0.f32 = S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LSHLREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S1.u32 << S0[4 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LSHRREV_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S1.u32 >> S0[4 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ASHRREV_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = (S1.i32 >> S0[4 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_AND_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S0.u32 & S1.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S0.u32 | S1.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_XOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S0.u32 ^ S1.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_XNOR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ~(S0.u32 ^ S1.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LSHLREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u64 = (S1.u64 << S0[5 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ADD_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = S0.u32 + S1.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUB_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = S0.u32 - S1.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUBREV_NC_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = S1.u32 - S0.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FMAC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = fma(S0.f32, S1.f32, D0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_RTZ_F16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  prev_mode = ROUND_MODE
-  tmp[15 : 0].f16 = f32_to_f16(S0.f32)
-  tmp[31 : 16].f16 = f32_to_f16(S1.f32)
-  return {}
-
-def _VOP3Op_V_MIN_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))):
-    TRAPSTS.INVALID = 1
-  if (isNAN(F(S0.f16)) and isNAN(F(S1.f16))):
-    D0.f16 = F(cvtToQuietNAN(F(S0.f16)))
-  elif isNAN(F(S0.f16)):
-    D0.f16 = S1.f16
-  elif isNAN(F(S1.f16)):
-    D0.f16 = S0.f16
-  elif ((S0.f16 < S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and sign(S0.f16) and not sign(S1.f16))):
-    D0.f16 = S0.f16
-  else:
-    D0.f16 = S1.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))):
-    TRAPSTS.INVALID = 1
-  if (isNAN(F(S0.f16)) and isNAN(F(S1.f16))):
-    D0.f16 = F(cvtToQuietNAN(F(S0.f16)))
-  elif isNAN(F(S0.f16)):
-    D0.f16 = S1.f16
-  elif isNAN(F(S1.f16)):
-    D0.f16 = S0.f16
-  elif ((S0.f16 > S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and not sign(S0.f16) and sign(S1.f16))):
-    D0.f16 = S0.f16
-  else:
-    D0.f16 = S1.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = S0.f16 + S1.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUB_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = S0.f16 - S1.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUBREV_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = S1.f16 - S0.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = S0.f16 * S1.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FMAC_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = fma(S0.f16, S1.f16, D0.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LDEXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = S0.f16 * F(2.0 ** (S1.i16))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FMA_DX9_ZERO_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((F(S0.f32) == 0.0) or (F(S1.f32) == 0.0)):
-    D0.f32 = S2.f32
-  else:
-    D0.f32 = fma(S0.f32, S1.f32, S2.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAD_I32_I24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = (S0.i24) * (S1.i24) + S2.i32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAD_U32_U24(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S0.u24) * (S1.u24) + S2.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CUBEID_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))):
-    if S2.f32 < 0.0:
-      D0.f32 = 5.0
-    else:
-      D0.f32 = 4.0
-  elif abs(S1.f32) >= abs(S0.f32):
-    if S1.f32 < 0.0:
-      D0.f32 = 3.0
-    else:
-      D0.f32 = 2.0
-  else:
-    if S0.f32 < 0.0:
-      D0.f32 = 1.0
-    else:
-      D0.f32 = 0.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CUBESC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))):
-    if S2.f32 < 0.0:
-      D0.f32 = -S0.f32
-    else:
-      D0.f32 = S0.f32
-  elif abs(S1.f32) >= abs(S0.f32):
-    D0.f32 = S0.f32
-  else:
-    if S0.f32 < 0.0:
-      D0.f32 = S2.f32
-    else:
-      D0.f32 = -S2.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CUBETC_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))):
-    D0.f32 = -S1.f32
-  elif abs(S1.f32) >= abs(S0.f32):
-    if S1.f32 < 0.0:
-      D0.f32 = -S2.f32
-    else:
-      D0.f32 = S2.f32
-  else:
-    D0.f32 = -S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CUBEMA_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((abs(S2.f32) >= abs(S0.f32)) and (abs(S2.f32) >= abs(S1.f32))):
-    D0.f32 = S2.f32 * 2.0
-  elif abs(S1.f32) >= abs(S0.f32):
-    D0.f32 = S1.f32 * 2.0
-  else:
-    D0.f32 = S0.f32 * 2.0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_BFE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_BFE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1))
-  D0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_BFI_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FMA_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = fma(S0.f32, S1.f32, S2.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FMA_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = fma(S0.f64, S1.f64, S2.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LERP_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1 << 24))
-  tmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1 << 16)
-  tmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1 << 8)
-  tmp += ((S0.u32[7 : 0] + S1.u32[7 : 0] + S2.u32[0].u8) >> 1)
-  D0.u32 = tmp.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ALIGNBIT_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ((_pack32(S0.u32, S1.u32) >> S2.u32[4 : 0]) & 0xffffffff)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ALIGNBYTE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ((_pack32(S0.u32, S1.u32) >> (S2.u32[1 : 0] * 8)) & 0xffffffff)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MULLIT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if ((S1.f32 == -MAX_FLOAT_F32) or (F(S1.f32) == (-INF)) or isNAN(F(S1.f32)) or (S2.f32 <= 0.0) or isNAN(F(S2.f32))):
-    D0.f32 = -MAX_FLOAT_F32
-  else:
-    D0.f32 = S0.f32 * S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MED3_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32:
-    D0.i32 = v_max_i32(S1.i32, S2.i32)
-  elif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32:
-    D0.i32 = v_max_i32(S0.i32, S2.i32)
-  else:
-    D0.i32 = v_max_i32(S0.i32, S1.i32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MED3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32:
-    D0.u32 = v_max_u32(S1.u32, S2.u32)
-  elif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32:
-    D0.u32 = v_max_u32(S0.u32, S2.u32)
-  else:
-    D0.u32 = v_max_u32(S0.u32, S1.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SAD_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.u32)
-  tmp += (ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]))
-  tmp += (ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]))
-  tmp += (ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]))
-  tmp += (ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]))
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SAD_HI_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ((v_sad_u8(S0, S1, 0)) << 16) + S2.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.u32)
-  tmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16)
-  tmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16)
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SAD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_U8_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg((S2.u32 & (~(0xff << (S1.u32[1 : 0].u32 * 8)))))
-  tmp = Reg((tmp | (((f32_to_u8(S0.f32)) & 255) << (S1.u32[1 : 0].u32 * 8))))
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3Op_V_DIV_FIXUP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  sign_out = (sign(S1.f32) ^ sign(S2.f32))
-  if isNAN(F(S2.f32)):
-    D0.f32 = F(cvtToQuietNAN(F(S2.f32)))
-  elif isNAN(F(S1.f32)):
-    D0.f32 = F(cvtToQuietNAN(F(S1.f32)))
-  elif ((F(S1.f32) == 0.0) and (F(S2.f32) == 0.0)):
-    D0.f32 = F(0xffc00000)
-  elif ((F(abs(S1.f32)) == INF) and (F(abs(S2.f32)) == INF)):
-    D0.f32 = F(0xffc00000)
-  elif ((F(S1.f32) == 0.0) or (F(abs(S2.f32)) == INF)):
-    D0.f32 = (((-INF).f32) if (sign_out) else (INF.f32))
-  elif ((F(abs(S1.f32)) == INF) or (F(S2.f32) == 0.0)):
-    D0.f32 = ((-0.0) if (sign_out) else (0.0))
-  elif exponent(S2.f32) - exponent(S1.f32) < -150:
-    D0.f32 = ((-UNDERFLOW_F32) if (sign_out) else (UNDERFLOW_F32))
-  elif exponent(S1.f32) == 255:
-    D0.f32 = ((-OVERFLOW_F32) if (sign_out) else (OVERFLOW_F32))
-  else:
-    D0.f32 = ((-OVERFLOW_F32) if (sign_out) else (OVERFLOW_F32)) if isNAN(S0.f32) else ((-abs(S0.f32)) if (sign_out) else (abs(S0.f32)))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_DIV_FIXUP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  sign_out = (sign(S1.f64) ^ sign(S2.f64))
-  if isNAN(S2.f64):
-    D0.f64 = cvtToQuietNAN(S2.f64)
-  elif isNAN(S1.f64):
-    D0.f64 = cvtToQuietNAN(S1.f64)
-  elif ((S1.f64 == 0.0) and (S2.f64 == 0.0)):
-    D0.f64 = F(0xfff8000000000000)
-  elif ((abs(S1.f64) == INF) and (abs(S2.f64) == INF)):
-    D0.f64 = F(0xfff8000000000000)
-  elif ((S1.f64 == 0.0) or (abs(S2.f64) == INF)):
-    D0.f64 = (((-INF)) if (sign_out) else (INF))
-  elif ((abs(S1.f64) == INF) or (S2.f64 == 0.0)):
-    D0.f64 = ((-0.0) if (sign_out) else (0.0))
-  elif exponent(S2.f64) - exponent(S1.f64) < -1075:
-    D0.f64 = ((-UNDERFLOW_F64) if (sign_out) else (UNDERFLOW_F64))
-  elif exponent(S1.f64) == 2047:
-    D0.f64 = ((-OVERFLOW_F64) if (sign_out) else (OVERFLOW_F64))
-  else:
-    D0.f64 = ((-OVERFLOW_F64) if (sign_out) else (OVERFLOW_F64)) if isNAN(S0.f64) else ((-abs(S0.f64)) if (sign_out) else (abs(S0.f64)))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN3_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = v_min_num_f32(v_min_num_f32(S0.f32, S1.f32), S2.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX3_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = v_max_num_f32(v_max_num_f32(S0.f32, S1.f32), S2.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN3_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = v_min_num_f16(v_min_num_f16(S0.f16, S1.f16), S2.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX3_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = v_max_num_f16(v_max_num_f16(S0.f16, S1.f16), S2.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MINIMUM3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = v_minimum_f32(v_minimum_f32(S0.f32, S1.f32), S2.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAXIMUM3_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = v_maximum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MINIMUM3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = v_minimum_f16(v_minimum_f16(S0.f16, S1.f16), S2.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAXIMUM3_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = v_maximum_f16(v_maximum_f16(S0.f16, S1.f16), S2.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MED3_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isNAN(F(S0.f32)) or isNAN(F(S1.f32)) or isNAN(F(S2.f32))):
-    D0.f32 = v_min3_num_f32(S0.f32, S1.f32, S2.f32)
-  elif v_max3_num_f32(S0.f32, S1.f32, S2.f32) == S0.f32:
-    D0.f32 = v_max_num_f32(S1.f32, S2.f32)
-  elif v_max3_num_f32(S0.f32, S1.f32, S2.f32) == S1.f32:
-    D0.f32 = v_max_num_f32(S0.f32, S2.f32)
-  else:
-    D0.f32 = v_max_num_f32(S0.f32, S1.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MED3_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isNAN(F(S0.f16)) or isNAN(F(S1.f16)) or isNAN(F(S2.f16))):
-    D0.f16 = v_min3_num_f16(S0.f16, S1.f16, S2.f16)
-  elif v_max3_num_f16(S0.f16, S1.f16, S2.f16) == S0.f16:
-    D0.f16 = v_max_num_f16(S1.f16, S2.f16)
-  elif v_max3_num_f16(S0.f16, S1.f16, S2.f16) == S1.f16:
-    D0.f16 = v_max_num_f16(S0.f16, S2.f16)
-  else:
-    D0.f16 = v_max_num_f16(S0.f16, S1.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_DIV_FMAS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  if VCC.u64[laneId]:
-    D0.f32 = (2.0 ** 64 if exponent(S2.f32) > 127 else 2.0 ** -64) * fma(S0.f32, S1.f32, S2.f32)
-  else:
-    D0.f32 = fma(S0.f32, S1.f32, S2.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_DIV_FMAS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  if VCC.u64[laneId]:
-    D0.f64 = (2.0 ** 128 if exponent(S2.f64) > 1023 else 2.0 ** -128) * fma(S0.f64, S1.f64, S2.f64)
-  else:
-    D0.f64 = fma(S0.f64, S1.f64, S2.f64)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MSAD_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.u32)
-  tmp += ((0) if (S1.u32[7 : 0] == 0) else ((ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]))))
-  tmp += ((0) if (S1.u32[15 : 8] == 0) else ((ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]))))
-  tmp += ((0) if (S1.u32[23 : 16] == 0) else ((ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]))))
-  tmp += ((0) if (S1.u32[31 : 24] == 0) else ((ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]))))
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3Op_V_QSAD_PK_U16_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[63 : 48] = (v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32))
-  tmp[47 : 32] = (v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32))
-  tmp[31 : 16] = (v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32))
-  tmp[15 : 0] = (v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32))
-  D0.b64 = tmp.b64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MQSAD_PK_U16_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[63 : 48] = (v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32))
-  tmp[47 : 32] = (v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32))
-  tmp[31 : 16] = (v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32))
-  tmp[15 : 0] = (v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32))
-  D0.b64 = tmp.b64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MQSAD_U32_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[127 : 96] = (v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32))
-  tmp[95 : 64] = (v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32))
-  tmp[63 : 32] = (v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32))
-  tmp[31 : 0] = (v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32))
-  D0.b128 = tmp.b128
-  return {'D0': D0._val}
-
-def _VOP3Op_V_XOR3_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S0.u32 ^ S1.u32 ^ S2.u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = S0.u16 * S1.u16 + S2.u16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_PERM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0[31 : 24] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[31 : 24])
-  D0[23 : 16] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[23 : 16])
-  D0[15 : 8] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[15 : 8])
-  D0[7 : 0] = BYTE_PERMUTE(_pack32(S0.u32, S1.u32), S2.u32[7 : 0])
-  return {'D0': D0._val}
-
-def _VOP3Op_V_XAD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S0.u32 ^ S1.u32) + S2.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LSHL_ADD_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ADD_LSHL_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_FMA_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = fma(S0.f16, S1.f16, S2.f16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MED3_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16:
-    D0.i16 = v_max_i16(S1.i16, S2.i16)
-  elif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16:
-    D0.i16 = v_max_i16(S0.i16, S2.i16)
-  else:
-    D0.i16 = v_max_i16(S0.i16, S1.i16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MED3_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16:
-    D0.u16 = v_max_u16(S1.u16, S2.u16)
-  elif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16:
-    D0.u16 = v_max_u16(S0.u16, S2.u16)
-  else:
-    D0.u16 = v_max_u16(S0.u16, S1.u16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask,
literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i16 = S0.i16 * S1.i16 + S2.i16 - return {'D0': D0._val} - -def _VOP3Op_V_DIV_FIXUP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - sign_out = (sign(S1.f16) ^ sign(S2.f16)) - if isNAN(F(S2.f16)): - D0.f16 = F(cvtToQuietNAN(F(S2.f16))) - elif isNAN(F(S1.f16)): - D0.f16 = F(cvtToQuietNAN(F(S1.f16))) - elif ((F(S1.f16) == 0.0) and (F(S2.f16) == 0.0)): - D0.f16 = F(0xfe00) - elif ((F(abs(S1.f16)) == INF) and (F(abs(S2.f16)) == INF)): - D0.f16 = F(0xfe00) - elif ((F(S1.f16) == 0.0) or (F(abs(S2.f16)) == INF)): - D0.f16 = (((-INF).f16) if (sign_out) else (INF.f16)) - elif ((F(abs(S1.f16)) == INF) or (F(S2.f16) == 0.0)): - D0.f16 = ((-0.0) if (sign_out) else (0.0)) - else: - D0.f16 = ((-abs(S0.f16)) if (sign_out) else (abs(S0.f16))) - return {'D0': D0._val} - -def _VOP3Op_V_ADD3_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = S0.u32 + S1.u32 + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_LSHL_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_AND_OR_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = ((S0.u32 & S1.u32) | S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_OR3_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u32 | S1.u32 | S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MAD_U32_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = (S0.u16) * (S1.u16) + S2.u32 - return {'D0': D0._val} - -def _VOP3Op_V_MAD_I32_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = (S0.i16) * (S1.i16) + S2.i32 - return {'D0': D0._val} - -def _VOP3Op_V_CNDMASK_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc) - # --- compiled pseudocode --- - D0.u16 = ((S1.u16) if (VCC.u64[laneId]) else (S0.u16)) - return {'D0': D0._val} - -def _VOP3Op_V_MAXMIN_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = v_min_u32(v_max_u32(S0.u32, S1.u32), S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MINMAX_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.u32 = v_max_u32(v_min_u32(S0.u32, S1.u32), S2.u32) - return {'D0': D0._val} - -def _VOP3Op_V_MAXMIN_I32(s0, s1, s2, d0, scc, vcc, 
laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = v_min_i32(v_max_i32(S0.i32, S1.i32), S2.i32) - return {'D0': D0._val} - -def _VOP3Op_V_MINMAX_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.i32 = v_max_i32(v_min_i32(S0.i32, S1.i32), S2.i32) - return {'D0': D0._val} - -def _VOP3Op_V_DOT2_F16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.f16) - tmp += S0[15 : 0].f16 * S1[15 : 0].f16 - tmp += S0[31 : 16].f16 * S1[31 : 16].f16 - D0.f16 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_DOT2_BF16_BF16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - tmp = Reg(S2.bf16) - tmp += S0[15 : 0].bf16 * S1[15 : 0].bf16 - tmp += S0[31 : 16].bf16 * S1[31 : 16].bf16 - D0.bf16 = tmp - return {'D0': D0._val} - -def _VOP3Op_V_MINMAX_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_max_num_f32(v_min_num_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MAXMIN_NUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_min_num_f32(v_max_num_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MINMAX_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_max_num_f16(v_min_num_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MAXMIN_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_min_num_f16(v_max_num_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MINIMUMMAXIMUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_maximum_f32(v_minimum_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MAXIMUMMINIMUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f32 = v_minimum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32) - return {'D0': D0._val} - -def _VOP3Op_V_MINIMUMMAXIMUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = v_maximum_f16(v_minimum_f16(S0.f16, S1.f16), S2.f16) - return {'D0': D0._val} - -def _VOP3Op_V_MAXIMUMMINIMUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0) - # --- compiled pseudocode --- - D0.f16 = 
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_EXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = pow(2.0, S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_EXP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = pow(2.0, S0.f16)
-  D0[31 : 16] = 0x0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_LOG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = log2(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_LOG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = log2(S0.f16)
-  D0[31 : 16] = 0x0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_RCP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = 1.0 / S0.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_RCP_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = 1.0 / S0.f16
-  D0[31 : 16] = 0x0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_RSQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = 1.0 / sqrt(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_RSQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = 1.0 / sqrt(S0.f16)
-  D0[31 : 16] = 0x0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_SQRT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = sqrt(S0.f32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_S_SQRT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f16 = sqrt(S0.f16)
-  D0[31 : 16] = 0x0
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ADD_NC_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = S0.u16 + S1.u16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUB_NC_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = S0.u16 - S1.u16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_LO_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = S0.u16 * S1.u16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_I16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16] = (v_cvt_i16_f32(S1.f32))
-  tmp[15 : 0] = (v_cvt_i16_f32(S0.f32))
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_U16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16] = (v_cvt_u16_f32(S1.f32))
-  tmp[15 : 0] = (v_cvt_u16_f32(S0.f32))
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = ((S0.u16) if (S0.u16 >= S1.u16) else (S1.u16))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAX_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = ((S0.i16) if (S0.i16 >= S1.i16) else (S1.i16))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = ((S0.u16) if (S0.u16 < S1.u16) else (S1.u16))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MIN_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = ((S0.i16) if (S0.i16 < S1.i16) else (S1.i16))
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ADD_NC_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = S0.i16 + S1.i16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUB_NC_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = S0.i16 - S1.i16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_PACK_B32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0[31 : 16].f16 = S1.f16
-  D0[15 : 0].f16 = S0.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_NORM_I16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].i16 = f16_to_snorm(S0.f16)
-  tmp[31 : 16].i16 = f16_to_snorm(S1.f16)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_NORM_U16_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].u16 = f16_to_unorm(S0.f16)
-  tmp[31 : 16].u16 = f16_to_unorm(S1.f16)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LDEXP_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f32 = S0.f32 * 2.0 ** S1.i32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_BFM_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (((1 << S0[4 : 0].u32) - 1) << S1[4 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_BCNT_U32_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S1.u32)
-  for i in range(0, int(31)+1):
-    tmp += S0[i].u32
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_NORM_I16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].i16 = f32_to_snorm(S0.f32)
-  tmp[31 : 16].i16 = f32_to_snorm(S1.f32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_NORM_U16_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].u16 = f32_to_unorm(S0.f32)
-  tmp[31 : 16].u16 = f32_to_unorm(S1.f32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_U16_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].u16 = u32_to_u16(S0.u32)
-  tmp[31 : 16].u16 = u32_to_u16(S1.u32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_CVT_PK_I16_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].i16 = i32_to_i16(S0.i32)
-  tmp[31 : 16].i16 = i32_to_i16(S1.i32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_SUB_NC_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = S0.i32 - S1.i32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ADD_NC_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = S0.i32 + S1.i32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LDEXP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.f64 = S0.f64 * 2.0 ** S1.i32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_LO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = S0.u32 * S1.u32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_HI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u32 = (((S0.u32) * (S1.u32)) >> 32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MUL_HI_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i32 = (((S0.i32) * (S1.i32)) >> 32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_TRIG_PREOP_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  shift = (S1[4 : 0].u32) * 53
-  if exponent(S0.f64) > 1077:
-    shift += exponent(S0.f64) - 1077
-  result = float(((TWO_OVER_PI_1201[1200 : 0] << int(shift)) >> (1201 - 53)) & 0x1fffffffffffff)
-  scale = -53 - shift
-  if exponent(S0.f64) >= 1968:
-    scale += 128
-  D0.f64 = ldexp(result, scale)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LSHLREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = (S1.u16 << S0[3 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LSHRREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = (S1.u16 >> S0[3 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ASHRREV_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i16 = (S1.i16 >> S0[3 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_LSHRREV_B64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u64 = (S1.u64 >> S0[5 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_ASHRREV_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.i64 = (S1.i64 >> S0[5 : 0].u32)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MINIMUM_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(S0.f64) or isSignalNAN(S1.f64)):
-    TRAPSTS.INVALID = 1
-  if isSignalNAN(S0.f64):
-    D0.f64 = cvtToQuietNAN(S0.f64)
-  elif isSignalNAN(S1.f64):
-    D0.f64 = cvtToQuietNAN(S1.f64)
-  elif isQuietNAN(S0.f64):
-    D0.f64 = S0.f64
-  elif isQuietNAN(S1.f64):
-    D0.f64 = S1.f64
-  elif ((S0.f64 < S1.f64) or ((abs(S0.f64) == 0.0) and (abs(S1.f64) == 0.0) and sign(S0.f64) and not sign(S1.f64))):
-    D0.f64 = S0.f64
-  else:
-    D0.f64 = S1.f64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAXIMUM_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(S0.f64) or isSignalNAN(S1.f64)):
-    TRAPSTS.INVALID = 1
-  if isSignalNAN(S0.f64):
-    D0.f64 = cvtToQuietNAN(S0.f64)
-  elif isSignalNAN(S1.f64):
-    D0.f64 = cvtToQuietNAN(S1.f64)
-  elif isQuietNAN(S0.f64):
-    D0.f64 = S0.f64
-  elif isQuietNAN(S1.f64):
-    D0.f64 = S1.f64
-  elif ((S0.f64 > S1.f64) or ((abs(S0.f64) == 0.0) and (abs(S1.f64) == 0.0) and not sign(S0.f64) and sign(S1.f64))):
-    D0.f64 = S0.f64
-  else:
-    D0.f64 = S1.f64
-  return {'D0': D0._val}
-
-def _VOP3Op_V_READLANE_B32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S1=Reg(s1); D0=Reg(d0); SRC0=Reg(src0_idx)
-  # --- compiled pseudocode ---
-  if WAVE32:
-    lane = S1.u32[4 : 0].u32
-  else:
-    lane = S1.u32[5 : 0].u32
-  D0.b32 = VGPR[lane][SRC0.u32]
-  return {'D0': D0._val}
-
-def _VOP3Op_V_AND_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = (S0.u16 & S1.u16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_OR_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = (S0.u16 | S1.u16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_XOR_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  D0.u16 = (S0.u16 ^ S1.u16)
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MINIMUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))):
-    TRAPSTS.INVALID = 1
-  if isSignalNAN(F(S0.f32)):
-    D0.f32 = F(cvtToQuietNAN(F(S0.f32)))
-  elif isSignalNAN(F(S1.f32)):
-    D0.f32 = F(cvtToQuietNAN(F(S1.f32)))
-  elif isQuietNAN(F(S0.f32)):
-    D0.f32 = S0.f32
-  elif isQuietNAN(F(S1.f32)):
-    D0.f32 = S1.f32
-  elif ((S0.f32 < S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and sign(S0.f32) and not sign(S1.f32))):
-    D0.f32 = S0.f32
-  else:
-    D0.f32 = S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAXIMUM_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(F(S0.f32)) or isSignalNAN(F(S1.f32))):
-    TRAPSTS.INVALID = 1
-  if isSignalNAN(F(S0.f32)):
-    D0.f32 = F(cvtToQuietNAN(F(S0.f32)))
-  elif isSignalNAN(F(S1.f32)):
-    D0.f32 = F(cvtToQuietNAN(F(S1.f32)))
-  elif isQuietNAN(F(S0.f32)):
-    D0.f32 = S0.f32
-  elif isQuietNAN(F(S1.f32)):
-    D0.f32 = S1.f32
-  elif ((S0.f32 > S1.f32) or ((abs(S0.f32) == 0.0) and (abs(S1.f32) == 0.0) and not sign(S0.f32) and sign(S1.f32))):
-    D0.f32 = S0.f32
-  else:
-    D0.f32 = S1.f32
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MINIMUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))):
-    TRAPSTS.INVALID = 1
-  if isSignalNAN(F(S0.f16)):
-    D0.f16 = F(cvtToQuietNAN(F(S0.f16)))
-  elif isSignalNAN(F(S1.f16)):
-    D0.f16 = F(cvtToQuietNAN(F(S1.f16)))
-  elif isQuietNAN(F(S0.f16)):
-    D0.f16 = S0.f16
-  elif isQuietNAN(F(S1.f16)):
-    D0.f16 = S1.f16
-  elif ((S0.f16 < S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and sign(S0.f16) and not sign(S1.f16))):
-    D0.f16 = S0.f16
-  else:
-    D0.f16 = S1.f16
-  return {'D0': D0._val}
-
-def _VOP3Op_V_MAXIMUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  if (isSignalNAN(F(S0.f16)) or isSignalNAN(F(S1.f16))):
-    TRAPSTS.INVALID = 1
-  if isSignalNAN(F(S0.f16)):
-    D0.f16 = F(cvtToQuietNAN(F(S0.f16)))
-  elif isSignalNAN(F(S1.f16)):
-    D0.f16 = F(cvtToQuietNAN(F(S1.f16)))
-  elif isQuietNAN(F(S0.f16)):
-    D0.f16 = S0.f16
-  elif isQuietNAN(F(S1.f16)):
-    D0.f16 = S1.f16
-  elif ((S0.f16 > S1.f16) or ((abs(S0.f16) == 0.0) and (abs(S1.f16) == 0.0) and not sign(S0.f16) and sign(S1.f16))):
-    D0.f16 = S0.f16
-  else:
-    D0.f16 = S1.f16
-  return {'D0': D0._val}
-
-VOP3Op_FUNCTIONS = {
-  VOP3Op.V_CMP_LT_F16: _VOP3Op_V_CMP_LT_F16,
-  VOP3Op.V_CMP_EQ_F16: _VOP3Op_V_CMP_EQ_F16,
-  VOP3Op.V_CMP_LE_F16: _VOP3Op_V_CMP_LE_F16,
-  VOP3Op.V_CMP_GT_F16: _VOP3Op_V_CMP_GT_F16,
-  VOP3Op.V_CMP_LG_F16: _VOP3Op_V_CMP_LG_F16,
-  VOP3Op.V_CMP_GE_F16: _VOP3Op_V_CMP_GE_F16,
-  VOP3Op.V_CMP_O_F16: _VOP3Op_V_CMP_O_F16,
-  VOP3Op.V_CMP_U_F16: _VOP3Op_V_CMP_U_F16,
-  VOP3Op.V_CMP_NGE_F16: _VOP3Op_V_CMP_NGE_F16,
-  VOP3Op.V_CMP_NLG_F16: _VOP3Op_V_CMP_NLG_F16,
-  VOP3Op.V_CMP_NGT_F16: _VOP3Op_V_CMP_NGT_F16,
-  VOP3Op.V_CMP_NLE_F16: _VOP3Op_V_CMP_NLE_F16,
-  VOP3Op.V_CMP_NEQ_F16: _VOP3Op_V_CMP_NEQ_F16,
-  VOP3Op.V_CMP_NLT_F16: _VOP3Op_V_CMP_NLT_F16,
-  VOP3Op.V_CMP_LT_F32: _VOP3Op_V_CMP_LT_F32,
-  VOP3Op.V_CMP_EQ_F32: _VOP3Op_V_CMP_EQ_F32,
-  VOP3Op.V_CMP_LE_F32: _VOP3Op_V_CMP_LE_F32,
-  VOP3Op.V_CMP_GT_F32: _VOP3Op_V_CMP_GT_F32,
-  VOP3Op.V_CMP_LG_F32: _VOP3Op_V_CMP_LG_F32,
-  VOP3Op.V_CMP_GE_F32: _VOP3Op_V_CMP_GE_F32,
-  VOP3Op.V_CMP_O_F32: _VOP3Op_V_CMP_O_F32,
-  VOP3Op.V_CMP_U_F32: _VOP3Op_V_CMP_U_F32,
-  VOP3Op.V_CMP_NGE_F32: _VOP3Op_V_CMP_NGE_F32,
-  VOP3Op.V_CMP_NLG_F32: _VOP3Op_V_CMP_NLG_F32,
-  VOP3Op.V_CMP_NGT_F32: _VOP3Op_V_CMP_NGT_F32,
-  VOP3Op.V_CMP_NLE_F32: _VOP3Op_V_CMP_NLE_F32,
-  VOP3Op.V_CMP_NEQ_F32: _VOP3Op_V_CMP_NEQ_F32,
-  VOP3Op.V_CMP_NLT_F32: _VOP3Op_V_CMP_NLT_F32,
-  VOP3Op.V_CMP_LT_F64: _VOP3Op_V_CMP_LT_F64,
-  VOP3Op.V_CMP_EQ_F64: _VOP3Op_V_CMP_EQ_F64,
-  VOP3Op.V_CMP_LE_F64: _VOP3Op_V_CMP_LE_F64,
-  VOP3Op.V_CMP_GT_F64: _VOP3Op_V_CMP_GT_F64,
-  VOP3Op.V_CMP_LG_F64: _VOP3Op_V_CMP_LG_F64,
-  VOP3Op.V_CMP_GE_F64: _VOP3Op_V_CMP_GE_F64,
-  VOP3Op.V_CMP_O_F64: _VOP3Op_V_CMP_O_F64,
-  VOP3Op.V_CMP_U_F64: _VOP3Op_V_CMP_U_F64,
-  VOP3Op.V_CMP_NGE_F64: _VOP3Op_V_CMP_NGE_F64,
-  VOP3Op.V_CMP_NLG_F64: _VOP3Op_V_CMP_NLG_F64,
-  VOP3Op.V_CMP_NGT_F64: _VOP3Op_V_CMP_NGT_F64,
-  VOP3Op.V_CMP_NLE_F64: _VOP3Op_V_CMP_NLE_F64,
-  VOP3Op.V_CMP_NEQ_F64: _VOP3Op_V_CMP_NEQ_F64,
-  VOP3Op.V_CMP_NLT_F64: _VOP3Op_V_CMP_NLT_F64,
-  VOP3Op.V_CMP_LT_I16: _VOP3Op_V_CMP_LT_I16,
-  VOP3Op.V_CMP_EQ_I16: _VOP3Op_V_CMP_EQ_I16,
-  VOP3Op.V_CMP_LE_I16: _VOP3Op_V_CMP_LE_I16,
-  VOP3Op.V_CMP_GT_I16: _VOP3Op_V_CMP_GT_I16,
-  VOP3Op.V_CMP_NE_I16: _VOP3Op_V_CMP_NE_I16,
-  VOP3Op.V_CMP_GE_I16: _VOP3Op_V_CMP_GE_I16,
-  VOP3Op.V_CMP_LT_U16: _VOP3Op_V_CMP_LT_U16,
-  VOP3Op.V_CMP_EQ_U16: _VOP3Op_V_CMP_EQ_U16,
-  VOP3Op.V_CMP_LE_U16: _VOP3Op_V_CMP_LE_U16,
-  VOP3Op.V_CMP_GT_U16: _VOP3Op_V_CMP_GT_U16,
-  VOP3Op.V_CMP_NE_U16: _VOP3Op_V_CMP_NE_U16,
-  VOP3Op.V_CMP_GE_U16: _VOP3Op_V_CMP_GE_U16,
-  VOP3Op.V_CMP_LT_I32: _VOP3Op_V_CMP_LT_I32,
-  VOP3Op.V_CMP_EQ_I32: _VOP3Op_V_CMP_EQ_I32,
-  VOP3Op.V_CMP_LE_I32: _VOP3Op_V_CMP_LE_I32,
-  VOP3Op.V_CMP_GT_I32: _VOP3Op_V_CMP_GT_I32,
-  VOP3Op.V_CMP_NE_I32: _VOP3Op_V_CMP_NE_I32,
-  VOP3Op.V_CMP_GE_I32: _VOP3Op_V_CMP_GE_I32,
-  VOP3Op.V_CMP_LT_U32: _VOP3Op_V_CMP_LT_U32,
-  VOP3Op.V_CMP_EQ_U32: _VOP3Op_V_CMP_EQ_U32,
-  VOP3Op.V_CMP_LE_U32: _VOP3Op_V_CMP_LE_U32,
-  VOP3Op.V_CMP_GT_U32: _VOP3Op_V_CMP_GT_U32,
-  VOP3Op.V_CMP_NE_U32: _VOP3Op_V_CMP_NE_U32,
-  VOP3Op.V_CMP_GE_U32: _VOP3Op_V_CMP_GE_U32,
-  VOP3Op.V_CMP_LT_I64: _VOP3Op_V_CMP_LT_I64,
-  VOP3Op.V_CMP_EQ_I64: _VOP3Op_V_CMP_EQ_I64,
-  VOP3Op.V_CMP_LE_I64: _VOP3Op_V_CMP_LE_I64,
-  VOP3Op.V_CMP_GT_I64: _VOP3Op_V_CMP_GT_I64,
-  VOP3Op.V_CMP_NE_I64: _VOP3Op_V_CMP_NE_I64,
-  VOP3Op.V_CMP_GE_I64: _VOP3Op_V_CMP_GE_I64,
-  VOP3Op.V_CMP_LT_U64: _VOP3Op_V_CMP_LT_U64,
-  VOP3Op.V_CMP_EQ_U64: _VOP3Op_V_CMP_EQ_U64,
-  VOP3Op.V_CMP_LE_U64: _VOP3Op_V_CMP_LE_U64,
-  VOP3Op.V_CMP_GT_U64: _VOP3Op_V_CMP_GT_U64,
-  VOP3Op.V_CMP_NE_U64: _VOP3Op_V_CMP_NE_U64,
-  VOP3Op.V_CMP_GE_U64: _VOP3Op_V_CMP_GE_U64,
-  VOP3Op.V_CMP_CLASS_F16: _VOP3Op_V_CMP_CLASS_F16,
-  VOP3Op.V_CMP_CLASS_F32: _VOP3Op_V_CMP_CLASS_F32,
-  VOP3Op.V_CMP_CLASS_F64: _VOP3Op_V_CMP_CLASS_F64,
-  VOP3Op.V_CMPX_LT_F16: _VOP3Op_V_CMPX_LT_F16,
-  VOP3Op.V_CMPX_EQ_F16: _VOP3Op_V_CMPX_EQ_F16,
-  VOP3Op.V_CMPX_LE_F16: _VOP3Op_V_CMPX_LE_F16,
-  VOP3Op.V_CMPX_GT_F16: _VOP3Op_V_CMPX_GT_F16,
-  VOP3Op.V_CMPX_LG_F16: _VOP3Op_V_CMPX_LG_F16,
-  VOP3Op.V_CMPX_GE_F16: _VOP3Op_V_CMPX_GE_F16,
-  VOP3Op.V_CMPX_O_F16: _VOP3Op_V_CMPX_O_F16,
-  VOP3Op.V_CMPX_U_F16: _VOP3Op_V_CMPX_U_F16,
-  VOP3Op.V_CMPX_NGE_F16: _VOP3Op_V_CMPX_NGE_F16,
-  VOP3Op.V_CMPX_NLG_F16: _VOP3Op_V_CMPX_NLG_F16,
-  VOP3Op.V_CMPX_NGT_F16: _VOP3Op_V_CMPX_NGT_F16,
-  VOP3Op.V_CMPX_NLE_F16: _VOP3Op_V_CMPX_NLE_F16,
-  VOP3Op.V_CMPX_NEQ_F16: _VOP3Op_V_CMPX_NEQ_F16,
-  VOP3Op.V_CMPX_NLT_F16: _VOP3Op_V_CMPX_NLT_F16,
-  VOP3Op.V_CMPX_LT_F32: _VOP3Op_V_CMPX_LT_F32,
-  VOP3Op.V_CMPX_EQ_F32: _VOP3Op_V_CMPX_EQ_F32,
-  VOP3Op.V_CMPX_LE_F32: _VOP3Op_V_CMPX_LE_F32,
-  VOP3Op.V_CMPX_GT_F32: _VOP3Op_V_CMPX_GT_F32,
-  VOP3Op.V_CMPX_LG_F32: _VOP3Op_V_CMPX_LG_F32,
-  VOP3Op.V_CMPX_GE_F32: _VOP3Op_V_CMPX_GE_F32,
-  VOP3Op.V_CMPX_O_F32: _VOP3Op_V_CMPX_O_F32,
-  VOP3Op.V_CMPX_U_F32: _VOP3Op_V_CMPX_U_F32,
-  VOP3Op.V_CMPX_NGE_F32: _VOP3Op_V_CMPX_NGE_F32,
-  VOP3Op.V_CMPX_NLG_F32: _VOP3Op_V_CMPX_NLG_F32,
-  VOP3Op.V_CMPX_NGT_F32: _VOP3Op_V_CMPX_NGT_F32,
-  VOP3Op.V_CMPX_NLE_F32: _VOP3Op_V_CMPX_NLE_F32,
-  VOP3Op.V_CMPX_NEQ_F32: _VOP3Op_V_CMPX_NEQ_F32,
-  VOP3Op.V_CMPX_NLT_F32: _VOP3Op_V_CMPX_NLT_F32,
-  VOP3Op.V_CMPX_LT_F64: _VOP3Op_V_CMPX_LT_F64,
-  VOP3Op.V_CMPX_EQ_F64: _VOP3Op_V_CMPX_EQ_F64,
-  VOP3Op.V_CMPX_LE_F64: _VOP3Op_V_CMPX_LE_F64,
-  VOP3Op.V_CMPX_GT_F64: _VOP3Op_V_CMPX_GT_F64,
-  VOP3Op.V_CMPX_LG_F64: _VOP3Op_V_CMPX_LG_F64,
-  VOP3Op.V_CMPX_GE_F64: _VOP3Op_V_CMPX_GE_F64,
-  VOP3Op.V_CMPX_O_F64: _VOP3Op_V_CMPX_O_F64,
-  VOP3Op.V_CMPX_U_F64: _VOP3Op_V_CMPX_U_F64,
-  VOP3Op.V_CMPX_NGE_F64: _VOP3Op_V_CMPX_NGE_F64,
-  VOP3Op.V_CMPX_NLG_F64: _VOP3Op_V_CMPX_NLG_F64,
-  VOP3Op.V_CMPX_NGT_F64: _VOP3Op_V_CMPX_NGT_F64,
-  VOP3Op.V_CMPX_NLE_F64: _VOP3Op_V_CMPX_NLE_F64,
-  VOP3Op.V_CMPX_NEQ_F64: _VOP3Op_V_CMPX_NEQ_F64,
-  VOP3Op.V_CMPX_NLT_F64: _VOP3Op_V_CMPX_NLT_F64,
-  VOP3Op.V_CMPX_LT_I16: _VOP3Op_V_CMPX_LT_I16,
-  VOP3Op.V_CMPX_EQ_I16: _VOP3Op_V_CMPX_EQ_I16,
-  VOP3Op.V_CMPX_LE_I16: _VOP3Op_V_CMPX_LE_I16,
-  VOP3Op.V_CMPX_GT_I16: _VOP3Op_V_CMPX_GT_I16,
-  VOP3Op.V_CMPX_NE_I16: _VOP3Op_V_CMPX_NE_I16,
-  VOP3Op.V_CMPX_GE_I16: _VOP3Op_V_CMPX_GE_I16,
-  VOP3Op.V_CMPX_LT_U16: _VOP3Op_V_CMPX_LT_U16,
-  VOP3Op.V_CMPX_EQ_U16: _VOP3Op_V_CMPX_EQ_U16,
-  VOP3Op.V_CMPX_LE_U16: _VOP3Op_V_CMPX_LE_U16,
-  VOP3Op.V_CMPX_GT_U16: _VOP3Op_V_CMPX_GT_U16,
-  VOP3Op.V_CMPX_NE_U16: _VOP3Op_V_CMPX_NE_U16,
-  VOP3Op.V_CMPX_GE_U16: _VOP3Op_V_CMPX_GE_U16,
-  VOP3Op.V_CMPX_LT_I32: _VOP3Op_V_CMPX_LT_I32,
-  VOP3Op.V_CMPX_EQ_I32: _VOP3Op_V_CMPX_EQ_I32,
-  VOP3Op.V_CMPX_LE_I32: _VOP3Op_V_CMPX_LE_I32,
-  VOP3Op.V_CMPX_GT_I32: _VOP3Op_V_CMPX_GT_I32,
-  VOP3Op.V_CMPX_NE_I32: _VOP3Op_V_CMPX_NE_I32,
-  VOP3Op.V_CMPX_GE_I32: _VOP3Op_V_CMPX_GE_I32,
-  VOP3Op.V_CMPX_LT_U32: _VOP3Op_V_CMPX_LT_U32,
-  VOP3Op.V_CMPX_EQ_U32: _VOP3Op_V_CMPX_EQ_U32,
-  VOP3Op.V_CMPX_LE_U32: _VOP3Op_V_CMPX_LE_U32,
-  VOP3Op.V_CMPX_GT_U32: _VOP3Op_V_CMPX_GT_U32,
-  VOP3Op.V_CMPX_NE_U32: _VOP3Op_V_CMPX_NE_U32,
-  VOP3Op.V_CMPX_GE_U32: _VOP3Op_V_CMPX_GE_U32,
-  VOP3Op.V_CMPX_LT_I64: _VOP3Op_V_CMPX_LT_I64,
-  VOP3Op.V_CMPX_EQ_I64: _VOP3Op_V_CMPX_EQ_I64,
-  VOP3Op.V_CMPX_LE_I64: _VOP3Op_V_CMPX_LE_I64,
-  VOP3Op.V_CMPX_GT_I64: _VOP3Op_V_CMPX_GT_I64,
-  VOP3Op.V_CMPX_NE_I64: _VOP3Op_V_CMPX_NE_I64,
-  VOP3Op.V_CMPX_GE_I64: _VOP3Op_V_CMPX_GE_I64,
-  VOP3Op.V_CMPX_LT_U64: _VOP3Op_V_CMPX_LT_U64,
-  VOP3Op.V_CMPX_EQ_U64: _VOP3Op_V_CMPX_EQ_U64,
-  VOP3Op.V_CMPX_LE_U64: _VOP3Op_V_CMPX_LE_U64,
-  VOP3Op.V_CMPX_GT_U64: _VOP3Op_V_CMPX_GT_U64,
-  VOP3Op.V_CMPX_NE_U64: _VOP3Op_V_CMPX_NE_U64,
-  VOP3Op.V_CMPX_GE_U64: _VOP3Op_V_CMPX_GE_U64,
-  VOP3Op.V_CMPX_CLASS_F16: _VOP3Op_V_CMPX_CLASS_F16,
-  VOP3Op.V_CMPX_CLASS_F32: _VOP3Op_V_CMPX_CLASS_F32,
-  VOP3Op.V_CMPX_CLASS_F64: _VOP3Op_V_CMPX_CLASS_F64,
-  VOP3Op.V_MOV_B32: _VOP3Op_V_MOV_B32,
-  VOP3Op.V_READFIRSTLANE_B32: _VOP3Op_V_READFIRSTLANE_B32,
-  VOP3Op.V_CVT_I32_F64: _VOP3Op_V_CVT_I32_F64,
-  VOP3Op.V_CVT_F64_I32: _VOP3Op_V_CVT_F64_I32,
-  VOP3Op.V_CVT_F32_I32: _VOP3Op_V_CVT_F32_I32,
-  VOP3Op.V_CVT_F32_U32: _VOP3Op_V_CVT_F32_U32,
-  VOP3Op.V_CVT_U32_F32: _VOP3Op_V_CVT_U32_F32,
-  VOP3Op.V_CVT_I32_F32: _VOP3Op_V_CVT_I32_F32,
-  VOP3Op.V_CVT_F16_F32: _VOP3Op_V_CVT_F16_F32,
-  VOP3Op.V_CVT_F32_F16: _VOP3Op_V_CVT_F32_F16,
-  VOP3Op.V_CVT_NEAREST_I32_F32: _VOP3Op_V_CVT_NEAREST_I32_F32,
-  VOP3Op.V_CVT_FLOOR_I32_F32: _VOP3Op_V_CVT_FLOOR_I32_F32,
-  VOP3Op.V_CVT_F32_F64: _VOP3Op_V_CVT_F32_F64,
-  VOP3Op.V_CVT_F64_F32: _VOP3Op_V_CVT_F64_F32,
-  VOP3Op.V_CVT_F32_UBYTE0: _VOP3Op_V_CVT_F32_UBYTE0,
-  VOP3Op.V_CVT_F32_UBYTE1: _VOP3Op_V_CVT_F32_UBYTE1,
-  VOP3Op.V_CVT_F32_UBYTE2: _VOP3Op_V_CVT_F32_UBYTE2,
-  VOP3Op.V_CVT_F32_UBYTE3: _VOP3Op_V_CVT_F32_UBYTE3,
-  VOP3Op.V_CVT_U32_F64: _VOP3Op_V_CVT_U32_F64,
-  VOP3Op.V_CVT_F64_U32: _VOP3Op_V_CVT_F64_U32,
-  VOP3Op.V_TRUNC_F64: _VOP3Op_V_TRUNC_F64,
-  VOP3Op.V_CEIL_F64: _VOP3Op_V_CEIL_F64,
-  VOP3Op.V_RNDNE_F64: _VOP3Op_V_RNDNE_F64,
-  VOP3Op.V_FLOOR_F64: _VOP3Op_V_FLOOR_F64,
-  VOP3Op.V_MOV_B16: _VOP3Op_V_MOV_B16,
-  VOP3Op.V_FRACT_F32: _VOP3Op_V_FRACT_F32,
-  VOP3Op.V_TRUNC_F32: _VOP3Op_V_TRUNC_F32,
-  VOP3Op.V_CEIL_F32: _VOP3Op_V_CEIL_F32,
-  VOP3Op.V_RNDNE_F32: _VOP3Op_V_RNDNE_F32,
-  VOP3Op.V_FLOOR_F32: _VOP3Op_V_FLOOR_F32,
-  VOP3Op.V_EXP_F32: _VOP3Op_V_EXP_F32,
-  VOP3Op.V_LOG_F32: _VOP3Op_V_LOG_F32,
-  VOP3Op.V_RCP_F32: _VOP3Op_V_RCP_F32,
-  VOP3Op.V_RCP_IFLAG_F32: _VOP3Op_V_RCP_IFLAG_F32,
-  VOP3Op.V_RSQ_F32: _VOP3Op_V_RSQ_F32,
-  VOP3Op.V_RCP_F64: _VOP3Op_V_RCP_F64,
-  VOP3Op.V_RSQ_F64: _VOP3Op_V_RSQ_F64,
-  VOP3Op.V_SQRT_F32: _VOP3Op_V_SQRT_F32,
-  VOP3Op.V_SQRT_F64: _VOP3Op_V_SQRT_F64,
-  VOP3Op.V_SIN_F32: _VOP3Op_V_SIN_F32,
-  VOP3Op.V_COS_F32: _VOP3Op_V_COS_F32,
-  VOP3Op.V_NOT_B32: _VOP3Op_V_NOT_B32,
-  VOP3Op.V_BFREV_B32: _VOP3Op_V_BFREV_B32,
-  VOP3Op.V_CLZ_I32_U32: _VOP3Op_V_CLZ_I32_U32,
-  VOP3Op.V_CTZ_I32_B32: _VOP3Op_V_CTZ_I32_B32,
-  VOP3Op.V_CLS_I32: _VOP3Op_V_CLS_I32,
-  VOP3Op.V_FREXP_EXP_I32_F64: _VOP3Op_V_FREXP_EXP_I32_F64,
-  VOP3Op.V_FREXP_MANT_F64: _VOP3Op_V_FREXP_MANT_F64,
-  VOP3Op.V_FRACT_F64: _VOP3Op_V_FRACT_F64,
-  VOP3Op.V_FREXP_EXP_I32_F32: _VOP3Op_V_FREXP_EXP_I32_F32,
-  VOP3Op.V_FREXP_MANT_F32: _VOP3Op_V_FREXP_MANT_F32,
-  VOP3Op.V_MOVRELS_B32: _VOP3Op_V_MOVRELS_B32,
-  VOP3Op.V_CVT_F16_U16: _VOP3Op_V_CVT_F16_U16,
-  VOP3Op.V_CVT_F16_I16: _VOP3Op_V_CVT_F16_I16,
-  VOP3Op.V_CVT_U16_F16: _VOP3Op_V_CVT_U16_F16,
-  VOP3Op.V_CVT_I16_F16: _VOP3Op_V_CVT_I16_F16,
-  VOP3Op.V_RCP_F16: _VOP3Op_V_RCP_F16,
-  VOP3Op.V_SQRT_F16: _VOP3Op_V_SQRT_F16,
-  VOP3Op.V_RSQ_F16: _VOP3Op_V_RSQ_F16,
-  VOP3Op.V_LOG_F16: _VOP3Op_V_LOG_F16,
-  VOP3Op.V_EXP_F16: _VOP3Op_V_EXP_F16,
-  VOP3Op.V_FREXP_MANT_F16: _VOP3Op_V_FREXP_MANT_F16,
-  VOP3Op.V_FREXP_EXP_I16_F16: _VOP3Op_V_FREXP_EXP_I16_F16,
-  VOP3Op.V_FLOOR_F16: _VOP3Op_V_FLOOR_F16,
-  VOP3Op.V_CEIL_F16: _VOP3Op_V_CEIL_F16,
-  VOP3Op.V_TRUNC_F16: _VOP3Op_V_TRUNC_F16,
-  VOP3Op.V_RNDNE_F16: _VOP3Op_V_RNDNE_F16,
-  VOP3Op.V_FRACT_F16: _VOP3Op_V_FRACT_F16,
-  VOP3Op.V_SIN_F16: _VOP3Op_V_SIN_F16,
-  VOP3Op.V_COS_F16: _VOP3Op_V_COS_F16,
-  VOP3Op.V_SAT_PK_U8_I16: _VOP3Op_V_SAT_PK_U8_I16,
-  VOP3Op.V_CVT_NORM_I16_F16: _VOP3Op_V_CVT_NORM_I16_F16,
-  VOP3Op.V_CVT_NORM_U16_F16: _VOP3Op_V_CVT_NORM_U16_F16,
-  VOP3Op.V_NOT_B16: _VOP3Op_V_NOT_B16,
-  VOP3Op.V_CVT_I32_I16: _VOP3Op_V_CVT_I32_I16,
-  VOP3Op.V_CVT_U32_U16: _VOP3Op_V_CVT_U32_U16,
-  VOP3Op.V_CVT_F32_FP8: _VOP3Op_V_CVT_F32_FP8,
-  VOP3Op.V_CVT_F32_BF8: _VOP3Op_V_CVT_F32_BF8,
-  VOP3Op.V_CVT_PK_F32_FP8: _VOP3Op_V_CVT_PK_F32_FP8,
-  VOP3Op.V_CVT_PK_F32_BF8: _VOP3Op_V_CVT_PK_F32_BF8,
-  VOP3Op.V_CNDMASK_B32: _VOP3Op_V_CNDMASK_B32,
-  VOP3Op.V_ADD_F64: _VOP3Op_V_ADD_F64,
-  VOP3Op.V_ADD_F32: _VOP3Op_V_ADD_F32,
-  VOP3Op.V_SUB_F32: _VOP3Op_V_SUB_F32,
-  VOP3Op.V_SUBREV_F32: _VOP3Op_V_SUBREV_F32,
-  VOP3Op.V_MUL_F64: _VOP3Op_V_MUL_F64,
-  VOP3Op.V_MUL_DX9_ZERO_F32: _VOP3Op_V_MUL_DX9_ZERO_F32,
-  VOP3Op.V_MUL_F32: _VOP3Op_V_MUL_F32,
-  VOP3Op.V_MUL_I32_I24: _VOP3Op_V_MUL_I32_I24,
-  VOP3Op.V_MUL_HI_I32_I24: _VOP3Op_V_MUL_HI_I32_I24,
-  VOP3Op.V_MUL_U32_U24: _VOP3Op_V_MUL_U32_U24,
-  VOP3Op.V_MUL_HI_U32_U24: _VOP3Op_V_MUL_HI_U32_U24,
-  VOP3Op.V_MIN_NUM_F64: _VOP3Op_V_MIN_NUM_F64,
-  VOP3Op.V_MAX_NUM_F64: _VOP3Op_V_MAX_NUM_F64,
-  VOP3Op.V_MIN_I32: _VOP3Op_V_MIN_I32,
-  VOP3Op.V_MAX_I32: _VOP3Op_V_MAX_I32,
-  VOP3Op.V_MIN_U32: _VOP3Op_V_MIN_U32,
-  VOP3Op.V_MAX_U32: _VOP3Op_V_MAX_U32,
-  VOP3Op.V_MIN_NUM_F32: _VOP3Op_V_MIN_NUM_F32,
-  VOP3Op.V_MAX_NUM_F32: _VOP3Op_V_MAX_NUM_F32,
-  VOP3Op.V_LSHLREV_B32: _VOP3Op_V_LSHLREV_B32,
-  VOP3Op.V_LSHRREV_B32: _VOP3Op_V_LSHRREV_B32,
-  VOP3Op.V_ASHRREV_I32: _VOP3Op_V_ASHRREV_I32,
-  VOP3Op.V_AND_B32: _VOP3Op_V_AND_B32,
-  VOP3Op.V_OR_B32: _VOP3Op_V_OR_B32,
-  VOP3Op.V_XOR_B32: _VOP3Op_V_XOR_B32,
-  VOP3Op.V_XNOR_B32: _VOP3Op_V_XNOR_B32,
-  VOP3Op.V_LSHLREV_B64: _VOP3Op_V_LSHLREV_B64,
-  VOP3Op.V_ADD_NC_U32: _VOP3Op_V_ADD_NC_U32,
-  VOP3Op.V_SUB_NC_U32: _VOP3Op_V_SUB_NC_U32,
-  VOP3Op.V_SUBREV_NC_U32: _VOP3Op_V_SUBREV_NC_U32,
-  VOP3Op.V_FMAC_F32: _VOP3Op_V_FMAC_F32,
-  VOP3Op.V_CVT_PK_RTZ_F16_F32: _VOP3Op_V_CVT_PK_RTZ_F16_F32,
-  VOP3Op.V_MIN_NUM_F16: _VOP3Op_V_MIN_NUM_F16,
-  VOP3Op.V_MAX_NUM_F16: _VOP3Op_V_MAX_NUM_F16,
-  VOP3Op.V_ADD_F16: _VOP3Op_V_ADD_F16,
-  VOP3Op.V_SUB_F16: _VOP3Op_V_SUB_F16,
-  VOP3Op.V_SUBREV_F16: _VOP3Op_V_SUBREV_F16,
-  VOP3Op.V_MUL_F16: _VOP3Op_V_MUL_F16,
-  VOP3Op.V_FMAC_F16: _VOP3Op_V_FMAC_F16,
-  VOP3Op.V_LDEXP_F16: _VOP3Op_V_LDEXP_F16,
-  VOP3Op.V_FMA_DX9_ZERO_F32: _VOP3Op_V_FMA_DX9_ZERO_F32,
-  VOP3Op.V_MAD_I32_I24: _VOP3Op_V_MAD_I32_I24,
-  VOP3Op.V_MAD_U32_U24: _VOP3Op_V_MAD_U32_U24,
-  VOP3Op.V_CUBEID_F32: _VOP3Op_V_CUBEID_F32,
-  VOP3Op.V_CUBESC_F32: _VOP3Op_V_CUBESC_F32,
-  VOP3Op.V_CUBETC_F32: _VOP3Op_V_CUBETC_F32,
-  VOP3Op.V_CUBEMA_F32: _VOP3Op_V_CUBEMA_F32,
-  VOP3Op.V_BFE_U32: _VOP3Op_V_BFE_U32,
-  VOP3Op.V_BFE_I32: _VOP3Op_V_BFE_I32,
-  VOP3Op.V_BFI_B32: _VOP3Op_V_BFI_B32,
-  VOP3Op.V_FMA_F32: _VOP3Op_V_FMA_F32,
-  VOP3Op.V_FMA_F64: _VOP3Op_V_FMA_F64,
-  VOP3Op.V_LERP_U8: _VOP3Op_V_LERP_U8,
-  VOP3Op.V_ALIGNBIT_B32: _VOP3Op_V_ALIGNBIT_B32,
-  VOP3Op.V_ALIGNBYTE_B32: _VOP3Op_V_ALIGNBYTE_B32,
-  VOP3Op.V_MULLIT_F32: _VOP3Op_V_MULLIT_F32,
-  VOP3Op.V_MIN3_I32: _VOP3Op_V_MIN3_I32,
-  VOP3Op.V_MIN3_U32: _VOP3Op_V_MIN3_U32,
-  VOP3Op.V_MAX3_I32: _VOP3Op_V_MAX3_I32,
-  VOP3Op.V_MAX3_U32: _VOP3Op_V_MAX3_U32,
-  VOP3Op.V_MED3_I32: _VOP3Op_V_MED3_I32,
-  VOP3Op.V_MED3_U32: _VOP3Op_V_MED3_U32,
-  VOP3Op.V_SAD_U8: _VOP3Op_V_SAD_U8,
-  VOP3Op.V_SAD_HI_U8: _VOP3Op_V_SAD_HI_U8,
-  VOP3Op.V_SAD_U16: _VOP3Op_V_SAD_U16,
-  VOP3Op.V_SAD_U32: _VOP3Op_V_SAD_U32,
-  VOP3Op.V_CVT_PK_U8_F32: _VOP3Op_V_CVT_PK_U8_F32,
-  VOP3Op.V_DIV_FIXUP_F32: _VOP3Op_V_DIV_FIXUP_F32,
-  VOP3Op.V_DIV_FIXUP_F64: _VOP3Op_V_DIV_FIXUP_F64,
-  VOP3Op.V_MIN3_NUM_F32: _VOP3Op_V_MIN3_NUM_F32,
-  VOP3Op.V_MAX3_NUM_F32: _VOP3Op_V_MAX3_NUM_F32,
-  VOP3Op.V_MIN3_NUM_F16: _VOP3Op_V_MIN3_NUM_F16,
-  VOP3Op.V_MAX3_NUM_F16: _VOP3Op_V_MAX3_NUM_F16,
-  VOP3Op.V_MINIMUM3_F32: _VOP3Op_V_MINIMUM3_F32,
-  VOP3Op.V_MAXIMUM3_F32: _VOP3Op_V_MAXIMUM3_F32,
-  VOP3Op.V_MINIMUM3_F16: _VOP3Op_V_MINIMUM3_F16,
-  VOP3Op.V_MAXIMUM3_F16: _VOP3Op_V_MAXIMUM3_F16,
-  VOP3Op.V_MED3_NUM_F32: _VOP3Op_V_MED3_NUM_F32,
-  VOP3Op.V_MED3_NUM_F16: _VOP3Op_V_MED3_NUM_F16,
-  VOP3Op.V_DIV_FMAS_F32: _VOP3Op_V_DIV_FMAS_F32,
-  VOP3Op.V_DIV_FMAS_F64: _VOP3Op_V_DIV_FMAS_F64,
-  VOP3Op.V_MSAD_U8: _VOP3Op_V_MSAD_U8,
-  VOP3Op.V_QSAD_PK_U16_U8: _VOP3Op_V_QSAD_PK_U16_U8,
-  VOP3Op.V_MQSAD_PK_U16_U8: _VOP3Op_V_MQSAD_PK_U16_U8,
-  VOP3Op.V_MQSAD_U32_U8: _VOP3Op_V_MQSAD_U32_U8,
-  VOP3Op.V_XOR3_B32: _VOP3Op_V_XOR3_B32,
-  VOP3Op.V_MAD_U16: _VOP3Op_V_MAD_U16,
-  VOP3Op.V_PERM_B32: _VOP3Op_V_PERM_B32,
-  VOP3Op.V_XAD_U32: _VOP3Op_V_XAD_U32,
-  VOP3Op.V_LSHL_ADD_U32: _VOP3Op_V_LSHL_ADD_U32,
-  VOP3Op.V_ADD_LSHL_U32: _VOP3Op_V_ADD_LSHL_U32,
-  VOP3Op.V_FMA_F16: _VOP3Op_V_FMA_F16,
-  VOP3Op.V_MIN3_I16: _VOP3Op_V_MIN3_I16,
-  VOP3Op.V_MIN3_U16: _VOP3Op_V_MIN3_U16,
-  VOP3Op.V_MAX3_I16: _VOP3Op_V_MAX3_I16,
-  VOP3Op.V_MAX3_U16: _VOP3Op_V_MAX3_U16,
-  VOP3Op.V_MED3_I16: _VOP3Op_V_MED3_I16,
-  VOP3Op.V_MED3_U16: _VOP3Op_V_MED3_U16,
-  VOP3Op.V_MAD_I16: _VOP3Op_V_MAD_I16,
-  VOP3Op.V_DIV_FIXUP_F16: _VOP3Op_V_DIV_FIXUP_F16,
-  VOP3Op.V_ADD3_U32: _VOP3Op_V_ADD3_U32,
-  VOP3Op.V_LSHL_OR_B32: _VOP3Op_V_LSHL_OR_B32,
-  VOP3Op.V_AND_OR_B32: _VOP3Op_V_AND_OR_B32,
-  VOP3Op.V_OR3_B32: _VOP3Op_V_OR3_B32,
-  VOP3Op.V_MAD_U32_U16: _VOP3Op_V_MAD_U32_U16,
-  VOP3Op.V_MAD_I32_I16: _VOP3Op_V_MAD_I32_I16,
-  VOP3Op.V_CNDMASK_B16: _VOP3Op_V_CNDMASK_B16,
-  VOP3Op.V_MAXMIN_U32: _VOP3Op_V_MAXMIN_U32,
-  VOP3Op.V_MINMAX_U32: _VOP3Op_V_MINMAX_U32,
-  VOP3Op.V_MAXMIN_I32: _VOP3Op_V_MAXMIN_I32,
-  VOP3Op.V_MINMAX_I32: _VOP3Op_V_MINMAX_I32,
-  VOP3Op.V_DOT2_F16_F16: _VOP3Op_V_DOT2_F16_F16,
-  VOP3Op.V_DOT2_BF16_BF16: _VOP3Op_V_DOT2_BF16_BF16,
-  VOP3Op.V_MINMAX_NUM_F32: _VOP3Op_V_MINMAX_NUM_F32,
-  VOP3Op.V_MAXMIN_NUM_F32: _VOP3Op_V_MAXMIN_NUM_F32,
-  VOP3Op.V_MINMAX_NUM_F16: _VOP3Op_V_MINMAX_NUM_F16,
-  VOP3Op.V_MAXMIN_NUM_F16: _VOP3Op_V_MAXMIN_NUM_F16,
-  VOP3Op.V_MINIMUMMAXIMUM_F32: _VOP3Op_V_MINIMUMMAXIMUM_F32,
-  VOP3Op.V_MAXIMUMMINIMUM_F32: _VOP3Op_V_MAXIMUMMINIMUM_F32,
-  VOP3Op.V_MINIMUMMAXIMUM_F16: _VOP3Op_V_MINIMUMMAXIMUM_F16,
-  VOP3Op.V_MAXIMUMMINIMUM_F16: _VOP3Op_V_MAXIMUMMINIMUM_F16,
-  VOP3Op.V_S_EXP_F32: _VOP3Op_V_S_EXP_F32,
-  VOP3Op.V_S_EXP_F16: _VOP3Op_V_S_EXP_F16,
-  VOP3Op.V_S_LOG_F32: _VOP3Op_V_S_LOG_F32,
-  VOP3Op.V_S_LOG_F16: _VOP3Op_V_S_LOG_F16,
-  VOP3Op.V_S_RCP_F32: _VOP3Op_V_S_RCP_F32,
-  VOP3Op.V_S_RCP_F16: _VOP3Op_V_S_RCP_F16,
-  VOP3Op.V_S_RSQ_F32: _VOP3Op_V_S_RSQ_F32,
-  VOP3Op.V_S_RSQ_F16: _VOP3Op_V_S_RSQ_F16,
-  VOP3Op.V_S_SQRT_F32: _VOP3Op_V_S_SQRT_F32,
-  VOP3Op.V_S_SQRT_F16: _VOP3Op_V_S_SQRT_F16,
-  VOP3Op.V_ADD_NC_U16: _VOP3Op_V_ADD_NC_U16,
-  VOP3Op.V_SUB_NC_U16: _VOP3Op_V_SUB_NC_U16,
-  VOP3Op.V_MUL_LO_U16: _VOP3Op_V_MUL_LO_U16,
-  VOP3Op.V_CVT_PK_I16_F32: _VOP3Op_V_CVT_PK_I16_F32,
-  VOP3Op.V_CVT_PK_U16_F32: _VOP3Op_V_CVT_PK_U16_F32,
-  VOP3Op.V_MAX_U16: _VOP3Op_V_MAX_U16,
-  VOP3Op.V_MAX_I16: _VOP3Op_V_MAX_I16,
-  VOP3Op.V_MIN_U16: _VOP3Op_V_MIN_U16,
-  VOP3Op.V_MIN_I16: _VOP3Op_V_MIN_I16,
-  VOP3Op.V_ADD_NC_I16: _VOP3Op_V_ADD_NC_I16,
-  VOP3Op.V_SUB_NC_I16: _VOP3Op_V_SUB_NC_I16,
-  VOP3Op.V_PACK_B32_F16: _VOP3Op_V_PACK_B32_F16,
-  VOP3Op.V_CVT_PK_NORM_I16_F16: _VOP3Op_V_CVT_PK_NORM_I16_F16,
-  VOP3Op.V_CVT_PK_NORM_U16_F16: _VOP3Op_V_CVT_PK_NORM_U16_F16,
-  VOP3Op.V_LDEXP_F32: _VOP3Op_V_LDEXP_F32,
-  VOP3Op.V_BFM_B32: _VOP3Op_V_BFM_B32,
-  VOP3Op.V_BCNT_U32_B32: _VOP3Op_V_BCNT_U32_B32,
-  VOP3Op.V_CVT_PK_NORM_I16_F32: _VOP3Op_V_CVT_PK_NORM_I16_F32,
-  VOP3Op.V_CVT_PK_NORM_U16_F32: _VOP3Op_V_CVT_PK_NORM_U16_F32,
-  VOP3Op.V_CVT_PK_U16_U32: _VOP3Op_V_CVT_PK_U16_U32,
-  VOP3Op.V_CVT_PK_I16_I32: _VOP3Op_V_CVT_PK_I16_I32,
-  VOP3Op.V_SUB_NC_I32: _VOP3Op_V_SUB_NC_I32,
-  VOP3Op.V_ADD_NC_I32: _VOP3Op_V_ADD_NC_I32,
-  VOP3Op.V_LDEXP_F64: _VOP3Op_V_LDEXP_F64,
-  VOP3Op.V_MUL_LO_U32: _VOP3Op_V_MUL_LO_U32,
-  VOP3Op.V_MUL_HI_U32: _VOP3Op_V_MUL_HI_U32,
-  VOP3Op.V_MUL_HI_I32: _VOP3Op_V_MUL_HI_I32,
-  VOP3Op.V_TRIG_PREOP_F64: _VOP3Op_V_TRIG_PREOP_F64,
-  VOP3Op.V_LSHLREV_B16: _VOP3Op_V_LSHLREV_B16,
-  VOP3Op.V_LSHRREV_B16: _VOP3Op_V_LSHRREV_B16,
-  VOP3Op.V_ASHRREV_I16: _VOP3Op_V_ASHRREV_I16,
-  VOP3Op.V_LSHRREV_B64: _VOP3Op_V_LSHRREV_B64,
-  VOP3Op.V_ASHRREV_I64: _VOP3Op_V_ASHRREV_I64,
-  VOP3Op.V_MINIMUM_F64: _VOP3Op_V_MINIMUM_F64,
-  VOP3Op.V_MAXIMUM_F64: _VOP3Op_V_MAXIMUM_F64,
-  VOP3Op.V_READLANE_B32: _VOP3Op_V_READLANE_B32,
-  VOP3Op.V_AND_B16: _VOP3Op_V_AND_B16,
-  VOP3Op.V_OR_B16: _VOP3Op_V_OR_B16,
-  VOP3Op.V_XOR_B16: _VOP3Op_V_XOR_B16,
-  VOP3Op.V_MINIMUM_F32: _VOP3Op_V_MINIMUM_F32,
-  VOP3Op.V_MAXIMUM_F32: _VOP3Op_V_MAXIMUM_F32,
-  VOP3Op.V_MINIMUM_F16: _VOP3Op_V_MINIMUM_F16,
-  VOP3Op.V_MAXIMUM_F16: _VOP3Op_V_MAXIMUM_F16,
-}
-
-def _VOP3SDOp_V_ADD_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg((S0.u32) + (S1.u32) + VCC.u64[laneId])
-  VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_SUB_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg(S0.u32 - S1.u32 - VCC.u64[laneId])
-  VCC.u64[laneId] = ((1) if ((S1.u32) + VCC.u64[laneId] > (S0.u32)) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_SUBREV_CO_CI_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg(S1.u32 - S0.u32 - VCC.u64[laneId])
-  VCC.u64[laneId] = ((1) if ((S0.u32) + VCC.u64[laneId] > (S1.u32)) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_DIV_SCALE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(s0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  VCC = Reg(0x0)
-  if ((F(S2.f32) == 0.0) or (F(S1.f32) == 0.0)):
-    VCC = Reg(0x1); D0.f32 = float("nan")
-  elif exponent(S2.f32) - exponent(S1.f32) >= 96:
-    VCC = Reg(0x1)
-    if S0.f32 == S1.f32:
-      D0.f32 = ldexp(S0.f32, 64)
-  elif False:
-    pass
-  elif ((1.0 / F(S1.f32) == DENORM.f64) and (S2.f32 / S1.f32 == DENORM.f32)):
-    VCC = Reg(0x1)
-    if S0.f32 == S1.f32:
-      D0.f32 = ldexp(S0.f32, 64)
-  elif 1.0 / F(S1.f32) == DENORM.f64:
-    D0.f32 = ldexp(S0.f32, -64)
-  elif S2.f32 / S1.f32 == DENORM.f32:
-    VCC = Reg(0x1)
-  elif exponent(S2.f32) <= 23:
-    VCC = Reg(0x1); D0.f32 = ldexp(S0.f32, 64)
-  if S1.f32 == DENORM.f32:
-    D0.f32 = float("nan")
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_DIV_SCALE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(s0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  VCC = Reg(0x0)
-  if ((S2.f64 == 0.0) or (S1.f64 == 0.0)):
-    VCC = Reg(0x1); D0.f64 = float("nan")
-  elif exponent(S2.f64) - exponent(S1.f64) >= 768:
-    VCC = Reg(0x1)
-    if S0.f64 == S1.f64:
-      D0.f64 = ldexp(S0.f64, 128)
-  elif False:
-    pass
-  elif ((1.0 / S1.f64 == DENORM.f64) and (S2.f64 / S1.f64 == DENORM.f64)):
-    VCC = Reg(0x1)
-    if S0.f64 == S1.f64:
-      D0.f64 = ldexp(S0.f64, 128)
-  elif 1.0 / S1.f64 == DENORM.f64:
-    D0.f64 = ldexp(S0.f64, -128)
-  elif S2.f64 / S1.f64 == DENORM.f64:
-    VCC = Reg(0x1)
-  elif exponent(S2.f64) <= 53:
-    D0.f64 = ldexp(S0.f64, 128)
-  if S1.f64 == DENORM.f64:
-    D0.f64 = float("nan")
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_MAD_CO_U64_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); D1=Reg(0)
-  # --- compiled pseudocode ---
-  _full = ((S0.u32) * (S1.u32) + (S2.u64))
-  D0.u64 = int(_full) & 0xffffffffffffffff
-  D1 = Reg((int(_full) >> 64) & 1)
-  return {'D0': D0._val, 'D1': D1._val}
-
-def _VOP3SDOp_V_MAD_CO_I64_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); D1=Reg(0)
-  # --- compiled pseudocode ---
-  _full = ((S0.i32) * (S1.i32) + (S2.i64))
-  D0.u64 = int(_full) & 0xffffffffffffffff
-  D1 = Reg((int(_full) >> 64) & 1)
-  return {'D0': D0._val, 'D1': D1._val}
-
-def _VOP3SDOp_V_ADD_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg((S0.u32) + (S1.u32))
-  VCC.u64[laneId] = ((1) if (tmp >= 0x100000000) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_SUB_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg(S0.u32 - S1.u32)
-  VCC.u64[laneId] = ((1) if (S1.u32 > S0.u32) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-def _VOP3SDOp_V_SUBREV_CO_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc)
-  # --- compiled pseudocode ---
-  tmp = Reg(S1.u32 - S0.u32)
-  VCC.u64[laneId] = ((1) if (S0.u32 > S1.u32) else (0))
-  D0.u32 = tmp.u32
-  return {'D0': D0._val, 'VCC': VCC._val}
-
-VOP3SDOp_FUNCTIONS = {
-  VOP3SDOp.V_ADD_CO_CI_U32: _VOP3SDOp_V_ADD_CO_CI_U32,
-  VOP3SDOp.V_SUB_CO_CI_U32: _VOP3SDOp_V_SUB_CO_CI_U32,
-  VOP3SDOp.V_SUBREV_CO_CI_U32: _VOP3SDOp_V_SUBREV_CO_CI_U32,
-  VOP3SDOp.V_DIV_SCALE_F32: _VOP3SDOp_V_DIV_SCALE_F32,
-  VOP3SDOp.V_DIV_SCALE_F64: _VOP3SDOp_V_DIV_SCALE_F64,
-  VOP3SDOp.V_MAD_CO_U64_U32: _VOP3SDOp_V_MAD_CO_U64_U32,
-  VOP3SDOp.V_MAD_CO_I64_I32: _VOP3SDOp_V_MAD_CO_I64_I32,
-  VOP3SDOp.V_ADD_CO_U32: _VOP3SDOp_V_ADD_CO_U32,
-  VOP3SDOp.V_SUB_CO_U32: _VOP3SDOp_V_SUB_CO_U32,
-  VOP3SDOp.V_SUBREV_CO_U32: _VOP3SDOp_V_SUBREV_CO_U32,
-}
-
-def _VOP3POp_V_PK_MAD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16
-  tmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MUL_LO_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16
-  tmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_ADD_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16
-  tmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_SUB_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16
-  tmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_LSHLREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32)
-  tmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_LSHRREV_B16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32)
-  tmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_ASHRREV_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32)
-  tmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32)
-  D0.b32 = tmp.b32
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MAX_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].i16 = ((S0[15 : 0].i16) if (S0[15 : 0].i16 >= S1[15 : 0].i16) else (S1[15 : 0].i16))
-  tmp[31 : 16].i16 = ((S0[31 : 16].i16) if (S0[31 : 16].i16 >= S1[31 : 16].i16) else (S1[31 : 16].i16))
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MIN_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].i16 = ((S0[15 : 0].i16) if (S0[15 : 0].i16 < S1[15 : 0].i16) else (S1[15 : 0].i16))
-  tmp[31 : 16].i16 = ((S0[31 : 16].i16) if (S0[31 : 16].i16 < S1[31 : 16].i16) else (S1[31 : 16].i16))
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MAD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16
-  tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_ADD_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16
-  tmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_SUB_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16
-  tmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MAX_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].u16 = ((S0[15 : 0].u16) if (S0[15 : 0].u16 >= S1[15 : 0].u16) else (S1[15 : 0].u16))
-  tmp[31 : 16].u16 = ((S0[31 : 16].u16) if (S0[31 : 16].u16 >= S1[31 : 16].u16) else (S1[31 : 16].u16))
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MIN_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].u16 = ((S0[15 : 0].u16) if (S0[15 : 0].u16 < S1[15 : 0].u16) else (S1[15 : 0].u16))
-  tmp[31 : 16].u16 = ((S0[31 : 16].u16) if (S0[31 : 16].u16 < S1[31 : 16].u16) else (S1[31 : 16].u16))
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_FMA_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16)
-  tmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16)
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_ADD_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16
-  tmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_PK_MUL_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0)
-  # --- compiled pseudocode ---
-  tmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16
-  tmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16
-  D0.b32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT2_F32_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.f32)
-  tmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16)
-  tmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16)
-  D0.f32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT4_U32_U8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.u32)
-  tmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8)
-  tmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8)
-  tmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8)
-  tmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8)
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT8_U32_U4(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.u32)
-  tmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4)
-  tmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4)
-  tmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4)
-  tmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4)
-  tmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4)
-  tmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4)
-  tmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4)
-  tmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4)
-  D0.u32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT2_F32_BF16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode
--- - tmp = Reg(S2.f32) - tmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16) - tmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16) - D0.f32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MIN_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = v_min_num_f16(S0[15 : 0].f16, S1[15 : 0].f16) - tmp[31 : 16].f16 = v_min_num_f16(S0[31 : 16].f16, S1[31 : 16].f16) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MAX_NUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = v_max_num_f16(S0[15 : 0].f16, S1[15 : 0].f16) - tmp[31 : 16].f16 = v_max_num_f16(S0[31 : 16].f16, S1[31 : 16].f16) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MINIMUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = v_minimum_f16(S0[15 : 0].f16, S1[15 : 0].f16) - tmp[31 : 16].f16 = v_minimum_f16(S0[31 : 16].f16, S1[31 : 16].f16) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_PK_MAXIMUM_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); tmp=Reg(0) - # --- compiled pseudocode --- - tmp[15 : 0].f16 = v_maximum_f16(S0[15 : 0].f16, S1[15 : 0].f16) - tmp[31 : 16].f16 = v_maximum_f16(S0[31 : 16].f16, S1[31 : 16].f16) - D0.b32 = tmp - return {'D0': D0._val} - -def _VOP3POp_V_FMA_MIX_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)] - # --- compiled pseudocode --- - for i in range(0, int(2)+1): - if not OPSEL_HI.u3[i]: - ins[i] = S[i].f32 - elif OPSEL.u3[i]: - ins[i] = f16_to_f32(S[i][31 : 16].f16) - else: - ins[i] = f16_to_f32(S[i][15 : 0].f16) - D0[31 : 0].f32 = fma(ins[0], ins[1], ins[2]) - return {'D0': D0._val} - -def _VOP3POp_V_FMA_MIXLO_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)] - # --- compiled pseudocode --- - for i in range(0, int(2)+1): - if not OPSEL_HI.u3[i]: - ins[i] = S[i].f32 - elif OPSEL.u3[i]: - ins[i] = f16_to_f32(S[i][31 : 16].f16) - else: - ins[i] = f16_to_f32(S[i][15 : 0].f16) - D0[15 : 0].f16 = f32_to_f16(fma(ins[0], ins[1], ins[2])) - return {'D0': D0._val} - -def _VOP3POp_V_FMA_MIXHI_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0): - S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); S=[S0,S1,S2]; D0=Reg(d0); OPSEL=Reg(opsel); OPSEL_HI=Reg(opsel_hi); ins=[Reg(0),Reg(0),Reg(0)] - # --- compiled pseudocode --- - for i in range(0, int(2)+1): - if not OPSEL_HI.u3[i]: - ins[i] = S[i].f32 - elif OPSEL.u3[i]: - ins[i] = f16_to_f32(S[i][31 : 16].f16) - else: - ins[i] = f16_to_f32(S[i][15 : 0].f16) - D0[31 : 16].f16 = f32_to_f16(fma(ins[0], ins[1], ins[2])) - return {'D0': D0._val} - -def _VOP3POp_V_DOT4_F32_FP8_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, 
VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.f32)
-  tmp += F(S0[7 : 0].fp8) * F(S1[7 : 0].bf8)
-  tmp += F(S0[15 : 8].fp8) * F(S1[15 : 8].bf8)
-  tmp += F(S0[23 : 16].fp8) * F(S1[23 : 16].bf8)
-  tmp += F(S0[31 : 24].fp8) * F(S1[31 : 24].bf8)
-  D0.f32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT4_F32_BF8_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.f32)
-  tmp += F(S0[7 : 0].bf8) * F(S1[7 : 0].fp8)
-  tmp += F(S0[15 : 8].bf8) * F(S1[15 : 8].fp8)
-  tmp += F(S0[23 : 16].bf8) * F(S1[23 : 16].fp8)
-  tmp += F(S0[31 : 24].bf8) * F(S1[31 : 24].fp8)
-  D0.f32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT4_F32_FP8_FP8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.f32)
-  tmp += F(S0[7 : 0].fp8) * F(S1[7 : 0].fp8)
-  tmp += F(S0[15 : 8].fp8) * F(S1[15 : 8].fp8)
-  tmp += F(S0[23 : 16].fp8) * F(S1[23 : 16].fp8)
-  tmp += F(S0[31 : 24].fp8) * F(S1[31 : 24].fp8)
-  D0.f32 = tmp
-  return {'D0': D0._val}
-
-def _VOP3POp_V_DOT4_F32_BF8_BF8(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); S2=Reg(s2); D0=Reg(d0)
-  # --- compiled pseudocode ---
-  tmp = Reg(S2.f32)
-  tmp += F(S0[7 : 0].bf8) * F(S1[7 : 0].bf8)
-  tmp += F(S0[15 : 8].bf8) * F(S1[15 : 8].bf8)
-  tmp += F(S0[23 : 16].bf8) * F(S1[23 : 16].bf8)
-  tmp += F(S0[31 : 24].bf8) * F(S1[31 : 24].bf8)
-  D0.f32 = tmp
-  return {'D0': D0._val}
-
-VOP3POp_FUNCTIONS = {
-  VOP3POp.V_PK_MAD_I16: _VOP3POp_V_PK_MAD_I16,
-  VOP3POp.V_PK_MUL_LO_U16: _VOP3POp_V_PK_MUL_LO_U16,
-  VOP3POp.V_PK_ADD_I16: _VOP3POp_V_PK_ADD_I16,
-  VOP3POp.V_PK_SUB_I16: _VOP3POp_V_PK_SUB_I16,
-  VOP3POp.V_PK_LSHLREV_B16: _VOP3POp_V_PK_LSHLREV_B16,
-  VOP3POp.V_PK_LSHRREV_B16: _VOP3POp_V_PK_LSHRREV_B16,
-  VOP3POp.V_PK_ASHRREV_I16: _VOP3POp_V_PK_ASHRREV_I16,
-  VOP3POp.V_PK_MAX_I16: _VOP3POp_V_PK_MAX_I16,
-  VOP3POp.V_PK_MIN_I16: _VOP3POp_V_PK_MIN_I16,
-  VOP3POp.V_PK_MAD_U16: _VOP3POp_V_PK_MAD_U16,
-  VOP3POp.V_PK_ADD_U16: _VOP3POp_V_PK_ADD_U16,
-  VOP3POp.V_PK_SUB_U16: _VOP3POp_V_PK_SUB_U16,
-  VOP3POp.V_PK_MAX_U16: _VOP3POp_V_PK_MAX_U16,
-  VOP3POp.V_PK_MIN_U16: _VOP3POp_V_PK_MIN_U16,
-  VOP3POp.V_PK_FMA_F16: _VOP3POp_V_PK_FMA_F16,
-  VOP3POp.V_PK_ADD_F16: _VOP3POp_V_PK_ADD_F16,
-  VOP3POp.V_PK_MUL_F16: _VOP3POp_V_PK_MUL_F16,
-  VOP3POp.V_DOT2_F32_F16: _VOP3POp_V_DOT2_F32_F16,
-  VOP3POp.V_DOT4_U32_U8: _VOP3POp_V_DOT4_U32_U8,
-  VOP3POp.V_DOT8_U32_U4: _VOP3POp_V_DOT8_U32_U4,
-  VOP3POp.V_DOT2_F32_BF16: _VOP3POp_V_DOT2_F32_BF16,
-  VOP3POp.V_PK_MIN_NUM_F16: _VOP3POp_V_PK_MIN_NUM_F16,
-  VOP3POp.V_PK_MAX_NUM_F16: _VOP3POp_V_PK_MAX_NUM_F16,
-  VOP3POp.V_PK_MINIMUM_F16: _VOP3POp_V_PK_MINIMUM_F16,
-  VOP3POp.V_PK_MAXIMUM_F16: _VOP3POp_V_PK_MAXIMUM_F16,
-  VOP3POp.V_FMA_MIX_F32: _VOP3POp_V_FMA_MIX_F32,
-  VOP3POp.V_FMA_MIXLO_F16: _VOP3POp_V_FMA_MIXLO_F16,
-  VOP3POp.V_FMA_MIXHI_F16: _VOP3POp_V_FMA_MIXHI_F16,
-  VOP3POp.V_DOT4_F32_FP8_BF8: _VOP3POp_V_DOT4_F32_FP8_BF8,
-  VOP3POp.V_DOT4_F32_BF8_FP8: _VOP3POp_V_DOT4_F32_BF8_FP8,
-  VOP3POp.V_DOT4_F32_FP8_FP8: _VOP3POp_V_DOT4_F32_FP8_FP8,
-  VOP3POp.V_DOT4_F32_BF8_BF8: _VOP3POp_V_DOT4_F32_BF8_BF8,
-}
-
-def _VOPCOp_V_CMP_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-
S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 < S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 == S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 <= S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 > S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 != S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f16 >= S1.f16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 >= S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 != S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 > S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode 
--- - D0.u64[laneId] = not (S0.f16 <= S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 == S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f16 < S1.f16) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 < S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 == S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 <= S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 > S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 != S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f32 >= S1.f32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 >= S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLG_F32(s0, s1, s2, d0, 
scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 != S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 > S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 <= S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 == S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f32 < S1.f32) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 < S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 == S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 <= S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 > S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 != S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.f64 >= S1.f64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None 
else None - # --- compiled pseudocode --- - D0.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 >= S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 != S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 > S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 <= S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 == S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = not (S0.f64 < S1.f64) - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 < S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 == S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 <= S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 > S1.i16 - return {'D0': D0._val} - -def 
_VOPCOp_V_CMP_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 != S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i16 >= S1.i16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 < S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 == S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 <= S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 > S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 != S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u16 >= S1.u16 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 < S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 == S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 <= S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not 
None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 > S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 != S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i32 >= S1.i32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 < S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 == S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 <= S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 > S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 != S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u32 >= S1.u32 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 < S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 == S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 <= S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, 
VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 > S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 != S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.i64 >= S1.i64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 < S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 == S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 <= S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 > S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 != S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - D0.u64[laneId] = S0.u64 >= S1.u64 - return {'D0': D0._val} - -def _VOPCOp_V_CMP_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f16)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f16)): - result = S1.u32[1] - elif exponent(S0.f16) == 31: - result = S1.u32[((2) if (sign(S0.f16)) else (9))] - elif exponent(S0.f16) > 0: - result = S1.u32[((3) if (sign(S0.f16)) else (8))] - elif F(abs(S0.f16)) > 0.0: - result = S1.u32[((4) if (sign(S0.f16)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f16)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOPCOp_V_CMP_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); 
PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(F(S0.f32)): - result = S1.u32[0] - elif isQuietNAN(F(S0.f32)): - result = S1.u32[1] - elif exponent(S0.f32) == 255: - result = S1.u32[((2) if (sign(S0.f32)) else (9))] - elif exponent(S0.f32) > 0: - result = S1.u32[((3) if (sign(S0.f32)) else (8))] - elif F(abs(S0.f32)) > 0.0: - result = S1.u32[((4) if (sign(S0.f32)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f32)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOPCOp_V_CMP_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); D0=Reg(d0); VCC=Reg(vcc); PC=Reg(pc) if pc is not None else None - # --- compiled pseudocode --- - if isSignalNAN(S0.f64): - result = S1.u32[0] - elif isQuietNAN(S0.f64): - result = S1.u32[1] - elif exponent(S0.f64) == 2047: - result = S1.u32[((2) if (sign(S0.f64)) else (9))] - elif exponent(S0.f64) > 0: - result = S1.u32[((3) if (sign(S0.f64)) else (8))] - elif abs(S0.f64) > 0.0: - result = S1.u32[((4) if (sign(S0.f64)) else (7))] - else: - result = S1.u32[((5) if (sign(S0.f64)) else (6))] - D0.u64[laneId] = result - return {'D0': D0._val} - -def _VOPCOp_V_CMPX_LT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 < S1.f16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 == S1.f16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 <= S1.f16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 > S1.f16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 != S1.f16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f16 >= S1.f16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_O_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = ( not isNAN(F(S0.f16)) and not isNAN(F(S1.f16))) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_U_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = (isNAN(F(S0.f16)) or isNAN(F(S1.f16))) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - 
S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 >= S1.f16) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLG_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 != S1.f16) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 > S1.f16) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLE_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 <= S1.f16) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NEQ_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 == S1.f16) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLT_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f16 < S1.f16) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 < S1.f32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 == S1.f32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 <= S1.f32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 > S1.f32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 != S1.f32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f32 >= S1.f32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_O_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = ( not isNAN(F(S0.f32)) and not isNAN(F(S1.f32))) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_U_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - 
S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = (isNAN(F(S0.f32)) or isNAN(F(S1.f32))) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 >= S1.f32) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLG_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 != S1.f32) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 > S1.f32) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLE_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 <= S1.f32) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NEQ_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 == S1.f32) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLT_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f32 < S1.f32) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 < S1.f64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 == S1.f64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 <= S1.f64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 > S1.f64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 != S1.f64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.f64 >= S1.f64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_O_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); 
S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = ( not isNAN(S0.f64) and not isNAN(S1.f64)) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_U_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = (isNAN(S0.f64) or isNAN(S1.f64)) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 >= S1.f64) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLG_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 != S1.f64) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NGT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 > S1.f64) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLE_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 <= S1.f64) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NEQ_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 == S1.f64) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NLT_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = not (S0.f64 < S1.f64) - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 < S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 == S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 <= S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 > S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 != S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - 
S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i16 >= S1.i16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 < S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 == S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 <= S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 > S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 != S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_U16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u16 >= S1.u16 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 < S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 == S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 <= S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 > S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 != S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i32 >= S1.i32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode 
--- - EXEC.u64[laneId] = S0.u32 < S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 == S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 <= S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 > S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 != S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_U32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u32 >= S1.u32 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 < S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 == S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 <= S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GT_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 > S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_NE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 != S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_GE_I64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.i64 >= S1.i64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_LT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 < S1.u64 - return {'EXEC': EXEC._val} - -def _VOPCOp_V_CMPX_EQ_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None): - S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask) - # --- compiled pseudocode --- - EXEC.u64[laneId] = S0.u64 == S1.u64 - return {'EXEC': EXEC._val} - 
-def _VOPCOp_V_CMPX_LE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 <= S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_GT_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 > S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_NE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 != S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_GE_U64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  EXEC.u64[laneId] = S0.u64 >= S1.u64
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_CLASS_F16(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  if isSignalNAN(F(S0.f16)):
-    result = S1.u32[0]
-  elif isQuietNAN(F(S0.f16)):
-    result = S1.u32[1]
-  elif exponent(S0.f16) == 31:
-    result = S1.u32[((2) if (sign(S0.f16)) else (9))]
-  elif exponent(S0.f16) > 0:
-    result = S1.u32[((3) if (sign(S0.f16)) else (8))]
-  elif F(abs(S0.f16)) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f16)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f16)) else (6))]
-  EXEC.u64[laneId] = result
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_CLASS_F32(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  if isSignalNAN(F(S0.f32)):
-    result = S1.u32[0]
-  elif isQuietNAN(F(S0.f32)):
-    result = S1.u32[1]
-  elif exponent(S0.f32) == 255:
-    result = S1.u32[((2) if (sign(S0.f32)) else (9))]
-  elif exponent(S0.f32) > 0:
-    result = S1.u32[((3) if (sign(S0.f32)) else (8))]
-  elif F(abs(S0.f32)) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f32)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f32)) else (6))]
-  EXEC.u64[laneId] = result
-  return {'EXEC': EXEC._val}
-
-def _VOPCOp_V_CMPX_CLASS_F64(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):
-  S0=Reg(s0); S1=Reg(s1); EXEC=Reg(exec_mask)
-  # --- compiled pseudocode ---
-  if isSignalNAN(S0.f64):
-    result = S1.u32[0]
-  elif isQuietNAN(S0.f64):
-    result = S1.u32[1]
-  elif exponent(S0.f64) == 2047:
-    result = S1.u32[((2) if (sign(S0.f64)) else (9))]
-  elif exponent(S0.f64) > 0:
-    result = S1.u32[((3) if (sign(S0.f64)) else (8))]
-  elif abs(S0.f64) > 0.0:
-    result = S1.u32[((4) if (sign(S0.f64)) else (7))]
-  else:
-    result = S1.u32[((5) if (sign(S0.f64)) else (6))]
-  EXEC.u64[laneId] = result
-  return {'EXEC': EXEC._val}
-
-VOPCOp_FUNCTIONS = {
-  VOPCOp.V_CMP_LT_F16: _VOPCOp_V_CMP_LT_F16,
-  VOPCOp.V_CMP_EQ_F16: _VOPCOp_V_CMP_EQ_F16,
-  VOPCOp.V_CMP_LE_F16: _VOPCOp_V_CMP_LE_F16,
-  VOPCOp.V_CMP_GT_F16: _VOPCOp_V_CMP_GT_F16,
-  VOPCOp.V_CMP_LG_F16: _VOPCOp_V_CMP_LG_F16,
-  VOPCOp.V_CMP_GE_F16: _VOPCOp_V_CMP_GE_F16,
-  VOPCOp.V_CMP_O_F16: _VOPCOp_V_CMP_O_F16,
-  VOPCOp.V_CMP_U_F16: _VOPCOp_V_CMP_U_F16,
-  VOPCOp.V_CMP_NGE_F16: _VOPCOp_V_CMP_NGE_F16,
-  VOPCOp.V_CMP_NLG_F16: _VOPCOp_V_CMP_NLG_F16,
-  VOPCOp.V_CMP_NGT_F16: _VOPCOp_V_CMP_NGT_F16,
-  VOPCOp.V_CMP_NLE_F16: _VOPCOp_V_CMP_NLE_F16,
-  VOPCOp.V_CMP_NEQ_F16: _VOPCOp_V_CMP_NEQ_F16,
-  VOPCOp.V_CMP_NLT_F16: _VOPCOp_V_CMP_NLT_F16,
-  VOPCOp.V_CMP_LT_F32: _VOPCOp_V_CMP_LT_F32,
-  VOPCOp.V_CMP_EQ_F32: _VOPCOp_V_CMP_EQ_F32,
-  VOPCOp.V_CMP_LE_F32: _VOPCOp_V_CMP_LE_F32,
-  VOPCOp.V_CMP_GT_F32: _VOPCOp_V_CMP_GT_F32,
-  VOPCOp.V_CMP_LG_F32: _VOPCOp_V_CMP_LG_F32,
-  VOPCOp.V_CMP_GE_F32: _VOPCOp_V_CMP_GE_F32,
-  VOPCOp.V_CMP_O_F32: _VOPCOp_V_CMP_O_F32,
-  VOPCOp.V_CMP_U_F32: _VOPCOp_V_CMP_U_F32,
-  VOPCOp.V_CMP_NGE_F32: _VOPCOp_V_CMP_NGE_F32,
-  VOPCOp.V_CMP_NLG_F32: _VOPCOp_V_CMP_NLG_F32,
-  VOPCOp.V_CMP_NGT_F32: _VOPCOp_V_CMP_NGT_F32,
-  VOPCOp.V_CMP_NLE_F32: _VOPCOp_V_CMP_NLE_F32,
-  VOPCOp.V_CMP_NEQ_F32: _VOPCOp_V_CMP_NEQ_F32,
-  VOPCOp.V_CMP_NLT_F32: _VOPCOp_V_CMP_NLT_F32,
-  VOPCOp.V_CMP_LT_F64: _VOPCOp_V_CMP_LT_F64,
-  VOPCOp.V_CMP_EQ_F64: _VOPCOp_V_CMP_EQ_F64,
-  VOPCOp.V_CMP_LE_F64: _VOPCOp_V_CMP_LE_F64,
-  VOPCOp.V_CMP_GT_F64: _VOPCOp_V_CMP_GT_F64,
-  VOPCOp.V_CMP_LG_F64: _VOPCOp_V_CMP_LG_F64,
-  VOPCOp.V_CMP_GE_F64: _VOPCOp_V_CMP_GE_F64,
-  VOPCOp.V_CMP_O_F64: _VOPCOp_V_CMP_O_F64,
-  VOPCOp.V_CMP_U_F64: _VOPCOp_V_CMP_U_F64,
-  VOPCOp.V_CMP_NGE_F64: _VOPCOp_V_CMP_NGE_F64,
-  VOPCOp.V_CMP_NLG_F64: _VOPCOp_V_CMP_NLG_F64,
-  VOPCOp.V_CMP_NGT_F64: _VOPCOp_V_CMP_NGT_F64,
-  VOPCOp.V_CMP_NLE_F64: _VOPCOp_V_CMP_NLE_F64,
-  VOPCOp.V_CMP_NEQ_F64: _VOPCOp_V_CMP_NEQ_F64,
-  VOPCOp.V_CMP_NLT_F64: _VOPCOp_V_CMP_NLT_F64,
-  VOPCOp.V_CMP_LT_I16: _VOPCOp_V_CMP_LT_I16,
-  VOPCOp.V_CMP_EQ_I16: _VOPCOp_V_CMP_EQ_I16,
-  VOPCOp.V_CMP_LE_I16: _VOPCOp_V_CMP_LE_I16,
-  VOPCOp.V_CMP_GT_I16: _VOPCOp_V_CMP_GT_I16,
-  VOPCOp.V_CMP_NE_I16: _VOPCOp_V_CMP_NE_I16,
-  VOPCOp.V_CMP_GE_I16: _VOPCOp_V_CMP_GE_I16,
-  VOPCOp.V_CMP_LT_U16: _VOPCOp_V_CMP_LT_U16,
-  VOPCOp.V_CMP_EQ_U16: _VOPCOp_V_CMP_EQ_U16,
-  VOPCOp.V_CMP_LE_U16: _VOPCOp_V_CMP_LE_U16,
-  VOPCOp.V_CMP_GT_U16: _VOPCOp_V_CMP_GT_U16,
-  VOPCOp.V_CMP_NE_U16: _VOPCOp_V_CMP_NE_U16,
-  VOPCOp.V_CMP_GE_U16: _VOPCOp_V_CMP_GE_U16,
-  VOPCOp.V_CMP_LT_I32: _VOPCOp_V_CMP_LT_I32,
-  VOPCOp.V_CMP_EQ_I32: _VOPCOp_V_CMP_EQ_I32,
-  VOPCOp.V_CMP_LE_I32: _VOPCOp_V_CMP_LE_I32,
-  VOPCOp.V_CMP_GT_I32: _VOPCOp_V_CMP_GT_I32,
-  VOPCOp.V_CMP_NE_I32: _VOPCOp_V_CMP_NE_I32,
-  VOPCOp.V_CMP_GE_I32: _VOPCOp_V_CMP_GE_I32,
-  VOPCOp.V_CMP_LT_U32: _VOPCOp_V_CMP_LT_U32,
-  VOPCOp.V_CMP_EQ_U32: _VOPCOp_V_CMP_EQ_U32,
-  VOPCOp.V_CMP_LE_U32: _VOPCOp_V_CMP_LE_U32,
-  VOPCOp.V_CMP_GT_U32: _VOPCOp_V_CMP_GT_U32,
-  VOPCOp.V_CMP_NE_U32: _VOPCOp_V_CMP_NE_U32,
-  VOPCOp.V_CMP_GE_U32: _VOPCOp_V_CMP_GE_U32,
-  VOPCOp.V_CMP_LT_I64: _VOPCOp_V_CMP_LT_I64,
-  VOPCOp.V_CMP_EQ_I64: _VOPCOp_V_CMP_EQ_I64,
-  VOPCOp.V_CMP_LE_I64: _VOPCOp_V_CMP_LE_I64,
-  VOPCOp.V_CMP_GT_I64: _VOPCOp_V_CMP_GT_I64,
-  VOPCOp.V_CMP_NE_I64: _VOPCOp_V_CMP_NE_I64,
-  VOPCOp.V_CMP_GE_I64: _VOPCOp_V_CMP_GE_I64,
-  VOPCOp.V_CMP_LT_U64: _VOPCOp_V_CMP_LT_U64,
-  VOPCOp.V_CMP_EQ_U64: _VOPCOp_V_CMP_EQ_U64,
-  VOPCOp.V_CMP_LE_U64: _VOPCOp_V_CMP_LE_U64,
-  VOPCOp.V_CMP_GT_U64: _VOPCOp_V_CMP_GT_U64,
-  VOPCOp.V_CMP_NE_U64: _VOPCOp_V_CMP_NE_U64,
-  VOPCOp.V_CMP_GE_U64: _VOPCOp_V_CMP_GE_U64,
-  VOPCOp.V_CMP_CLASS_F16: _VOPCOp_V_CMP_CLASS_F16,
-  VOPCOp.V_CMP_CLASS_F32: _VOPCOp_V_CMP_CLASS_F32,
-  VOPCOp.V_CMP_CLASS_F64: _VOPCOp_V_CMP_CLASS_F64,
-  VOPCOp.V_CMPX_LT_F16: _VOPCOp_V_CMPX_LT_F16,
-  VOPCOp.V_CMPX_EQ_F16: _VOPCOp_V_CMPX_EQ_F16,
-  VOPCOp.V_CMPX_LE_F16: _VOPCOp_V_CMPX_LE_F16,
-  VOPCOp.V_CMPX_GT_F16: _VOPCOp_V_CMPX_GT_F16,
-  VOPCOp.V_CMPX_LG_F16: _VOPCOp_V_CMPX_LG_F16,
-  VOPCOp.V_CMPX_GE_F16: _VOPCOp_V_CMPX_GE_F16,
-  VOPCOp.V_CMPX_O_F16: _VOPCOp_V_CMPX_O_F16,
-  VOPCOp.V_CMPX_U_F16: _VOPCOp_V_CMPX_U_F16,
-  VOPCOp.V_CMPX_NGE_F16: _VOPCOp_V_CMPX_NGE_F16,
-  VOPCOp.V_CMPX_NLG_F16: _VOPCOp_V_CMPX_NLG_F16,
-  VOPCOp.V_CMPX_NGT_F16: _VOPCOp_V_CMPX_NGT_F16,
-  VOPCOp.V_CMPX_NLE_F16: _VOPCOp_V_CMPX_NLE_F16,
-  VOPCOp.V_CMPX_NEQ_F16: _VOPCOp_V_CMPX_NEQ_F16,
-  VOPCOp.V_CMPX_NLT_F16: _VOPCOp_V_CMPX_NLT_F16,
-  VOPCOp.V_CMPX_LT_F32: _VOPCOp_V_CMPX_LT_F32,
-  VOPCOp.V_CMPX_EQ_F32: _VOPCOp_V_CMPX_EQ_F32,
-  VOPCOp.V_CMPX_LE_F32: _VOPCOp_V_CMPX_LE_F32,
-  VOPCOp.V_CMPX_GT_F32: _VOPCOp_V_CMPX_GT_F32,
-  VOPCOp.V_CMPX_LG_F32: _VOPCOp_V_CMPX_LG_F32,
-  VOPCOp.V_CMPX_GE_F32: _VOPCOp_V_CMPX_GE_F32,
-  VOPCOp.V_CMPX_O_F32: _VOPCOp_V_CMPX_O_F32,
-  VOPCOp.V_CMPX_U_F32: _VOPCOp_V_CMPX_U_F32,
-  VOPCOp.V_CMPX_NGE_F32: _VOPCOp_V_CMPX_NGE_F32,
-  VOPCOp.V_CMPX_NLG_F32: _VOPCOp_V_CMPX_NLG_F32,
-  VOPCOp.V_CMPX_NGT_F32: _VOPCOp_V_CMPX_NGT_F32,
-  VOPCOp.V_CMPX_NLE_F32: _VOPCOp_V_CMPX_NLE_F32,
-  VOPCOp.V_CMPX_NEQ_F32: _VOPCOp_V_CMPX_NEQ_F32,
-  VOPCOp.V_CMPX_NLT_F32: _VOPCOp_V_CMPX_NLT_F32,
-  VOPCOp.V_CMPX_LT_F64: _VOPCOp_V_CMPX_LT_F64,
-  VOPCOp.V_CMPX_EQ_F64: _VOPCOp_V_CMPX_EQ_F64,
-  VOPCOp.V_CMPX_LE_F64: _VOPCOp_V_CMPX_LE_F64,
-  VOPCOp.V_CMPX_GT_F64: _VOPCOp_V_CMPX_GT_F64,
-  VOPCOp.V_CMPX_LG_F64: _VOPCOp_V_CMPX_LG_F64,
-  VOPCOp.V_CMPX_GE_F64: _VOPCOp_V_CMPX_GE_F64,
-  VOPCOp.V_CMPX_O_F64: _VOPCOp_V_CMPX_O_F64,
-  VOPCOp.V_CMPX_U_F64: _VOPCOp_V_CMPX_U_F64,
-  VOPCOp.V_CMPX_NGE_F64: _VOPCOp_V_CMPX_NGE_F64,
-  VOPCOp.V_CMPX_NLG_F64: _VOPCOp_V_CMPX_NLG_F64,
-  VOPCOp.V_CMPX_NGT_F64: _VOPCOp_V_CMPX_NGT_F64,
-  VOPCOp.V_CMPX_NLE_F64: _VOPCOp_V_CMPX_NLE_F64,
-  VOPCOp.V_CMPX_NEQ_F64: _VOPCOp_V_CMPX_NEQ_F64,
-  VOPCOp.V_CMPX_NLT_F64: _VOPCOp_V_CMPX_NLT_F64,
-  VOPCOp.V_CMPX_LT_I16: _VOPCOp_V_CMPX_LT_I16,
-  VOPCOp.V_CMPX_EQ_I16: _VOPCOp_V_CMPX_EQ_I16,
-  VOPCOp.V_CMPX_LE_I16: _VOPCOp_V_CMPX_LE_I16,
-  VOPCOp.V_CMPX_GT_I16: _VOPCOp_V_CMPX_GT_I16,
-  VOPCOp.V_CMPX_NE_I16: _VOPCOp_V_CMPX_NE_I16,
-  VOPCOp.V_CMPX_GE_I16: _VOPCOp_V_CMPX_GE_I16,
-  VOPCOp.V_CMPX_LT_U16: _VOPCOp_V_CMPX_LT_U16,
-  VOPCOp.V_CMPX_EQ_U16: _VOPCOp_V_CMPX_EQ_U16,
-  VOPCOp.V_CMPX_LE_U16: _VOPCOp_V_CMPX_LE_U16,
-  VOPCOp.V_CMPX_GT_U16: _VOPCOp_V_CMPX_GT_U16,
-  VOPCOp.V_CMPX_NE_U16: _VOPCOp_V_CMPX_NE_U16,
-  VOPCOp.V_CMPX_GE_U16: _VOPCOp_V_CMPX_GE_U16,
-  VOPCOp.V_CMPX_LT_I32: _VOPCOp_V_CMPX_LT_I32,
-  VOPCOp.V_CMPX_EQ_I32: _VOPCOp_V_CMPX_EQ_I32,
-  VOPCOp.V_CMPX_LE_I32: _VOPCOp_V_CMPX_LE_I32,
-  VOPCOp.V_CMPX_GT_I32: _VOPCOp_V_CMPX_GT_I32,
-  VOPCOp.V_CMPX_NE_I32: _VOPCOp_V_CMPX_NE_I32,
-  VOPCOp.V_CMPX_GE_I32: _VOPCOp_V_CMPX_GE_I32,
-  VOPCOp.V_CMPX_LT_U32: _VOPCOp_V_CMPX_LT_U32,
-  VOPCOp.V_CMPX_EQ_U32: _VOPCOp_V_CMPX_EQ_U32,
-  VOPCOp.V_CMPX_LE_U32: _VOPCOp_V_CMPX_LE_U32,
-  VOPCOp.V_CMPX_GT_U32: _VOPCOp_V_CMPX_GT_U32,
-  VOPCOp.V_CMPX_NE_U32: _VOPCOp_V_CMPX_NE_U32,
-  VOPCOp.V_CMPX_GE_U32: _VOPCOp_V_CMPX_GE_U32,
-  VOPCOp.V_CMPX_LT_I64: _VOPCOp_V_CMPX_LT_I64,
-  VOPCOp.V_CMPX_EQ_I64: _VOPCOp_V_CMPX_EQ_I64,
-  VOPCOp.V_CMPX_LE_I64: _VOPCOp_V_CMPX_LE_I64,
-  VOPCOp.V_CMPX_GT_I64: _VOPCOp_V_CMPX_GT_I64,
-  VOPCOp.V_CMPX_NE_I64: _VOPCOp_V_CMPX_NE_I64,
-  VOPCOp.V_CMPX_GE_I64: _VOPCOp_V_CMPX_GE_I64,
-  VOPCOp.V_CMPX_LT_U64: _VOPCOp_V_CMPX_LT_U64,
-  VOPCOp.V_CMPX_EQ_U64: _VOPCOp_V_CMPX_EQ_U64,
-  VOPCOp.V_CMPX_LE_U64: _VOPCOp_V_CMPX_LE_U64,
-  VOPCOp.V_CMPX_GT_U64: _VOPCOp_V_CMPX_GT_U64,
-  VOPCOp.V_CMPX_NE_U64: _VOPCOp_V_CMPX_NE_U64,
-  VOPCOp.V_CMPX_GE_U64: _VOPCOp_V_CMPX_GE_U64,
-  VOPCOp.V_CMPX_CLASS_F16: _VOPCOp_V_CMPX_CLASS_F16,
-  VOPCOp.V_CMPX_CLASS_F32: _VOPCOp_V_CMPX_CLASS_F32,
-  VOPCOp.V_CMPX_CLASS_F64: _VOPCOp_V_CMPX_CLASS_F64,
-}
-
-def
_DSOp_DS_ADD_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 = DATA.u32 - MEM[addr].u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_DEC_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_I32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_I32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); 
OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STORE_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET.u32].b32 = DATA[31 : 0] - return {} - -def _DSOp_DS_STORE_2ADDR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET0.u32 * 4].b32 = DATA[31 : 0] - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET1.u32 * 4].b32 = DATA2[31 : 0] - return {} - -def _DSOp_DS_STORE_2ADDR_STRIDE64_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET0.u32 * 256].b32 = DATA[31 : 0] - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET1.u32 * 256].b32 = DATA2[31 : 0] - return {} - -def _DSOp_DS_CMPSTORE_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - src = DATA.b32 - cmp = DATA2.b32 - MEM[addr].b32 
= ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].f32) - MEM[addr].f32 += DATA.f32 - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STORE_B8(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b8 = DATA[7 : 0] - return {} - -def _DSOp_DS_STORE_B16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b16 = DATA[15 : 0] - return {} - -def _DSOp_DS_ADD_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 += DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 -= DATA.u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - MEM[addr].u32 = DATA.u32 - MEM[addr].u32 - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_DEC_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_I32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i32 = 
tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_I32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].i32) - src = DATA.i32 - MEM[addr].i32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[addr].u32 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp & DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp | DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = (tmp ^ DATA.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - MEM[addr].b32 = DATA.b32 - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_2ADDR_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); 
DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4 - tmp1 = MEM[addr1].b32 - tmp2 = MEM[addr2].b32 - MEM[addr1].b32 = DATA.b32 - MEM[addr2].b32 = DATA2.b32 - RETURN_DATA[31 : 0] = tmp1 - RETURN_DATA[63 : 32] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256 - tmp1 = MEM[addr1].b32 - tmp2 = MEM[addr2].b32 - MEM[addr1].b32 = DATA.b32 - MEM[addr2].b32 = DATA2.b32 - RETURN_DATA[31 : 0] = tmp1 - RETURN_DATA[63 : 32] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPSTORE_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b32) - src = DATA.b32 - cmp = DATA2.b32 - MEM[addr].b32 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_2ADDR_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4].b32 - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_2ADDR_STRIDE64_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256].b32 - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_I8(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA.i32 = (signext(MEM[ADDR].i8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U8(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA.u32 = (_pack(0, MEM[ADDR].u8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_I16(MEM, addr, 
data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA.i32 = (signext(MEM[ADDR].i16)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA.u32 = (_pack(0, MEM[ADDR].u16)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CONSUME(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0) - # --- compiled pseudocode --- - addr = offset - rtnval = LDS(addr) - GPR[VDST] = rtnval - return {} - -def _DSOp_DS_APPEND(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0) - # --- compiled pseudocode --- - addr = offset - rtnval = LDS(addr) - GPR[VDST] = rtnval - return {} - -def _DSOp_DS_ADD_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 = DATA.u64 - MEM[addr].u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_DEC_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = 
Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STORE_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET.u32].b32 = DATA[31 : 0] - MEM[addr + OFFSET.u32 + 4].b32 = DATA[63 : 32] - return {} - -def _DSOp_DS_STORE_2ADDR_B64(MEM, addr, data0, data1, 
offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET0.u32 * 8].b32 = DATA[31 : 0] - MEM[addr + OFFSET0.u32 * 8 + 4].b32 = DATA[63 : 32] - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET1.u32 * 8].b32 = DATA2[31 : 0] - MEM[addr + OFFSET1.u32 * 8 + 4].b32 = DATA2[63 : 32] - return {} - -def _DSOp_DS_STORE_2ADDR_STRIDE64_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET0.u32 * 512].b32 = DATA[31 : 0] - MEM[addr + OFFSET0.u32 * 512 + 4].b32 = DATA[63 : 32] - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET1.u32 * 512].b32 = DATA2[31 : 0] - MEM[addr + OFFSET1.u32 * 512 + 4].b32 = DATA2[63 : 32] - return {} - -def _DSOp_DS_CMPSTORE_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - src = DATA.b64 - cmp = DATA2.b64 - MEM[addr].b64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 += DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 -= DATA.u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_RSUB_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - MEM[addr].u64 = DATA.u64 - MEM[addr].u64 - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_INC_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((0) if (tmp >= src) else (tmp + 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_DEC_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) 
if (((tmp == 0) or (tmp > src))) else (tmp - 1)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_I64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].i64) - src = DATA.i64 - MEM[addr].i64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.i64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MIN_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src < tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MAX_RTN_U64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u64) - src = DATA.u64 - MEM[addr].u64 = ((src) if (src >= tmp) else (tmp)) - RETURN_DATA.u64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_AND_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp & DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_OR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp | DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_XOR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = (tmp ^ DATA.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_MSKOR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def 
_DSOp_DS_STOREXCHG_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - MEM[addr].b64 = DATA.b64 - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_2ADDR_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8 - tmp1 = MEM[addr1].b64 - tmp2 = MEM[addr2].b64 - MEM[addr1].b64 = DATA.b64 - MEM[addr2].b64 = DATA2.b64 - RETURN_DATA[63 : 0] = tmp1 - RETURN_DATA[127 : 64] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1; OFFSET=OFFSET0; ADDR_BASE=ADDR - # --- compiled pseudocode --- - addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512 - addr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512 - tmp1 = MEM[addr1].b64 - tmp2 = MEM[addr2].b64 - MEM[addr1].b64 = DATA.b64 - MEM[addr2].b64 = DATA2.b64 - RETURN_DATA[63 : 0] = tmp1 - RETURN_DATA[127 : 64] = tmp2 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CMPSTORE_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; DATA2=DATA1 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].b64) - src = DATA.b64 - cmp = DATA2.b64 - MEM[addr].b64 = ((src) if (tmp == cmp) else (tmp)) - RETURN_DATA.b64 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_2ADDR_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8 + 4].b32 - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8].b32 - RETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_2ADDR_STRIDE64_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512 + 4].b32 - addr = CalcDsAddr(vgpr_a.b32, 0x0) - 
RETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512].b32 - RETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512 + 4].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_ADD_RTN_F32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].f32) - MEM[addr].f32 += DATA.f32 - RETURN_DATA.f32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_CONDXCHG32_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - ADDR = S0.u32 - DATA = S1.u64 - offset = _pack(OFFSET1, OFFSET0) - RETURN_DATA[0] = LDS[ADDR0].u32 - if DATA[31]: - LDS[ADDR0] = _pack(0, DATA[30 : 0]) - RETURN_DATA[1] = LDS[ADDR1].u32 - if DATA[63]: - LDS[ADDR1] = _pack(0, DATA[62 : 32]) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_COND_SUB_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((tmp - src) if (tmp >= src) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_CLAMP_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - old_value = MEM[ADDR].u32 - if old_value < DATA.u32: - new_value = 0 - else: - new_value = old_value - DATA.u32 - MEM[ADDR].u32 = new_value - RETURN_DATA.u32 = old_value - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_PK_ADD_F16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - src = DATA.b32 - dst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16 - dst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16 - MEM[ADDR].b32 = dst.b32 - RETURN_DATA.b32 = tmp.b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_PK_ADD_BF16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - src = DATA.b32 - dst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16 - dst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16 - MEM[ADDR].b32 = dst.b32 - RETURN_DATA.b32 = tmp.b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_STORE_B8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b8 = DATA[23 : 16] - return {} - -def _DSOp_DS_STORE_B16_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - MEM[ADDR].b16 = DATA[31 : 16] - return {} - -def 
_DSOp_DS_LOAD_U8_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].u16 = (_pack(0, MEM[ADDR].u8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].u16 = (_pack(0, MEM[ADDR].u8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_I8_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].i16 = (signext(MEM[ADDR].i8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_I8_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].i16 = (signext(MEM[ADDR].i8)) - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U16_D16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_U16_D16_HI(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_COND_SUB_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, offset.b32) - tmp = Reg(MEM[addr].u32) - src = DATA.u32 - MEM[ADDR].u32 = ((tmp - src) if (tmp >= src) else (tmp)) - RETURN_DATA.u32 = tmp - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_SUB_CLAMP_RTN_U32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - old_value = MEM[ADDR].u32 - if old_value < DATA.u32: - new_value = 0 - else: - new_value = old_value - DATA.u32 - MEM[ADDR].u32 = new_value - RETURN_DATA.u32 = old_value - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_PK_ADD_RTN_F16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - src = DATA.b32 - dst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16 - dst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16 - MEM[ADDR].b32 = dst.b32 - RETURN_DATA.b32 = tmp.b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_PK_ADD_RTN_BF16(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); 
DATA=DATA0 - # --- compiled pseudocode --- - tmp = Reg(MEM[ADDR].b32) - src = DATA.b32 - dst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16 - dst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16 - MEM[ADDR].b32 = dst.b32 - RETURN_DATA.b32 = tmp.b32 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_PERMUTE_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - num_lanes = ((64) if (WAVE64) else (32)) - for i in range(0, int(num_lanes - 1)+1): - tmp[i] = 0x0 - for i in range(0, int(num_lanes - 1)+1): - if EXEC[i].u1: - dst_lane = (VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes - tmp[dst_lane] = VGPR[i][DATA0] - for i in range(0, int(num_lanes - 1)+1): - if EXEC[i].u1: - VGPR[i][VDST] = tmp[i] - return {} - -def _DSOp_DS_BPERMUTE_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - num_lanes = ((64) if (WAVE64) else (32)) - for i in range(0, int(num_lanes - 1)+1): - tmp[i] = 0x0 - for i in range(0, int(num_lanes - 1)+1): - src_lane = (VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes - if EXEC[src_lane].u1: - tmp[i] = VGPR[src_lane][DATA0] - for i in range(0, int(num_lanes - 1)+1): - if EXEC[i].u1: - VGPR[i][VDST] = tmp[i] - return {} - -def _DSOp_DS_STORE_B96(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET.u32].b32 = DATA[31 : 0] - MEM[addr + OFFSET.u32 + 4].b32 = DATA[63 : 32] - MEM[addr + OFFSET.u32 + 8].b32 = DATA[95 : 64] - return {} - -def _DSOp_DS_STORE_B128(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - MEM[addr + OFFSET.u32].b32 = DATA[31 : 0] - MEM[addr + OFFSET.u32 + 4].b32 = DATA[63 : 32] - MEM[addr + OFFSET.u32 + 8].b32 = DATA[95 : 64] - MEM[addr + OFFSET.u32 + 12].b32 = DATA[127 : 96] - return {} - -def _DSOp_DS_BVH_STACK_PUSH4_POP1_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - _pack(stack_base, stack_index) = (DECODE_ADDR(ADDR, OFFSET0)) - last_node_ptr = DATA0.b32 - for i in range(0, int(2)+1): - if DATA_VALID(DATA1[i * 32 + 31 : i * 32]): - MEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32] - stack_index += 1 - elif DATA1[i].b32 == last_node_ptr: - pass - if DATA_VALID(DATA1[127 : 96]): - RETURN_DATA[31 : 0] = DATA1[127 : 96] - else: - RETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index] - MEM[stack_base.u32 + stack_index] = INVALID_NODE - stack_index -= 1 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_BVH_STACK_PUSH8_POP1_RTN_B32(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - _pack(stack_base, stack_index) = 
(DECODE_ADDR(ADDR, OFFSET0)) - last_node_ptr = DATA0.b32 - for i in range(0, int(6)+1): - if DATA_VALID(DATA1[i * 32 + 31 : i * 32]): - MEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32] - stack_index += 1 - elif DATA1[i].b32 == last_node_ptr: - pass - if DATA_VALID(DATA1[255 : 224]): - RETURN_DATA[31 : 0] = DATA1[255 : 224] - else: - RETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index] - MEM[stack_base.u32 + stack_index] = INVALID_NODE - stack_index -= 1 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_BVH_STACK_PUSH8_POP2_RTN_B64(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - _pack(stack_base, stack_index) = (DECODE_ADDR(ADDR, OFFSET0)) - last_node_ptr = DATA0.b32 - for i in range(0, int(6)+1): - if DATA_VALID(DATA1[i * 32 + 31 : i * 32]): - MEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32] - stack_index += 1 - elif DATA1[i].b32 == last_node_ptr: - pass - if DATA_VALID(DATA1[255 : 224]): - RETURN_DATA[31 : 0] = DATA1[255 : 224] - else: - RETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index] - MEM[stack_base.u32 + stack_index] = INVALID_NODE - stack_index -= 1 - if DATA_VALID(MEM[stack_base.u32 + stack_index]): - RETURN_DATA[63 : 32] = MEM[stack_base.u32 + stack_index] - MEM[stack_base.u32 + stack_index] = INVALID_NODE - stack_index -= 1 - return {'RETURN_DATA': RETURN_DATA._val} - -def _DSOp_DS_LOAD_B96(MEM, addr, data0, data1, offset0, offset1): - ADDR=Reg(addr); DATA0=Reg(data0); DATA1=Reg(data1); OFFSET0=Reg(offset0); OFFSET1=Reg(offset1); RETURN_DATA=Reg(0); DATA=DATA0; OFFSET=OFFSET0 - # --- compiled pseudocode --- - addr = CalcDsAddr(vgpr_a.b32, 0x0) - RETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32 - RETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4].b32 - RETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8].b32 - return {'RETURN_DATA': RETURN_DATA._val} - -DSOp_FUNCTIONS = { - DSOp.DS_ADD_U32: _DSOp_DS_ADD_U32, - DSOp.DS_SUB_U32: _DSOp_DS_SUB_U32, - DSOp.DS_RSUB_U32: _DSOp_DS_RSUB_U32, - DSOp.DS_INC_U32: _DSOp_DS_INC_U32, - DSOp.DS_DEC_U32: _DSOp_DS_DEC_U32, - DSOp.DS_MIN_I32: _DSOp_DS_MIN_I32, - DSOp.DS_MAX_I32: _DSOp_DS_MAX_I32, - DSOp.DS_MIN_U32: _DSOp_DS_MIN_U32, - DSOp.DS_MAX_U32: _DSOp_DS_MAX_U32, - DSOp.DS_AND_B32: _DSOp_DS_AND_B32, - DSOp.DS_OR_B32: _DSOp_DS_OR_B32, - DSOp.DS_XOR_B32: _DSOp_DS_XOR_B32, - DSOp.DS_MSKOR_B32: _DSOp_DS_MSKOR_B32, - DSOp.DS_STORE_B32: _DSOp_DS_STORE_B32, - DSOp.DS_STORE_2ADDR_B32: _DSOp_DS_STORE_2ADDR_B32, - DSOp.DS_STORE_2ADDR_STRIDE64_B32: _DSOp_DS_STORE_2ADDR_STRIDE64_B32, - DSOp.DS_CMPSTORE_B32: _DSOp_DS_CMPSTORE_B32, - DSOp.DS_ADD_F32: _DSOp_DS_ADD_F32, - DSOp.DS_STORE_B8: _DSOp_DS_STORE_B8, - DSOp.DS_STORE_B16: _DSOp_DS_STORE_B16, - DSOp.DS_ADD_RTN_U32: _DSOp_DS_ADD_RTN_U32, - DSOp.DS_SUB_RTN_U32: _DSOp_DS_SUB_RTN_U32, - DSOp.DS_RSUB_RTN_U32: _DSOp_DS_RSUB_RTN_U32, - DSOp.DS_INC_RTN_U32: _DSOp_DS_INC_RTN_U32, - DSOp.DS_DEC_RTN_U32: _DSOp_DS_DEC_RTN_U32, - DSOp.DS_MIN_RTN_I32: _DSOp_DS_MIN_RTN_I32, - DSOp.DS_MAX_RTN_I32: _DSOp_DS_MAX_RTN_I32, - DSOp.DS_MIN_RTN_U32: _DSOp_DS_MIN_RTN_U32, - DSOp.DS_MAX_RTN_U32: _DSOp_DS_MAX_RTN_U32, - DSOp.DS_AND_RTN_B32: _DSOp_DS_AND_RTN_B32, - DSOp.DS_OR_RTN_B32: _DSOp_DS_OR_RTN_B32, - DSOp.DS_XOR_RTN_B32: _DSOp_DS_XOR_RTN_B32, - DSOp.DS_MSKOR_RTN_B32: _DSOp_DS_MSKOR_RTN_B32, - DSOp.DS_STOREXCHG_RTN_B32: _DSOp_DS_STOREXCHG_RTN_B32, - DSOp.DS_STOREXCHG_2ADDR_RTN_B32: 
_DSOp_DS_STOREXCHG_2ADDR_RTN_B32, - DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: _DSOp_DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32, - DSOp.DS_CMPSTORE_RTN_B32: _DSOp_DS_CMPSTORE_RTN_B32, - DSOp.DS_LOAD_B32: _DSOp_DS_LOAD_B32, - DSOp.DS_LOAD_2ADDR_B32: _DSOp_DS_LOAD_2ADDR_B32, - DSOp.DS_LOAD_2ADDR_STRIDE64_B32: _DSOp_DS_LOAD_2ADDR_STRIDE64_B32, - DSOp.DS_LOAD_I8: _DSOp_DS_LOAD_I8, - DSOp.DS_LOAD_U8: _DSOp_DS_LOAD_U8, - DSOp.DS_LOAD_I16: _DSOp_DS_LOAD_I16, - DSOp.DS_LOAD_U16: _DSOp_DS_LOAD_U16, - DSOp.DS_CONSUME: _DSOp_DS_CONSUME, - DSOp.DS_APPEND: _DSOp_DS_APPEND, - DSOp.DS_ADD_U64: _DSOp_DS_ADD_U64, - DSOp.DS_SUB_U64: _DSOp_DS_SUB_U64, - DSOp.DS_RSUB_U64: _DSOp_DS_RSUB_U64, - DSOp.DS_INC_U64: _DSOp_DS_INC_U64, - DSOp.DS_DEC_U64: _DSOp_DS_DEC_U64, - DSOp.DS_MIN_I64: _DSOp_DS_MIN_I64, - DSOp.DS_MAX_I64: _DSOp_DS_MAX_I64, - DSOp.DS_MIN_U64: _DSOp_DS_MIN_U64, - DSOp.DS_MAX_U64: _DSOp_DS_MAX_U64, - DSOp.DS_AND_B64: _DSOp_DS_AND_B64, - DSOp.DS_OR_B64: _DSOp_DS_OR_B64, - DSOp.DS_XOR_B64: _DSOp_DS_XOR_B64, - DSOp.DS_MSKOR_B64: _DSOp_DS_MSKOR_B64, - DSOp.DS_STORE_B64: _DSOp_DS_STORE_B64, - DSOp.DS_STORE_2ADDR_B64: _DSOp_DS_STORE_2ADDR_B64, - DSOp.DS_STORE_2ADDR_STRIDE64_B64: _DSOp_DS_STORE_2ADDR_STRIDE64_B64, - DSOp.DS_CMPSTORE_B64: _DSOp_DS_CMPSTORE_B64, - DSOp.DS_ADD_RTN_U64: _DSOp_DS_ADD_RTN_U64, - DSOp.DS_SUB_RTN_U64: _DSOp_DS_SUB_RTN_U64, - DSOp.DS_RSUB_RTN_U64: _DSOp_DS_RSUB_RTN_U64, - DSOp.DS_INC_RTN_U64: _DSOp_DS_INC_RTN_U64, - DSOp.DS_DEC_RTN_U64: _DSOp_DS_DEC_RTN_U64, - DSOp.DS_MIN_RTN_I64: _DSOp_DS_MIN_RTN_I64, - DSOp.DS_MAX_RTN_I64: _DSOp_DS_MAX_RTN_I64, - DSOp.DS_MIN_RTN_U64: _DSOp_DS_MIN_RTN_U64, - DSOp.DS_MAX_RTN_U64: _DSOp_DS_MAX_RTN_U64, - DSOp.DS_AND_RTN_B64: _DSOp_DS_AND_RTN_B64, - DSOp.DS_OR_RTN_B64: _DSOp_DS_OR_RTN_B64, - DSOp.DS_XOR_RTN_B64: _DSOp_DS_XOR_RTN_B64, - DSOp.DS_MSKOR_RTN_B64: _DSOp_DS_MSKOR_RTN_B64, - DSOp.DS_STOREXCHG_RTN_B64: _DSOp_DS_STOREXCHG_RTN_B64, - DSOp.DS_STOREXCHG_2ADDR_RTN_B64: _DSOp_DS_STOREXCHG_2ADDR_RTN_B64, - DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: _DSOp_DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64, - DSOp.DS_CMPSTORE_RTN_B64: _DSOp_DS_CMPSTORE_RTN_B64, - DSOp.DS_LOAD_B64: _DSOp_DS_LOAD_B64, - DSOp.DS_LOAD_2ADDR_B64: _DSOp_DS_LOAD_2ADDR_B64, - DSOp.DS_LOAD_2ADDR_STRIDE64_B64: _DSOp_DS_LOAD_2ADDR_STRIDE64_B64, - DSOp.DS_ADD_RTN_F32: _DSOp_DS_ADD_RTN_F32, - DSOp.DS_CONDXCHG32_RTN_B64: _DSOp_DS_CONDXCHG32_RTN_B64, - DSOp.DS_COND_SUB_U32: _DSOp_DS_COND_SUB_U32, - DSOp.DS_SUB_CLAMP_U32: _DSOp_DS_SUB_CLAMP_U32, - DSOp.DS_PK_ADD_F16: _DSOp_DS_PK_ADD_F16, - DSOp.DS_PK_ADD_BF16: _DSOp_DS_PK_ADD_BF16, - DSOp.DS_STORE_B8_D16_HI: _DSOp_DS_STORE_B8_D16_HI, - DSOp.DS_STORE_B16_D16_HI: _DSOp_DS_STORE_B16_D16_HI, - DSOp.DS_LOAD_U8_D16: _DSOp_DS_LOAD_U8_D16, - DSOp.DS_LOAD_U8_D16_HI: _DSOp_DS_LOAD_U8_D16_HI, - DSOp.DS_LOAD_I8_D16: _DSOp_DS_LOAD_I8_D16, - DSOp.DS_LOAD_I8_D16_HI: _DSOp_DS_LOAD_I8_D16_HI, - DSOp.DS_LOAD_U16_D16: _DSOp_DS_LOAD_U16_D16, - DSOp.DS_LOAD_U16_D16_HI: _DSOp_DS_LOAD_U16_D16_HI, - DSOp.DS_COND_SUB_RTN_U32: _DSOp_DS_COND_SUB_RTN_U32, - DSOp.DS_SUB_CLAMP_RTN_U32: _DSOp_DS_SUB_CLAMP_RTN_U32, - DSOp.DS_PK_ADD_RTN_F16: _DSOp_DS_PK_ADD_RTN_F16, - DSOp.DS_PK_ADD_RTN_BF16: _DSOp_DS_PK_ADD_RTN_BF16, - DSOp.DS_PERMUTE_B32: _DSOp_DS_PERMUTE_B32, - DSOp.DS_BPERMUTE_B32: _DSOp_DS_BPERMUTE_B32, - DSOp.DS_STORE_B96: _DSOp_DS_STORE_B96, - DSOp.DS_STORE_B128: _DSOp_DS_STORE_B128, - DSOp.DS_BVH_STACK_PUSH4_POP1_RTN_B32: _DSOp_DS_BVH_STACK_PUSH4_POP1_RTN_B32, - DSOp.DS_BVH_STACK_PUSH8_POP1_RTN_B32: _DSOp_DS_BVH_STACK_PUSH8_POP1_RTN_B32, - DSOp.DS_BVH_STACK_PUSH8_POP2_RTN_B64: 
_DSOp_DS_BVH_STACK_PUSH8_POP2_RTN_B64, - DSOp.DS_LOAD_B96: _DSOp_DS_LOAD_B96, -} - -COMPILED_FUNCTIONS = { - SOP1Op: SOP1Op_FUNCTIONS, - SOP2Op: SOP2Op_FUNCTIONS, - SOPCOp: SOPCOp_FUNCTIONS, - SOPKOp: SOPKOp_FUNCTIONS, - SOPPOp: SOPPOp_FUNCTIONS, - SMEMOp: SMEMOp_FUNCTIONS, - VOP1Op: VOP1Op_FUNCTIONS, - VOP2Op: VOP2Op_FUNCTIONS, - VOP3Op: VOP3Op_FUNCTIONS, - VOP3SDOp: VOP3SDOp_FUNCTIONS, - VOP3POp: VOP3POp_FUNCTIONS, - VOPCOp: VOPCOp_FUNCTIONS, - DSOp: DSOp_FUNCTIONS, -} \ No newline at end of file diff --git a/extra/assembly/amd/autogen/rdna4/str_pcode.py b/extra/assembly/amd/autogen/rdna4/str_pcode.py new file mode 100644 index 0000000000..7e8c5aa25e --- /dev/null +++ b/extra/assembly/amd/autogen/rdna4/str_pcode.py @@ -0,0 +1,1229 @@ +# autogenerated by pdf.py - do not edit +# to regenerate: python -m extra.assembly.amd.pdf --arch rdna4 +# ruff: noqa: E501 +from extra.assembly.amd.autogen.rdna4.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp + +SOP1Op_PCODE = { + SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32', + SOP1Op.S_MOV_B64: 'D0.b64 = S0.b64', + SOP1Op.S_CMOV_B32: 'if SCC then\nD0.b32 = S0.b32\nendif', + SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif', + SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]', + SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CTZ_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CLZ_I32_U64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_CLS_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))", + SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U", + SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U", + SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U", + SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U", + SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor', + SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0', + SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 
1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U', + SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_WQM_B32: "tmp = 0U;\ndeclare i : 6'U;\nfor i in 6'0U : 6'31U do\ntmp[i] = S0.u32[i & 6'60U +: 6'4U] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_NOT_B32: 'D0.u32 = ~S0.u32;\nSCC = D0.u32 != 0U', + SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_AND_SAVEEXEC_B32: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_SAVEEXEC_B32: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XOR_SAVEEXEC_B32: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_XOR_SAVEEXEC_B64: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NAND_SAVEEXEC_B32: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_NAND_SAVEEXEC_B64: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = 
~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NOR_SAVEEXEC_B32: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_NOR_SAVEEXEC_B64: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XNOR_SAVEEXEC_B32: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_XNOR_SAVEEXEC_B64: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT0_SAVEEXEC_B32: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT0_SAVEEXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_NOT0_SAVEEXEC_B32: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_NOT0_SAVEEXEC_B64: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT1_SAVEEXEC_B32: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT1_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask 
into\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_NOT1_SAVEEXEC_B32: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_NOT1_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT0_WREXEC_B32: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT0_WREXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT1_WREXEC_B32: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT1_WREXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. 
This instruction is\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = SGPR[addr].b32', + SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b64 = SGPR[addr].b64', + SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b32 = S0.b32', + SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b64 = S0.b64', + SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL', + SOP1Op.S_SETPC_B64: 'PC = S0.i64', + SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64', + SOP1Op.S_RFE_B64: 'PC = S0.i64', + SOP1Op.S_SENDMSG_RTN_B32: 'If SDST is VCC then VCCZ is undefined.', + SOP1Op.S_SENDMSG_RTN_B64: 'If SDST is VCC then VCCZ is undefined.', + SOP1Op.S_BARRIER_SIGNAL: "if !InWorkgroup() then\nelsif ((barrierNumber == -2) && !WAVE_STATUS.PRIV) then\nelsif barrierNumber == 0 then\nelse\nBARRIER_STATE[barrierNumber & 63].signalCnt += 7'1U\nendif;", + SOP1Op.S_BARRIER_SIGNAL_ISFIRST: "if !InWorkgroup() then\nSCC = 1'0U\nelsif ((barrierNumber == -2) && !WAVE_STATUS.PRIV) then\nSCC = 1'0U\nelsif barrierNumber == 0 then\nSCC = 1'0U\nelse\n// Set SCC if this is the first signaling event for this barrier.\nSCC = BARRIER_STATE[barrierNumber & 63].signalCnt.u32 == 0U;\nBARRIER_STATE[barrierNumber & 63].signalCnt += 7'1U\nendif;", + SOP1Op.S_GET_BARRIER_STATE: "D0.u32 = 32'U({ 9'0, BARRIER_STATE[barrierNumber & 63].signalCnt.u7, 5'0, BARRIER_STATE[barrierNumber &", + SOP1Op.S_ALLOC_VGPR: "n = ReallocVgprs(32'I(S0[8 : 0].u32));\nif n < 0 then\nSCC = 1'0U\nelse\nNUM_VGPRS = n;\nSCC = 1'1U\nendif", + SOP1Op.S_SLEEP_VAR: 'S0[6:0] determines the sleep duration. The wave sleeps for (64*(S0[6:0]-1) … 64*S0[6:0]) clocks. The exact', + SOP1Op.S_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + SOP1Op.S_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + SOP1Op.S_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + SOP1Op.S_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + SOP1Op.S_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + SOP1Op.S_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + SOP1Op.S_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + SOP1Op.S_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + SOP1Op.S_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + SOP1Op.S_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + SOP1Op.S_CVT_HI_F32_F16: 'D0.f32 = f16_to_f32(S0[31 : 16].f16)', + SOP1Op.S_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + SOP1Op.S_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + SOP1Op.S_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + SOP1Op.S_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", +} + +SOP2Op_PCODE = { + SOP2Op.S_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 
1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_ADD_CO_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', + SOP2Op.S_SUB_CO_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', + SOP2Op.S_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0', + SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0", + SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MAX_U32: 'SCC = S0.u32 >= S1.u32;\nD0.u32 = SCC ? 
S0.u32 : S1.u32', + SOP2Op.S_AND_B32: 'D0.u32 = (S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_AND_B64: 'D0.u64 = (S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_OR_B32: 'D0.u32 = (S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_OR_B64: 'D0.u64 = (S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XOR_B64: 'D0.u64 = (S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NAND_B32: 'D0.u32 = ~(S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NAND_B64: 'D0.u64 = ~(S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NOR_B32: 'D0.u32 = ~(S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NOR_B64: 'D0.u64 = ~(S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XNOR_B64: 'D0.u64 = ~(S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_AND_NOT1_B32: 'D0.u32 = (S0.u32 & ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_AND_NOT1_B64: 'D0.u64 = (S0.u64 & ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_OR_NOT1_B32: 'D0.u32 = (S0.u32 | ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_OR_NOT1_B64: 'D0.u64 = (S0.u64 | ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S1[22 : 16].u32) - 1U));\nSCC = D0.u32 != 0U', + SOP2Op.S_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32);\nSCC = D0.i32 != 0', + SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL', + SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + SOP2Op.S_BFM_B64: 'D0.u64 = (((1ULL << S0[5 : 0].u32) - 1ULL) << S1[5 : 0].u32)', + SOP2Op.S_MUL_I32: 'D0.i32 = S0.i32 * S1.i32', + SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + SOP2Op.S_CSELECT_B32: 'D0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_CSELECT_B64: 'D0.u64 = SCC ? 
S0.u64 : S1.u64', + SOP2Op.S_PACK_LL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_LH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_HH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[31 : 16].u16 }', + SOP2Op.S_PACK_HL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[31 : 16].u16 }', + SOP2Op.S_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + SOP2Op.S_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + SOP2Op.S_MIN_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 < S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && sign(S0.f32) &&\n!sign(S1.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + SOP2Op.S_MAX_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + SOP2Op.S_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + SOP2Op.S_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', + SOP2Op.S_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', + SOP2Op.S_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + SOP2Op.S_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + SOP2Op.S_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + SOP2Op.S_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + SOP2Op.S_MIN_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + SOP2Op.S_MAX_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 > S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && !sign(S0.f16) &&\nsign(S1.f16))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + SOP2Op.S_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + SOP2Op.S_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', + SOP2Op.S_MINIMUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S1.f32\nelsif ((S0.f32 < S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 
0.0F) && sign(S0.f32) &&\n!sign(S1.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + SOP2Op.S_MAXIMUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S1.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + SOP2Op.S_MINIMUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S1.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + SOP2Op.S_MAXIMUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S1.f16\nelsif ((S0.f16 > S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && !sign(S0.f16) &&\nsign(S1.f16))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + SOP2Op.S_ADD_NC_U64: 'D0.u64 = S0.u64 + S1.u64', + SOP2Op.S_SUB_NC_U64: 'D0.u64 = S0.u64 - S1.u64', + SOP2Op.S_MUL_U64: 'D0.u64 = S0.u64 * S1.u64', +} + +SOPCOp_PCODE = { + SOPCOp.S_CMP_EQ_I32: 'SCC = S0.i32 == S1.i32', + SOPCOp.S_CMP_LG_I32: 'SCC = S0.i32 <> S1.i32', + SOPCOp.S_CMP_GT_I32: 'SCC = S0.i32 > S1.i32', + SOPCOp.S_CMP_GE_I32: 'SCC = S0.i32 >= S1.i32', + SOPCOp.S_CMP_LT_I32: 'SCC = S0.i32 < S1.i32', + SOPCOp.S_CMP_LE_I32: 'SCC = S0.i32 <= S1.i32', + SOPCOp.S_CMP_EQ_U32: 'SCC = S0.u32 == S1.u32', + SOPCOp.S_CMP_LG_U32: 'SCC = S0.u32 <> S1.u32', + SOPCOp.S_CMP_GT_U32: 'SCC = S0.u32 > S1.u32', + SOPCOp.S_CMP_GE_U32: 'SCC = S0.u32 >= S1.u32', + SOPCOp.S_CMP_LT_U32: 'SCC = S0.u32 < S1.u32', + SOPCOp.S_CMP_LE_U32: 'SCC = S0.u32 <= S1.u32', + SOPCOp.S_BITCMP0_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U", + SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U", + SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64', + SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64', + SOPCOp.S_CMP_LT_F32: 'SCC = S0.f32 < S1.f32', + SOPCOp.S_CMP_LT_F16: 'SCC = S0.f16 < S1.f16', + SOPCOp.S_CMP_EQ_F32: 'SCC = S0.f32 == S1.f32', + SOPCOp.S_CMP_EQ_F16: 'SCC = S0.f16 == S1.f16', + SOPCOp.S_CMP_LE_F32: 'SCC = S0.f32 <= S1.f32', + SOPCOp.S_CMP_LE_F16: 'SCC = S0.f16 <= S1.f16', + SOPCOp.S_CMP_GT_F32: 'SCC = S0.f32 > S1.f32', + SOPCOp.S_CMP_GT_F16: 'SCC = S0.f16 > S1.f16', + SOPCOp.S_CMP_LG_F32: 'SCC = S0.f32 <> S1.f32', + SOPCOp.S_CMP_LG_F16: 'SCC = S0.f16 <> S1.f16', + 
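# NOTE (editorial comment, not emitted by pdf.py): "<>" in this pseudocode is the + # ordered not-equal ("less than or greater than") comparison; it evaluates false + # when either operand is NaN, which is why the S_CMP_NLG entries below note that + # !(<>) is not the same operation as == for NaN inputs. + 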
SOPCOp.S_CMP_GE_F32: 'SCC = S0.f32 >= S1.f32', + SOPCOp.S_CMP_GE_F16: 'SCC = S0.f16 >= S1.f16', + SOPCOp.S_CMP_O_F32: "SCC = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + SOPCOp.S_CMP_O_F16: "SCC = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + SOPCOp.S_CMP_U_F32: "SCC = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + SOPCOp.S_CMP_U_F16: "SCC = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + SOPCOp.S_CMP_NGE_F32: 'SCC = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', + SOPCOp.S_CMP_NGE_F16: 'SCC = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + SOPCOp.S_CMP_NLG_F32: 'SCC = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + SOPCOp.S_CMP_NLG_F16: 'SCC = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + SOPCOp.S_CMP_NGT_F32: 'SCC = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + SOPCOp.S_CMP_NGT_F16: 'SCC = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + SOPCOp.S_CMP_NLE_F32: 'SCC = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + SOPCOp.S_CMP_NLE_F16: 'SCC = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + SOPCOp.S_CMP_NEQ_F32: 'SCC = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + SOPCOp.S_CMP_NEQ_F16: 'SCC = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + SOPCOp.S_CMP_NLT_F32: 'SCC = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + SOPCOp.S_CMP_NLT_F16: 'SCC = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', +} + +SOPKOp_PCODE = { + SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(S0.i16))", + SOPKOp.S_VERSION: '// Do nothing - for use by tools only', + SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(S0.i16))\nendif", + SOPKOp.S_ADDK_CO_I32: "tmp = D0.i32;\nD0.i32 = D0.i32 + 32'I(signext(S0.i16));\nSCC = ((tmp[31] == S0.i16[15]) && (tmp[31] != D0.i32[31]));", + SOPKOp.S_MULK_I32: "D0.i32 = D0.i32 * 32'I(signext(S0.i16))", + SOPKOp.S_GETREG_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", + SOPKOp.S_SETREG_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_SETREG_IMM32_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL", +} + +SOPPOp_PCODE = { + SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nendfor', + SOPPOp.S_SETHALT: 'When 
halt type control is set to 1 (FATAL HALT bit select): Set FATAL_HALT bit to value of SIMM16[0]; 1 =\nfatal_halt, 0 = clear FATAL_HALT bit. Setting the fatal_halt flag halts the shader in or outside of the trap', + SOPPOp.S_DELAY_ALU: 'instruction may be omitted. For wave64 the compiler may not know the status of the EXEC mask and hence\n// 1 cycle delay here\n// 2 cycles delay here', + SOPPOp.S_TRAP: '// PC passed into trap handler points to S_TRAP itself,\nPC = TBA.i64;\n// trap base address', + SOPPOp.S_BARRIER_WAIT: '// barrierBit 0: reserved\n// barrierBit 1: workgroup\n// barrierBit 2: trap\n// Implemented as a power-saving idle', + SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;", + SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCZ: "If VCCZ is 1 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCNZ: "If VCCZ is 0 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", +} + +SMEMOp_PCODE = { + SMEMOp.S_LOAD_B32: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32', + SMEMOp.S_LOAD_B64: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', + SMEMOp.S_LOAD_B128: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', + SMEMOp.S_LOAD_B256: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32', + SMEMOp.S_LOAD_B512: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32', + SMEMOp.S_LOAD_B96: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32', + SMEMOp.S_LOAD_I8: "SDATA.i32 = 32'I(signext(MEM[ADDR].i8))", + SMEMOp.S_LOAD_U8: "SDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + SMEMOp.S_LOAD_I16: "SDATA.i32 = 
32'I(signext(MEM[ADDR].i16))", + SMEMOp.S_LOAD_U16: "SDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + SMEMOp.S_BUFFER_LOAD_B32: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32', + SMEMOp.S_BUFFER_LOAD_B64: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', + SMEMOp.S_BUFFER_LOAD_B128: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', + SMEMOp.S_BUFFER_LOAD_B256: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32', + SMEMOp.S_BUFFER_LOAD_B512: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32', + SMEMOp.S_BUFFER_LOAD_B96: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32', + SMEMOp.S_BUFFER_LOAD_I8: "SDATA.i32 = 32'I(signext(MEM[ADDR].i8))", + SMEMOp.S_BUFFER_LOAD_U8: "SDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + SMEMOp.S_BUFFER_LOAD_I16: "SDATA.i32 = 32'I(signext(MEM[ADDR].i16))", + SMEMOp.S_BUFFER_LOAD_U16: "SDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + SMEMOp.S_PREFETCH_INST: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[63 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", + SMEMOp.S_PREFETCH_INST_PC_REL: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(PC[63 : 0].i64 + 8LL + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S1.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", + SMEMOp.S_PREFETCH_DATA: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[63 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", + SMEMOp.S_BUFFER_PREFETCH_DATA: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[47 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = 
S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", + SMEMOp.S_PREFETCH_DATA_PC_REL: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(PC[63 : 0].i64 + 8LL + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S1.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", +} + +VOP1Op_PCODE = { + VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32', + VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', + VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', + VOP1Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + VOP1Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + VOP1Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + VOP1Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + VOP1Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + VOP1Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + VOP1Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', + VOP1Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', + VOP1Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP1Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', + VOP1Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', + VOP1Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', + VOP1Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', + VOP1Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', + VOP1Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', + VOP1Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', + VOP1Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', + VOP1Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', + VOP1Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', + VOP1Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP1Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP1Op.V_MOV_B16: 'D0.b16 = S0.b16', + VOP1Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP1Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', + VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', + VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + 
VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", + VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", + VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', + VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', + VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', + VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", + VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", + VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nVGPR[laneId][addr].b32 = S0.b32', + VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = VGPR[laneId][addr].b32', + VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', + VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', + VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", + VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', + VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", + VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', + VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", + VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", + VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", + VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", + VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', + VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", + 
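# NOTE (editorial comment, not emitted by pdf.py): the PI * 2.0 scaling in the + # V_SIN_* / V_COS_* entries reflects that these ops take their input in + # revolutions (1.0 = one full period) rather than radians. A minimal sketch of + # how these string tables are consumed, assuming the JIT flow named in the + # commit message ("jit the pcode"); compile_pcode is a hypothetical name, the + # real translator lives in extra/assembly/amd/pcode.py: + #   fn = compile_pcode(VOP1Op_PCODE[VOP1Op.V_SIN_F32])  # pseudocode str -> callable + #   result = fn(s0, s1, s2, d0, ...)                    # evaluated per lane by emu.py + 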
VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", + VOP1Op.V_SAT_PK_U8_I16: "tmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", + VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', + VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', + VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp', + VOP1Op.V_SWAP_B16: 'tmp = D0.b16;\nD0.b16 = S0.b16;\nS0.b16 = tmp', + VOP1Op.V_PERMLANE64_B32: "declare tmp : 32'B[64];\ndeclare lane : 32'U;\nif WAVE32 then\n// Supported in wave64 ONLY; treated as scalar NOP in wave32\nelse\nfor lane in 0U : 63U do\n// Copy original S0 in case D==S0\ntmp[lane] = VGPR[lane][SRC0.u32]\nendfor;\nfor lane in 0U : 63U do\naltlane = { ~lane[5], lane[4 : 0] };\n// 0<->32, ..., 31<->63\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altlane]\nendif\nendfor\nendif", + VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\ntmp = VGPR[laneId][addrd].b32;', + VOP1Op.V_NOT_B16: 'D0.u16 = ~S0.u16', + VOP1Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + VOP1Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", + VOP1Op.V_CVT_F32_FP8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].fp8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].fp8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].fp8)\nelse\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].fp8)\nendif", + VOP1Op.V_CVT_F32_BF8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].bf8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].bf8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].bf8)\nelse\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].bf8)\nendif", + VOP1Op.V_CVT_PK_F32_FP8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8);\nD0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)', + VOP1Op.V_CVT_PK_F32_BF8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)', +} + +VOP2Op_PCODE = { + VOP2Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? 
S1.u32 : S0.u32', + VOP2Op.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64', + VOP2Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + VOP2Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + VOP2Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', + VOP2Op.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64', + VOP2Op.V_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP2Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + VOP2Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", + VOP2Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", + VOP2Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", + VOP2Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", + VOP2Op.V_MIN_NUM_F64: 'if (isSignalNAN(S0.f64) || isSignalNAN(S1.f64)) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(S0.f64) && isNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 < S1.f64) || ((abs(S0.f64) == 0.0) && (abs(S1.f64) == 0.0) && sign(S0.f64) &&\n!sign(S1.f64))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif', + VOP2Op.V_MAX_NUM_F64: 'if (isSignalNAN(S0.f64) || isSignalNAN(S1.f64)) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(S0.f64) && isNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 > S1.f64) || ((abs(S0.f64) == 0.0) && (abs(S1.f64) == 0.0) && !sign(S0.f64) &&\nsign(S1.f64))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif', + VOP2Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', + VOP2Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', + VOP2Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', + VOP2Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? 
S0.u32 : S1.u32', + VOP2Op.V_MIN_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 < S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && sign(S0.f32) &&\n!sign(S1.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + VOP2Op.V_MAX_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + VOP2Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOP2Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', + VOP2Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', + VOP2Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', + VOP2Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', + VOP2Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP2Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', + VOP2Op.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)', + VOP2Op.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', + VOP2Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', + VOP2Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', + VOP2Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOP2Op.V_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', + VOP2Op.V_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', + VOP2Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + VOP2Op.V_MIN_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + VOP2Op.V_MAX_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 > S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && !sign(S0.f16) &&\nsign(S1.f16))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + VOP2Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + VOP2Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + VOP2Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', + VOP2Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + VOP2Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', + VOP2Op.V_FMAMK_F16: 'D0.f16 = fma(S0.f16, SIMM32.f16, S1.f16)', + VOP2Op.V_FMAAK_F16: 'D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16)', + VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP2Op.V_PK_FMAC_F16: 'D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16);\nD0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16)', +} + +VOP3Op_PCODE = { + VOP3Op.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. 
Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
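# Editor's note: every V_CMP_* entry above follows one pattern — evaluate the
# comparison per lane, then set that lane's bit in a 64-bit mask (VCC, or an
# SGPR pair in the VOP3 encoding). A minimal wave64 sketch, assuming inactive
# lanes are zeroed; the helper name is illustrative, not part of pcode.py:
def v_cmp_lt_u32(s0: list[int], s1: list[int], exec_mask: int) -> int:
    d0 = 0
    for lane in range(64):
        if (exec_mask >> lane) & 1 and s0[lane] < s1[lane]:
            d0 |= 1 << lane  # D0.u64[laneId] = S0.u32 < S1.u32
    return d0

assert v_cmp_lt_u32([1] * 64, [2] * 64, 0b1011) == 0b1011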
Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. 
Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', + VOP3Op.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
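# Editor's note: the V_CMP_CLASS_* pcode above tests S0 against a 10-bit class
# mask in S1. A float-level Python sketch of the f32 case — it can only report
# quiet NaNs, since Python floats do not preserve the signaling-NaN bit:
import math, struct

def cmp_class_f32(x: float, mask: int) -> bool:
    bits = struct.unpack('<I', struct.pack('<f', x))[0]
    neg, exp = bits >> 31, (bits >> 23) & 0xff
    if math.isnan(x):      bit = 1                 # quiet NAN (S1.u[1])
    elif exp == 255:       bit = 2 if neg else 9   # +-INF
    elif exp > 0:          bit = 3 if neg else 8   # +-normal
    elif bits & 0x7fffff:  bit = 4 if neg else 7   # +-denormal
    else:                  bit = 5 if neg else 6   # +-0.0
    return bool((mask >> bit) & 1)

assert cmp_class_f32(float('-inf'), 1 << 2) and cmp_class_f32(-0.0, 1 << 5)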
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f16 == S1.f16', + VOP3Op.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', + VOP3Op.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', + VOP3Op.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', + VOP3Op.V_CMPX_GE_F16: 'EXEC.u64[laneId] = S0.f16 >= S1.f16', + VOP3Op.V_CMPX_O_F16: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + VOP3Op.V_CMPX_U_F16: "EXEC.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + VOP3Op.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + VOP3Op.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', + VOP3Op.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f32 == S1.f32', + VOP3Op.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', + VOP3Op.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', + VOP3Op.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', + VOP3Op.V_CMPX_GE_F32: 'EXEC.u64[laneId] = S0.f32 >= S1.f32', + VOP3Op.V_CMPX_O_F32: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + VOP3Op.V_CMPX_U_F32: "EXEC.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + VOP3Op.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', + VOP3Op.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', + VOP3Op.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f64 == S1.f64', + VOP3Op.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', + VOP3Op.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', + VOP3Op.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', + VOP3Op.V_CMPX_GE_F64: 'EXEC.u64[laneId] = S0.f64 >= S1.f64', + VOP3Op.V_CMPX_O_F64: 'EXEC.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64))', + VOP3Op.V_CMPX_U_F64: 'EXEC.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64))', + VOP3Op.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <', + VOP3Op.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', + VOP3Op.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i16 == S1.i16', + VOP3Op.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', + VOP3Op.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', + VOP3Op.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', + VOP3Op.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', + VOP3Op.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', + VOP3Op.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u16 == S1.u16', + VOP3Op.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', + VOP3Op.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', + VOP3Op.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', + VOP3Op.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', + VOP3Op.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', + VOP3Op.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i32 == S1.i32', + VOP3Op.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', + VOP3Op.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', + VOP3Op.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', + VOP3Op.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', + VOP3Op.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', + VOP3Op.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u32 == S1.u32', + VOP3Op.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', + VOP3Op.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', + VOP3Op.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', + VOP3Op.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', + VOP3Op.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', + VOP3Op.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.i64 == S1.i64', + VOP3Op.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', + VOP3Op.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', + VOP3Op.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', + VOP3Op.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', + VOP3Op.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64', + VOP3Op.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u64 == S1.u64', + VOP3Op.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', + VOP3Op.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', + VOP3Op.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', + VOP3Op.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', + VOP3Op.V_CMPX_CLASS_F16: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CMPX_CLASS_F32: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CMPX_CLASS_F64: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 
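# Editor's note: V_CMPX_* is the same comparison as V_CMP_*, but the result
# mask is written to EXEC itself, switching off every lane whose test fails.
# A sketch under the same wave64 assumptions as the V_CMP example above:
def v_cmpx_gt_i32(s0: list[int], s1: list[int], exec_mask: int) -> int:
    new_exec = 0
    for lane in range(64):
        if (exec_mask >> lane) & 1 and s0[lane] > s1[lane]:
            new_exec |= 1 << lane  # EXEC.u64[laneId] = S0.i32 > S1.i32
    return new_exec

assert v_cmpx_gt_i32([3, -1] * 32, [0] * 64, (1 << 64) - 1) == 0x5555555555555555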
5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32', + VOP3Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP3Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', + VOP3Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', + VOP3Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + VOP3Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + VOP3Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + VOP3Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + VOP3Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + VOP3Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + VOP3Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', + VOP3Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', + VOP3Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP3Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', + VOP3Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', + VOP3Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', + VOP3Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', + VOP3Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', + VOP3Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', + VOP3Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', + VOP3Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', + VOP3Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', + VOP3Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', + VOP3Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP3Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP3Op.V_MOV_B16: 'D0.b16 = S0.b16', + VOP3Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP3Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP3Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP3Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP3Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', + VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', + VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP3Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP3Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP3Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP3Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", + VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", + VOP3Op.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP3Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U 
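# Editor's note: the V_RNDNE_* pcode above spells out round-half-to-even as
# floor(x + 0.5) plus an evenness correction. A direct Python transliteration
# of the halfway-case logic:
import math

def rndne(x: float) -> float:
    d = float(math.floor(x + 0.5))
    if math.floor(x) % 2 == 0 and x - math.floor(x) == 0.5:
        d -= 1.0  # halfway case next to an even integer rounds down
    return d

assert rndne(2.5) == 2.0 and rndne(3.5) == 4.0 and rndne(-1.5) == -2.0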
then\nD0.i32 = i;\nendif\nendfor", + VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", + VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', + VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', + VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', + VOP3Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", + VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", + VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nVGPR[laneId][addr].b32 = S0.b32', + VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = VGPR[laneId][addr].b32', + VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP3Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', + VOP3Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', + VOP3Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP3Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", + VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', + VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", + VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', + VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", + VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", + VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", + VOP3Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + VOP3Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + VOP3Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + VOP3Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", + VOP3Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', + VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", + VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", + VOP3Op.V_SAT_PK_U8_I16: "tmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", + VOP3Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', + VOP3Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', + VOP3Op.V_NOT_B16: 'D0.u16 = ~S0.u16', + VOP3Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + VOP3Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", + VOP3Op.V_CVT_F32_FP8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].fp8)\nelsif OPSEL[1 : 0].u2 == 
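# Editor's note: V_CLZ/V_CTZ above scan for the first set bit from the MSB or
# the LSB respectively, returning -1 when none is found. A sketch assuming
# first-match-wins semantics (the printed loops omit the early exit):
def clz_i32_u32(v: int) -> int:
    for i in range(32):
        if (v >> (31 - i)) & 1:
            return i          # offset of the first 1 from the MSB
    return -1

def ctz_i32_b32(v: int) -> int:
    for i in range(32):
        if (v >> i) & 1:
            return i          # offset of the first 1 from the LSB
    return -1

assert clz_i32_u32(0x00010000) == 15 and ctz_i32_b32(0x00010000) == 16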
2'2U then\n// Byte select bits are reversed\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].fp8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].fp8)\nelse\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].fp8)\nendif", + VOP3Op.V_CVT_F32_BF8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].bf8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].bf8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].bf8)\nelse\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].bf8)\nendif", + VOP3Op.V_CVT_PK_F32_FP8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8);\nD0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)', + VOP3Op.V_CVT_PK_F32_BF8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)', + VOP3Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32', + VOP3Op.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64', + VOP3Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + VOP3Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + VOP3Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', + VOP3Op.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64', + VOP3Op.V_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP3Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + VOP3Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", + VOP3Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", + VOP3Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", + VOP3Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", + VOP3Op.V_MIN_NUM_F64: 'if (isSignalNAN(S0.f64) || isSignalNAN(S1.f64)) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(S0.f64) && isNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 < S1.f64) || ((abs(S0.f64) == 0.0) && (abs(S1.f64) == 0.0) && sign(S0.f64) &&\n!sign(S1.f64))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif', + VOP3Op.V_MAX_NUM_F64: 'if (isSignalNAN(S0.f64) || isSignalNAN(S1.f64)) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(S0.f64) && isNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 > S1.f64) || ((abs(S0.f64) == 0.0) && (abs(S1.f64) == 0.0) && !sign(S0.f64) &&\nsign(S1.f64))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif', + VOP3Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', + VOP3Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', + VOP3Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', + VOP3Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? 
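# Editor's note: V_CVT_F32_FP8/BF8 above pick one byte of the source VGPR via
# OPSEL, with the two select bits stored in reversed order ("byte select bits
# are reversed"). A sketch of just the byte-select step; the fp8 decode
# itself is elided:
def fp8_src_byte(vsrc: int, opsel: int) -> int:
    byte = {0b00: 0, 0b10: 1, 0b01: 2, 0b11: 3}[opsel & 0b11]  # reversed bits
    return (vsrc >> (8 * byte)) & 0xff

assert fp8_src_byte(0x44332211, 0b10) == 0x22  # OPSEL=2 selects bits [15:8]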
S0.u32 : S1.u32', + VOP3Op.V_MIN_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 < S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && sign(S0.f32) &&\n!sign(S1.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + VOP3Op.V_MAX_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + VOP3Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOP3Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', + VOP3Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', + VOP3Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', + VOP3Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', + VOP3Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP3Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', + VOP3Op.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)', + VOP3Op.V_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', + VOP3Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', + VOP3Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', + VOP3Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOP3Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + VOP3Op.V_MIN_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + VOP3Op.V_MAX_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 > S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && !sign(S0.f16) &&\nsign(S1.f16))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + VOP3Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + VOP3Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + VOP3Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', + VOP3Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + VOP3Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', + VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP3Op.V_FMA_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif", + VOP3Op.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32", + VOP3Op.V_MAD_U32_U24: "D0.u32 = 
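# Editor's note: V_MIN_NUM/V_MAX_NUM above implement IEEE-754 minNum/maxNum —
# a single NaN input is dropped in favor of the numeric operand, and -0.0
# orders below +0.0. A sketch of the f32 min, ignoring the TRAPSTS side
# effect on signaling NaNs:
import math

def min_num_f32(a: float, b: float) -> float:
    if math.isnan(a) and math.isnan(b): return float('nan')
    if math.isnan(a): return b
    if math.isnan(b): return a
    if a == 0.0 and b == 0.0:  # -0 < +0 in this comparison
        return a if math.copysign(1.0, a) < math.copysign(1.0, b) else b
    return a if a < b else b

assert min_num_f32(float('nan'), 3.0) == 3.0
assert math.copysign(1.0, min_num_f32(0.0, -0.0)) == -1.0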
32'U(S0.u24) * 32'U(S1.u24) + S2.u32", + VOP3Op.V_CUBEID_F32: '// Set D0.f = cubemap face ID ({0.0, 1.0, ..., 5.0}).\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = 5.0F\nelse\nD0.f32 = 4.0F\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = 3.0F\nelse\nD0.f32 = 2.0F\nendif\nelse\nif S0.f32 < 0.0F then\nD0.f32 = 1.0F\nelse\nD0.f32 = 0.0F\nendif\nendif', + VOP3Op.V_CUBESC_F32: '// D0.f = cubemap S coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = -S0.f32\nelse\nD0.f32 = S0.f32\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S0.f32\nelse\nif S0.f32 < 0.0F then\nD0.f32 = S2.f32\nelse\nD0.f32 = -S2.f32\nendif\nendif', + VOP3Op.V_CUBETC_F32: '// D0.f = cubemap T coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = -S1.f32\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = -S2.f32\nelse\nD0.f32 = S2.f32\nendif\nelse\nD0.f32 = -S1.f32\nendif', + VOP3Op.V_CUBEMA_F32: '// D0.f = 2.0 * cubemap major axis.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = S2.f32 * 2.0F\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S1.f32 * 2.0F\nelse\nD0.f32 = S0.f32 * 2.0F\nendif', + VOP3Op.V_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S2[4 : 0].u32) - 1U))', + VOP3Op.V_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32)', + VOP3Op.V_BFI_B32: 'D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32))', + VOP3Op.V_FMA_F32: 'D0.f32 = fma(S0.f32, S1.f32, S2.f32)', + VOP3Op.V_FMA_F64: 'D0.f64 = fma(S0.f64, S1.f64, S2.f64)', + VOP3Op.V_LERP_U8: 'tmp = ((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1U << 24U);\ntmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1U << 16U);\ntmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1U << 8U);\ntmp += ((S0.u32[7 : 0] + S1.u32[7 : 0] + S2.u32[0].u8) >> 1U);\nD0.u32 = tmp.u32', + VOP3Op.V_ALIGNBIT_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> S2.u32[4 : 0]) & 0xffffffffLL)", + VOP3Op.V_ALIGNBYTE_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> (S2.u32[1 : 0] * 8U)) & 0xffffffffLL)", + VOP3Op.V_MULLIT_F32: "if ((S1.f32 == -MAX_FLOAT_F32) || (64'F(S1.f32) == -INF) || isNAN(64'F(S1.f32)) || (S2.f32 <= 0.0F) ||\nisNAN(64'F(S2.f32))) then\nD0.f32 = -MAX_FLOAT_F32\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP3Op.V_MIN3_I32: 'D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MIN3_U32: 'D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MAX3_I32: 'D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MAX3_U32: 'D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MED3_I32: 'if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32 then\nD0.i32 = v_max_i32(S1.i32, S2.i32)\nelsif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32 then\nD0.i32 = v_max_i32(S0.i32, S2.i32)\nelse\nD0.i32 = v_max_i32(S0.i32, S1.i32)\nendif', + VOP3Op.V_MED3_U32: 'if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32 then\nD0.u32 = v_max_u32(S1.u32, S2.u32)\nelsif 
v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32 then\nD0.u32 = v_max_u32(S0.u32, S2.u32)\nelse\nD0.u32 = v_max_u32(S0.u32, S1.u32)\nendif', + VOP3Op.V_SAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3Op.V_SAD_HI_U8: "D0.u32 = (32'U(v_sad_u8(S0, S1, 0U)) << 16U) + S2.u32", + VOP3Op.V_SAD_U16: '// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp', + VOP3Op.V_SAD_U32: '// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32', + VOP3Op.V_CVT_PK_U8_F32: "tmp = (S2.u32 & 32'U(~(0xff << (S1.u32[1 : 0].u32 * 8U))));\ntmp = (tmp | ((32'U(f32_to_u8(S0.f32)) & 255U) << (S1.u32[1 : 0].u32 * 8U)));\nD0.u32 = tmp", + VOP3Op.V_DIV_FIXUP_F32: "sign_out = (sign(S1.f32) ^ sign(S2.f32));\nif isNAN(64'F(S2.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S2.f32)))\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif ((64'F(S1.f32) == 0.0) && (64'F(S2.f32) == 0.0)) then\n// 0/0\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(abs(S1.f32)) == +INF) && (64'F(abs(S2.f32)) == +INF)) then\n// inf/inf\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(S1.f32) == 0.0) || (64'F(abs(S2.f32)) == +INF)) then\n// x/0, or inf/y\nD0.f32 = sign_out ? -INF.f32 : +INF.f32\nelsif ((64'F(abs(S1.f32)) == +INF) || (64'F(S2.f32) == 0.0)) then\n// x/inf, 0/y\nD0.f32 = sign_out ? -0.0F : 0.0F\nelsif exponent(S2.f32) - exponent(S1.f32) < -150 then\nD0.f32 = sign_out ? -UNDERFLOW_F32 : UNDERFLOW_F32\nelsif exponent(S1.f32) == 255 then\nD0.f32 = sign_out ? -OVERFLOW_F32 : OVERFLOW_F32\nelse\nD0.f32 = sign_out ? -abs(S0.f32) : abs(S0.f32)\nendif", + VOP3Op.V_DIV_FIXUP_F64: "sign_out = (sign(S1.f64) ^ sign(S2.f64));\nif isNAN(S2.f64) then\nD0.f64 = cvtToQuietNAN(S2.f64)\nelsif isNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif ((S1.f64 == 0.0) && (S2.f64 == 0.0)) then\n// 0/0\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((abs(S1.f64) == +INF) && (abs(S2.f64) == +INF)) then\n// inf/inf\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((S1.f64 == 0.0) || (abs(S2.f64) == +INF)) then\n// x/0, or inf/y\nD0.f64 = sign_out ? -INF : +INF\nelsif ((abs(S1.f64) == +INF) || (S2.f64 == 0.0)) then\n// x/inf, 0/y\nD0.f64 = sign_out ? -0.0 : 0.0\nelsif exponent(S2.f64) - exponent(S1.f64) < -1075 then\nD0.f64 = sign_out ? -UNDERFLOW_F64 : UNDERFLOW_F64\nelsif exponent(S1.f64) == 2047 then\nD0.f64 = sign_out ? -OVERFLOW_F64 : OVERFLOW_F64\nelse\nD0.f64 = sign_out ? 
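# Editor's note: the V_MED3_* pcode above selects the median of three by
# discarding the maximum and taking the max of the remaining pair. A sketch:
def med3(a, b, c):
    m = max(a, b, c)
    if m == a: return max(b, c)   # v_max3 == S0 -> max(S1, S2)
    if m == b: return max(a, c)   # v_max3 == S1 -> max(S0, S2)
    return max(a, b)

assert med3(5, 1, 9) == 5 and med3(7, 7, 2) == 7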
-abs(S0.f64) : abs(S0.f64)\nendif", + VOP3Op.V_MIN3_NUM_F32: 'D0.f32 = v_min_num_f32(v_min_num_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MAX3_NUM_F32: 'D0.f32 = v_max_num_f32(v_max_num_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MIN3_NUM_F16: 'D0.f16 = v_min_num_f16(v_min_num_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MAX3_NUM_F16: 'D0.f16 = v_max_num_f16(v_max_num_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MINIMUM3_F32: 'D0.f32 = v_minimum_f32(v_minimum_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MAXIMUM3_F32: 'D0.f32 = v_maximum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MINIMUM3_F16: 'D0.f16 = v_minimum_f16(v_minimum_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MAXIMUM3_F16: 'D0.f16 = v_maximum_f16(v_maximum_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MED3_NUM_F32: "if (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)) || isNAN(64'F(S2.f32))) then\nD0.f32 = v_min3_num_f32(S0.f32, S1.f32, S2.f32)\nelsif v_max3_num_f32(S0.f32, S1.f32, S2.f32) == S0.f32 then\nD0.f32 = v_max_num_f32(S1.f32, S2.f32)\nelsif v_max3_num_f32(S0.f32, S1.f32, S2.f32) == S1.f32 then\nD0.f32 = v_max_num_f32(S0.f32, S2.f32)\nelse\nD0.f32 = v_max_num_f32(S0.f32, S1.f32)\nendif", + VOP3Op.V_MED3_NUM_F16: "if (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)) || isNAN(64'F(S2.f16))) then\nD0.f16 = v_min3_num_f16(S0.f16, S1.f16, S2.f16)\nelsif v_max3_num_f16(S0.f16, S1.f16, S2.f16) == S0.f16 then\nD0.f16 = v_max_num_f16(S1.f16, S2.f16)\nelsif v_max3_num_f16(S0.f16, S1.f16, S2.f16) == S1.f16 then\nD0.f16 = v_max_num_f16(S0.f16, S2.f16)\nelse\nD0.f16 = v_max_num_f16(S0.f16, S1.f16)\nendif", + VOP3Op.V_DIV_FMAS_F32: 'if VCC.u64[laneId] then\nD0.f32 = 2.0F ** 32 * fma(S0.f32, S1.f32, S2.f32)\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif', + VOP3Op.V_DIV_FMAS_F64: 'if VCC.u64[laneId] then\nD0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)\nelse\nD0.f64 = fma(S0.f64, S1.f64, S2.f64)\nendif', + VOP3Op.V_MSAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 
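# Editor's note: V_DIV_FMAS above is one step of the software divide macro —
# an FMA whose result is conditionally scaled by 2**32 (f32) or 2**64 (f64)
# when the per-lane VCC bit set earlier by V_DIV_SCALE requests it. A
# one-lane f64 sketch (the non-fma fallback below is not a true fused op):
import math

def div_fmas_f64(s0: float, s1: float, s2: float, vcc_bit: int) -> float:
    r = math.fma(s0, s1, s2) if hasattr(math, 'fma') else s0 * s1 + s2
    return r * 2.0**64 if vcc_bit else r

assert div_fmas_f64(1.0, 1.0, 0.0, 0) == 1.0
assert div_fmas_f64(1.0, 1.0, 0.0, 1) == 2.0**64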
0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3Op.V_QSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", + VOP3Op.V_MQSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", + VOP3Op.V_MQSAD_U32_U8: "tmp[127 : 96] = 32'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32));\ntmp[95 : 64] = 32'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32));\ntmp[63 : 32] = 32'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32));\ntmp[31 : 0] = 32'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32));\nD0.b128 = tmp.b128", + VOP3Op.V_XOR3_B32: 'D0.u32 = (S0.u32 ^ S1.u32 ^ S2.u32)', + VOP3Op.V_MAD_U16: 'D0.u16 = S0.u16 * S1.u16 + S2.u16', + VOP3Op.V_PERM_B32: 'D0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])', + VOP3Op.V_XAD_U32: 'D0.u32 = (S0.u32 ^ S1.u32) + S2.u32', + VOP3Op.V_LSHL_ADD_U32: 'D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32', + VOP3Op.V_ADD_LSHL_U32: 'D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32)', + VOP3Op.V_FMA_F16: 'D0.f16 = fma(S0.f16, S1.f16, S2.f16)', + VOP3Op.V_MIN3_I16: 'D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16)', + VOP3Op.V_MIN3_U16: 'D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16)', + VOP3Op.V_MAX3_I16: 'D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16)', + VOP3Op.V_MAX3_U16: 'D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16)', + VOP3Op.V_MED3_I16: 'if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16 then\nD0.i16 = v_max_i16(S1.i16, S2.i16)\nelsif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16 then\nD0.i16 = v_max_i16(S0.i16, S2.i16)\nelse\nD0.i16 = v_max_i16(S0.i16, S1.i16)\nendif', + VOP3Op.V_MED3_U16: 'if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16 then\nD0.u16 = v_max_u16(S1.u16, S2.u16)\nelsif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16 then\nD0.u16 = v_max_u16(S0.u16, S2.u16)\nelse\nD0.u16 = v_max_u16(S0.u16, S1.u16)\nendif', + VOP3Op.V_MAD_I16: 'D0.i16 = S0.i16 * S1.i16 + S2.i16', + VOP3Op.V_DIV_FIXUP_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S2.f16)))\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\nD0.f16 = sign_out ? -INF.f16 : +INF.f16\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\nD0.f16 = sign_out ? -16'0.0 : 16'0.0\nelse\nD0.f16 = sign_out ? 
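# Editor's note: V_SAD_U8 above accumulates absolute byte differences into
# S2, and V_MSAD_U8 additionally skips reference bytes equal to zero (the
# QSAD/MQSAD variants repeat these over sliding 32-bit windows). A sketch:
def sad_u8(s0: int, s1: int, acc: int) -> int:
    for sh in (0, 8, 16, 24):
        acc += abs(((s0 >> sh) & 0xff) - ((s1 >> sh) & 0xff))
    return acc & 0xffffffff

def msad_u8(s0: int, s1: int, acc: int) -> int:
    for sh in (0, 8, 16, 24):
        ref = (s1 >> sh) & 0xff
        if ref != 0:  # masked: zero reference bytes contribute nothing
            acc += abs(((s0 >> sh) & 0xff) - ref)
    return acc & 0xffffffff

assert sad_u8(0x01020304, 0x04030201, 0) == 8
assert msad_u8(0x01020304, 0x04000001, 0) == 6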
-abs(S0.f16) : abs(S0.f16)\nendif", + VOP3Op.V_ADD3_U32: 'D0.u32 = S0.u32 + S1.u32 + S2.u32', + VOP3Op.V_LSHL_OR_B32: 'D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32)', + VOP3Op.V_AND_OR_B32: 'D0.u32 = ((S0.u32 & S1.u32) | S2.u32)', + VOP3Op.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)', + VOP3Op.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32", + VOP3Op.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32", + VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor", + VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor", + VOP3Op.V_CNDMASK_B16: 'D0.u16 = VCC.u64[laneId] ? S1.u16 : S0.u16', + VOP3Op.V_MAXMIN_U32: 'D0.u32 = v_min_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MINMAX_U32: 'D0.u32 = v_max_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MAXMIN_I32: 'D0.i32 = v_min_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MINMAX_I32: 'D0.i32 = v_max_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_DOT2_F16_F16: 'tmp = S2.f16;\ntmp += S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp += S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.f16 = tmp', + VOP3Op.V_DOT2_BF16_BF16: 'tmp = S2.bf16;\ntmp += S0[15 : 0].bf16 * S1[15 : 0].bf16;\ntmp += S0[31 : 16].bf16 * S1[31 : 16].bf16;\nD0.bf16 = tmp', + VOP3Op.V_MINMAX_NUM_F32: 'D0.f32 = v_max_num_f32(v_min_num_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MAXMIN_NUM_F32: 'D0.f32 = v_min_num_f32(v_max_num_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MINMAX_NUM_F16: 'D0.f16 = v_max_num_f16(v_min_num_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MAXMIN_NUM_F16: 'D0.f16 = v_min_num_f16(v_max_num_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MINIMUMMAXIMUM_F32: 'D0.f32 = v_maximum_f32(v_minimum_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MAXIMUMMINIMUM_F32: 'D0.f32 = v_minimum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MINIMUMMAXIMUM_F16: 'D0.f16 = v_maximum_f16(v_minimum_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MAXIMUMMINIMUM_F16: 'D0.f16 = v_minimum_f16(v_maximum_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_S_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', + VOP3Op.V_S_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16);\nD0[31 : 16] = 16'0x0", + VOP3Op.V_S_LOG_F32: 'D0.f32 = log2(S0.f32)', + VOP3Op.V_S_LOG_F16: "D0.f16 = log2(S0.f16);\nD0[31 : 16] = 16'0x0", + VOP3Op.V_S_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP3Op.V_S_RCP_F16: "D0.f16 = 16'1.0 / S0.f16;\nD0[31 : 16] = 16'0x0", + VOP3Op.V_S_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP3Op.V_S_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16);\nD0[31 : 16] = 16'0x0", + VOP3Op.V_S_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP3Op.V_S_SQRT_F16: "D0.f16 = sqrt(S0.f16);\nD0[31 : 16] = 16'0x0", + VOP3Op.V_ADD_NC_U16: 'D0.u16 = S0.u16 + S1.u16', + VOP3Op.V_SUB_NC_U16: 'D0.u16 = 
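# Editor's note: V_PERMLANE16_B32 above swizzles lanes within each 16-lane
# row using one nibble per destination lane taken from { S2, S1 }. A sketch
# of a single row with all lanes active; the real pcode loops over rows and
# honors EXEC:
def permlane16_row(src_row: list[int], lanesel: int) -> list[int]:
    tmp = list(src_row)  # copy first, in case D == S0, as the pcode does
    return [tmp[(lanesel >> (4 * i)) & 0xf] for i in range(16)]

row = list(range(16))
assert permlane16_row(row, 0x0123456789abcdef) == row[::-1]  # reversal swizzle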
S0.u16 - S1.u16', + VOP3Op.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16', + VOP3Op.V_CVT_PK_I16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_i16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_i16_f32(S0.f32));", + VOP3Op.V_CVT_PK_U16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_u16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_u16_f32(S0.f32));", + VOP3Op.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16', + VOP3Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16', + VOP3Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16', + VOP3Op.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? S0.i16 : S1.i16', + VOP3Op.V_ADD_NC_I16: 'D0.i16 = S0.i16 + S1.i16', + VOP3Op.V_SUB_NC_I16: 'D0.i16 = S0.i16 - S1.i16', + VOP3Op.V_PERMLANE16_VAR_B32: "declare tmp : 32'B[64];\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : wave32.u1 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nlane = row * 16 + i;\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[row * 16 + VGPR[lane][SRC1.u32][3 : 0].i32]\nendif\nendfor\nendfor", + VOP3Op.V_PERMLANEX16_VAR_B32: "declare tmp : 32'B[64];\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : wave32.u1 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nlane = row * 16 + i;\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altrow.i32 * 16 + VGPR[lane][SRC1.u32][3 : 0].i32]\nendif\nendfor\nendfor", + VOP3Op.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16', + VOP3Op.V_CVT_PK_NORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);", + VOP3Op.V_CVT_PK_NORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);", + VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32', + VOP3Op.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + VOP3Op.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp", + VOP3Op.V_MBCNT_LO_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', + VOP3Op.V_MBCNT_HI_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', + VOP3Op.V_CVT_PK_NORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);", + VOP3Op.V_CVT_PK_NORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);", + VOP3Op.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);", + VOP3Op.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);", + VOP3Op.V_SUB_NC_I32: 'D0.i32 = S0.i32 - S1.i32', + VOP3Op.V_ADD_NC_I32: 'D0.i32 = S0.i32 + S1.i32', + VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32', + VOP3Op.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32', + VOP3Op.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + VOP3Op.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + VOP3Op.V_TRIG_PREOP_F64: "shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, 
b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)", + VOP3Op.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)', + VOP3Op.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)', + VOP3Op.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)', + VOP3Op.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)', + VOP3Op.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)', + VOP3Op.V_MINIMUM_F64: 'if (isSignalNAN(S0.f64) || isSignalNAN(S1.f64)) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S1.f64\nelsif ((S0.f64 < S1.f64) || ((abs(S0.f64) == 0.0) && (abs(S1.f64) == 0.0) && sign(S0.f64) &&\n!sign(S1.f64))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif', + VOP3Op.V_MAXIMUM_F64: 'if (isSignalNAN(S0.f64) || isSignalNAN(S1.f64)) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S1.f64\nelsif ((S0.f64 > S1.f64) || ((abs(S0.f64) == 0.0) && (abs(S1.f64) == 0.0) && !sign(S0.f64) &&\nsign(S1.f64))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif', + VOP3Op.V_READLANE_B32: "declare lane : 32'U;\nif WAVE32 then\nlane = S1.u32[4 : 0].u32;\n// Lane select for wave32\nelse\nlane = S1.u32[5 : 0].u32;\n// Lane select for wave64\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP3Op.V_WRITELANE_B32: "declare lane : 32'U;\nif WAVE32 then\nlane = S1.u32[4 : 0].u32;\n// Lane select for wave32\nelse\nlane = S1.u32[5 : 0].u32;\n// Lane select for wave64\nendif;\nVGPR[lane][VDST.u32] = S0.b32", + VOP3Op.V_AND_B16: 'D0.u16 = (S0.u16 & S1.u16)', + VOP3Op.V_OR_B16: 'D0.u16 = (S0.u16 | S1.u16)', + VOP3Op.V_XOR_B16: 'D0.u16 = (S0.u16 ^ S1.u16)', + VOP3Op.V_MINIMUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S1.f32\nelsif ((S0.f32 < S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && sign(S0.f32) &&\n!sign(S1.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + VOP3Op.V_MAXIMUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S1.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + VOP3Op.V_MINIMUM_F16: "if 
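# Editor's note: V_MINIMUM/V_MAXIMUM above are the IEEE-754 2019 operations —
# unlike V_MIN_NUM/V_MAX_NUM, any NaN input propagates to the result. A
# sketch of the f64 minimum, again ignoring the signaling-NaN/TRAPSTS detail:
import math

def minimum_f64(a: float, b: float) -> float:
    if math.isnan(a) or math.isnan(b): return float('nan')
    if a == 0.0 and b == 0.0:  # -0 < +0 here as well
        return a if math.copysign(1.0, a) < math.copysign(1.0, b) else b
    return a if a < b else b

assert math.isnan(minimum_f64(float('nan'), 1.0))
assert math.copysign(1.0, minimum_f64(-0.0, 0.0)) == -1.0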
(isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S1.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + VOP3Op.V_MAXIMUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S1.f16\nelsif ((S0.f16 > S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && !sign(S0.f16) &&\nsign(S1.f16))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", + VOP3Op.V_CVT_PK_FP8_F32: 'prev_mode = ROUND_MODE;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[15:0] are preserved\nendif;', + VOP3Op.V_CVT_PK_BF8_F32: 'prev_mode = ROUND_MODE;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[15:0] are preserved\nendif;', + VOP3Op.V_CVT_SR_FP8_F32: "prev_mode = ROUND_MODE;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 12].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].fp8 = f32_to_fp8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].fp8 = f32_to_fp8(tmp.f32)\nendif;", + VOP3Op.V_CVT_SR_BF8_F32: "prev_mode = ROUND_MODE;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 11].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].bf8 = f32_to_bf8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].bf8 = f32_to_bf8(tmp.f32)\nendif;", +} + +VOP3SDOp_PCODE = { + VOP3SDOp.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 
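The V_MINIMUM_*/V_MAXIMUM_* pcode above pins down the IEEE-754-2019 minimum/maximum rules: a signaling NaN input is quieted and returned, a quiet NaN propagates (S0 winning ties), and -0 compares less than +0. A minimal Python sketch of the f32 case, independent of the Reg machinery in extra/assembly/amd/pcode.py; all helper names here are illustrative:

import math, struct

def _bits(x: float) -> int: return struct.unpack("<I", struct.pack("<f", x))[0]

def _is_snan(b: int) -> bool:
  # NaN whose quiet bit (mantissa MSB) is clear
  return (b & 0x7f800000) == 0x7f800000 and (b & 0x7fffff) != 0 and not (b & 0x400000)

def _quiet(b: int) -> float:  # cvtToQuietNAN: set the quiet bit
  return struct.unpack("<f", struct.pack("<I", b | 0x400000))[0]

def v_minimum_f32(s0: float, s1: float) -> float:
  b0, b1 = _bits(s0), _bits(s1)
  if _is_snan(b0): return _quiet(b0)
  if _is_snan(b1): return _quiet(b1)
  if math.isnan(s0): return s0   # quiet NaN propagates, S0 checked first
  if math.isnan(s1): return s1
  if s0 == 0.0 and s1 == 0.0:    # NOTE: -0 < +0 is TRUE in this comparison
    return s0 if math.copysign(1.0, s0) < 0.0 else s1
  return s0 if s0 < s1 else s1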
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_DIV_SCALE_F32: "VCC = 0x0LL;\nif ((64'F(S2.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\nD0.f32 = NAN.f32\nelsif exponent(S2.f32) - exponent(S1.f32) >= 96 then\n// N/D near MAX_FLOAT_F32\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif S1.f32 == DENORM.f32 then\nD0.f32 = ldexp(S0.f32, 64)\nelsif ((1.0 / 64'F(S1.f32) == DENORM.f64) && (S2.f32 / S1.f32 == DENORM.f32)) then\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif 1.0 / 64'F(S1.f32) == DENORM.f64 then\nD0.f32 = ldexp(S0.f32, -64)\nelsif S2.f32 / S1.f32 == DENORM.f32 then\nVCC = 0x1LL;\nif S0.f32 == S2.f32 then\n// Only scale the numerator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif exponent(S2.f32) <= 23 then\n// Numerator is tiny\nD0.f32 = ldexp(S0.f32, 64)\nendif", + VOP3SDOp.V_DIV_SCALE_F64: 'VCC = 0x0LL;\nif ((S2.f64 == 0.0) || (S1.f64 == 0.0)) then\nD0.f64 = NAN.f64\nelsif exponent(S2.f64) - exponent(S1.f64) >= 768 then\n// N/D near MAX_FLOAT_F64\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, 128)\nelsif ((1.0 / S1.f64 == DENORM.f64) && (S2.f64 / S1.f64 == DENORM.f64)) then\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif 1.0 / S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, -128)\nelsif S2.f64 / S1.f64 == DENORM.f64 then\nVCC = 0x1LL;\nif S0.f64 == S2.f64 then\n// Only scale the numerator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif exponent(S2.f64) <= 53 then\n// Numerator is tiny\nD0.f64 = ldexp(S0.f64, 128)\nendif', + VOP3SDOp.V_MAD_CO_U64_U32: "{ D1.u1, D0.u64 } = 65'B(65'U(S0.u32) * 65'U(S1.u32) + 65'U(S2.u64))", + VOP3SDOp.V_MAD_CO_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))", + VOP3SDOp.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", +} + +VOP3POp_PCODE = { + VOP3POp.V_PK_MAD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MUL_LO_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ADD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_SUB_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_LSHLREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_LSHRREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ASHRREV_I16: 'tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 >= S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 >= S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MIN_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 < S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 < S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MAD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_ADD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_SUB_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_MAX_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 >= S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 >= S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_MIN_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 < S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 < S1[31 : 16].u16 ? 
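The V_ADD_CO_*/V_SUB_CO_* entries above all reduce to 33-bit arithmetic on 32-bit lanes, with VCC holding the unsigned carry or borrow. A sketch, with hypothetical helper names, of how the pcode's 64'U(...) widening comparison yields a carry that chains a wide add out of 32-bit pieces:

MASK32 = 0xffffffff

def v_add_co_ci_u32(s0: int, s1: int, cin: int) -> tuple[int, int]:
  # 33-bit sum; the carry-out is exactly the pcode's tmp >= 0x100000000ULL test
  tmp = (s0 & MASK32) + (s1 & MASK32) + (cin & 1)
  return tmp & MASK32, int(tmp >= 0x100000000)  # (D0, carry-out into VCC)

# chaining the carry builds a 64-bit add out of two 32-bit lane adds
lo, c = v_add_co_ci_u32(0xffffffff, 0x00000001, 0)  # lo == 0, c == 1
hi, _ = v_add_co_ci_u32(0x00000000, 0x00000000, c)  # hi == 1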
S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_FMA_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16);\ntmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_ADD_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\nD0.b32 = tmp", + VOP3POp.V_PK_MUL_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.b32 = tmp", + VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOP3POp.V_DOT4_I32_IU8: "declare A : 32'I[4];\ndeclare B : 32'I[4];\nfor i in 0 : 3 do\nA8 = S0[i * 8 + 7 : i * 8];\nB8 = S1[i * 8 + 7 : i * 8];\nendfor;\nC = S2.i32;\ntmp = C.i32;\nD0.i32 = tmp", + VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp', + VOP3POp.V_DOT8_I32_IU4: "declare A : 32'I[8];\ndeclare B : 32'I[8];\nfor i in 0 : 7 do\nA4 = S0[i * 4 + 3 : i * 4];\nB4 = S1[i * 4 + 3 : i * 4];\nendfor;\nC = S2.i32;\ntmp = C.i32;\nD0.i32 = tmp", + VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp', + VOP3POp.V_DOT2_F32_BF16: 'tmp = S2.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', + VOP3POp.V_PK_MIN_NUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_min_num_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_min_num_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_MAX_NUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_max_num_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_max_num_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_MINIMUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_minimum_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_minimum_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_MAXIMUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_maximum_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_maximum_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_FMA_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])", + VOP3POp.V_FMA_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = 
f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))", + VOP3POp.V_FMA_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))", + VOP3POp.V_DOT4_F32_FP8_BF8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].fp8) * 32'F(S1[7 : 0].bf8);\ntmp += 32'F(S0[15 : 8].fp8) * 32'F(S1[15 : 8].bf8);\ntmp += 32'F(S0[23 : 16].fp8) * 32'F(S1[23 : 16].bf8);\ntmp += 32'F(S0[31 : 24].fp8) * 32'F(S1[31 : 24].bf8);\nD0.f32 = tmp", + VOP3POp.V_DOT4_F32_BF8_FP8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].bf8) * 32'F(S1[7 : 0].fp8);\ntmp += 32'F(S0[15 : 8].bf8) * 32'F(S1[15 : 8].fp8);\ntmp += 32'F(S0[23 : 16].bf8) * 32'F(S1[23 : 16].fp8);\ntmp += 32'F(S0[31 : 24].bf8) * 32'F(S1[31 : 24].fp8);\nD0.f32 = tmp", + VOP3POp.V_DOT4_F32_FP8_FP8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].fp8) * 32'F(S1[7 : 0].fp8);\ntmp += 32'F(S0[15 : 8].fp8) * 32'F(S1[15 : 8].fp8);\ntmp += 32'F(S0[23 : 16].fp8) * 32'F(S1[23 : 16].fp8);\ntmp += 32'F(S0[31 : 24].fp8) * 32'F(S1[31 : 24].fp8);\nD0.f32 = tmp", + VOP3POp.V_DOT4_F32_BF8_BF8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].bf8) * 32'F(S1[7 : 0].bf8);\ntmp += 32'F(S0[15 : 8].bf8) * 32'F(S1[15 : 8].bf8);\ntmp += 32'F(S0[23 : 16].bf8) * 32'F(S1[23 : 16].bf8);\ntmp += 32'F(S0[31 : 24].bf8) * 32'F(S1[31 : 24].bf8);\nD0.f32 = tmp", + VOP3POp.V_WMMA_F32_16X16X16_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F32_16X16X16_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F16_16X16X16_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f16(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.bf16(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_I32_16X16X16_IU8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(16x16) + S2.i32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_I32_16X16X16_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(16x16) + S2.i32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F32_16X16X16_FP8_FP8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.fp8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F32_16X16X16_FP8_BF8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.bf8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F32_16X16X16_BF8_FP8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.fp8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F32_16X16X16_BF8_BF8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.bf8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_I32_16X16X32_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x32) * S1.iu4(32x16) + S2.i32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_F32_16X16X32_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec', 
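The V_FMA_MIX_* pcode above selects each operand either as the full 32-bit register (read as f32) or as one of its f16 halves, steered by OPSEL/OPSEL_HI. A minimal sketch of just that selection step, using struct's half-float format 'e'; the function names are illustrative, not part of the emulator:

import struct

def _f16(h: int) -> float: return struct.unpack("<e", h.to_bytes(2, "little"))[0]
def _f32(w: int) -> float: return struct.unpack("<f", struct.pack("<I", w))[0]

def fma_mix_inputs(srcs: list[int], opsel: int, opsel_hi: int) -> list[float]:
  ins = []
  for i, s in enumerate(srcs):
    if not (opsel_hi >> i) & 1: ins.append(_f32(s))              # whole register as f32
    elif (opsel >> i) & 1: ins.append(_f16((s >> 16) & 0xffff))  # high f16 half
    else: ins.append(_f16(s & 0xffff))                           # low f16 half
  return ins
# e.g. opsel_hi=0b110, opsel=0b010: S0 as f32, S1's high f16, S2's low f16;
# V_FMA_MIX_F32 then computes fma(ins[0], ins[1], ins[2])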
+ VOP3POp.V_SWMMAC_F32_16X16X32_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_F16_16X16X32_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(32x16, index set from S2) + D0.f16(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_BF16_16X16X32_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(32x16, index set from S2) + D0.bf16(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_I32_16X16X32_IU8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(32x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_I32_16X16X32_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(32x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_I32_16X16X64_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x32) * S1.iu4(64x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_F32_16X16X32_FP8_FP8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.fp8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_F32_16X16X32_FP8_BF8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.bf8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_F32_16X16X32_BF8_FP8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.fp8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_SWMMAC_F32_16X16X32_BF8_BF8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.bf8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec', +} + +VOPCOp_PCODE = { + VOPCOp.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. 
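The V_DOT4_*/V_DOT8_* entries in the table just closed are widening dot products over packed sub-word lanes; V_DOT4_U32_U8 is spelled out fully in its pcode, so a one-function sketch covers the pattern:

def v_dot4_u32_u8(s0: int, s1: int, s2: int) -> int:
  tmp = s2 & 0xffffffff
  for i in range(4):  # unpack byte lane i of each source, multiply, accumulate
    tmp += ((s0 >> 8 * i) & 0xff) * ((s1 >> 8 * i) & 0xff)
  return tmp & 0xffffffff

assert v_dot4_u32_u8(0x01020304, 0x01010101, 10) == 20  # 10 + (4+3+2+1)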
Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. 
Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', + VOPCOp.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f16 == S1.f16', + VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', + VOPCOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', + VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', + VOPCOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = S0.f16 >= S1.f16', + VOPCOp.V_CMPX_O_F16: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + VOPCOp.V_CMPX_U_F16: "EXEC.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + VOPCOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', + VOPCOp.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f32 == S1.f32', + VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', + VOPCOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', + VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', + VOPCOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = S0.f32 >= S1.f32', + VOPCOp.V_CMPX_O_F32: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + VOPCOp.V_CMPX_U_F32: "EXEC.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + VOPCOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', + VOPCOp.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f64 == S1.f64', + VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', + VOPCOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', + VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', + VOPCOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = S0.f64 >= S1.f64', + VOPCOp.V_CMPX_O_F64: 'EXEC.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64))', + VOPCOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64))', + VOPCOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', + VOPCOp.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i16 == S1.i16', + VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', + VOPCOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', + VOPCOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', + VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', + VOPCOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', + VOPCOp.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u16 == S1.u16', + VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', + VOPCOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', + VOPCOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', + VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', + VOPCOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', + VOPCOp.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i32 == S1.i32', + VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', + VOPCOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', + VOPCOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', + VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', + VOPCOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', + VOPCOp.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u32 == S1.u32', + VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', + VOPCOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', + VOPCOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', + VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', + VOPCOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', + VOPCOp.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.i64 == S1.i64', + VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', + VOPCOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', + VOPCOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', + VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', + VOPCOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64', + VOPCOp.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u64 == S1.u64', + VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', + VOPCOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', + VOPCOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', + VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', + VOPCOp.V_CMPX_CLASS_F16: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOPCOp.V_CMPX_CLASS_F32: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOPCOp.V_CMPX_CLASS_F64: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 
5 : 6]\nendif;\nEXEC.u64[laneId] = result", +} + +DSOp_PCODE = { + DSOp.DS_ADD_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_STORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]', + DSOp.DS_STORE_2ADDR_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]', + DSOp.DS_STORE_2ADDR_STRIDE64_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]', + DSOp.DS_CMPSTORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? 
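The three V_CMP_CLASS_F*/V_CMPX_CLASS_F* entries above share one ten-bit class mask, decoded per the S1.u[0..9] comments in the pcode. A Python sketch of the f32 classifier, with illustrative names:

import math, struct

def v_cmp_class_f32(s0: float, mask: int) -> bool:
  b = struct.unpack("<I", struct.pack("<f", s0))[0]
  exp, frac, neg = (b >> 23) & 0xff, b & 0x7fffff, bool(b >> 31)
  if exp == 0xff and frac: bit = 1 if frac & 0x400000 else 0  # quiet vs signaling NaN
  elif exp == 0xff: bit = 2 if neg else 9                     # +-infinity
  elif exp > 0: bit = 3 if neg else 8                         # +-normal
  elif frac: bit = 4 if neg else 7                            # +-denormal
  else: bit = 5 if neg else 6                                 # +-zero
  return bool((mask >> bit) & 1)

assert v_cmp_class_f32(-0.0, 1 << 5) and v_cmp_class_f32(math.inf, 1 << 9)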
src : tmp;\nRETURN_DATA.b32 = tmp', + DSOp.DS_MIN_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + DSOp.DS_MAX_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + DSOp.DS_ADD_F32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_STORE_B8: 'MEM[ADDR].b8 = DATA[7 : 0]', + DSOp.DS_STORE_B16: 'MEM[ADDR].b16 = DATA[15 : 0]', + DSOp.DS_ADD_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_RTN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_RTN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_STOREXCHG_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + DSOp.DS_STOREXCHG_2ADDR_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_CMPSTORE_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', + DSOp.DS_MIN_NUM_RTN_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + DSOp.DS_MAX_NUM_RTN_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? 
thread_in[j] : 0;\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;', + DSOp.DS_LOAD_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32', + DSOp.DS_LOAD_2ADDR_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4U].b32', + DSOp.DS_LOAD_2ADDR_STRIDE64_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256U].b32', + DSOp.DS_LOAD_I8: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i8))", + DSOp.DS_LOAD_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + DSOp.DS_LOAD_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))", + DSOp.DS_LOAD_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + DSOp.DS_CONSUME: 'addr = offset; // offset by LDS HWBASE\nrtnval = LDS(addr);\nGPR[VDST] = rtnval; // return to all valid threads', + DSOp.DS_APPEND: 'addr = offset; // offset by LDS HWBASE\nrtnval = LDS(addr);\nGPR[VDST] = rtnval; // return to all valid threads', + DSOp.DS_ADD_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_STORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]', + DSOp.DS_STORE_2ADDR_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_STORE_2ADDR_STRIDE64_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_CMPSTORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', + DSOp.DS_MIN_NUM_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 < tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && sign(src.f64) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_NUM_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 > tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && !sign(src.f64) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', + DSOp.DS_ADD_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_RTN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? 
src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_RTN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_STOREXCHG_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + DSOp.DS_STOREXCHG_2ADDR_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_CMPSTORE_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.b64 = tmp', + DSOp.DS_MIN_NUM_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 < tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && sign(src.f64) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_NUM_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 > tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && !sign(src.f64) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', + DSOp.DS_LOAD_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32', + DSOp.DS_LOAD_2ADDR_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8U + 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8U + 4U].b32', + DSOp.DS_LOAD_2ADDR_STRIDE64_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512U + 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512U + 4U].b32', + DSOp.DS_ADD_RTN_F32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", + DSOp.DS_COND_SUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 
tmp - src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_CLAMP_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", + DSOp.DS_PK_ADD_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + DSOp.DS_PK_ADD_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + DSOp.DS_STORE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]', + DSOp.DS_STORE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]', + DSOp.DS_LOAD_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + DSOp.DS_LOAD_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", + DSOp.DS_LOAD_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", + DSOp.DS_LOAD_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", + DSOp.DS_LOAD_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;', + DSOp.DS_LOAD_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;', + DSOp.DS_COND_SUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? tmp - src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_CLAMP_RTN_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", + DSOp.DS_PK_ADD_RTN_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + DSOp.DS_PK_ADD_RTN_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + DSOp.DS_STORE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32", + DSOp.DS_LOAD_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32", + DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_BPERMUTE_B32: "Note that EXEC mask is applied to both VGPR read and write. 
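# ───────────────────────────────────────────────────────────────────────────────
# editor's sketch (illustrative, not part of the patch): the DS_PERMUTE_B32 /
# DS_BPERMUTE_B32 pcode above is a lane scatter vs. a lane gather. DS_PERMUTE
# pushes each active lane's DATA0 to a computed destination lane (the highest
# lane wins on collision); DS_BPERMUTE pulls from a computed source lane and
# reads 0 when that lane is inactive. A pure-Python model; the byte addressing
# (hence //4) follows the pcode, the 32-lane default is an assumption:
def ds_permute(addr, data, exec_mask, offset=0, n=32):
  tmp = [0] * n
  for i in range(n):                       # scatter phase, ascending lane order
    if exec_mask >> i & 1: tmp[(addr[i] + offset) // 4 % n] = data[i]
  return [tmp[i] if exec_mask >> i & 1 else None for i in range(n)]  # None: VGPR unchanged

def ds_bpermute(addr, data, exec_mask, offset=0, n=32):
  out = [None] * n
  for i in range(n):                       # gather phase
    src = (addr[i] + offset) // 4 % n
    if exec_mask >> i & 1: out[i] = data[src] if exec_mask >> src & 1 else 0
  return out
# ───────────────────────────────────────────────────────────────────────────────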
If src_lane selects a disabled thread then zero is\n// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_BPERMUTE_FI_B32: "if no LDS memory is allocated to the wave. It uses LDS hardware to implement an arbitrary swizzle across\n// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\ntmp[i] = VGPR[src_lane][DATA0]\nendfor;\n// Copy data into destination VGPRs. Some source\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_STORE_B96: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]', + DSOp.DS_STORE_B128: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]', + DSOp.DS_BVH_STACK_PUSH4_POP1_RTN_B32: "The LDS stack address is computed using values packed into ADDR and part of OFFSET0. ADDR carries the\nstack address for the lane. OFFSET0[4:0] contains stack_size[4:0] -- this value is constant for all lanes and is\ndeclare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 3 passes: push data onto stack\nfor i in 0 : 2 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 1U\nelsif DATA1[i].b32 == last_node_ptr then\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[127 : 96]) then\nRETURN_DATA[31 : 0] = DATA1[127 : 96]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;", + DSOp.DS_BVH_STACK_PUSH8_POP1_RTN_B32: "The LDS stack address is computed using values packed into ADDR and part of OFFSET0. ADDR carries the\nstack address for the lane. 
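# ───────────────────────────────────────────────────────────────────────────────
# editor's sketch (illustrative, not part of the patch): the shape of the BVH
# short-stack ops above — the first N-1 dwords of DATA1 are pushed when valid,
# then the last dword is returned directly if valid, otherwise one entry is
# popped. DATA_VALID / INVALID_NODE are hardware encodings not given here,
# modeled as a caller-supplied predicate and None (both assumptions):
def bvh_push_pop(stack, words, valid):
  for w in words[:-1]:                     # push passes
    if valid(w): stack.append(w)
  if valid(words[-1]): return words[-1]    # last pass: return directly, or pop
  return stack.pop() if stack else None
# ───────────────────────────────────────────────────────────────────────────────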
OFFSET0[4:0] contains stack_size[4:0] -- this value is constant for all lanes and is\ndeclare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 7 passes: push data onto stack\nfor i in 0 : 6 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 1U\nelsif DATA1[i].b32 == last_node_ptr then\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[255 : 224]) then\nRETURN_DATA[31 : 0] = DATA1[255 : 224]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;", + DSOp.DS_BVH_STACK_PUSH8_POP2_RTN_B64: "The LDS stack address is computed using values packed into ADDR and part of OFFSET0. ADDR carries the\nstack address for the lane. OFFSET0[4:0] contains stack_size[4:0] -- this value is constant for all lanes and is\ndeclare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 7 passes: push data onto stack\nfor i in 0 : 6 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 1U\nelsif DATA1[i].b32 == last_node_ptr then\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[255 : 224]) then\nRETURN_DATA[31 : 0] = DATA1[255 : 224]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;\n// Attempt a second pop\nif DATA_VALID(MEM[stack_base.u32 + stack_index]) then\nRETURN_DATA[63 : 32] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;", + DSOp.DS_LOAD_B96: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32', + DSOp.DS_LOAD_B128: "addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + 
ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.u32 = 
32'U({ 16'0U, MEM[addr].u16 })\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b8 = VDATA[7 : 0]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b16 = VDATA[15 : 0]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = MEM[addr].b16;\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].b16 = MEM[addr].b16;\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b8 = VDATA[23 : 16]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b16 = VDATA[31 : 16]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp\ndeclare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 
tmp - src : tmp;\nRETURN_DATA.u32 = tmp\ntmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp\ntmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp", +} + +PSEUDOCODE_STRINGS = { + SOP1Op: SOP1Op_PCODE, + SOP2Op: SOP2Op_PCODE, + SOPCOp: SOPCOp_PCODE, + SOPKOp: SOPKOp_PCODE, + SOPPOp: SOPPOp_PCODE, + SMEMOp: SMEMOp_PCODE, + VOP1Op: VOP1Op_PCODE, + VOP2Op: VOP2Op_PCODE, + VOP3Op: VOP3Op_PCODE, + VOP3SDOp: VOP3SDOp_PCODE, + VOP3POp: VOP3POp_PCODE, + VOPCOp: VOPCOp_PCODE, + DSOp: DSOp_PCODE, +} \ No newline at end of file diff --git a/extra/assembly/amd/emu.py b/extra/assembly/amd/emu.py index b689466dd7..fd4def7a52 100644 --- a/extra/assembly/amd/emu.py +++ b/extra/assembly/amd/emu.py @@ -5,7 +5,8 @@ import ctypes, functools from tinygrad.runtime.autogen import hsa from extra.assembly.amd.dsl import Inst, unwrap, FLOAT_ENC, MASK32, MASK64, _f32, _i32, _sext, _f16, _i16, _f64, _i64 from extra.assembly.amd.asm import detect_format -from extra.assembly.amd.autogen.rdna3.gen_pcode import COMPILED_FUNCTIONS +from extra.assembly.amd.pcode import compile_pseudocode +from extra.assembly.amd.autogen.rdna3.str_pcode import PSEUDOCODE_STRINGS from extra.assembly.amd.autogen.rdna3.ins import (SOP1, SOP2, SOPC, SOPK, SOPP, SMEM, VOP1, VOP2, VOP3, VOP3SD, VOP3P, VOPC, DS, FLAT, VOPD, SrcEnum, SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp, VOPDOp) @@ -236,9 +237,8 @@ def exec_vopd(st: WaveState, inst, V: list, lane: int) -> None: """VOPD: dual-issue, execute two ops simultaneously (read all inputs before writes).""" literal, vdstx, vdsty = inst._literal, inst.vdstx, (inst.vdsty << 1) | ((inst.vdstx & 1) ^ 1) sx0, sx1, dx, sy0, sy1, dy = st.rsrc(inst.srcx0, lane, literal), V[inst.vsrcx1], V[vdstx], st.rsrc(inst.srcy0, lane, literal), V[inst.vsrcy1], V[vdsty] - opx, opy = _VOPD_TO_VOP[inst.opx], _VOPD_TO_VOP[inst.opy] - V[vdstx] = COMPILED_FUNCTIONS[type(opx)][opx](sx0, sx1, 0, dx, st.scc, st.vcc, lane, st.exec_mask, literal, None)['D0'] - V[vdsty] = COMPILED_FUNCTIONS[type(opy)][opy](sy0, sy1, 0, dy, st.scc, st.vcc, lane, st.exec_mask, literal, None)['D0'] + V[vdstx] = inst._fnx(sx0, sx1, 0, dx, st.scc, st.vcc, lane, st.exec_mask, literal, None)['D0'] + V[vdsty] = inst._fny(sy0, sy1, 0, dy, st.scc, st.vcc, lane, st.exec_mask, literal, None)['D0'] def exec_flat(st: WaveState, inst, V: list, lane: int) -> None: """FLAT/GLOBAL/SCRATCH memory ops.""" @@ -359,15 +359,14 @@ def decode_program(data: bytes) -> dict[int, Inst]: result: dict[int, 
Inst] = {} i = 0 while i < len(data): - try: inst_class = detect_format(data[i:]) - except ValueError: break # stop at invalid instruction (padding/metadata after code) - inst = inst_class.from_bytes(data[i:i+inst_class._size()+8]) # +8 for potential 64-bit literal + inst = detect_format(data[i:]).from_bytes(data[i:]) inst._words = inst.size() // 4 # Determine dispatch function and pcode function - fn = COMPILED_FUNCTIONS.get(type(inst.op), {}).get(inst.op) - if isinstance(inst, SOPP) and inst.op == SOPPOp.S_ENDPGM: inst._dispatch = dispatch_endpgm + if isinstance(inst, SOPP) and inst.op == SOPPOp.S_CODE_END: break + elif isinstance(inst, SOPP) and inst.op == SOPPOp.S_ENDPGM: inst._dispatch = dispatch_endpgm elif isinstance(inst, SOPP) and inst.op == SOPPOp.S_BARRIER: inst._dispatch = dispatch_barrier + elif isinstance(inst, SOPP) and inst.op in (SOPPOp.S_CLAUSE, SOPPOp.S_WAITCNT, SOPPOp.S_WAITCNT_DEPCTR, SOPPOp.S_SENDMSG, SOPPOp.S_SET_INST_PREFETCH_DISTANCE): inst._dispatch = dispatch_nop elif isinstance(inst, (SOP1, SOP2, SOPC, SOPK, SOPP, SMEM)): inst._dispatch = exec_scalar elif isinstance(inst, VOP1) and inst.op == VOP1Op.V_NOP: inst._dispatch = dispatch_nop elif isinstance(inst, VOP3P) and 'WMMA' in inst.op_name: inst._dispatch = dispatch_wmma @@ -378,11 +377,14 @@ def decode_program(data: bytes) -> dict[int, Inst]: elif isinstance(inst, DS): inst._dispatch = dispatch_lane(exec_ds) else: inst._dispatch = dispatch_lane(exec_vop) - # Validate pcode exists for instructions that need it (scalar/wave-level ops and VOPD don't need pcode) - needs_pcode = inst._dispatch not in (dispatch_endpgm, dispatch_barrier, exec_scalar, dispatch_nop, dispatch_wmma, - dispatch_writelane, dispatch_readlane, dispatch_lane(exec_vopd)) - if fn is None and inst.op_name and needs_pcode: raise NotImplementedError(f"{inst.op_name} not in pseudocode") - inst._fn = fn if fn else lambda *args, **kwargs: {} + # Compile pcode for instructions that use it (not VOPD which has _fnx/_fny, not special dispatches) + # VOPD needs separate functions for X and Y ops + if isinstance(inst, VOPD): + def _compile_vopd_op(op): return compile_pseudocode(type(op).__name__, op.name, PSEUDOCODE_STRINGS[type(op)][op]) + inst._fnx, inst._fny = _compile_vopd_op(_VOPD_TO_VOP[inst.opx]), _compile_vopd_op(_VOPD_TO_VOP[inst.opy]) + elif inst._dispatch not in (dispatch_endpgm, dispatch_barrier, dispatch_nop, dispatch_wmma, dispatch_writelane): + assert type(inst.op) != int, f"inst op of {inst} is int" + inst._fn = compile_pseudocode(type(inst.op).__name__, inst.op.name, PSEUDOCODE_STRINGS[type(inst.op)][inst.op]) result[i // 4] = inst i += inst._words * 4 return result diff --git a/extra/assembly/amd/pcode.py b/extra/assembly/amd/pcode.py index 04133af553..a64a990bc5 100644 --- a/extra/assembly/amd/pcode.py +++ b/extra/assembly/amd/pcode.py @@ -1,9 +1,9 @@ # DSL for RDNA3 pseudocode - makes pseudocode expressions work directly as Python -import struct, math +import struct, math, re, functools from extra.assembly.amd.dsl import MASK32, MASK64, _f32, _i32, _sext, _f16, _i16, _f64, _i64 # ═══════════════════════════════════════════════════════════════════════════════ -# HELPER FUNCTIONS +# INTERNAL HELPERS # ═══════════════════════════════════════════════════════════════════════════════ def _div(a, b): @@ -11,143 +11,35 @@ def _div(a, b): except ZeroDivisionError: if a == 0.0 or math.isnan(a): return float("nan") return math.copysign(float("inf"), a * b) if b == 0.0 else float("inf") if a > 0 else float("-inf") -def _to_f16_bits(v): return v if 
isinstance(v, int) else _i16(v) -def _isnan(x): - try: return math.isnan(float(x)) - except (TypeError, ValueError): return False def _check_nan_type(x, quiet_bit_expected, default): - """Check NaN type by examining quiet bit. Returns default if can't determine.""" try: if not math.isnan(float(x)): return False if hasattr(x, '_reg') and hasattr(x, '_bits'): bits = x._reg._val & ((1 << x._bits) - 1) - # NaN format: exponent all 1s, quiet bit, mantissa != 0 - # f16: exp[14:10]=31, quiet=bit9, mant[8:0] | f32: exp[30:23]=255, quiet=bit22, mant[22:0] | f64: exp[62:52]=2047, quiet=bit51, mant[51:0] exp_bits, quiet_pos, mant_mask = {16: (0x1f, 9, 0x3ff), 32: (0xff, 22, 0x7fffff), 64: (0x7ff, 51, 0xfffffffffffff)}.get(x._bits, (0,0,0)) exp_shift = {16: 10, 32: 23, 64: 52}.get(x._bits, 0) if exp_bits and ((bits >> exp_shift) & exp_bits) == exp_bits and (bits & mant_mask) != 0: return ((bits >> quiet_pos) & 1) == quiet_bit_expected return default except (TypeError, ValueError): return False -def _isquietnan(x): return _check_nan_type(x, 1, True) # quiet NaN has quiet bit = 1 -def _issignalnan(x): return _check_nan_type(x, 0, False) # signaling NaN has quiet bit = 0 def _gt_neg_zero(a, b): return (a > b) or (a == 0 and b == 0 and not math.copysign(1, a) < 0 and math.copysign(1, b) < 0) def _lt_neg_zero(a, b): return (a < b) or (a == 0 and b == 0 and math.copysign(1, a) < 0 and not math.copysign(1, b) < 0) -def _fma(a, b, c): - try: return math.fma(a, b, c) - except ValueError: return float('nan') # inf * 0 + c is NaN per IEEE 754 -def _signext(v): return v def _fpop(fn): def wrapper(x): x = float(x) if math.isnan(x) or math.isinf(x): return x result = float(fn(x)) - # Preserve sign of zero (IEEE 754: ceil(-0.0) = -0.0, ceil(-0.1) = -0.0) - if result == 0.0: return math.copysign(0.0, x) - return result + return math.copysign(0.0, x) if result == 0.0 else result return wrapper -trunc, floor, ceil = _fpop(math.trunc), _fpop(math.floor), _fpop(math.ceil) -class _SafeFloat(float): - """Float subclass that uses _div for division to handle 0/inf correctly.""" - def __truediv__(self, o): return _div(float(self), float(o)) - def __rtruediv__(self, o): return _div(float(o), float(self)) -def sqrt(x): return _SafeFloat(math.sqrt(x)) if x >= 0 else _SafeFloat(float("nan")) -def log2(x): return math.log2(x) if x > 0 else (float("-inf") if x == 0 else float("nan")) -i32_to_f32 = u32_to_f32 = i32_to_f64 = u32_to_f64 = f32_to_f64 = f64_to_f32 = float def _f_to_int(f, lo, hi): f = float(f); return 0 if math.isnan(f) else (hi if f >= hi else lo if f <= lo else int(f)) -def f32_to_i32(f): return _f_to_int(f, -2147483648, 2147483647) -def f32_to_u32(f): return _f_to_int(f, 0, 4294967295) -f64_to_i32, f64_to_u32 = f32_to_i32, f32_to_u32 -def f32_to_f16(f): - f = float(f) - if math.isnan(f): return 0x7e00 # f16 NaN - if math.isinf(f): return 0x7c00 if f > 0 else 0xfc00 # f16 ±infinity - try: return struct.unpack(" 0 else 0xfc00 # overflow -> ±infinity def _f16_to_f32_bits(bits): return struct.unpack(" 0 else 0.0 def _brev(v, bits): return int(bin(v & ((1 << bits) - 1))[2:].zfill(bits)[::-1], 2) -def _brev32(v): return _brev(v, 32) -def _brev64(v): return _brev(v, 64) def _ctz(v, bits): v, n = int(v) & ((1 << bits) - 1), 0 if v == 0: return bits while (v & 1) == 0: v >>= 1; n += 1 return n -def _ctz32(v): return _ctz(v, 32) -def _ctz64(v): return _ctz(v, 64) -def _exponent(f): - # Handle TypedView (f16/f32/f64) to get correct exponent for that type - if hasattr(f, '_bits') and hasattr(f, '_float') and f._float: - raw = 
f._val - if f._bits == 16: return (raw >> 10) & 0x1f # f16: 5-bit exponent - if f._bits == 32: return (raw >> 23) & 0xff # f32: 8-bit exponent - if f._bits == 64: return (raw >> 52) & 0x7ff # f64: 11-bit exponent - # Fallback: convert to f32 and get exponent - f = float(f) - if math.isinf(f) or math.isnan(f): return 255 - if f == 0.0: return 0 - try: bits = struct.unpack("> 23) & 0xff - except: return 0 -def _is_denorm_f32(f): - if not isinstance(f, float): f = _f32(int(f) & 0xffffffff) - if math.isinf(f) or math.isnan(f) or f == 0.0: return False - bits = struct.unpack("> 23) & 0xff == 0 -def _is_denorm_f64(f): - if not isinstance(f, float): f = _f64(int(f) & 0xffffffffffffffff) - if math.isinf(f) or math.isnan(f) or f == 0.0: return False - bits = struct.unpack("> 52) & 0x7ff == 0 -def v_min_f32(a, b): return a if math.isnan(b) else b if math.isnan(a) else (a if _lt_neg_zero(a, b) else b) -def v_max_f32(a, b): return a if math.isnan(b) else b if math.isnan(a) else (a if _gt_neg_zero(a, b) else b) -v_min_f16, v_max_f16 = v_min_f32, v_max_f32 -v_min_i32, v_max_i32 = min, max -v_min_i16, v_max_i16 = min, max -def v_min_u32(a, b): return min(a & MASK32, b & MASK32) -def v_max_u32(a, b): return max(a & MASK32, b & MASK32) -def v_min_u16(a, b): return min(a & 0xffff, b & 0xffff) -def v_max_u16(a, b): return max(a & 0xffff, b & 0xffff) -def v_min3_f32(a, b, c): return v_min_f32(v_min_f32(a, b), c) -def v_max3_f32(a, b, c): return v_max_f32(v_max_f32(a, b), c) -v_min3_f16, v_max3_f16 = v_min3_f32, v_max3_f32 -v_min3_i32, v_max3_i32, v_min3_i16, v_max3_i16 = min, max, min, max -def v_min3_u32(a, b, c): return min(a & MASK32, b & MASK32, c & MASK32) -def v_max3_u32(a, b, c): return max(a & MASK32, b & MASK32, c & MASK32) -def v_min3_u16(a, b, c): return min(a & 0xffff, b & 0xffff, c & 0xffff) -def v_max3_u16(a, b, c): return max(a & 0xffff, b & 0xffff, c & 0xffff) -def ABSDIFF(a, b): return abs(int(a) - int(b)) -# BF16 (bfloat16) conversion functions def _bf16(i): """Convert bf16 bits to float. BF16 is just the top 16 bits of f32.""" return struct.unpack(" 0 else 0xff80 # bf16 ±infinity try: return (struct.unpack("> 16) & 0xffff except (OverflowError, struct.error): return 0x7f80 if f > 0 else 0xff80 -def bf16_to_f32(v): return _bf16(v) if isinstance(v, int) else float(v) -def f32_to_bf16(f): return _ibf16(f) +def _trig(fn, x): + # V_SIN/COS_F32: hardware does frac on input cycles before computing + if math.isinf(x) or math.isnan(x): return float("nan") + frac_cycles = fract(x / (2 * math.pi)) + result = fn(frac_cycles * 2 * math.pi) + # Hardware returns exactly 0 for cos(π/2), sin(π), etc. due to lookup table + # Round very small results (below f32 precision) to exactly 0 + if abs(result) < 1e-7: return 0.0 + return result -# BYTE_PERMUTE for V_PERM_B32 - select bytes from 64-bit data based on selector -def BYTE_PERMUTE(data, sel): - """Select a byte from 64-bit data based on selector value. 
- sel 0-7: select byte from data (S1 is bytes 0-3, S0 is bytes 4-7 in {S0,S1}) - sel 8-11: sign-extend from specific bytes (8->byte1, 9->byte3, 10->byte5, 11->byte7) - sel 12: constant 0x00 - sel >= 13: constant 0xFF""" - sel = int(sel) & 0xff - if sel <= 7: return (int(data) >> (sel * 8)) & 0xff - if sel == 8: return 0xff if ((int(data) >> 15) & 1) else 0x00 # sign of byte 1 - if sel == 9: return 0xff if ((int(data) >> 31) & 1) else 0x00 # sign of byte 3 - if sel == 10: return 0xff if ((int(data) >> 47) & 1) else 0x00 # sign of byte 5 - if sel == 11: return 0xff if ((int(data) >> 63) & 1) else 0x00 # sign of byte 7 - if sel == 12: return 0x00 - return 0xff # sel >= 13 +class _SafeFloat(float): + """Float subclass that uses _div for division to handle 0/inf correctly.""" + def __truediv__(self, o): return _div(float(self), float(o)) + def __rtruediv__(self, o): return _div(float(o), float(self)) -# v_sad_u8 helper for V_SAD instructions (sum of absolute differences of 4 bytes) -def v_sad_u8(s0, s1, s2): - """V_SAD_U8: Sum of absolute differences of 4 byte pairs plus accumulator.""" - s0, s1, s2 = int(s0), int(s1), int(s2) - result = s2 - for i in range(4): - a = (s0 >> (i * 8)) & 0xff - b = (s1 >> (i * 8)) & 0xff - result += abs(a - b) - return result & 0xffffffff - -# v_msad_u8 helper (masked SAD - skip when reference byte is 0) -def v_msad_u8(s0, s1, s2): - """V_MSAD_U8: Masked sum of absolute differences (skip if reference byte is 0).""" - s0, s1, s2 = int(s0), int(s1), int(s2) - result = s2 - for i in range(4): - a = (s0 >> (i * 8)) & 0xff - b = (s1 >> (i * 8)) & 0xff - if b != 0: # Only add diff if reference (s1) byte is non-zero - result += abs(a - b) - return result & 0xffffffff -def f16_to_snorm(f): return max(-32768, min(32767, int(round(max(-1.0, min(1.0, f)) * 32767)))) -def f16_to_unorm(f): return max(0, min(65535, int(round(max(0.0, min(1.0, f)) * 65535)))) -def f32_to_snorm(f): return max(-32768, min(32767, int(round(max(-1.0, min(1.0, f)) * 32767)))) -def f32_to_unorm(f): return max(0, min(65535, int(round(max(0.0, min(1.0, f)) * 65535)))) -def v_cvt_i16_f32(f): return max(-32768, min(32767, int(f))) if not math.isnan(f) else 0 -def v_cvt_u16_f32(f): return max(0, min(65535, int(f))) if not math.isnan(f) else 0 -def u32_to_u16(u): return int(u) & 0xffff -def i32_to_i16(i): return ((int(i) + 32768) & 0xffff) - 32768 -def SAT8(v): return max(0, min(255, int(v))) -def f32_to_u8(f): return max(0, min(255, int(f))) if not math.isnan(f) else 0 -def mantissa(f): - if f == 0.0 or math.isinf(f) or math.isnan(f): return f - m, _ = math.frexp(f) - return m # AMD V_FREXP_MANT returns mantissa in [0.5, 1.0) range -def signext_from_bit(val, bit): - bit = int(bit) - if bit == 0: return 0 - mask = (1 << bit) - 1 - val = int(val) & mask - if val & (1 << (bit - 1)): return val - (1 << bit) - return val - -# Aliases used in pseudocode -s_ff1_i32_b32, s_ff1_i32_b64 = _ctz32, _ctz64 -GT_NEG_ZERO, LT_NEG_ZERO = _gt_neg_zero, _lt_neg_zero -isNAN = _isnan -isQuietNAN = _isquietnan -isSignalNAN = _issignalnan -fma, ldexp, sign, exponent = _fma, _ldexp, _sign, _exponent -def F(x): - """32'F(x) or 64'F(x) - interpret x as float. 
If x is int, treat as bit pattern.""" - if isinstance(x, int): return _f32(x) # int -> interpret as f32 bits - if isinstance(x, TypedView): return x # preserve TypedView for bit-pattern checks - return float(x) # already a float or float-like -signext = lambda x: int(x) # sign-extend to full width - already handled by Python's arbitrary precision ints -pack = lambda hi, lo: ((int(hi) & 0xffff) << 16) | (int(lo) & 0xffff) -pack32 = lambda hi, lo: ((int(hi) & 0xffffffff) << 32) | (int(lo) & 0xffffffff) -_pack, _pack32 = pack, pack32 # Aliases for internal use -WAVE32, WAVE64 = True, False - -# Float overflow/underflow constants -OVERFLOW_F32 = float('inf') -UNDERFLOW_F32 = 0.0 -OVERFLOW_F64 = float('inf') -UNDERFLOW_F64 = 0.0 -MAX_FLOAT_F32 = 3.4028235e+38 # Largest finite float32 - -# INF object that supports .f16/.f32/.f64 access and comparison with floats class _Inf: f16 = f32 = f64 = float('inf') def __neg__(self): return _NegInf() @@ -260,26 +78,24 @@ class _NegInf: def __float__(self): return float('-inf') def __eq__(self, other): return float(other) == float('-inf') if not isinstance(other, _Inf) else False def __req__(self, other): return self.__eq__(other) -INF = _Inf() -# Rounding mode placeholder class _RoundMode: NEAREST_EVEN = 0 -ROUND_MODE = _RoundMode() - -# Helper functions for pseudocode -def cvtToQuietNAN(x): return float('nan') -DST = None # Placeholder, will be set in context class _WaveMode: IEEE = False -WAVE_MODE = _WaveMode() class _DenormChecker: """Comparator for denormalized floats. x == DENORM.f32 checks if x is denormalized.""" def __init__(self, bits): self._bits = bits def _check(self, other): - return _is_denorm_f64(float(other)) if self._bits == 64 else _is_denorm_f32(float(other)) + f = float(other) + if math.isinf(f) or math.isnan(f) or f == 0.0: return False + if self._bits == 64: + bits = struct.unpack("> 52) & 0x7ff == 0 + bits = struct.unpack("> 23) & 0xff == 0 def __eq__(self, other): return self._check(other) def __req__(self, other): return self._check(other) def __ne__(self, other): return not self._check(other) @@ -287,7 +103,9 @@ class _DenormChecker: class _Denorm: f32 = _DenormChecker(32) f64 = _DenormChecker(64) -DENORM = _Denorm() + +_pack = lambda hi, lo: ((int(hi) & 0xffff) << 16) | (int(lo) & 0xffff) +_pack32 = lambda hi, lo: ((int(hi) & 0xffffffff) << 32) | (int(lo) & 0xffffffff) class TypedView: """View into a Reg with typed access. Used for both full-width (Reg.u32) and slices (Reg[31:16]).""" @@ -396,8 +214,6 @@ class TypedView: def __gt__(s, o): return float(s) > float(o) if s._float else int(s) > int(o) def __ge__(s, o): return float(s) >= float(o) if s._float else int(s) >= int(o) -SliceProxy = TypedView # Alias for compatibility - class Reg: """GPU register: D0.f32 = S0.f32 + S1.f32 just works. 
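# ───────────────────────────────────────────────────────────────────────────────
# editor's sketch (illustrative, not part of the patch): the TypedView above and
# the Reg class here are what let compiled pcode run as plain Python — typed
# attributes reinterpret the same bits, and [hi : lo] slices address bit fields,
# so lines like "D0.f32 = S0.f32 + S1.f32" or "RETURN_DATA[31 : 0] = ..." work
# verbatim:
r = Reg(0)
r.f32 = 1.5                 # stores the f32 bit pattern of 1.5
assert r.u32 == 0x3fc00000  # same bits viewed as u32
r[31 : 16] = 0xbeef         # Verilog-style bit-field write
assert r.u32 == 0xbeef0000
# ───────────────────────────────────────────────────────────────────────────────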
Supports up to 128 bits for DS_LOAD_B128.""" __slots__ = ('_val',) @@ -466,5 +282,484 @@ class Reg: def __eq__(s, o): return s._val == int(o) def __ne__(s, o): return s._val != int(o) +# ═══════════════════════════════════════════════════════════════════════════════ +# PSEUDOCODE API - Functions and constants from AMD ISA pseudocode +# ═══════════════════════════════════════════════════════════════════════════════ + +# Rounding and float operations +trunc, floor, ceil = _fpop(math.trunc), _fpop(math.floor), _fpop(math.ceil) +def sqrt(x): return _SafeFloat(math.sqrt(x)) if x >= 0 else _SafeFloat(float("nan")) +def log2(x): return math.log2(x) if x > 0 else (float("-inf") if x == 0 else float("nan")) +def fract(x): return x - math.floor(x) +def sin(x): return _trig(math.sin, x) +def cos(x): return _trig(math.cos, x) +def pow(a, b): + try: return a ** b + except OverflowError: return float("inf") if b > 0 else 0.0 +def isEven(x): + x = float(x) + if math.isinf(x) or math.isnan(x): return False + return int(x) % 2 == 0 +def mantissa(f): + if f == 0.0 or math.isinf(f) or math.isnan(f): return f + m, _ = math.frexp(f) + return m # AMD V_FREXP_MANT returns mantissa in [0.5, 1.0) range +def signext_from_bit(val, bit): + bit = int(bit) + if bit == 0: return 0 + mask = (1 << bit) - 1 + val = int(val) & mask + if val & (1 << (bit - 1)): return val - (1 << bit) + return val + +# Type conversions +i32_to_f32 = u32_to_f32 = i32_to_f64 = u32_to_f64 = f32_to_f64 = f64_to_f32 = float +def f32_to_i32(f): return _f_to_int(f, -2147483648, 2147483647) +def f32_to_u32(f): return _f_to_int(f, 0, 4294967295) +f64_to_i32, f64_to_u32 = f32_to_i32, f32_to_u32 +def f32_to_f16(f): + f = float(f) + if math.isnan(f): return 0x7e00 # f16 NaN + if math.isinf(f): return 0x7c00 if f > 0 else 0xfc00 # f16 ±infinity + try: return struct.unpack(" 0 else 0xfc00 # overflow -> ±infinity +def f16_to_f32(v): return v if isinstance(v, float) else _f16_to_f32_bits(v) +def i16_to_f16(v): return f32_to_f16(float(_sext(int(v) & 0xffff, 16))) +def u16_to_f16(v): return f32_to_f16(float(int(v) & 0xffff)) +def f16_to_i16(bits): f = _f16_to_f32_bits(bits); return max(-32768, min(32767, int(f))) if not math.isnan(f) else 0 +def f16_to_u16(bits): f = _f16_to_f32_bits(bits); return max(0, min(65535, int(f))) if not math.isnan(f) else 0 +def bf16_to_f32(v): return _bf16(v) if isinstance(v, int) else float(v) +def f32_to_bf16(f): return _ibf16(f) +def u8_to_u32(v): return int(v) & 0xff +def u4_to_u32(v): return int(v) & 0xf +def u32_to_u16(u): return int(u) & 0xffff +def i32_to_i16(i): return ((int(i) + 32768) & 0xffff) - 32768 +def f16_to_snorm(f): return max(-32768, min(32767, int(round(max(-1.0, min(1.0, f)) * 32767)))) +def f16_to_unorm(f): return max(0, min(65535, int(round(max(0.0, min(1.0, f)) * 65535)))) +def f32_to_snorm(f): return max(-32768, min(32767, int(round(max(-1.0, min(1.0, f)) * 32767)))) +def f32_to_unorm(f): return max(0, min(65535, int(round(max(0.0, min(1.0, f)) * 65535)))) +def v_cvt_i16_f32(f): return max(-32768, min(32767, int(f))) if not math.isnan(f) else 0 +def v_cvt_u16_f32(f): return max(0, min(65535, int(f))) if not math.isnan(f) else 0 +def SAT8(v): return max(0, min(255, int(v))) +def f32_to_u8(f): return max(0, min(255, int(f))) if not math.isnan(f) else 0 + +# Min/max operations +def v_min_f32(a, b): return a if math.isnan(b) else b if math.isnan(a) else (a if _lt_neg_zero(a, b) else b) +def v_max_f32(a, b): return a if math.isnan(b) else b if math.isnan(a) else (a if _gt_neg_zero(a, b) else b) +v_min_f16, 
v_max_f16 = v_min_f32, v_max_f32 +v_min_i32, v_max_i32 = min, max +v_min_i16, v_max_i16 = min, max +def v_min_u32(a, b): return min(a & MASK32, b & MASK32) +def v_max_u32(a, b): return max(a & MASK32, b & MASK32) +def v_min_u16(a, b): return min(a & 0xffff, b & 0xffff) +def v_max_u16(a, b): return max(a & 0xffff, b & 0xffff) +def v_min3_f32(a, b, c): return v_min_f32(v_min_f32(a, b), c) +def v_max3_f32(a, b, c): return v_max_f32(v_max_f32(a, b), c) +v_min3_f16, v_max3_f16 = v_min3_f32, v_max3_f32 +v_min3_i32, v_max3_i32, v_min3_i16, v_max3_i16 = min, max, min, max +def v_min3_u32(a, b, c): return min(a & MASK32, b & MASK32, c & MASK32) +def v_max3_u32(a, b, c): return max(a & MASK32, b & MASK32, c & MASK32) +def v_min3_u16(a, b, c): return min(a & 0xffff, b & 0xffff, c & 0xffff) +def v_max3_u16(a, b, c): return max(a & 0xffff, b & 0xffff, c & 0xffff) + +# SAD/MSAD operations +def ABSDIFF(a, b): return abs(int(a) - int(b)) +def v_sad_u8(s0, s1, s2): + """V_SAD_U8: Sum of absolute differences of 4 byte pairs plus accumulator.""" + s0, s1, s2 = int(s0), int(s1), int(s2) + result = s2 + for i in range(4): + a = (s0 >> (i * 8)) & 0xff + b = (s1 >> (i * 8)) & 0xff + result += abs(a - b) + return result & 0xffffffff +def v_msad_u8(s0, s1, s2): + """V_MSAD_U8: Masked sum of absolute differences (skip if reference byte is 0).""" + s0, s1, s2 = int(s0), int(s1), int(s2) + result = s2 + for i in range(4): + a = (s0 >> (i * 8)) & 0xff + b = (s1 >> (i * 8)) & 0xff + if b != 0: # Only add diff if reference (s1) byte is non-zero + result += abs(a - b) + return result & 0xffffffff + +def BYTE_PERMUTE(data, sel): + """Select a byte from 64-bit data based on selector value.""" + sel = int(sel) & 0xff + if sel <= 7: return (int(data) >> (sel * 8)) & 0xff + if sel == 8: return 0xff if ((int(data) >> 15) & 1) else 0x00 + if sel == 9: return 0xff if ((int(data) >> 31) & 1) else 0x00 + if sel == 10: return 0xff if ((int(data) >> 47) & 1) else 0x00 + if sel == 11: return 0xff if ((int(data) >> 63) & 1) else 0x00 + if sel == 12: return 0x00 + return 0xff + +# Pseudocode functions +def s_ff1_i32_b32(v): return _ctz(v, 32) +def s_ff1_i32_b64(v): return _ctz(v, 64) +GT_NEG_ZERO, LT_NEG_ZERO = _gt_neg_zero, _lt_neg_zero +def isNAN(x): + try: return math.isnan(float(x)) + except (TypeError, ValueError): return False +def isQuietNAN(x): return _check_nan_type(x, 1, True) +def isSignalNAN(x): return _check_nan_type(x, 0, False) +def fma(a, b, c): + try: return math.fma(a, b, c) + except ValueError: return float('nan') +def ldexp(m, e): return math.ldexp(m, e) +def sign(f): return 1 if math.copysign(1.0, f) < 0 else 0 +def exponent(f): + if hasattr(f, '_bits') and hasattr(f, '_float') and f._float: + raw = f._val + if f._bits == 16: return (raw >> 10) & 0x1f + if f._bits == 32: return (raw >> 23) & 0xff + if f._bits == 64: return (raw >> 52) & 0x7ff + f = float(f) + if math.isinf(f) or math.isnan(f): return 255 + if f == 0.0: return 0 + try: bits = struct.unpack("> 23) & 0xff + except: return 0 +def signext(x): return int(x) +def cvtToQuietNAN(x): return float('nan') + +def F(x): + """32'F(x) or 64'F(x) - interpret x as float. 
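# ───────────────────────────────────────────────────────────────────────────────
# editor's note (illustrative, not part of the patch): quick sanity checks for
# the byte-wise helpers above; values worked out by hand from the definitions:
assert BYTE_PERMUTE(0x0102030405060708, 0) == 0x08       # sel 0-7 selects byte sel
assert BYTE_PERMUTE(0, 12) == 0x00 and BYTE_PERMUTE(0, 13) == 0xff
assert v_sad_u8(0x01020304, 0x04030201, 0) == 8          # |4-1|+|3-2|+|2-3|+|1-4|
assert v_msad_u8(0x01020304, 0x04000001, 0) == 6         # zero reference bytes skipped
# ───────────────────────────────────────────────────────────────────────────────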
If x is int, treat as bit pattern.""" + if isinstance(x, int): return _f32(x) + if isinstance(x, TypedView): return x + return float(x) + +# Constants +PI = math.pi +WAVE32, WAVE64 = True, False +OVERFLOW_F32, UNDERFLOW_F32 = float('inf'), 0.0 +OVERFLOW_F64, UNDERFLOW_F64 = float('inf'), 0.0 +MAX_FLOAT_F32 = 3.4028235e+38 +INF = _Inf() +ROUND_MODE = _RoundMode() +WAVE_MODE = _WaveMode() +DENORM = _Denorm() + # 2/PI with 1201 bits of precision for V_TRIG_PREOP_F64 TWO_OVER_PI_1201 = Reg(0x0145f306dc9c882a53f84eafa3ea69bb81b6c52b3278872083fca2c757bd778ac36e48dc74849ba5c00c925dd413a32439fc3bd63962534e7dd1046bea5d768909d338e04d68befc827323ac7306a673e93908bf177bf250763ff12fffbc0b301fde5e2316b414da3eda6cfd9e4f96136e9e8c7ecd3cbfd45aea4f758fd7cbe2f67a0e73ef14a525d4d7f6bf623f1aba10ac06608df8f6) + +# ═══════════════════════════════════════════════════════════════════════════════ +# COMPILER: pseudocode -> Python (minimal transforms) +# ═══════════════════════════════════════════════════════════════════════════════ + +def _compile_pseudocode(pseudocode: str) -> str: + """Compile pseudocode to Python. Transforms are minimal - most syntax just works.""" + pseudocode = re.sub(r'\bpass\b', 'pass_', pseudocode) # 'pass' is Python keyword + raw_lines = pseudocode.strip().split('\n') + joined_lines: list[str] = [] + for line in raw_lines: + line = line.strip() + if joined_lines and (joined_lines[-1].rstrip().endswith(('||', '&&', '(', ',')) or + (joined_lines[-1].count('(') > joined_lines[-1].count(')'))): + joined_lines[-1] = joined_lines[-1].rstrip() + ' ' + line + else: + joined_lines.append(line) + + lines = [] + indent, need_pass, in_first_match_loop = 0, False, False + for line in joined_lines: + line = line.split('//')[0].strip() # Strip C-style comments + if not line: continue + if line.startswith('if '): + lines.append(' ' * indent + f"if {_expr(line[3:].rstrip(' then'))}:") + indent += 1 + need_pass = True + elif line.startswith('elsif '): + if need_pass: lines.append(' ' * indent + "pass") + indent -= 1 + lines.append(' ' * indent + f"elif {_expr(line[6:].rstrip(' then'))}:") + indent += 1 + need_pass = True + elif line == 'else': + if need_pass: lines.append(' ' * indent + "pass") + indent -= 1 + lines.append(' ' * indent + "else:") + indent += 1 + need_pass = True + elif line.startswith('endif'): + if need_pass: lines.append(' ' * indent + "pass") + indent -= 1 + need_pass = False + elif line.startswith('endfor'): + if need_pass: lines.append(' ' * indent + "pass") + indent -= 1 + need_pass, in_first_match_loop = False, False + elif line.startswith('declare '): + pass + elif m := re.match(r'for (\w+) in (.+?)\s*:\s*(.+?) 
do', line): + start, end = _expr(m[2].strip()), _expr(m[3].strip()) + lines.append(' ' * indent + f"for {m[1]} in range({start}, int({end})+1):") + indent += 1 + need_pass, in_first_match_loop = True, True + elif '=' in line and not line.startswith('=='): + need_pass = False + line = line.rstrip(';') + if m := re.match(r'\{\s*D1\.[ui]1\s*,\s*D0\.[ui]64\s*\}\s*=\s*(.+)', line): + rhs = _expr(m[1]) + lines.append(' ' * indent + f"_full = {rhs}") + lines.append(' ' * indent + f"D0.u64 = int(_full) & 0xffffffffffffffff") + lines.append(' ' * indent + f"D1 = Reg((int(_full) >> 64) & 1)") + elif any(op in line for op in ('+=', '-=', '*=', '/=', '|=', '&=', '^=')): + for op in ('+=', '-=', '*=', '/=', '|=', '&=', '^='): + if op in line: + lhs, rhs = line.split(op, 1) + lines.append(' ' * indent + f"{lhs.strip()} {op} {_expr(rhs.strip())}") + break + else: + lhs, rhs = line.split('=', 1) + lhs_s, rhs_s = _expr(lhs.strip()), rhs.strip() + stmt = _assign(lhs_s, _expr(rhs_s)) + if in_first_match_loop and rhs_s == 'i' and (lhs_s == 'tmp' or lhs_s == 'D0.i32'): + stmt += "; break" + lines.append(' ' * indent + stmt) + if need_pass: lines.append(' ' * indent + "pass") + return '\n'.join(lines) + +def _assign(lhs: str, rhs: str) -> str: + if lhs in ('tmp', 'SCC', 'VCC', 'EXEC', 'D0', 'D1', 'saveexec', 'PC'): + return f"{lhs} = Reg({rhs})" + return f"{lhs} = {rhs}" + +def _expr(e: str) -> str: + e = e.strip() + e = e.replace('&&', ' and ').replace('||', ' or ').replace('<>', ' != ') + e = re.sub(r'!([^=])', r' not \1', e) + e = re.sub(r'\{\s*(\w+\.u32)\s*,\s*(\w+\.u32)\s*\}', r'_pack32(\1, \2)', e) + def pack(m): + hi, lo = _expr(m[1].strip()), _expr(m[2].strip()) + return f'_pack({hi}, {lo})' + e = re.sub(r'\{\s*([^,{}]+)\s*,\s*([^,{}]+)\s*\}', pack, e) + e = re.sub(r"1201'B\(2\.0\s*/\s*PI\)", "TWO_OVER_PI_1201", e) + e = re.sub(r"\d+'([0-9a-fA-Fx]+)[UuFf]*", r'\1', e) + e = re.sub(r"\d+'[FIBU]\(", "(", e) + e = re.sub(r'\bB\(', '(', e) + e = re.sub(r'([0-9a-fA-Fx])ULL\b', r'\1', e) + e = re.sub(r'([0-9a-fA-Fx])LL\b', r'\1', e) + e = re.sub(r'([0-9a-fA-Fx])U\b', r'\1', e) + e = re.sub(r'(\d\.?\d*)F\b', r'\1', e) + e = re.sub(r'(\[laneId\])\.[uib]\d+', r'\1', e) + e = e.replace('+INF', 'INF').replace('-INF', '(-INF)') + e = re.sub(r'NAN\.f\d+', 'float("nan")', e) + def convert_verilog_slice(m): + start, width = m.group(1).strip(), m.group(2).strip() + return f'[({start}) + ({width}) - 1 : ({start})]' + e = re.sub(r'\[([^:\[\]]+)\s*\+:\s*([^:\[\]]+)\]', convert_verilog_slice, e) + def process_brackets(s): + result, i = [], 0 + while i < len(s): + if s[i] == '[': + depth, start = 1, i + 1 + j = start + while j < len(s) and depth > 0: + if s[j] == '[': depth += 1 + elif s[j] == ']': depth -= 1 + j += 1 + inner = _expr(s[start:j-1]) + result.append('[' + inner + ']') + i = j + else: + result.append(s[i]) + i += 1 + return ''.join(result) + e = process_brackets(e) + while '?' in e: + depth, bracket, q = 0, 0, -1 + for i, c in enumerate(e): + if c == '(': depth += 1 + elif c == ')': depth -= 1 + elif c == '[': bracket += 1 + elif c == ']': bracket -= 1 + elif c == '?' 
and depth == 0 and bracket == 0: q = i; break + if q < 0: break + depth, bracket, col = 0, 0, -1 + for i in range(q + 1, len(e)): + if e[i] == '(': depth += 1 + elif e[i] == ')': depth -= 1 + elif e[i] == '[': bracket += 1 + elif e[i] == ']': bracket -= 1 + elif e[i] == ':' and depth == 0 and bracket == 0: col = i; break + if col < 0: break + cond, t, f = e[:q].strip(), e[q+1:col].strip(), e[col+1:].strip() + e = f'(({t}) if ({cond}) else ({f}))' + return e + +def _apply_pseudocode_fixes(op_name: str, code: str) -> str: + """Apply known fixes for PDF pseudocode bugs.""" + if op_name == 'V_DIV_FMAS_F32': + code = code.replace('D0.f32 = 2.0 ** 32 * fma(S0.f32, S1.f32, S2.f32)', + 'D0.f32 = (2.0 ** 64 if exponent(S2.f32) > 127 else 2.0 ** -64) * fma(S0.f32, S1.f32, S2.f32)') + if op_name == 'V_DIV_FMAS_F64': + code = code.replace('D0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)', + 'D0.f64 = (2.0 ** 128 if exponent(S2.f64) > 1023 else 2.0 ** -128) * fma(S0.f64, S1.f64, S2.f64)') + if op_name == 'V_DIV_SCALE_F32': + code = code.replace('D0.f32 = float("nan")', 'VCC = Reg(0x1); D0.f32 = float("nan")') + code = code.replace('elif S1.f32 == DENORM.f32:\n D0.f32 = ldexp(S0.f32, 64)', 'elif False:\n pass') + code += '\nif S1.f32 == DENORM.f32:\n D0.f32 = float("nan")' + code = code.replace('elif exponent(S2.f32) <= 23:\n D0.f32 = ldexp(S0.f32, 64)', 'elif exponent(S2.f32) <= 23:\n VCC = Reg(0x1); D0.f32 = ldexp(S0.f32, 64)') + code = code.replace('elif S2.f32 / S1.f32 == DENORM.f32:\n VCC = Reg(0x1)\n if S0.f32 == S2.f32:\n D0.f32 = ldexp(S0.f32, 64)', 'elif S2.f32 / S1.f32 == DENORM.f32:\n VCC = Reg(0x1)') + if op_name == 'V_DIV_SCALE_F64': + code = code.replace('D0.f64 = float("nan")', 'VCC = Reg(0x1); D0.f64 = float("nan")') + code = code.replace('elif S1.f64 == DENORM.f64:\n D0.f64 = ldexp(S0.f64, 128)', 'elif False:\n pass') + code += '\nif S1.f64 == DENORM.f64:\n D0.f64 = float("nan")' + code = code.replace('elif exponent(S2.f64) <= 52:\n D0.f64 = ldexp(S0.f64, 128)', 'elif exponent(S2.f64) <= 52:\n VCC = Reg(0x1); D0.f64 = ldexp(S0.f64, 128)') + code = code.replace('elif S2.f64 / S1.f64 == DENORM.f64:\n VCC = Reg(0x1)\n if S0.f64 == S2.f64:\n D0.f64 = ldexp(S0.f64, 128)', 'elif S2.f64 / S1.f64 == DENORM.f64:\n VCC = Reg(0x1)') + if op_name == 'V_DIV_FIXUP_F32': + code = code.replace('D0.f32 = ((-abs(S0.f32)) if (sign_out) else (abs(S0.f32)))', + 'D0.f32 = ((-OVERFLOW_F32) if (sign_out) else (OVERFLOW_F32)) if isNAN(S0.f32) else ((-abs(S0.f32)) if (sign_out) else (abs(S0.f32)))') + if op_name == 'V_DIV_FIXUP_F64': + code = code.replace('D0.f64 = ((-abs(S0.f64)) if (sign_out) else (abs(S0.f64)))', + 'D0.f64 = ((-OVERFLOW_F64) if (sign_out) else (OVERFLOW_F64)) if isNAN(S0.f64) else ((-abs(S0.f64)) if (sign_out) else (abs(S0.f64)))') + if op_name == 'V_TRIG_PREOP_F64': + code = code.replace('result = F((TWO_OVER_PI_1201[1200 : 0] << shift.u32) & 0x1fffffffffffff)', + 'result = float(((TWO_OVER_PI_1201[1200 : 0] << int(shift)) >> (1201 - 53)) & 0x1fffffffffffff)') + return code + +def _generate_function(cls_name: str, op_name: str, pc: str, code: str) -> str: + """Generate a single compiled pseudocode function. + Functions take int parameters and return dict of int values. 
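# ───────────────────────────────────────────────────────────────────────────────
# editor's sketch (illustrative, not part of the patch): the transforms above in
# action. _expr rewrites a C-style ternary into Python,
#   _expr("S0.u32 >= S1.u32 ? S0.u32 : S1.u32")
#   -> "((S0.u32) if (S0.u32 >= S1.u32) else (S1.u32))"
# and the public compile_pseudocode(cls_name, op_name, pcode) wrapper imported by
# emu.py above (assumed to exec the source that _generate_function emits) yields
# a callable following the (s0, s1, s2, d0, scc, vcc, laneId, exec_mask,
# literal, VGPR) convention. 'DEMO' and its one-line pcode are hypothetical:
fn = compile_pseudocode('SOP1Op', 'DEMO', 'D0.u32 = S0.u32 + S1.u32')
assert fn(2, 3, 0, 0, 0, 0, 0, 1, 0, None) == {'D0': 5}
# ───────────────────────────────────────────────────────────────────────────────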
+def _generate_function(cls_name: str, op_name: str, pc: str, code: str) -> str:
+  """Generate a single compiled pseudocode function.
+  Functions take int parameters and return dict of int values.
+  Reg wrapping happens inside the function, only for registers actually used."""
+  has_d1 = '{ D1' in pc
+  is_cmpx = (cls_name in ('VOPCOp', 'VOP3Op')) and 'EXEC.u64[laneId]' in pc
+  is_div_scale = 'DIV_SCALE' in op_name
+  has_sdst = cls_name == 'VOP3SDOp' and ('VCC.u64[laneId]' in pc or is_div_scale)
+  is_ds = cls_name == 'DSOp'
+  is_flat = cls_name in ('FLATOp', 'GLOBALOp', 'SCRATCHOp')
+  is_smem = cls_name == 'SMEMOp'
+  has_s_array = 'S[i]' in pc  # FMA_MIX style: S[0], S[1], S[2] array access
+  combined = code + pc
+
+  fn_name = f"_{cls_name}_{op_name}"
+
+  # Detect which registers are used/modified
+  def needs_init(name): return name in combined and not re.search(rf'^\s*{name}\s*=\s*Reg\(', code, re.MULTILINE)
+  modifies_d0 = is_div_scale or bool(re.search(r'\bD0\b[.\[]', combined))
+  modifies_exec = is_cmpx or bool(re.search(r'EXEC\.(u32|u64|b32|b64)\s*=', combined))
+  modifies_vcc = has_sdst or bool(re.search(r'VCC\.(u32|u64|b32|b64)\s*=|VCC\.u64\[laneId\]\s*=', combined))
+  modifies_scc = bool(re.search(r'\bSCC\s*=', combined))
+  modifies_pc = bool(re.search(r'\bPC\s*=', combined))
+
+  # Build function signature and Reg init lines
+  if is_smem:
+    lines = [f"def {fn_name}(MEM, addr):"]
+    reg_inits = ["ADDR=Reg(addr)", "SDATA=Reg(0)"]
+    special_regs = []
+  elif is_ds:
+    lines = [f"def {fn_name}(MEM, addr, data0, data1, offset0, offset1):"]
+    reg_inits = ["ADDR=Reg(addr)", "DATA0=Reg(data0)", "DATA1=Reg(data1)", "OFFSET0=Reg(offset0)", "OFFSET1=Reg(offset1)", "RETURN_DATA=Reg(0)"]
+    special_regs = [('DATA', 'DATA0'), ('DATA2', 'DATA1'), ('OFFSET', 'OFFSET0'), ('ADDR_BASE', 'ADDR')]
+  elif is_flat:
+    lines = [f"def {fn_name}(MEM, addr, vdata, vdst):"]
+    reg_inits = ["ADDR=addr", "VDATA=Reg(vdata)", "VDST=Reg(vdst)", "RETURN_DATA=Reg(0)"]
+    special_regs = [('DATA', 'VDATA')]
+  elif has_s_array:
+    # FMA_MIX style: needs S[i] array, opsel, opsel_hi for source selection (neg/neg_hi applied in emu.py before call)
+    lines = [f"def {fn_name}(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0):"]
+    reg_inits = ["S0=Reg(s0)", "S1=Reg(s1)", "S2=Reg(s2)", "S=[S0,S1,S2]", "D0=Reg(d0)", "OPSEL=Reg(opsel)", "OPSEL_HI=Reg(opsel_hi)"]
+    special_regs = []
+    # Detect array declarations like "declare in : 32'F[3]" and create them (rename 'in' to 'ins' since 'in' is a keyword)
+    if "in[" in combined:
+      reg_inits.append("ins=[Reg(0),Reg(0),Reg(0)]")
+      code = code.replace("in[", "ins[")
+  else:
+    lines = [f"def {fn_name}(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):"]
+    # Only create Regs for registers actually used in the pseudocode
+    reg_inits = []
+    if 'S0' in combined: reg_inits.append("S0=Reg(s0)")
+    if 'S1' in combined: reg_inits.append("S1=Reg(s1)")
+    if 'S2' in combined: reg_inits.append("S2=Reg(s2)")
+    if modifies_d0 or 'D0' in combined: reg_inits.append("D0=Reg(s0)" if is_div_scale else "D0=Reg(d0)")
+    if modifies_scc or 'SCC' in combined: reg_inits.append("SCC=Reg(scc)")
+    if modifies_vcc or 'VCC' in combined: reg_inits.append("VCC=Reg(vcc)")
+    if modifies_exec or 'EXEC' in combined: reg_inits.append("EXEC=Reg(exec_mask)")
+    if modifies_pc or 'PC' in combined: reg_inits.append("PC=Reg(pc) if pc is not None else None")
+    special_regs = [('D1', 'Reg(0)'), ('SIMM16', 'Reg(literal)'), ('SIMM32', 'Reg(literal)'),
+                    ('SRC0', 'Reg(src0_idx)'), ('VDST', 'Reg(vdst_idx)')]
+    if needs_init('tmp'): special_regs.insert(0, ('tmp', 'Reg(0)'))
+    if needs_init('saveexec'): special_regs.insert(0, ('saveexec', 'Reg(EXEC._val)'))
+
+  # Build init code
+  init_parts = reg_inits.copy()
+  for name, init in special_regs:
+    if name in combined: init_parts.append(f"{name}={init}")
+  if 'EXEC_LO' in code: init_parts.append("EXEC_LO=TypedView(EXEC, 31, 0)")
+  if 'EXEC_HI' in code: init_parts.append("EXEC_HI=TypedView(EXEC, 63, 32)")
+  if 'VCCZ' in code and not re.search(r'^\s*VCCZ\s*=', code, re.MULTILINE): init_parts.append("VCCZ=Reg(1 if VCC._val == 0 else 0)")
+  if 'EXECZ' in code and not re.search(r'^\s*EXECZ\s*=', code, re.MULTILINE): init_parts.append("EXECZ=Reg(1 if EXEC._val == 0 else 0)")
+
+  # Add init line
+  if init_parts: lines.append(f" {'; '.join(init_parts)}")
+
+  # Add compiled pseudocode
+  for line in code.split('\n'):
+    if line.strip(): lines.append(f" {line}")
+
+  # Build result dict
+  result_items = []
+  if modifies_d0: result_items.append("'D0': D0._val")
+  if modifies_scc: result_items.append("'SCC': SCC._val")
+  if modifies_vcc: result_items.append("'VCC': VCC._val")
+  if modifies_exec: result_items.append("'EXEC': EXEC._val")
+  if has_d1: result_items.append("'D1': D1._val")
+  if modifies_pc: result_items.append("'PC': PC._val")
+  if is_smem and 'SDATA' in combined and re.search(r'^\s*SDATA[\.\[].*=', code, re.MULTILINE):
+    result_items.append("'SDATA': SDATA._val")
+  if is_ds and 'RETURN_DATA' in combined and re.search(r'^\s*RETURN_DATA[\.\[].*=', code, re.MULTILINE):
+    result_items.append("'RETURN_DATA': RETURN_DATA._val")
+  if is_flat:
+    if 'RETURN_DATA' in combined and re.search(r'^\s*RETURN_DATA[\.\[].*=', code, re.MULTILINE):
+      result_items.append("'RETURN_DATA': RETURN_DATA._val")
+    if re.search(r'^\s*VDATA[\.\[].*=', code, re.MULTILINE):
+      result_items.append("'VDATA': VDATA._val")
+  lines.append(f" return {{{', '.join(result_items)}}}")
+  return '\n'.join(lines)
+
+# Build the globals dict for exec() - includes all pcode symbols
+_PCODE_GLOBALS = {
+  'Reg': Reg, 'TypedView': TypedView, '_pack': _pack, '_pack32': _pack32,
+  'ABSDIFF': ABSDIFF, 'BYTE_PERMUTE': BYTE_PERMUTE, 'DENORM': DENORM, 'F': F,
+  'GT_NEG_ZERO': GT_NEG_ZERO, 'LT_NEG_ZERO': LT_NEG_ZERO, 'INF': INF,
+  'MAX_FLOAT_F32': MAX_FLOAT_F32, 'OVERFLOW_F32': OVERFLOW_F32, 'OVERFLOW_F64': OVERFLOW_F64,
+  'UNDERFLOW_F32': UNDERFLOW_F32, 'UNDERFLOW_F64': UNDERFLOW_F64,
+  'PI': PI, 'ROUND_MODE': ROUND_MODE, 'WAVE_MODE': WAVE_MODE,
+  'WAVE32': WAVE32, 'WAVE64': WAVE64, 'TWO_OVER_PI_1201': TWO_OVER_PI_1201,
+  'SAT8': SAT8, 'trunc': trunc, 'floor': floor, 'ceil': ceil, 'sqrt': sqrt,
+  'log2': log2, 'fract': fract, 'sin': sin, 'cos': cos, 'pow': pow,
+  'isEven': isEven, 'mantissa': mantissa, 'signext_from_bit': signext_from_bit,
+  'i32_to_f32': i32_to_f32, 'u32_to_f32': u32_to_f32, 'i32_to_f64': i32_to_f64,
+  'u32_to_f64': u32_to_f64, 'f32_to_f64': f32_to_f64, 'f64_to_f32': f64_to_f32,
+  'f32_to_i32': f32_to_i32, 'f32_to_u32': f32_to_u32, 'f64_to_i32': f64_to_i32,
+  'f64_to_u32': f64_to_u32, 'f32_to_f16': f32_to_f16, 'f16_to_f32': f16_to_f32,
+  'i16_to_f16': i16_to_f16, 'u16_to_f16': u16_to_f16, 'f16_to_i16': f16_to_i16,
+  'f16_to_u16': f16_to_u16, 'bf16_to_f32': bf16_to_f32, 'f32_to_bf16': f32_to_bf16,
+  'u8_to_u32': u8_to_u32, 'u4_to_u32': u4_to_u32, 'u32_to_u16': u32_to_u16,
+  'i32_to_i16': i32_to_i16, 'f16_to_snorm': f16_to_snorm, 'f16_to_unorm': f16_to_unorm,
+  'f32_to_snorm': f32_to_snorm, 'f32_to_unorm': f32_to_unorm,
+  'v_cvt_i16_f32': v_cvt_i16_f32, 'v_cvt_u16_f32': v_cvt_u16_f32, 'f32_to_u8': f32_to_u8,
+  'v_min_f32': v_min_f32, 'v_max_f32': v_max_f32, 'v_min_f16': v_min_f16, 'v_max_f16': v_max_f16,
+  'v_min_i32': v_min_i32, 'v_max_i32': v_max_i32, 'v_min_i16': v_min_i16, 'v_max_i16': v_max_i16,
+  'v_min_u32': v_min_u32, 'v_max_u32': v_max_u32, 'v_min_u16': v_min_u16, 'v_max_u16': v_max_u16,
+  'v_min3_f32': v_min3_f32, 'v_max3_f32': v_max3_f32, 'v_min3_f16': v_min3_f16, 'v_max3_f16': v_max3_f16,
+  'v_min3_i32': v_min3_i32, 'v_max3_i32': v_max3_i32, 'v_min3_i16': v_min3_i16, 'v_max3_i16': v_max3_i16,
+  'v_min3_u32': v_min3_u32, 'v_max3_u32': v_max3_u32, 'v_min3_u16': v_min3_u16, 'v_max3_u16': v_max3_u16,
+  'v_sad_u8': v_sad_u8, 'v_msad_u8': v_msad_u8,
+  's_ff1_i32_b32': s_ff1_i32_b32, 's_ff1_i32_b64': s_ff1_i32_b64,
+  'isNAN': isNAN, 'isQuietNAN': isQuietNAN, 'isSignalNAN': isSignalNAN,
+  'fma': fma, 'ldexp': ldexp, 'sign': sign, 'exponent': exponent,
+  'signext': signext, 'cvtToQuietNAN': cvtToQuietNAN,
+}
+
+@functools.cache
+def compile_pseudocode(cls_name: str, op_name: str, pseudocode: str):
+  """Compile pseudocode string to executable function. Cached for performance."""
+  code = _compile_pseudocode(pseudocode)
+  code = _apply_pseudocode_fixes(op_name, code)
+  fn_code = _generate_function(cls_name, op_name, pseudocode, code)
+  fn_name = f"_{cls_name}_{op_name}"
+  local_ns = {}
+  exec(fn_code, _PCODE_GLOBALS, local_ns)
+  return local_ns[fn_name]
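A minimal end-to-end sketch of the new JIT path. The pseudocode string is the S_ADD_U32 pattern from the tests below; the flat positional signature (s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR) matches _generate_function above, and the argument values are illustrative:

    from extra.assembly.amd.pcode import compile_pseudocode
    fn = compile_pseudocode('SOP2Op', 'S_ADD_U32',
      "tmp = 64'U(S0.u32) + 64'U(S1.u32)\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U\nD0.u32 = tmp.u32")
    out = fn(0xffffffff, 1, 0, 0, 0, 0, 0, 0xffffffffffffffff, 0, None)
    # expected: out == {'D0': 0, 'SCC': 1} (carry out of the 32-bit add)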
diff --git a/extra/assembly/amd/pdf.py b/extra/assembly/amd/pdf.py
index 9e3f96ae98..7079c2b07b 100644
--- a/extra/assembly/amd/pdf.py
+++ b/extra/assembly/amd/pdf.py
@@ -38,161 +38,7 @@ FLOAT_MAP = {'0.5': 'POS_HALF', '-0.5': 'NEG_HALF', '1.0': 'POS_ONE', '-1.0': 'N
                '4.0': 'POS_FOUR', '-4.0': 'NEG_FOUR', '1/(2*PI)': 'INV_2PI', '0': 'ZERO'}
 
 INST_PATTERN = re.compile(r'^([SVD]S?_[A-Z0-9_]+|(?:FLAT|GLOBAL|SCRATCH)_[A-Z0-9_]+)\s+(\d+)\s*$', re.M)
 
-# Patterns that can't be handled by the DSL (require special handling in emu.py)
-UNSUPPORTED = ['SGPR[', 'V_SWAP', 'eval ', 'FATAL_HALT', 'HW_REGISTERS',
-               'vscnt', 'vmcnt', 'expcnt', 'lgkmcnt',
-               'CVT_OFF_TABLE', 'ThreadMask',
-               'S1[i', 'C.i32', 'thread_',
-               'if n.', 'DST.u32', 'addrd = DST', 'addr = DST',
-               'BARRIER_STATE', 'ReallocVgprs',
-               'GPR_IDX', 'VSKIP', 'specified in', 'TTBL',
-               'fp6', 'bf6', 'GS_REGS', 'M0.base', 'DS_DATA', '= 0..', 'sign(src', 'if no LDS', 'gds_base', 'vector mask',
-               'SGPR_ADDR', 'INST_OFFSET', 'laneID']  # FLAT ops with non-standard vars
-
-# ═══════════════════════════════════════════════════════════════════════════════
-# COMPILER: pseudocode -> Python (minimal transforms)
-# ═══════════════════════════════════════════════════════════════════════════════
-
-def compile_pseudocode(pseudocode: str) -> str:
-  """Compile pseudocode to Python. Transforms are minimal - most syntax just works."""
-  pseudocode = re.sub(r'\bpass\b', 'pass_', pseudocode)  # 'pass' is Python keyword
-  raw_lines = pseudocode.strip().split('\n')
-  joined_lines: list[str] = []
-  for line in raw_lines:
-    line = line.strip()
-    if joined_lines and (joined_lines[-1].rstrip().endswith(('||', '&&', '(', ',')) or
-                         (joined_lines[-1].count('(') > joined_lines[-1].count(')'))):
-      joined_lines[-1] = joined_lines[-1].rstrip() + ' ' + line
-    else:
-      joined_lines.append(line)
-
-  lines = []
-  indent, need_pass, in_first_match_loop = 0, False, False
-  for line in joined_lines:
-    line = line.split('//')[0].strip()  # Strip C-style comments
-    if not line: continue
-    if line.startswith('if '):
-      lines.append(' ' * indent + f"if {_expr(line[3:].rstrip(' then'))}:")
-      indent += 1
-      need_pass = True
-    elif line.startswith('elsif '):
-      if need_pass: lines.append(' ' * indent + "pass")
-      indent -= 1
-      lines.append(' ' * indent + f"elif {_expr(line[6:].rstrip(' then'))}:")
-      indent += 1
-      need_pass = True
-    elif line == 'else':
-      if need_pass: lines.append(' ' * indent + "pass")
-      indent -= 1
-      lines.append(' ' * indent + "else:")
-      indent += 1
-      need_pass = True
-    elif line.startswith('endif'):
-      if need_pass: lines.append(' ' * indent + "pass")
-      indent -= 1
-      need_pass = False
-    elif line.startswith('endfor'):
-      if need_pass: lines.append(' ' * indent + "pass")
-      indent -= 1
-      need_pass, in_first_match_loop = False, False
-    elif line.startswith('declare '):
-      pass
-    elif m := re.match(r'for (\w+) in (.+?)\s*:\s*(.+?) do', line):
-      start, end = _expr(m[2].strip()), _expr(m[3].strip())
-      lines.append(' ' * indent + f"for {m[1]} in range({start}, int({end})+1):")
-      indent += 1
-      need_pass, in_first_match_loop = True, True
-    elif '=' in line and not line.startswith('=='):
-      need_pass = False
-      line = line.rstrip(';')
-      if m := re.match(r'\{\s*D1\.[ui]1\s*,\s*D0\.[ui]64\s*\}\s*=\s*(.+)', line):
-        rhs = _expr(m[1])
-        lines.append(' ' * indent + f"_full = {rhs}")
-        lines.append(' ' * indent + f"D0.u64 = int(_full) & 0xffffffffffffffff")
-        lines.append(' ' * indent + f"D1 = Reg((int(_full) >> 64) & 1)")
-      elif any(op in line for op in ('+=', '-=', '*=', '/=', '|=', '&=', '^=')):
-        for op in ('+=', '-=', '*=', '/=', '|=', '&=', '^='):
-          if op in line:
-            lhs, rhs = line.split(op, 1)
-            lines.append(' ' * indent + f"{lhs.strip()} {op} {_expr(rhs.strip())}")
-            break
-      else:
-        lhs, rhs = line.split('=', 1)
-        lhs_s, rhs_s = _expr(lhs.strip()), rhs.strip()
-        stmt = _assign(lhs_s, _expr(rhs_s))
-        if in_first_match_loop and rhs_s == 'i' and (lhs_s == 'tmp' or lhs_s == 'D0.i32'):
-          stmt += "; break"
-        lines.append(' ' * indent + stmt)
-  if need_pass: lines.append(' ' * indent + "pass")
-  return '\n'.join(lines)
-
-def _assign(lhs: str, rhs: str) -> str:
-  if lhs in ('tmp', 'SCC', 'VCC', 'EXEC', 'D0', 'D1', 'saveexec', 'PC'):
-    return f"{lhs} = Reg({rhs})"
-  return f"{lhs} = {rhs}"
-
-def _expr(e: str) -> str:
-  e = e.strip()
-  e = e.replace('&&', ' and ').replace('||', ' or ').replace('<>', ' != ')
-  e = re.sub(r'!([^=])', r' not \1', e)
-  e = re.sub(r'\{\s*(\w+\.u32)\s*,\s*(\w+\.u32)\s*\}', r'_pack32(\1, \2)', e)
-  def pack(m):
-    hi, lo = _expr(m[1].strip()), _expr(m[2].strip())
-    return f'_pack({hi}, {lo})'
-  e = re.sub(r'\{\s*([^,{}]+)\s*,\s*([^,{}]+)\s*\}', pack, e)
-  e = re.sub(r"1201'B\(2\.0\s*/\s*PI\)", "TWO_OVER_PI_1201", e)
-  e = re.sub(r"\d+'([0-9a-fA-Fx]+)[UuFf]*", r'\1', e)
-  e = re.sub(r"\d+'[FIBU]\(", "(", e)
-  e = re.sub(r'\bB\(', '(', e)
-  e = re.sub(r'([0-9a-fA-Fx])ULL\b', r'\1', e)
-  e = re.sub(r'([0-9a-fA-Fx])LL\b', r'\1', e)
-  e = re.sub(r'([0-9a-fA-Fx])U\b', r'\1', e)
-  e = re.sub(r'(\d\.?\d*)F\b', r'\1', e)
-  e = re.sub(r'(\[laneId\])\.[uib]\d+', r'\1', e)
-  e = e.replace('+INF', 'INF').replace('-INF', '(-INF)')
-  e = re.sub(r'NAN\.f\d+', 'float("nan")', e)
-  def convert_verilog_slice(m):
-    start, width = m.group(1).strip(), m.group(2).strip()
-    return f'[({start}) + ({width}) - 1 : ({start})]'
-  e = re.sub(r'\[([^:\[\]]+)\s*\+:\s*([^:\[\]]+)\]', convert_verilog_slice, e)
-  def process_brackets(s):
-    result, i = [], 0
-    while i < len(s):
-      if s[i] == '[':
-        depth, start = 1, i + 1
-        j = start
-        while j < len(s) and depth > 0:
-          if s[j] == '[': depth += 1
-          elif s[j] == ']': depth -= 1
-          j += 1
-        inner = _expr(s[start:j-1])
-        result.append('[' + inner + ']')
-        i = j
-      else:
-        result.append(s[i])
-        i += 1
-    return ''.join(result)
-  e = process_brackets(e)
-  while '?' in e:
-    depth, bracket, q = 0, 0, -1
-    for i, c in enumerate(e):
-      if c == '(': depth += 1
-      elif c == ')': depth -= 1
-      elif c == '[': bracket += 1
-      elif c == ']': bracket -= 1
-      elif c == '?' and depth == 0 and bracket == 0: q = i; break
-    if q < 0: break
-    depth, bracket, col = 0, 0, -1
-    for i in range(q + 1, len(e)):
-      if e[i] == '(': depth += 1
-      elif e[i] == ')': depth -= 1
-      elif e[i] == '[': bracket += 1
-      elif e[i] == ']': bracket -= 1
-      elif e[i] == ':' and depth == 0 and bracket == 0: col = i; break
-    if col < 0: break
-    cond, t, f = e[:q].strip(), e[q+1:col].strip(), e[col+1:].strip()
-    e = f'(({t}) if ({cond}) else ({f}))'
-  return e
 
 # ═══════════════════════════════════════════════════════════════════════════════
 # PDF PARSING WITH PAGE CACHING
@@ -472,8 +318,8 @@ def _generate_ins_py(formats, enums, src_enum, doc_name) -> str:
   if "NULL" in src_names: lines.append("OFF = NULL\n")
   return '\n'.join(lines)
 
-def _generate_gen_pcode_py(enums, pseudocode, arch) -> str:
-  """Generate gen_pcode.py content (compiled pseudocode functions)."""
+def _generate_str_pcode_py(enums, pseudocode, arch) -> str:
+  """Generate str_pcode.py content (raw pseudocode strings)."""
   # Get op enums for this arch (import from .ins which re-exports from .enum)
   import importlib
   autogen = importlib.import_module(f"extra.assembly.amd.autogen.{arch}.ins")
@@ -491,186 +337,35 @@ def _generate_gen_pcode_py(enums, pseudocode, arch) -> str:
     if key in defined_ops:
       for enum_cls, enum_val in defined_ops[key]:
         instructions[enum_cls][enum_val] = pc
-  # First pass: generate all function code
-  fn_lines: list[str] = []
-  all_fn_entries: dict = {}
-  for enum_cls in OP_ENUMS:
-    cls_name = enum_cls.__name__
-    if not instructions.get(enum_cls): continue
-    fn_entries = []
-    for op, pc in instructions[enum_cls].items():
-      if any(p in pc for p in UNSUPPORTED): continue
-      try:
-        code = compile_pseudocode(pc)
-        code = _apply_pseudocode_fixes(op, code)
-        fn_name, fn_code = _generate_function(cls_name, op, pc, code)
-        fn_lines.append(fn_code)
-        fn_entries.append((op, fn_name))
-      except Exception as e: print(f" Warning: Failed to compile {op.name}: {e}")
-    if fn_entries:
-      all_fn_entries[enum_cls] = fn_entries
-      fn_lines.append(f'{cls_name}_FUNCTIONS = {{')
-      for op, fn_name in fn_entries: fn_lines.append(f" {cls_name}.{op.name}: {fn_name},")
-      fn_lines.append('}\n')
-
-  fn_lines.append('COMPILED_FUNCTIONS = {')
-  for enum_cls in OP_ENUMS:
-    if all_fn_entries.get(enum_cls): fn_lines.append(f' {enum_cls.__name__}: {enum_cls.__name__}_FUNCTIONS,')
-  fn_lines.append('}')
-
-  # Second pass: scan generated code for pcode imports
-  fn_code_str = '\n'.join(fn_lines)
-  import extra.assembly.amd.pcode as pcode_module
-  pcode_exports = [name for name in dir(pcode_module) if not name.startswith('_') or name.startswith('_') and not name.startswith('__')]
-  used_imports = sorted(name for name in pcode_exports if re.search(rf'\b{re.escape(name)}\b', fn_code_str))
-
-  # Build final output with explicit imports
+  # Build string dictionaries for each enum
   lines = [f'''# autogenerated by pdf.py - do not edit
 # to regenerate: python -m extra.assembly.amd.pdf --arch {arch}
 # ruff: noqa: E501
-# mypy: ignore-errors
 from extra.assembly.amd.autogen.{arch}.enum import {", ".join(enum_names)}
-from extra.assembly.amd.pcode import {", ".join(used_imports)}
-'''] + fn_lines
+''']
+  all_dict_entries: dict = {}
+  for enum_cls in OP_ENUMS:
+    cls_name = enum_cls.__name__
+    if not instructions.get(enum_cls): continue
+    dict_entries = [(op, repr(pc)) for op, pc in instructions[enum_cls].items()]
+    if dict_entries:
+      all_dict_entries[enum_cls] = dict_entries
+      lines.append(f'{cls_name}_PCODE = {{')
+      for op, escaped in dict_entries: lines.append(f" {cls_name}.{op.name}: {escaped},")
+      lines.append('}\n')
+
+  lines.append('PSEUDOCODE_STRINGS = {')
+  for enum_cls in OP_ENUMS:
+    if all_dict_entries.get(enum_cls): lines.append(f' {enum_cls.__name__}: {enum_cls.__name__}_PCODE,')
+  lines.append('}')
   return '\n'.join(lines)
 
-def _apply_pseudocode_fixes(op, code: str) -> str:
-  """Apply known fixes for PDF pseudocode bugs."""
-  if op.name == 'V_DIV_FMAS_F32':
-    code = code.replace('D0.f32 = 2.0 ** 32 * fma(S0.f32, S1.f32, S2.f32)',
-      'D0.f32 = (2.0 ** 64 if exponent(S2.f32) > 127 else 2.0 ** -64) * fma(S0.f32, S1.f32, S2.f32)')
-  if op.name == 'V_DIV_FMAS_F64':
-    code = code.replace('D0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)',
-      'D0.f64 = (2.0 ** 128 if exponent(S2.f64) > 1023 else 2.0 ** -128) * fma(S0.f64, S1.f64, S2.f64)')
-  if op.name == 'V_DIV_SCALE_F32':
-    code = code.replace('D0.f32 = float("nan")', 'VCC = Reg(0x1); D0.f32 = float("nan")')
-    code = code.replace('elif S1.f32 == DENORM.f32:\n D0.f32 = ldexp(S0.f32, 64)', 'elif False:\n pass')
-    code += '\nif S1.f32 == DENORM.f32:\n D0.f32 = float("nan")'
-    code = code.replace('elif exponent(S2.f32) <= 23:\n D0.f32 = ldexp(S0.f32, 64)', 'elif exponent(S2.f32) <= 23:\n VCC = Reg(0x1); D0.f32 = ldexp(S0.f32, 64)')
-    code = code.replace('elif S2.f32 / S1.f32 == DENORM.f32:\n VCC = Reg(0x1)\n if S0.f32 == S2.f32:\n  D0.f32 = ldexp(S0.f32, 64)', 'elif S2.f32 / S1.f32 == DENORM.f32:\n VCC = Reg(0x1)')
-  if op.name == 'V_DIV_SCALE_F64':
-    code = code.replace('D0.f64 = float("nan")', 'VCC = Reg(0x1); D0.f64 = float("nan")')
-    code = code.replace('elif S1.f64 == DENORM.f64:\n D0.f64 = ldexp(S0.f64, 128)', 'elif False:\n pass')
-    code += '\nif S1.f64 == DENORM.f64:\n D0.f64 = float("nan")'
-    code = code.replace('elif exponent(S2.f64) <= 52:\n D0.f64 = ldexp(S0.f64, 128)', 'elif exponent(S2.f64) <= 52:\n VCC = Reg(0x1); D0.f64 = ldexp(S0.f64, 128)')
-    code = code.replace('elif S2.f64 / S1.f64 == DENORM.f64:\n VCC = Reg(0x1)\n if S0.f64 == S2.f64:\n  D0.f64 = ldexp(S0.f64, 128)', 'elif S2.f64 / S1.f64 == DENORM.f64:\n VCC = Reg(0x1)')
-  if op.name == 'V_DIV_FIXUP_F32':
-    code = code.replace('D0.f32 = ((-abs(S0.f32)) if (sign_out) else (abs(S0.f32)))',
-      'D0.f32 = ((-OVERFLOW_F32) if (sign_out) else (OVERFLOW_F32)) if isNAN(S0.f32) else ((-abs(S0.f32)) if (sign_out) else (abs(S0.f32)))')
-  if op.name == 'V_DIV_FIXUP_F64':
-    code = code.replace('D0.f64 = ((-abs(S0.f64)) if (sign_out) else (abs(S0.f64)))',
-      'D0.f64 = ((-OVERFLOW_F64) if (sign_out) else (OVERFLOW_F64)) if isNAN(S0.f64) else ((-abs(S0.f64)) if (sign_out) else (abs(S0.f64)))')
-  if op.name == 'V_TRIG_PREOP_F64':
-    code = code.replace('result = F((TWO_OVER_PI_1201[1200 : 0] << shift.u32) & 0x1fffffffffffff)',
-      'result = float(((TWO_OVER_PI_1201[1200 : 0] << int(shift)) >> (1201 - 53)) & 0x1fffffffffffff)')
-  return code
-
-def _generate_function(cls_name: str, op, pc: str, code: str) -> tuple[str, str]:
-  """Generate a single compiled pseudocode function.
-  Functions take int parameters and return dict of int values.
-  Reg wrapping happens inside the function, only for registers actually used."""
-  has_d1 = '{ D1' in pc
-  is_cmpx = (cls_name in ('VOPCOp', 'VOP3Op')) and 'EXEC.u64[laneId]' in pc
-  is_div_scale = 'DIV_SCALE' in op.name
-  has_sdst = cls_name == 'VOP3SDOp' and ('VCC.u64[laneId]' in pc or is_div_scale)
-  is_ds = cls_name == 'DSOp'
-  is_flat = cls_name in ('FLATOp', 'GLOBALOp', 'SCRATCHOp')
-  is_smem = cls_name == 'SMEMOp'
-  has_s_array = 'S[i]' in pc  # FMA_MIX style: S[0], S[1], S[2] array access
-  combined = code + pc
-
-  fn_name = f"_{cls_name}_{op.name}"
-
-  # Detect which registers are used/modified
-  def needs_init(name): return name in combined and not re.search(rf'^\s*{name}\s*=\s*Reg\(', code, re.MULTILINE)
-  modifies_d0 = is_div_scale or bool(re.search(r'\bD0\b[.\[]', combined))
-  modifies_exec = is_cmpx or bool(re.search(r'EXEC\.(u32|u64|b32|b64)\s*=', combined))
-  modifies_vcc = has_sdst or bool(re.search(r'VCC\.(u32|u64|b32|b64)\s*=|VCC\.u64\[laneId\]\s*=', combined))
-  modifies_scc = bool(re.search(r'\bSCC\s*=', combined))
-  modifies_pc = bool(re.search(r'\bPC\s*=', combined))
-
-  # Build function signature and Reg init lines
-  if is_smem:
-    lines = [f"def {fn_name}(MEM, addr):"]
-    reg_inits = ["ADDR=Reg(addr)", "SDATA=Reg(0)"]
-    special_regs = []
-  elif is_ds:
-    lines = [f"def {fn_name}(MEM, addr, data0, data1, offset0, offset1):"]
-    reg_inits = ["ADDR=Reg(addr)", "DATA0=Reg(data0)", "DATA1=Reg(data1)", "OFFSET0=Reg(offset0)", "OFFSET1=Reg(offset1)", "RETURN_DATA=Reg(0)"]
-    special_regs = [('DATA', 'DATA0'), ('DATA2', 'DATA1'), ('OFFSET', 'OFFSET0'), ('ADDR_BASE', 'ADDR')]
-  elif is_flat:
-    lines = [f"def {fn_name}(MEM, addr, vdata, vdst):"]
-    reg_inits = ["ADDR=addr", "VDATA=Reg(vdata)", "VDST=Reg(vdst)", "RETURN_DATA=Reg(0)"]
-    special_regs = [('DATA', 'VDATA')]
-  elif has_s_array:
-    # FMA_MIX style: needs S[i] array, opsel, opsel_hi for source selection (neg/neg_hi applied in emu.py before call)
-    lines = [f"def {fn_name}(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None, opsel=0, opsel_hi=0):"]
-    reg_inits = ["S0=Reg(s0)", "S1=Reg(s1)", "S2=Reg(s2)", "S=[S0,S1,S2]", "D0=Reg(d0)", "OPSEL=Reg(opsel)", "OPSEL_HI=Reg(opsel_hi)"]
-    special_regs = []
-    # Detect array declarations like "declare in : 32'F[3]" and create them (rename 'in' to 'ins' since 'in' is a keyword)
-    if "in[" in combined:
-      reg_inits.append("ins=[Reg(0),Reg(0),Reg(0)]")
-      code = code.replace("in[", "ins[")
-  else:
-    lines = [f"def {fn_name}(s0, s1, s2, d0, scc, vcc, laneId, exec_mask, literal, VGPR, src0_idx=0, vdst_idx=0, pc=None):"]
-    # Only create Regs for registers actually used in the pseudocode
-    reg_inits = []
-    if 'S0' in combined: reg_inits.append("S0=Reg(s0)")
-    if 'S1' in combined: reg_inits.append("S1=Reg(s1)")
-    if 'S2' in combined: reg_inits.append("S2=Reg(s2)")
-    if modifies_d0 or 'D0' in combined: reg_inits.append("D0=Reg(s0)" if is_div_scale else "D0=Reg(d0)")
-    if modifies_scc or 'SCC' in combined: reg_inits.append("SCC=Reg(scc)")
-    if modifies_vcc or 'VCC' in combined: reg_inits.append("VCC=Reg(vcc)")
-    if modifies_exec or 'EXEC' in combined: reg_inits.append("EXEC=Reg(exec_mask)")
-    if modifies_pc or 'PC' in combined: reg_inits.append("PC=Reg(pc) if pc is not None else None")
-    special_regs = [('D1', 'Reg(0)'), ('SIMM16', 'Reg(literal)'), ('SIMM32', 'Reg(literal)'),
-                    ('SRC0', 'Reg(src0_idx)'), ('VDST', 'Reg(vdst_idx)')]
-    if needs_init('tmp'): special_regs.insert(0, ('tmp', 'Reg(0)'))
-    if needs_init('saveexec'): special_regs.insert(0, ('saveexec', 'Reg(EXEC._val)'))
-
-  # Build init code
-  init_parts = reg_inits.copy()
-  for name, init in special_regs:
-    if name in combined: init_parts.append(f"{name}={init}")
-  if 'EXEC_LO' in code: init_parts.append("EXEC_LO=SliceProxy(EXEC, 31, 0)")
-  if 'EXEC_HI' in code: init_parts.append("EXEC_HI=SliceProxy(EXEC, 63, 32)")
-  if 'VCCZ' in code and not re.search(r'^\s*VCCZ\s*=', code, re.MULTILINE): init_parts.append("VCCZ=Reg(1 if VCC._val == 0 else 0)")
-  if 'EXECZ' in code and not re.search(r'^\s*EXECZ\s*=', code, re.MULTILINE): init_parts.append("EXECZ=Reg(1 if EXEC._val == 0 else 0)")
-
-  # Add init line and separator
-  if init_parts: lines.append(f" {'; '.join(init_parts)}")
-  lines.append(" # --- compiled pseudocode ---")
-
-  # Add compiled pseudocode
-  for line in code.split('\n'):
-    if line.strip(): lines.append(f" {line}")
-
-  # Build result dict
-  result_items = []
-  if modifies_d0: result_items.append("'D0': D0._val")
-  if modifies_scc: result_items.append("'SCC': SCC._val")
-  if modifies_vcc: result_items.append("'VCC': VCC._val")
-  if modifies_exec: result_items.append("'EXEC': EXEC._val")
-  if has_d1: result_items.append("'D1': D1._val")
-  if modifies_pc: result_items.append("'PC': PC._val")
-  if is_smem and 'SDATA' in combined and re.search(r'^\s*SDATA[\.\[].*=', code, re.MULTILINE):
-    result_items.append("'SDATA': SDATA._val")
-  if is_ds and 'RETURN_DATA' in combined and re.search(r'^\s*RETURN_DATA[\.\[].*=', code, re.MULTILINE):
-    result_items.append("'RETURN_DATA': RETURN_DATA._val")
-  if is_flat:
-    if 'RETURN_DATA' in combined and re.search(r'^\s*RETURN_DATA[\.\[].*=', code, re.MULTILINE):
-      result_items.append("'RETURN_DATA': RETURN_DATA._val")
-    if re.search(r'^\s*VDATA[\.\[].*=', code, re.MULTILINE):
-      result_items.append("'VDATA': VDATA._val")
-  lines.append(f" return {{{', '.join(result_items)}}}\n")
-  return fn_name, '\n'.join(lines)
-
 # ═══════════════════════════════════════════════════════════════════════════════
 # MAIN GENERATION
 # ═══════════════════════════════════════════════════════════════════════════════
 
 def generate_arch(arch: str) -> dict:
-  """Generate enum.py, ins.py and gen_pcode.py for a single architecture."""
+  """Generate enum.py, ins.py and str_pcode.py for a single architecture."""
   urls = PDF_URLS[arch]
   if isinstance(urls, str): urls = [urls]
@@ -696,9 +391,9 @@ def generate_arch(arch: str) -> dict:
   ins_path.write_text(ins_content)
   print(f"Generated {ins_path}: {len(merged['formats'])} formats")
 
-  # Write gen_pcode.py (needs enum.py to exist first for imports)
-  pcode_path = base_path / "gen_pcode.py"
-  pcode_content = _generate_gen_pcode_py(merged["enums"], merged["pseudocode"], arch)
+  # Write str_pcode.py (needs enum.py to exist first for imports)
+  pcode_path = base_path / "str_pcode.py"
+  pcode_content = _generate_str_pcode_py(merged["enums"], merged["pseudocode"], arch)
   pcode_path.write_text(pcode_content)
   print(f"Generated {pcode_path}: {len(merged['pseudocode'])} instructions")
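For reference, the generated str_pcode.py is now pure data; based on the generator above it is expected to look roughly like this (sketch, entries elided):

    # autogenerated by pdf.py - do not edit
    # to regenerate: python -m extra.assembly.amd.pdf --arch rdna3
    # ruff: noqa: E501
    from extra.assembly.amd.autogen.rdna3.enum import SOP1Op, SOP2Op  # ... every op enum
    SOP1Op_PCODE = {
     SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32',
     # ... one entry per instruction, raw pseudocode kept as a string
    }
    PSEUDOCODE_STRINGS = {SOP1Op: SOP1Op_PCODE}  # ... one dict per encoding format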
diff --git a/extra/assembly/amd/test/helpers.py b/extra/assembly/amd/test/helpers.py
index cbfb1b1c73..a4db9ca3f3 100644
--- a/extra/assembly/amd/test/helpers.py
+++ b/extra/assembly/amd/test/helpers.py
@@ -30,8 +30,8 @@ def get_llvm_objdump():
 class ExecContext:
   """Context for running compiled pseudocode in tests."""
   def __init__(self, s0=0, s1=0, s2=0, d0=0, scc=0, vcc=0, lane=0, exec_mask=0xffffffff, literal=0, vgprs=None, src0_idx=0, vdst_idx=0):
-    from extra.assembly.amd.pcode import Reg, MASK32, MASK64, SliceProxy
-    self._Reg, self._MASK64, self._SliceProxy = Reg, MASK64, SliceProxy
+    from extra.assembly.amd.pcode import Reg, MASK32, MASK64, TypedView
+    self._Reg, self._MASK64, self._TypedView = Reg, MASK64, TypedView
     self.S0, self.S1, self.S2 = Reg(s0), Reg(s1), Reg(s2)
     self.D0, self.D1 = Reg(d0), Reg(0)
     self.SCC, self.VCC, self.EXEC = Reg(scc), Reg(vcc), Reg(exec_mask)
@@ -51,7 +51,7 @@ class ExecContext:
     ns.update({
       'S0': self.S0, 'S1': self.S1, 'S2': self.S2, 'D0': self.D0, 'D1': self.D1,
       'SCC': self.SCC, 'VCC': self.VCC, 'EXEC': self.EXEC,
-      'EXEC_LO': self._SliceProxy(self.EXEC, 31, 0), 'EXEC_HI': self._SliceProxy(self.EXEC, 63, 32),
+      'EXEC_LO': self._TypedView(self.EXEC, 31, 0), 'EXEC_HI': self._TypedView(self.EXEC, 63, 32),
       'tmp': self.tmp, 'saveexec': self.saveexec, 'lane': self.lane, 'laneId': self.laneId,
       'literal': self.literal, 'SIMM16': self.SIMM16, 'SIMM32': self.SIMM32,
       'VGPR': self.VGPR, 'SRC0': self.SRC0, 'VDST': self.VDST,
diff --git a/extra/assembly/amd/test/test_mockgpu_invalid.py b/extra/assembly/amd/test/test_mockgpu_invalid.py
index 2b666a1c4f..d85954b9a8 100644
--- a/extra/assembly/amd/test/test_mockgpu_invalid.py
+++ b/extra/assembly/amd/test/test_mockgpu_invalid.py
@@ -47,8 +47,7 @@ dev.synchronize()
     elapsed = time.perf_counter() - st
 
     self.assertNotEqual(result.returncode, 0, "should have raised")
-    self.assertTrue("NotImplementedError" in result.stderr or "ValueError" in result.stderr,
-                    f"expected NotImplementedError or ValueError in stderr")
+    self.assertTrue("Error" in result.stderr, f"expected an error in stderr, got: {result.stderr[:500]}")
 
     # Should exit immediately, not wait for the full timeout
     self.assertLess(elapsed, 9.0, f"should exit immediately on emulator exception, took {elapsed:.1f}s")
diff --git a/extra/assembly/amd/test/test_pcode.py b/extra/assembly/amd/test/test_pcode.py
index f6694e75aa..c1ce10b91e 100644
--- a/extra/assembly/amd/test/test_pcode.py
+++ b/extra/assembly/amd/test/test_pcode.py
@@ -1,12 +1,16 @@
 #!/usr/bin/env python3
 """Tests for the RDNA3 pseudocode DSL."""
 import unittest
-from extra.assembly.amd.pcode import (Reg, TypedView, SliceProxy, MASK32, MASK64,
-    _f32, _i32, _f16, _i16, f32_to_f16, _isnan, _bf16, _ibf16, bf16_to_f32, f32_to_bf16,
-    BYTE_PERMUTE, v_sad_u8, v_msad_u8)
-from extra.assembly.amd.pdf import compile_pseudocode, _expr
+from extra.assembly.amd.pcode import (Reg, TypedView, MASK32, MASK64,
+    _f32, _i32, _f16, _i16, f32_to_f16, isNAN, _bf16, _ibf16, bf16_to_f32, f32_to_bf16,
+    BYTE_PERMUTE, v_sad_u8, v_msad_u8, _compile_pseudocode, _expr, compile_pseudocode)
 from extra.assembly.amd.test.helpers import ExecContext
-from extra.assembly.amd.autogen.rdna3.gen_pcode import _VOP3SDOp_V_DIV_SCALE_F32, _VOPCOp_V_CMP_CLASS_F32
+from extra.assembly.amd.autogen.rdna3.str_pcode import VOP3SDOp_PCODE, VOPCOp_PCODE
+from extra.assembly.amd.autogen.rdna3.enum import VOP3SDOp, VOPCOp
+
+# Compile pseudocode functions on demand for regression tests
+_VOP3SDOp_V_DIV_SCALE_F32 = compile_pseudocode('VOP3SDOp', 'V_DIV_SCALE_F32', VOP3SDOp_PCODE[VOP3SDOp.V_DIV_SCALE_F32])
+_VOPCOp_V_CMP_CLASS_F32 = compile_pseudocode('VOPCOp', 'V_CMP_CLASS_F32', VOPCOp_PCODE[VOPCOp.V_CMP_CLASS_F32])
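Because compile_pseudocode is wrapped in functools.cache (see pcode.py above), repeated on-demand compilation is cheap and returns the identical function object (sketch):

    fn = compile_pseudocode('VOPCOp', 'V_CMP_CLASS_F32', VOPCOp_PCODE[VOPCOp.V_CMP_CLASS_F32])
    assert fn is _VOPCOp_V_CMP_CLASS_F32  # same cache key, same compiled function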
 
 class TestReg(unittest.TestCase):
   def test_u32_read(self):
@@ -42,7 +46,7 @@ class TestReg(unittest.TestCase):
 class TestTypedView(unittest.TestCase):
   def test_bit_slice(self):
     r = Reg(0xDEADBEEF)
-    # Slices return SliceProxy which supports .u32, .u16 etc (matching pseudocode like S1.u32[1:0].u32)
+    # Slices return TypedView which supports .u32, .u16 etc (matching pseudocode like S1.u32[1:0].u32)
     self.assertEqual(r.u32[7:0].u32, 0xEF)
     self.assertEqual(r.u32[15:8].u32, 0xBE)
     self.assertEqual(r.u32[23:16].u32, 0xAD)
@@ -67,7 +71,7 @@ class TestTypedView(unittest.TestCase):
     # S0.u32[S1.u32[4:0]] - access bit at position from another register
     s0 = Reg(0b11010101)
     s1 = Reg(3)
-    bit_pos = s1.u32[4:0]  # SliceProxy, int value = 3
+    bit_pos = s1.u32[4:0]  # TypedView, int value = 3
     bit_val = s0.u32[int(bit_pos)]  # bit 3 of s0 = 0
     self.assertEqual(int(bit_pos), 3)
     self.assertEqual(bit_val, 0)
@@ -85,7 +89,7 @@ class TestTypedView(unittest.TestCase):
     self.assertFalse(r1.u32 < r2.u32)
     self.assertTrue(r1.u32 != r2.u32)
 
-class TestSliceProxy(unittest.TestCase):
+class TestTypedViewSlices(unittest.TestCase):
   def test_slice_read(self):
     r = Reg(0x56781234)
     self.assertEqual(r[15:0].u16, 0x1234)
@@ -154,19 +158,19 @@ class TestExecContext(unittest.TestCase):
     self.assertEqual(ctx.SCC._val, 0)
 
   def test_ternary(self):
-    code = compile_pseudocode("D0.u32 = S0.u32 > S1.u32 ? 1'1U : 1'0U")
+    code = _compile_pseudocode("D0.u32 = S0.u32 > S1.u32 ? 1'1U : 1'0U")
     ctx = ExecContext(s0=5, s1=3)
     ctx.run(code)
     self.assertEqual(ctx.D0._val, 1)
 
   def test_pack(self):
-    code = compile_pseudocode("D0 = { S1[15:0].u16, S0[15:0].u16 }")
+    code = _compile_pseudocode("D0 = { S1[15:0].u16, S0[15:0].u16 }")
     ctx = ExecContext(s0=0x1234, s1=0x5678)
     ctx.run(code)
     self.assertEqual(ctx.D0._val, 0x56781234)
 
   def test_tmp_with_typed_access(self):
-    code = compile_pseudocode("""tmp = S0.u32 + S1.u32
+    code = _compile_pseudocode("""tmp = S0.u32 + S1.u32
 D0.u32 = tmp.u32""")
     ctx = ExecContext(s0=100, s1=200)
     ctx.run(code)
@@ -174,7 +178,7 @@ D0.u32 = tmp.u32""")
 
   def test_s_add_u32_pattern(self):
     # Real pseudocode pattern from S_ADD_U32
-    code = compile_pseudocode("""tmp = 64'U(S0.u32) + 64'U(S1.u32)
+    code = _compile_pseudocode("""tmp = 64'U(S0.u32) + 64'U(S1.u32)
 SCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U
 D0.u32 = tmp.u32""")
     # Test overflow case
@@ -184,7 +188,7 @@ D0.u32 = tmp.u32""")
     self.assertEqual(ctx.SCC._val, 1)  # Carry set
 
   def test_s_add_u32_no_overflow(self):
-    code = compile_pseudocode("""tmp = 64'U(S0.u32) + 64'U(S1.u32)
+    code = _compile_pseudocode("""tmp = 64'U(S0.u32) + 64'U(S1.u32)
 SCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U
 D0.u32 = tmp.u32""")
     ctx = ExecContext(s0=100, s1=200)
@@ -206,7 +210,7 @@ D0.u32 = tmp.u32""")
 
   def test_for_loop(self):
     # CTZ pattern - find first set bit
-    code = compile_pseudocode("""tmp = -1
+    code = _compile_pseudocode("""tmp = -1
 for i in 0 : 31 do
 if S0.u32[i] == 1 then
 tmp = i
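For reference, the CTZ pattern above is expected to compile to a loop with an early break, roughly as follows (generated code indents one space per nesting level):

    tmp = Reg(-1)
    for i in range(0, int(31)+1):
     if S0.u32[i] == 1:
      tmp = Reg(i); break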
@@ -261,15 +265,15 @@ class TestPseudocodeRegressions(unittest.TestCase):
     result = _VOPCOp_V_CMP_CLASS_F32(signal_nan, s1_quiet, 0, 0, 0, 0, 0, 0xffffffff, 0, None)
     self.assertEqual(result['D0'] & 1, 0, "Signaling NaN should not match quiet NaN mask")
 
-  def test_isnan_with_typed_view(self):
-    """_isnan must work with TypedView objects, not just Python floats.
-    Bug: _isnan checked isinstance(x, float) which returned False for TypedView."""
+  def test_isNAN_with_typed_view(self):
+    """isNAN must work with TypedView objects, not just Python floats.
+    Bug: isNAN checked isinstance(x, float) which returned False for TypedView."""
     nan_reg = Reg(0x7fc00000)  # quiet NaN
     normal_reg = Reg(0x3f800000)  # 1.0
     inf_reg = Reg(0x7f800000)  # +inf
-    self.assertTrue(_isnan(nan_reg.f32), "_isnan should return True for NaN TypedView")
-    self.assertFalse(_isnan(normal_reg.f32), "_isnan should return False for normal TypedView")
-    self.assertFalse(_isnan(inf_reg.f32), "_isnan should return False for inf TypedView")
+    self.assertTrue(isNAN(nan_reg.f32), "isNAN should return True for NaN TypedView")
+    self.assertFalse(isNAN(normal_reg.f32), "isNAN should return False for normal TypedView")
+    self.assertFalse(isNAN(inf_reg.f32), "isNAN should return False for inf TypedView")
 
 class TestBF16(unittest.TestCase):
   """Tests for BF16 (bfloat16) support."""
@@ -308,7 +312,7 @@ class TestBF16(unittest.TestCase):
     self.assertAlmostEqual(float(r.bf16), 3.0, places=1)
 
   def test_bf16_slice_property(self):
-    """Test SliceProxy.bf16 property."""
+    """Test TypedView.bf16 property."""
     r = Reg(0x40404040)  # Two bf16 3.0 values
     self.assertAlmostEqual(r[15:0].bf16, 3.0, places=1)
     self.assertAlmostEqual(r[31:16].bf16, 3.0, places=1)
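As a quick reference for the SliceProxy -> TypedView rename, slicing keeps the typed accessors (a sketch mirroring test_slice_read above):

    from extra.assembly.amd.pcode import Reg
    r = Reg(0x56781234)
    assert r[15:0].u16 == 0x1234   # low 16 bits
    assert r[31:16].u16 == 0x5678  # high 16 bits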