diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c8da280197..9c3d91190f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -670,6 +670,10 @@ jobs: deps: testing_minimal amd: 'true' python-version: '3.13' + - name: Verify AMD autogen is up to date + run: | + python -m extra.assembly.amd.pdf + git diff --exit-code extra/assembly/amd/autogen/ - name: Install LLVM 21 run: | wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc @@ -690,23 +694,6 @@ jobs: - name: Run RDNA3 ops tests run: SKIP_SLOW_TEST=1 AMD=1 PYTHON_REMU=1 MOCKGPU=1 AMD_LLVM=0 pytest -n=auto test/test_ops.py -k "test_sparse_categorical_crossentropy or test_tril" - testamdautogen: - name: AMD autogen - runs-on: ubuntu-24.04 - timeout-minutes: 10 - steps: - - name: Checkout Code - uses: actions/checkout@v4 - - name: Setup Environment - uses: ./.github/actions/setup-tinygrad - with: - key: rdna3-autogen - pydeps: "pdfplumber" - - name: Verify AMD autogen is up to date - run: | - python -m extra.assembly.amd.pdf --arch all - git diff --exit-code extra/assembly/amd/autogen/ - testnvidia: strategy: fail-fast: false diff --git a/extra/assembly/amd/autogen/cdna/enum.py b/extra/assembly/amd/autogen/cdna/enum.py index 4d63278a22..5226f8a39e 100644 --- a/extra/assembly/amd/autogen/cdna/enum.py +++ b/extra/assembly/amd/autogen/cdna/enum.py @@ -1,46 +1,6 @@ -# autogenerated from AMD CDNA3+CDNA4 ISA PDF by pdf.py - do not edit +# autogenerated from AMD ISA PDF by pdf.py - do not edit from enum import IntEnum -class SrcEnum(IntEnum): - S_ADD_U32 = 0 - S_SUB_U32 = 1 - S_ADD_I32 = 2 - S_SUB_I32 = 3 - S_ADDC_U32 = 4 - S_SUBB_U32 = 5 - S_MIN_I32 = 6 - FLAT_SCRATCH_LO = 102 - FLAT_SCRATCH_HI = 103 - XNACK_MASK_LO = 104 - XNACK_MASK_HI = 105 - VCC_LO = 106 - VCC_HI = 107 - M0 = 124 - EXEC_LO = 126 - EXEC_HI = 127 - ZERO = 128 - DPP8 = 233 - DPP8FI = 234 - SHARED_BASE = 235 - SHARED_LIMIT = 236 - PRIVATE_BASE = 237 - PRIVATE_LIMIT = 238 - RESERVED = 239 - POS_HALF = 240 - NEG_HALF = 241 - POS_ONE = 242 - NEG_ONE = 243 - POS_TWO = 244 - NEG_TWO = 245 - POS_FOUR = 246 - NEG_FOUR = 247 - INV_2PI = 248 - DPP16 = 250 - VCCZ = 251 - EXECZ = 252 - SCC = 253 - LDS_DIRECT = 254 - class DSOp(IntEnum): DS_ADD_U32 = 0 DS_SUB_U32 = 1 @@ -155,12 +115,6 @@ class DSOp(IntEnum): DS_READ2ST64_B64 = 120 DS_ADD_RTN_F64 = 124 DS_CONDXCHG32_RTN_B64 = 126 - DS_GWS_SEMA_RELEASE_ALL = 152 - DS_GWS_INIT = 153 - DS_GWS_SEMA_V = 154 - DS_GWS_SEMA_BR = 155 - DS_GWS_SEMA_P = 156 - DS_GWS_BARRIER = 157 DS_READ_ADDTID_B32 = 182 DS_PK_ADD_RTN_F16 = 183 DS_PK_ADD_RTN_BF16 = 184 @@ -174,7 +128,6 @@ class DSOp(IntEnum): DS_READ_B64_TR_B16 = 227 DS_READ_B96 = 254 DS_READ_B128 = 255 - CDNA4 = 600 class FLATOp(IntEnum): FLAT_LOAD_UBYTE = 16 @@ -231,7 +184,6 @@ class FLATOp(IntEnum): FLAT_ATOMIC_XOR_X2 = 106 FLAT_ATOMIC_INC_X2 = 107 FLAT_ATOMIC_DEC_X2 = 108 - CDNA4 = 600 class GLOBALOp(IntEnum): GLOBAL_LOAD_UBYTE = 16 @@ -295,7 +247,6 @@ class GLOBALOp(IntEnum): GLOBAL_ATOMIC_DEC_X2 = 108 GLOBAL_LOAD_LDS_DWORDX4 = 125 GLOBAL_LOAD_LDS_DWORDX3 = 126 - CDNA4 = 600 class MTBUFOp(IntEnum): TBUFFER_LOAD_FORMAT_X = 0 @@ -390,7 +341,6 @@ class MUBUFOp(IntEnum): BUFFER_ATOMIC_XOR_X2 = 106 BUFFER_ATOMIC_INC_X2 = 107 BUFFER_ATOMIC_DEC_X2 = 108 - CDNA4 = 600 class SCRATCHOp(IntEnum): SCRATCH_LOAD_UBYTE = 16 @@ -504,7 +454,6 @@ class SMEMOp(IntEnum): S_ATOMIC_XOR_X2 = 170 S_ATOMIC_INC_X2 = 171 S_ATOMIC_DEC_X2 = 172 - CDNA4 = 600 class SOP1Op(IntEnum): S_MOV_B32 = 0 @@ -561,7 +510,6 @@ class 
SOP1Op(IntEnum): S_ANDN1_WREXEC_B64 = 53 S_ANDN2_WREXEC_B64 = 54 S_BITREPLICATE_B64_B32 = 55 - CDNA4 = 600 class SOP2Op(IntEnum): S_ADD_U32 = 0 @@ -616,7 +564,6 @@ class SOP2Op(IntEnum): S_PACK_LL_B32_B16 = 50 S_PACK_LH_B32_B16 = 51 S_PACK_HH_B32_B16 = 52 - CDNA4 = 600 class SOPCOp(IntEnum): S_CMP_EQ_I32 = 0 @@ -639,7 +586,6 @@ class SOPCOp(IntEnum): S_SET_GPR_IDX_ON = 17 S_CMP_EQ_U64 = 18 S_CMP_LG_U64 = 19 - CDNA4 = 600 class SOPKOp(IntEnum): S_MOVK_I32 = 0 @@ -695,7 +641,6 @@ class SOPPOp(IntEnum): S_ENDPGM_SAVED = 27 S_SET_GPR_IDX_OFF = 28 S_SET_GPR_IDX_MODE = 29 - CDNA4 = 600 class VOP1Op(IntEnum): V_NOP = 0 @@ -783,7 +728,6 @@ class VOP1Op(IntEnum): V_PERMLANE16_SWAP_B32 = 89 V_PERMLANE32_SWAP_B32 = 90 V_CVT_F32_BF16 = 91 - CDNA4 = 600 class VOP2Op(IntEnum): V_CNDMASK_B32 = 0 @@ -848,7 +792,6 @@ class VOP2Op(IntEnum): V_FMAC_F32 = 59 V_PK_FMAC_F16 = 60 V_XNOR_B32 = 61 - CDNA4 = 600 class VOP3AOp(IntEnum): V_CMP_CLASS_F32 = 16 @@ -1268,7 +1211,7 @@ class VOP3AOp(IntEnum): V_CVT_SCALEF32_SR_PK32_BF6_F32 = 597 V_CVT_SCALEF32_PK32_F32_FP6 = 598 V_CVT_SCALEF32_PK32_F32_BF6 = 599 - CDNA4 = 600 + V_CVT_SCALEF32_PK32_FP6_F16 = 600 V_CVT_SCALEF32_PK32_FP6_BF16 = 601 V_CVT_SCALEF32_PK32_BF6_F16 = 602 V_CVT_SCALEF32_PK32_BF6_BF16 = 603 @@ -1338,7 +1281,6 @@ class VOP3BOp(IntEnum): V_DIV_SCALE_F64 = 481 V_MAD_U64_U32 = 488 V_MAD_I64_I32 = 489 - CDNA4 = 600 class VOP3POp(IntEnum): V_PK_MAD_I16 = 0 @@ -1388,8 +1330,6 @@ class VOP3POp(IntEnum): V_SMFMAC_F32_16X16X128_BF8_BF8 = 59 V_SMFMAC_F32_16X16X128_BF8_FP8 = 60 V_SMFMAC_F32_16X16X128_FP8_BF8 = 61 - V_MFMA_F32_16X16X8_XF32 = 62 - V_MFMA_F32_32X32X4_XF32 = 63 V_MFMA_F32_32X32X1_2B_F32 = 64 V_MFMA_F32_16X16X1_4B_F32 = 65 V_MFMA_F32_4X4X1_16B_F32 = 66 @@ -1447,7 +1387,6 @@ class VOP3POp(IntEnum): V_SMFMAC_F32_32X32X32_BF8_FP8 = 125 V_SMFMAC_F32_32X32X32_FP8_BF8 = 126 V_SMFMAC_F32_32X32X32_FP8_FP8 = 127 - CDNA4 = 600 class VOPCOp(IntEnum): V_CMP_CLASS_F32 = 16 @@ -1648,4 +1587,3 @@ class VOPCOp(IntEnum): V_CMPX_NE_U64 = 253 V_CMPX_GE_U64 = 254 V_CMPX_T_U64 = 255 - CDNA4 = 600 diff --git a/extra/assembly/amd/autogen/cdna/ins.py b/extra/assembly/amd/autogen/cdna/ins.py index 3b6e278a9f..4da91de82b 100644 --- a/extra/assembly/amd/autogen/cdna/ins.py +++ b/extra/assembly/amd/autogen/cdna/ins.py @@ -1,19 +1,18 @@ -# autogenerated from AMD CDNA3+CDNA4 ISA PDF by pdf.py - do not edit +# autogenerated from AMD ISA PDF by pdf.py - do not edit # ruff: noqa: F401,F403 from typing import Annotated -from extra.assembly.amd.dsl import bits, BitField, Inst32, Inst64, Inst96, SGPR, VGPR, TTMP as TTMP, s as s, v as v, ttmp as ttmp, SSrc, Src, SImm, Imm, VDSTYEnc, SGPRField, VGPRField +from extra.assembly.amd.dsl import * from extra.assembly.amd.autogen.cdna.enum import * import functools -# instruction formats -class DPP(Inst64): +class DPP(Inst): encoding = bits[8:0] == 0b11111010 - vop_op = bits[16:9] vdst:VGPRField = bits[24:17] - vop2_op = bits[31:25] src0:Src = bits[39:32] + vop_op = bits[16:9] + vop2_op = bits[31:25] dpp_ctrl = bits[48:40] - bound_ctrl = bits[51] + bc = bits[51] src0_neg = bits[52] src0_abs = bits[53] src1_neg = bits[54] @@ -21,7 +20,7 @@ class DPP(Inst64): bank_mask = bits[59:56] row_mask = bits[63:60] -class DS(Inst64): +class DS(Inst): encoding = bits[31:26] == 0b110110 op:Annotated[BitField, DSOp] = bits[24:17] vdst:VGPRField = bits[63:56] @@ -33,7 +32,7 @@ class DS(Inst64): gds = bits[16] acc = bits[25] -class FLAT(Inst64): +class FLAT(Inst): encoding = bits[31:26] == 0b110111 op:Annotated[BitField, FLATOp] = bits[24:18] vdst:VGPRField = 
bits[63:56] @@ -48,7 +47,7 @@ class FLAT(Inst64): sc1 = bits[25] acc = bits[55] -class MTBUF(Inst64): +class MTBUF(Inst): encoding = bits[31:26] == 0b111010 op:Annotated[BitField, MTBUFOp] = bits[18:15] vdata:VGPRField = bits[47:40] @@ -56,15 +55,16 @@ class MTBUF(Inst64): srsrc:SGPRField = bits[52:48] soffset:SSrc = bits[63:56] offset:Imm = bits[11:0] - format = bits[25:19] offen = bits[12] idxen = bits[13] + sc0 = bits[14] + dfmt = bits[22:19] + nfmt = bits[25:23] sc1 = bits[53] nt = bits[54] acc = bits[55] - sc0 = bits[14] -class MUBUF(Inst64): +class MUBUF(Inst): encoding = bits[31:26] == 0b111000 op:Annotated[BitField, MUBUFOp] = bits[24:18] vdata:VGPRField = bits[47:40] @@ -80,16 +80,16 @@ class MUBUF(Inst64): nt = bits[17] acc = bits[55] -class SDWA(Inst64): +class SDWA(Inst): encoding = bits[8:0] == 0b11111001 - vop_op = bits[16:9] vdst:VGPRField = bits[24:17] + src0:Src = bits[39:32] + omod = bits[47:46] + clmp = bits[45] + vop_op = bits[16:9] vop2_op = bits[31:25] - src0:Src = bits[39:32] dst_sel = bits[42:40] dst_u = bits[44:43] - clmp = bits[45] - omod = bits[47:46] src0_sel = bits[50:48] src0_sext = bits[51] src0_neg = bits[52] @@ -101,12 +101,10 @@ class SDWA(Inst64): src1_abs = bits[61] s1 = bits[63] -class SDWAB(Inst64): +class SDWAB(Inst): + sdst:SGPRField = bits[46:40] src0:Src = bits[39:32] - dst_sel = bits[42:40] - dst_u = bits[44:43] - clmp = bits[45] - omod = bits[47:46] + sd = bits[47] src0_sel = bits[50:48] src0_sext = bits[51] src0_neg = bits[52] @@ -118,7 +116,7 @@ class SDWAB(Inst64): src1_abs = bits[61] s1 = bits[63] -class SMEM(Inst64): +class SMEM(Inst): encoding = bits[31:26] == 0b110000 op:Annotated[BitField, SMEMOp] = bits[25:18] sdata:SGPRField = bits[12:6] @@ -128,79 +126,78 @@ class SMEM(Inst64): glc = bits[16] soe = bits[14] nv = bits[15] - imm = bits[17] + imm:Imm = bits[17] -class SOP1(Inst32): +class SOP1(Inst): encoding = bits[31:23] == 0b101111101 op:Annotated[BitField, SOP1Op] = bits[15:8] sdst:SGPRField = bits[22:16] ssrc0:SSrc = bits[7:0] -class SOP2(Inst32): +class SOP2(Inst): encoding = bits[31:30] == 0b10 op:Annotated[BitField, SOP2Op] = bits[29:23] sdst:SGPRField = bits[22:16] ssrc0:SSrc = bits[7:0] ssrc1:SSrc = bits[15:8] -class SOPC(Inst32): +class SOPC(Inst): encoding = bits[31:23] == 0b101111110 op:Annotated[BitField, SOPCOp] = bits[22:16] ssrc0:SSrc = bits[7:0] ssrc1:SSrc = bits[15:8] -class SOPK(Inst32): +class SOPK(Inst): encoding = bits[31:28] == 0b1011 op:Annotated[BitField, SOPKOp] = bits[27:23] sdst:SGPRField = bits[22:16] simm16:SImm = bits[15:0] -class SOPP(Inst32): +class SOPP(Inst): encoding = bits[31:23] == 0b101111111 op:Annotated[BitField, SOPPOp] = bits[22:16] simm16:SImm = bits[15:0] -class VOP1(Inst32): - encoding = bits[31:25] == 0b111111 +class VOP1(Inst): + encoding = bits[31:25] == 0b0111111 op:Annotated[BitField, VOP1Op] = bits[16:9] vdst:VGPRField = bits[24:17] src0:Src = bits[8:0] -class VOP2(Inst32): - encoding = bits[31] == 0 +class VOP2(Inst): + encoding = bits[31] == 0b0 op:Annotated[BitField, VOP2Op] = bits[30:25] vdst:VGPRField = bits[24:17] src0:Src = bits[8:0] vsrc1:VGPRField = bits[16:9] -class VOP3A(Inst64): +class VOP3A(Inst): encoding = bits[31:26] == 0b110100 - vdst:VGPRField = bits[7:0] - abs = bits[10:8] - opsel = bits[14:11] - clmp = bits[15] op:Annotated[BitField, VOP3AOp] = bits[25:16] + vdst:VGPRField = bits[7:0] src0:Src = bits[40:32] src1:Src = bits[49:41] src2:Src = bits[58:50] omod = bits[60:59] neg = bits[63:61] + abs = bits[10:8] + clmp = bits[15] + opsel = bits[14:11] -class 
VOP3B(Inst64): +class VOP3B(Inst): encoding = bits[31:26] == 0b110100 + op:Annotated[BitField, VOP3BOp] = bits[25:16] vdst:VGPRField = bits[7:0] sdst:SGPRField = bits[14:8] - clmp = bits[15] - op:Annotated[BitField, VOP3BOp] = bits[25:16] src0:Src = bits[40:32] src1:Src = bits[49:41] src2:Src = bits[58:50] omod = bits[60:59] neg = bits[63:61] + clmp = bits[15] -class VOP3P(Inst64): +class VOP3P(Inst): encoding = bits[31:23] == 0b110100111 - _defaults = {'opsel_hi': 3, 'opsel_hi2': 1} op:Annotated[BitField, VOP3POp] = bits[22:16] vdst:VGPRField = bits[7:0] src0:Src = bits[40:32] @@ -208,13 +205,13 @@ class VOP3P(Inst64): src2:Src = bits[58:50] neg = bits[63:61] neg_hi = bits[10:8] + clmp = bits[15] opsel = bits[13:11] opsel_hi = bits[60:59] - clmp = bits[15] opsel_hi2 = bits[14] -class VOPC(Inst32): - encoding = bits[31:25] == 0b111110 +class VOPC(Inst): + encoding = bits[31:25] == 0b0111110 op:Annotated[BitField, VOPCOp] = bits[24:17] src0:Src = bits[8:0] vsrc1:VGPRField = bits[16:9] @@ -333,12 +330,6 @@ ds_read2_b64 = functools.partial(DS, DSOp.DS_READ2_B64) ds_read2st64_b64 = functools.partial(DS, DSOp.DS_READ2ST64_B64) ds_add_rtn_f64 = functools.partial(DS, DSOp.DS_ADD_RTN_F64) ds_condxchg32_rtn_b64 = functools.partial(DS, DSOp.DS_CONDXCHG32_RTN_B64) -ds_gws_sema_release_all = functools.partial(DS, DSOp.DS_GWS_SEMA_RELEASE_ALL) -ds_gws_init = functools.partial(DS, DSOp.DS_GWS_INIT) -ds_gws_sema_v = functools.partial(DS, DSOp.DS_GWS_SEMA_V) -ds_gws_sema_br = functools.partial(DS, DSOp.DS_GWS_SEMA_BR) -ds_gws_sema_p = functools.partial(DS, DSOp.DS_GWS_SEMA_P) -ds_gws_barrier = functools.partial(DS, DSOp.DS_GWS_BARRIER) ds_read_addtid_b32 = functools.partial(DS, DSOp.DS_READ_ADDTID_B32) ds_pk_add_rtn_f16 = functools.partial(DS, DSOp.DS_PK_ADD_RTN_F16) ds_pk_add_rtn_bf16 = functools.partial(DS, DSOp.DS_PK_ADD_RTN_BF16) @@ -352,7 +343,6 @@ ds_read_b64_tr_b8 = functools.partial(DS, DSOp.DS_READ_B64_TR_B8) ds_read_b64_tr_b16 = functools.partial(DS, DSOp.DS_READ_B64_TR_B16) ds_read_b96 = functools.partial(DS, DSOp.DS_READ_B96) ds_read_b128 = functools.partial(DS, DSOp.DS_READ_B128) -cdna4 = functools.partial(DS, DSOp.CDNA4) flat_load_ubyte = functools.partial(FLAT, FLATOp.FLAT_LOAD_UBYTE) flat_load_sbyte = functools.partial(FLAT, FLATOp.FLAT_LOAD_SBYTE) flat_load_ushort = functools.partial(FLAT, FLATOp.FLAT_LOAD_USHORT) @@ -407,7 +397,6 @@ flat_atomic_or_x2 = functools.partial(FLAT, FLATOp.FLAT_ATOMIC_OR_X2) flat_atomic_xor_x2 = functools.partial(FLAT, FLATOp.FLAT_ATOMIC_XOR_X2) flat_atomic_inc_x2 = functools.partial(FLAT, FLATOp.FLAT_ATOMIC_INC_X2) flat_atomic_dec_x2 = functools.partial(FLAT, FLATOp.FLAT_ATOMIC_DEC_X2) -cdna4 = functools.partial(FLAT, FLATOp.CDNA4) global_load_ubyte = functools.partial(FLAT, GLOBALOp.GLOBAL_LOAD_UBYTE, seg=2) global_load_sbyte = functools.partial(FLAT, GLOBALOp.GLOBAL_LOAD_SBYTE, seg=2) global_load_ushort = functools.partial(FLAT, GLOBALOp.GLOBAL_LOAD_USHORT, seg=2) @@ -469,7 +458,6 @@ global_atomic_inc_x2 = functools.partial(FLAT, GLOBALOp.GLOBAL_ATOMIC_INC_X2, se global_atomic_dec_x2 = functools.partial(FLAT, GLOBALOp.GLOBAL_ATOMIC_DEC_X2, seg=2) global_load_lds_dwordx4 = functools.partial(FLAT, GLOBALOp.GLOBAL_LOAD_LDS_DWORDX4, seg=2) global_load_lds_dwordx3 = functools.partial(FLAT, GLOBALOp.GLOBAL_LOAD_LDS_DWORDX3, seg=2) -cdna4 = functools.partial(FLAT, GLOBALOp.CDNA4, seg=2) tbuffer_load_format_x = functools.partial(MTBUF, MTBUFOp.TBUFFER_LOAD_FORMAT_X) tbuffer_load_format_xy = functools.partial(MTBUF, MTBUFOp.TBUFFER_LOAD_FORMAT_XY) 
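# Aside on how these generated constructors encode: every name in this file is a
# functools.partial that pre-binds one opcode onto its format class (SOP1, MUBUF,
# MTBUF, FLAT, ...); the actual bit packing comes from the bits[hi:lo] field
# declarations on those classes. Below is a minimal standalone sketch of that
# packing, assuming only the SOP1 layout and opcode values visible in this diff --
# pack() and its field list are illustrative, not the repo's actual dsl.py API.
def pack(fields):
  # fields: list of (hi, lo, value) bit ranges; returns the packed 32-bit word
  word = 0
  for hi, lo, value in fields:
    assert 0 <= value < (1 << (hi - lo + 1)), "value overflows its bit range"
    word |= value << lo
  return word
# SOP1 layout from this diff: encoding = bits[31:23] == 0b101111101,
# sdst = bits[22:16], op = bits[15:8], ssrc0 = bits[7:0]; SOP1Op.S_MOV_B32 == 0.
# Encoding s_mov_b32 s0, s1 (sdst = 0, ssrc0 = 1) then gives:
assert pack([(31, 23, 0b101111101), (22, 16, 0), (15, 8, 0), (7, 0, 1)]) == 0xbe800001
# The partials in this file differ only in which opcode (and occasionally which
# extra pre-set field, e.g. seg=2 for the GLOBAL variants of FLAT) they bind.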
tbuffer_load_format_xyz = functools.partial(MTBUF, MTBUFOp.TBUFFER_LOAD_FORMAT_XYZ) @@ -560,7 +548,6 @@ buffer_atomic_or_x2 = functools.partial(MUBUF, MUBUFOp.BUFFER_ATOMIC_OR_X2) buffer_atomic_xor_x2 = functools.partial(MUBUF, MUBUFOp.BUFFER_ATOMIC_XOR_X2) buffer_atomic_inc_x2 = functools.partial(MUBUF, MUBUFOp.BUFFER_ATOMIC_INC_X2) buffer_atomic_dec_x2 = functools.partial(MUBUF, MUBUFOp.BUFFER_ATOMIC_DEC_X2) -cdna4 = functools.partial(MUBUF, MUBUFOp.CDNA4) scratch_load_ubyte = functools.partial(FLAT, SCRATCHOp.SCRATCH_LOAD_UBYTE, seg=1) scratch_load_sbyte = functools.partial(FLAT, SCRATCHOp.SCRATCH_LOAD_SBYTE, seg=1) scratch_load_ushort = functools.partial(FLAT, SCRATCHOp.SCRATCH_LOAD_USHORT, seg=1) @@ -670,7 +657,6 @@ s_atomic_or_x2 = functools.partial(SMEM, SMEMOp.S_ATOMIC_OR_X2) s_atomic_xor_x2 = functools.partial(SMEM, SMEMOp.S_ATOMIC_XOR_X2) s_atomic_inc_x2 = functools.partial(SMEM, SMEMOp.S_ATOMIC_INC_X2) s_atomic_dec_x2 = functools.partial(SMEM, SMEMOp.S_ATOMIC_DEC_X2) -cdna4 = functools.partial(SMEM, SMEMOp.CDNA4) s_mov_b32 = functools.partial(SOP1, SOP1Op.S_MOV_B32) s_mov_b64 = functools.partial(SOP1, SOP1Op.S_MOV_B64) s_cmov_b32 = functools.partial(SOP1, SOP1Op.S_CMOV_B32) @@ -725,7 +711,6 @@ s_orn1_saveexec_b64 = functools.partial(SOP1, SOP1Op.S_ORN1_SAVEEXEC_B64) s_andn1_wrexec_b64 = functools.partial(SOP1, SOP1Op.S_ANDN1_WREXEC_B64) s_andn2_wrexec_b64 = functools.partial(SOP1, SOP1Op.S_ANDN2_WREXEC_B64) s_bitreplicate_b64_b32 = functools.partial(SOP1, SOP1Op.S_BITREPLICATE_B64_B32) -cdna4 = functools.partial(SOP1, SOP1Op.CDNA4) s_add_u32 = functools.partial(SOP2, SOP2Op.S_ADD_U32) s_sub_u32 = functools.partial(SOP2, SOP2Op.S_SUB_U32) s_add_i32 = functools.partial(SOP2, SOP2Op.S_ADD_I32) @@ -778,7 +763,6 @@ s_lshl4_add_u32 = functools.partial(SOP2, SOP2Op.S_LSHL4_ADD_U32) s_pack_ll_b32_b16 = functools.partial(SOP2, SOP2Op.S_PACK_LL_B32_B16) s_pack_lh_b32_b16 = functools.partial(SOP2, SOP2Op.S_PACK_LH_B32_B16) s_pack_hh_b32_b16 = functools.partial(SOP2, SOP2Op.S_PACK_HH_B32_B16) -cdna4 = functools.partial(SOP2, SOP2Op.CDNA4) s_cmp_eq_i32 = functools.partial(SOPC, SOPCOp.S_CMP_EQ_I32) s_cmp_lg_i32 = functools.partial(SOPC, SOPCOp.S_CMP_LG_I32) s_cmp_gt_i32 = functools.partial(SOPC, SOPCOp.S_CMP_GT_I32) @@ -799,7 +783,6 @@ s_setvskip = functools.partial(SOPC, SOPCOp.S_SETVSKIP) s_set_gpr_idx_on = functools.partial(SOPC, SOPCOp.S_SET_GPR_IDX_ON) s_cmp_eq_u64 = functools.partial(SOPC, SOPCOp.S_CMP_EQ_U64) s_cmp_lg_u64 = functools.partial(SOPC, SOPCOp.S_CMP_LG_U64) -cdna4 = functools.partial(SOPC, SOPCOp.CDNA4) s_movk_i32 = functools.partial(SOPK, SOPKOp.S_MOVK_I32) s_cmovk_i32 = functools.partial(SOPK, SOPKOp.S_CMOVK_I32) s_cmpk_eq_i32 = functools.partial(SOPK, SOPKOp.S_CMPK_EQ_I32) @@ -851,7 +834,6 @@ s_cbranch_cdbgsys_and_user = functools.partial(SOPP, SOPPOp.S_CBRANCH_CDBGSYS_AN s_endpgm_saved = functools.partial(SOPP, SOPPOp.S_ENDPGM_SAVED) s_set_gpr_idx_off = functools.partial(SOPP, SOPPOp.S_SET_GPR_IDX_OFF) s_set_gpr_idx_mode = functools.partial(SOPP, SOPPOp.S_SET_GPR_IDX_MODE) -cdna4 = functools.partial(SOPP, SOPPOp.CDNA4) v_nop_e32 = functools.partial(VOP1, VOP1Op.V_NOP) v_mov_b32_e32 = functools.partial(VOP1, VOP1Op.V_MOV_B32) v_readfirstlane_b32_e32 = functools.partial(VOP1, VOP1Op.V_READFIRSTLANE_B32) @@ -937,7 +919,6 @@ v_prng_b32_e32 = functools.partial(VOP1, VOP1Op.V_PRNG_B32) v_permlane16_swap_b32_e32 = functools.partial(VOP1, VOP1Op.V_PERMLANE16_SWAP_B32) v_permlane32_swap_b32_e32 = functools.partial(VOP1, VOP1Op.V_PERMLANE32_SWAP_B32) v_cvt_f32_bf16_e32 = 
functools.partial(VOP1, VOP1Op.V_CVT_F32_BF16) -cdna4_e32 = functools.partial(VOP1, VOP1Op.CDNA4) v_cndmask_b32_e32 = functools.partial(VOP2, VOP2Op.V_CNDMASK_B32) v_add_f32_e32 = functools.partial(VOP2, VOP2Op.V_ADD_F32) v_sub_f32_e32 = functools.partial(VOP2, VOP2Op.V_SUB_F32) @@ -961,8 +942,8 @@ v_and_b32_e32 = functools.partial(VOP2, VOP2Op.V_AND_B32) v_or_b32_e32 = functools.partial(VOP2, VOP2Op.V_OR_B32) v_xor_b32_e32 = functools.partial(VOP2, VOP2Op.V_XOR_B32) v_dot2c_f32_bf16_e32 = functools.partial(VOP2, VOP2Op.V_DOT2C_F32_BF16) -def v_fmamk_f32_e32(vdst, src0, K, vsrc1): return VOP2(VOP2Op.V_FMAMK_F32, vdst, src0, vsrc1, literal=K) -def v_fmaak_f32_e32(vdst, src0, vsrc1, K): return VOP2(VOP2Op.V_FMAAK_F32, vdst, src0, vsrc1, literal=K) +v_fmamk_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAMK_F32) +v_fmaak_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAAK_F32) v_add_co_u32_e32 = functools.partial(VOP2, VOP2Op.V_ADD_CO_U32) v_sub_co_u32_e32 = functools.partial(VOP2, VOP2Op.V_SUB_CO_U32) v_subrev_co_u32_e32 = functools.partial(VOP2, VOP2Op.V_SUBREV_CO_U32) @@ -1000,7 +981,6 @@ v_dot8c_i32_i4_e32 = functools.partial(VOP2, VOP2Op.V_DOT8C_I32_I4) v_fmac_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAC_F32) v_pk_fmac_f16_e32 = functools.partial(VOP2, VOP2Op.V_PK_FMAC_F16) v_xnor_b32_e32 = functools.partial(VOP2, VOP2Op.V_XNOR_B32) -cdna4_e32 = functools.partial(VOP2, VOP2Op.CDNA4) v_cmp_class_f32 = functools.partial(VOP3A, VOP3AOp.V_CMP_CLASS_F32) v_cmpx_class_f32 = functools.partial(VOP3A, VOP3AOp.V_CMPX_CLASS_F32) v_cmp_class_f64 = functools.partial(VOP3A, VOP3AOp.V_CMP_CLASS_F64) @@ -1418,7 +1398,7 @@ v_cvt_scalef32_sr_pk32_fp6_f32 = functools.partial(VOP3A, VOP3AOp.V_CVT_SCALEF32 v_cvt_scalef32_sr_pk32_bf6_f32 = functools.partial(VOP3A, VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F32) v_cvt_scalef32_pk32_f32_fp6 = functools.partial(VOP3A, VOP3AOp.V_CVT_SCALEF32_PK32_F32_FP6) v_cvt_scalef32_pk32_f32_bf6 = functools.partial(VOP3A, VOP3AOp.V_CVT_SCALEF32_PK32_F32_BF6) -cdna4 = functools.partial(VOP3A, VOP3AOp.CDNA4) +v_cvt_scalef32_pk32_fp6_f16 = functools.partial(VOP3A, VOP3AOp.V_CVT_SCALEF32_PK32_FP6_F16) v_cvt_scalef32_pk32_fp6_bf16 = functools.partial(VOP3A, VOP3AOp.V_CVT_SCALEF32_PK32_FP6_BF16) v_cvt_scalef32_pk32_bf6_f16 = functools.partial(VOP3A, VOP3AOp.V_CVT_SCALEF32_PK32_BF6_F16) v_cvt_scalef32_pk32_bf6_bf16 = functools.partial(VOP3A, VOP3AOp.V_CVT_SCALEF32_PK32_BF6_BF16) @@ -1486,7 +1466,6 @@ v_div_scale_f32 = functools.partial(VOP3B, VOP3BOp.V_DIV_SCALE_F32) v_div_scale_f64 = functools.partial(VOP3B, VOP3BOp.V_DIV_SCALE_F64) v_mad_u64_u32 = functools.partial(VOP3B, VOP3BOp.V_MAD_U64_U32) v_mad_i64_i32 = functools.partial(VOP3B, VOP3BOp.V_MAD_I64_I32) -cdna4 = functools.partial(VOP3B, VOP3BOp.CDNA4) v_pk_mad_i16 = functools.partial(VOP3P, VOP3POp.V_PK_MAD_I16) v_pk_mul_lo_u16 = functools.partial(VOP3P, VOP3POp.V_PK_MUL_LO_U16) v_pk_add_i16 = functools.partial(VOP3P, VOP3POp.V_PK_ADD_I16) @@ -1534,8 +1513,6 @@ v_smfmac_i32_16x16x128_i8 = functools.partial(VOP3P, VOP3POp.V_SMFMAC_I32_16X16X v_smfmac_f32_16x16x128_bf8_bf8 = functools.partial(VOP3P, VOP3POp.V_SMFMAC_F32_16X16X128_BF8_BF8) v_smfmac_f32_16x16x128_bf8_fp8 = functools.partial(VOP3P, VOP3POp.V_SMFMAC_F32_16X16X128_BF8_FP8) v_smfmac_f32_16x16x128_fp8_bf8 = functools.partial(VOP3P, VOP3POp.V_SMFMAC_F32_16X16X128_FP8_BF8) -v_mfma_f32_16x16x8_xf32 = functools.partial(VOP3P, VOP3POp.V_MFMA_F32_16X16X8_XF32) -v_mfma_f32_32x32x4_xf32 = functools.partial(VOP3P, VOP3POp.V_MFMA_F32_32X32X4_XF32) v_mfma_f32_32x32x1_2b_f32 = 
functools.partial(VOP3P, VOP3POp.V_MFMA_F32_32X32X1_2B_F32) v_mfma_f32_16x16x1_4b_f32 = functools.partial(VOP3P, VOP3POp.V_MFMA_F32_16X16X1_4B_F32) v_mfma_f32_4x4x1_16b_f32 = functools.partial(VOP3P, VOP3POp.V_MFMA_F32_4X4X1_16B_F32) @@ -1593,7 +1570,6 @@ v_smfmac_f32_32x32x32_bf8_bf8 = functools.partial(VOP3P, VOP3POp.V_SMFMAC_F32_32 v_smfmac_f32_32x32x32_bf8_fp8 = functools.partial(VOP3P, VOP3POp.V_SMFMAC_F32_32X32X32_BF8_FP8) v_smfmac_f32_32x32x32_fp8_bf8 = functools.partial(VOP3P, VOP3POp.V_SMFMAC_F32_32X32X32_FP8_BF8) v_smfmac_f32_32x32x32_fp8_fp8 = functools.partial(VOP3P, VOP3POp.V_SMFMAC_F32_32X32X32_FP8_FP8) -cdna4 = functools.partial(VOP3P, VOP3POp.CDNA4) v_cmp_class_f32_e32 = functools.partial(VOPC, VOPCOp.V_CMP_CLASS_F32) v_cmpx_class_f32_e32 = functools.partial(VOPC, VOPCOp.V_CMPX_CLASS_F32) v_cmp_class_f64_e32 = functools.partial(VOPC, VOPCOp.V_CMP_CLASS_F64) @@ -1791,42 +1767,4 @@ v_cmpx_le_u64_e32 = functools.partial(VOPC, VOPCOp.V_CMPX_LE_U64) v_cmpx_gt_u64_e32 = functools.partial(VOPC, VOPCOp.V_CMPX_GT_U64) v_cmpx_ne_u64_e32 = functools.partial(VOPC, VOPCOp.V_CMPX_NE_U64) v_cmpx_ge_u64_e32 = functools.partial(VOPC, VOPCOp.V_CMPX_GE_U64) -v_cmpx_t_u64_e32 = functools.partial(VOPC, VOPCOp.V_CMPX_T_U64) -cdna4_e32 = functools.partial(VOPC, VOPCOp.CDNA4) - -S_ADD_U32 = SrcEnum.S_ADD_U32 -S_SUB_U32 = SrcEnum.S_SUB_U32 -S_ADD_I32 = SrcEnum.S_ADD_I32 -S_SUB_I32 = SrcEnum.S_SUB_I32 -S_ADDC_U32 = SrcEnum.S_ADDC_U32 -S_SUBB_U32 = SrcEnum.S_SUBB_U32 -S_MIN_I32 = SrcEnum.S_MIN_I32 -FLAT_SCRATCH_LO = SrcEnum.FLAT_SCRATCH_LO -FLAT_SCRATCH_HI = SrcEnum.FLAT_SCRATCH_HI -XNACK_MASK_LO = SrcEnum.XNACK_MASK_LO -XNACK_MASK_HI = SrcEnum.XNACK_MASK_HI -VCC_LO = SrcEnum.VCC_LO -VCC_HI = SrcEnum.VCC_HI -M0 = SrcEnum.M0 -EXEC_LO = SrcEnum.EXEC_LO -EXEC_HI = SrcEnum.EXEC_HI -ZERO = SrcEnum.ZERO -DPP8FI = SrcEnum.DPP8FI -SHARED_BASE = SrcEnum.SHARED_BASE -SHARED_LIMIT = SrcEnum.SHARED_LIMIT -PRIVATE_BASE = SrcEnum.PRIVATE_BASE -PRIVATE_LIMIT = SrcEnum.PRIVATE_LIMIT -RESERVED = SrcEnum.RESERVED -POS_HALF = SrcEnum.POS_HALF -NEG_HALF = SrcEnum.NEG_HALF -POS_ONE = SrcEnum.POS_ONE -NEG_ONE = SrcEnum.NEG_ONE -POS_TWO = SrcEnum.POS_TWO -NEG_TWO = SrcEnum.NEG_TWO -POS_FOUR = SrcEnum.POS_FOUR -NEG_FOUR = SrcEnum.NEG_FOUR -INV_2PI = SrcEnum.INV_2PI -VCCZ = SrcEnum.VCCZ -EXECZ = SrcEnum.EXECZ -SCC = SrcEnum.SCC -LDS_DIRECT = SrcEnum.LDS_DIRECT \ No newline at end of file +v_cmpx_t_u64_e32 = functools.partial(VOPC, VOPCOp.V_CMPX_T_U64) \ No newline at end of file diff --git a/extra/assembly/amd/autogen/cdna/str_pcode.py b/extra/assembly/amd/autogen/cdna/str_pcode.py index 63c1d1fe6a..b62fd9ca99 100644 --- a/extra/assembly/amd/autogen/cdna/str_pcode.py +++ b/extra/assembly/amd/autogen/cdna/str_pcode.py @@ -1,182 +1,361 @@ # autogenerated by pdf.py - do not edit -# to regenerate: python -m extra.assembly.amd.pdf --arch cdna +# to regenerate: python -m extra.assembly.amd.pdf # ruff: noqa: E501 -from extra.assembly.amd.autogen.cdna.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3POp, VOPCOp, VOP3AOp, VOP3BOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp +from extra.assembly.amd.autogen.cdna.enum import DSOp, FLATOp, GLOBALOp, MTBUFOp, MUBUFOp, SCRATCHOp, SMEMOp, SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, VOP1Op, VOP2Op, VOP3AOp, VOP3BOp, VOP3POp, VOPCOp -SOP1Op_PCODE = { - SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32', - SOP1Op.S_MOV_B64: 'D0.b64 = S0.b64', - SOP1Op.S_CMOV_B32: 'if SCC then\nD0.b32 = S0.b32\nendif', - SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif', - SOP1Op.S_NOT_B32: 'D0.u32 = 
~S0.u32;\nSCC = D0.u32 != 0U', - SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL', - SOP1Op.S_WQM_B32: "tmp = 0U;\ndeclare i : 6'U;\nfor i in 6'0U : 6'31U do\ntmp[i] = S0.u32[i & 6'60U +: 6'4U] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U", - SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL", - SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', - SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]', - SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", - SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", - SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", - SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", - SOP1Op.S_FF0_I32_B32: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'0U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_FF0_I32_B64: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'0U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_FF1_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_FF1_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_FLBIT_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_FLBIT_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_FLBIT_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', - SOP1Op.S_FLBIT_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', - SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))", - SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", - SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U", - SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U", - SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U", - SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U", - SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL', - SOP1Op.S_SETPC_B64: 'PC = S0.i64', - SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64', - SOP1Op.S_RFE_B64: 'PC = S0.i64', - SOP1Op.S_AND_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_OR_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC 
iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_XOR_SAVEEXEC_B64: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_ANDN2_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_ORN2_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_NAND_SAVEEXEC_B64: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_NOR_SAVEEXEC_B64: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_XNOR_SAVEEXEC_B64: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U', - SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL', - SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = SGPR[addr].b32', - SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b64 = SGPR[addr].b64', - SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b32 = S0.b32', - SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b64 = S0.b64', - SOP1Op.S_CBRANCH_JOIN: "saved_csp = S0.u32;\nif WAVE_MODE.CSP.u32 == saved_csp then\nPC += 4LL;\nelse\nWAVE_MODE.CSP -= 3'1U;\n{ PC, EXEC } = SGPR[WAVE_MODE.CSP.u32 * 4U].b128;\nendif", - SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? 
-S0.i32 : S0.i32;\nSCC = D0.i32 != 0', - SOP1Op.S_SET_GPR_IDX_IDX: 'M0[7 : 0] = S0.u32[7 : 0].b8', - SOP1Op.S_ANDN1_SAVEEXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_ORN1_SAVEEXEC_B64: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_ANDN1_WREXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_ANDN2_WREXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor', +DSOp_PCODE = { + DSOp.DS_ADD_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_WRITE_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]', + DSOp.DS_WRITE2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]', + DSOp.DS_WRITE2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]', + DSOp.DS_CMPST_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp', + DSOp.DS_CMPST_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp\nsrc\ncmp', + DSOp.DS_MIN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MAX_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_ADD_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + DSOp.DS_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + DSOp.DS_WRITE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32", + DSOp.DS_WRITE_B8: 'MEM[ADDR].b8 = DATA[7 : 0]', + DSOp.DS_WRITE_B16: 'MEM[ADDR].b16 = DATA[15 : 0]', + DSOp.DS_ADD_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 
0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_RTN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_RTN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_WRXCHG_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + DSOp.DS_WRXCHG2_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_WRXCHG2ST64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_CMPST_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp', + DSOp.DS_CMPST_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp\nsrc\ncmp', + DSOp.DS_MIN_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_MAX_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_WRAP_RTN_B32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = tmp >= DATA.u32 ? 
tmp - DATA.u32 : tmp + DATA2.u32;\nRETURN_DATA = tmp', + DSOp.DS_ADD_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_READ_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32', + DSOp.DS_READ2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4U].b32', + DSOp.DS_READ2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256U].b32', + DSOp.DS_READ_I8: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i8))", + DSOp.DS_READ_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + DSOp.DS_READ_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))", + DSOp.DS_READ_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + DSOp.DS_SWIZZLE_B32: 'Offset[4:0]: Swizzle\n0x00: {1,11,9,19,5,15,d,1d,3,13,b,1b,7,17,f,1f,2,12,a,1a,6,16,e,1e,4,14,c,1c,8,18,10,20}\n0x10: {1,9,5,d,3,b,7,f,2,a,6,e,4,c,8,10,11,19,15,1d,13,1b,17,1f,12,1a,16,1e,14,1c,18,20}\n0x1f: No swizzle\nOffset[9:5]: Swizzle\n0x01, mask=0, rotate left:\n{2,3,4,5,6,7,8,9,a,b,c,d,e,f,10,11,12,13,14,15,16,17,18,19,1a,1b,1c,1d,1e,1f,20,1}\n0x01, mask=0, rotate right:\n{20,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f,10,11,12,13,14,15,16,17,18,19,1a,1b,1c,1d,1e,1f}\n0x01, mask=1, rotate left:\n{1,4,3,6,5,8,7,a,9,c,b,e,d,10,f,12,11,14,13,16,15,18,17,1a,19,1c,1b,1e,1d,20,1f,2}\n0x01, mask=1, rotate right:\n{1f,2,1,4,3,6,5,8,7,a,9,c,b,e,d,10,f,12,11,14,13,16,15,18,17,1a,19,1c,1b,1e,1d,20}\noffset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\n}\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset[15]) {\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n}\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? 
thread_in[j] : 0;\n}\n}', + DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\ndst_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\nsrc_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_ADD_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_WRITE_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]', + DSOp.DS_WRITE2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_WRITE2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_CMPST_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp', + DSOp.DS_CMPST_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp\nsrc\ncmp', + DSOp.DS_MIN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_WRITE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]', + DSOp.DS_WRITE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]', + DSOp.DS_READ_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// RETURN_DATA[31:16] is preserved.", + DSOp.DS_READ_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// RETURN_DATA[15:0] is preserved.", + DSOp.DS_READ_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));\n// RETURN_DATA[31:16] is preserved.", + DSOp.DS_READ_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));\n// RETURN_DATA[15:0] is preserved.", + DSOp.DS_READ_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;\n// RETURN_DATA[31:16] is preserved.', + DSOp.DS_READ_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;\n// RETURN_DATA[15:0] is preserved.', + DSOp.DS_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + DSOp.DS_ADD_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_RTN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_RTN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_WRXCHG_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + DSOp.DS_WRXCHG2_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_WRXCHG2ST64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_CMPST_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp', + DSOp.DS_CMPST_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp\nsrc\ncmp', + DSOp.DS_MIN_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_READ_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32', + DSOp.DS_READ2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8U + 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8U + 4U].b32', + DSOp.DS_READ2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512U + 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512U + 4U].b32', + DSOp.DS_ADD_RTN_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nADDR0 = ((ADDR + offset.u32) & 0xfff8U);\nADDR1 = ADDR0 + 4U;\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", + DSOp.DS_READ_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32", + DSOp.DS_PK_ADD_RTN_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + DSOp.DS_PK_ADD_RTN_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + DSOp.DS_WRITE_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]', + DSOp.DS_WRITE_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]', + DSOp.DS_READ_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32', + DSOp.DS_READ_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32\nwhere:\nOFFSET = Unsigned immediate byte offset.\nOFFEN = Send offset either as VADDR or as zero..\nIDXEN = Send index either as VADDR or as zero.\nLDS = Data read from/written to LDS or VGPR.\nOP = Instruction Opcode.\nVADDR = VGPR address source.\nVDATA = Destination vector GPR.\nSRSRC = Scalar GPR that specifies resource constant.\nACC = Return to ACC VGPRs\nSC = Scope\nNT = Non-Temporal\nSOFFSET = Byte offset added to the memory address of an SGPR.', } -SOP2Op_PCODE = { - SOP2Op.S_ADD_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 
0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_SUB_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_ADD_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', - SOP2Op.S_SUB_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', - SOP2Op.S_ADDC_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_SUBB_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', - SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', - SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', - SOP2Op.S_MAX_U32: 'SCC = S0.u32 >= S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', - SOP2Op.S_CSELECT_B32: 'D0.u32 = SCC ? S0.u32 : S1.u32', - SOP2Op.S_CSELECT_B64: 'D0.u64 = SCC ? S0.u64 : S1.u64', - SOP2Op.S_AND_B32: 'D0.u32 = (S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_AND_B64: 'D0.u64 = (S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_OR_B32: 'D0.u32 = (S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_OR_B64: 'D0.u64 = (S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_XOR_B64: 'D0.u64 = (S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_ANDN2_B32: 'D0.u32 = (S0.u32 & ~S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_ANDN2_B64: 'D0.u64 = (S0.u64 & ~S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_ORN2_B32: 'D0.u32 = (S0.u32 | ~S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_ORN2_B64: 'D0.u64 = (S0.u64 | ~S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_NAND_B32: 'D0.u32 = ~(S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_NAND_B64: 'D0.u64 = ~(S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_NOR_B32: 'D0.u32 = ~(S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_NOR_B64: 'D0.u64 = ~(S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_XNOR_B64: 'D0.u64 = ~(S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0", - SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL', - SOP2Op.S_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', - SOP2Op.S_BFM_B64: 'D0.u64 = (((1ULL << S0[5 : 0].u32) - 1ULL) << S1[5 : 0].u32)', - SOP2Op.S_MUL_I32: 'D0.i32 = S0.i32 * S1.i32', - SOP2Op.S_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S1[22 : 16].u32) - 1U));\nSCC = D0.u32 != 0U', - SOP2Op.S_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32);\nSCC = D0.i32 != 0', - SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL', - SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC 
= D0.i64 != 0LL', - SOP2Op.S_CBRANCH_G_FORK: "S0 = compare mask (VCC or any SGPR) and S1 = 64-bit byte address of target instruction. See also\nmask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\nif mask_pass == EXEC.u64 then\nPC = 64'I(S1.u64)\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { S1.u64, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = 64'I(S1.u64)\nendif", - SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0', - SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", - SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", - SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_PACK_LL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[15 : 0].u16 }', - SOP2Op.S_PACK_LH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[15 : 0].u16 }', - SOP2Op.S_PACK_HH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[31 : 16].u16 }', +FLATOp_PCODE = { + FLATOp.FLAT_LOAD_UBYTE: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + FLATOp.FLAT_LOAD_SBYTE: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + FLATOp.FLAT_LOAD_USHORT: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + FLATOp.FLAT_LOAD_SSHORT: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + FLATOp.FLAT_LOAD_DWORD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', + FLATOp.FLAT_LOAD_DWORDX2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + FLATOp.FLAT_LOAD_DWORDX3: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + FLATOp.FLAT_LOAD_DWORDX4: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + FLATOp.FLAT_STORE_BYTE: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', + FLATOp.FLAT_STORE_BYTE_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', + FLATOp.FLAT_STORE_SHORT: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', + FLATOp.FLAT_STORE_SHORT_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', + FLATOp.FLAT_STORE_DWORD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', + FLATOp.FLAT_STORE_DWORDX2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + FLATOp.FLAT_STORE_DWORDX3: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = 
VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + FLATOp.FLAT_STORE_DWORDX4: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + FLATOp.FLAT_LOAD_UBYTE_D16: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.", + FLATOp.FLAT_LOAD_UBYTE_D16_HI: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.", + FLATOp.FLAT_LOAD_SBYTE_D16: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.", + FLATOp.FLAT_LOAD_SBYTE_D16_HI: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.", + FLATOp.FLAT_LOAD_SHORT_D16: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.', + FLATOp.FLAT_LOAD_SHORT_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.', + FLATOp.FLAT_ATOMIC_SWAP: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_CMPSWAP: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_ADD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_SUB: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_SMIN: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + FLATOp.FLAT_ATOMIC_UMIN: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_SMAX: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + FLATOp.FLAT_ATOMIC_UMAX: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_AND: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_OR: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_XOR: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + FLATOp.FLAT_ATOMIC_INC: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_DEC: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? 
src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + FLATOp.FLAT_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp', + FLATOp.FLAT_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + FLATOp.FLAT_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + FLATOp.FLAT_ATOMIC_MIN_F64: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + FLATOp.FLAT_ATOMIC_MAX_F64: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + FLATOp.FLAT_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + FLATOp.FLAT_ATOMIC_SWAP_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_CMPSWAP_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_ADD_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_SUB_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_SMIN_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + FLATOp.FLAT_ATOMIC_UMIN_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_SMAX_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + FLATOp.FLAT_ATOMIC_UMAX_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_AND_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_OR_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_XOR_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + FLATOp.FLAT_ATOMIC_INC_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + FLATOp.FLAT_ATOMIC_DEC_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? 
src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', } -SOPCOp_PCODE = { - SOPCOp.S_CMP_EQ_I32: 'SCC = S0.i32 == S1.i32', - SOPCOp.S_CMP_LG_I32: 'SCC = S0.i32 <> S1.i32', - SOPCOp.S_CMP_GT_I32: 'SCC = S0.i32 > S1.i32', - SOPCOp.S_CMP_GE_I32: 'SCC = S0.i32 >= S1.i32', - SOPCOp.S_CMP_LT_I32: 'SCC = S0.i32 < S1.i32', - SOPCOp.S_CMP_LE_I32: 'SCC = S0.i32 <= S1.i32', - SOPCOp.S_CMP_EQ_U32: 'SCC = S0.u32 == S1.u32', - SOPCOp.S_CMP_LG_U32: 'SCC = S0.u32 <> S1.u32', - SOPCOp.S_CMP_GT_U32: 'SCC = S0.u32 > S1.u32', - SOPCOp.S_CMP_GE_U32: 'SCC = S0.u32 >= S1.u32', - SOPCOp.S_CMP_LT_U32: 'SCC = S0.u32 < S1.u32', - SOPCOp.S_CMP_LE_U32: 'SCC = S0.u32 <= S1.u32', - SOPCOp.S_BITCMP0_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'0U", - SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U", - SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U", - SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U", - SOPCOp.S_SETVSKIP: 'VSKIP = S0.u32[S1.u32[4 : 0]]', - SOPCOp.S_SET_GPR_IDX_ON: 'specified in the SRC0 operand. The raw bits of the SRC1 field are read and used to set the enable bits. S1[0] =\nVSRC0_REL, S1[1] = VSRC1_REL, S1[2] = VSRC2_REL and S1[3] = VDST_REL.\nM0[7 : 0] = S0.u32[7 : 0].b8;\n// this is the direct content of raw S1 field', - SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64', - SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64', +GLOBALOp_PCODE = { + GLOBALOp.GLOBAL_LOAD_UBYTE: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + GLOBALOp.GLOBAL_LOAD_SBYTE: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + GLOBALOp.GLOBAL_LOAD_USHORT: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + GLOBALOp.GLOBAL_LOAD_SSHORT: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + GLOBALOp.GLOBAL_LOAD_DWORD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', + GLOBALOp.GLOBAL_LOAD_DWORDX2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + GLOBALOp.GLOBAL_LOAD_DWORDX3: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + GLOBALOp.GLOBAL_LOAD_DWORDX4: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + GLOBALOp.GLOBAL_STORE_BYTE: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', + GLOBALOp.GLOBAL_STORE_BYTE_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', + GLOBALOp.GLOBAL_STORE_SHORT: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', + GLOBALOp.GLOBAL_STORE_SHORT_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', + GLOBALOp.GLOBAL_STORE_DWORD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', + GLOBALOp.GLOBAL_STORE_DWORDX2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + GLOBALOp.GLOBAL_STORE_DWORDX3: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 
8U].b32 = VDATA[95 : 64]', + GLOBALOp.GLOBAL_STORE_DWORDX4: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + GLOBALOp.GLOBAL_LOAD_UBYTE_D16: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.", + GLOBALOp.GLOBAL_LOAD_UBYTE_D16_HI: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.", + GLOBALOp.GLOBAL_LOAD_SBYTE_D16: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.", + GLOBALOp.GLOBAL_LOAD_SBYTE_D16_HI: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.", + GLOBALOp.GLOBAL_LOAD_SHORT_D16: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.', + GLOBALOp.GLOBAL_LOAD_SHORT_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.', + GLOBALOp.GLOBAL_ATOMIC_SWAP: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_CMPSWAP: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SUB: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SMIN: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_UMIN: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SMAX: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_UMAX: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_AND: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_OR: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_XOR: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_INC: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 
0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_DEC: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp', + GLOBALOp.GLOBAL_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + GLOBALOp.GLOBAL_ATOMIC_MIN_F64: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_MAX_F64: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + GLOBALOp.GLOBAL_ATOMIC_SWAP_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_ADD_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SUB_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SMIN_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_UMIN_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_SMAX_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_UMAX_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_AND_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_OR_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_XOR_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_INC_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 
0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + GLOBALOp.GLOBAL_ATOMIC_DEC_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', } -SOPKOp_PCODE = { - SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(S0.i16))", - SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(S0.i16))\nendif", - SOPKOp.S_CMPK_EQ_I32: "SCC = S0.i32 == 32'I(signext(S1.i16))", - SOPKOp.S_CMPK_LG_I32: "SCC = S0.i32 != 32'I(signext(S1.i16))", - SOPKOp.S_CMPK_GT_I32: "SCC = S0.i32 > 32'I(signext(S1.i16))", - SOPKOp.S_CMPK_GE_I32: "SCC = S0.i32 >= 32'I(signext(S1.i16))", - SOPKOp.S_CMPK_LT_I32: "SCC = S0.i32 < 32'I(signext(S1.i16))", - SOPKOp.S_CMPK_LE_I32: "SCC = S0.i32 <= 32'I(signext(S1.i16))", - SOPKOp.S_CMPK_EQ_U32: "SCC = S0.u32 == 32'U(S1.u16)", - SOPKOp.S_CMPK_LG_U32: "SCC = S0.u32 != 32'U(S1.u16)", - SOPKOp.S_CMPK_GT_U32: "SCC = S0.u32 > 32'U(S1.u16)", - SOPKOp.S_CMPK_GE_U32: "SCC = S0.u32 >= 32'U(S1.u16)", - SOPKOp.S_CMPK_LT_U32: "SCC = S0.u32 < 32'U(S1.u16)", - SOPKOp.S_CMPK_LE_U32: "SCC = S0.u32 <= 32'U(S1.u16)", - SOPKOp.S_ADDK_I32: "tmp = D0.i32;\nD0.i32 = D0.i32 + 32'I(signext(S0.i16));\nSCC = ((tmp[31] == S0.i16[15]) && (tmp[31] != D0.i32[31]));", - SOPKOp.S_MULK_I32: "D0.i32 = D0.i32 * 32'I(signext(S0.i16))", - SOPKOp.S_CBRANCH_I_FORK: "S0 = compare mask (VCC or any SGPR), and SIMM16 = signed DWORD branch offset relative to next\nmask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\ntarget_addr = PC + signext(SIMM16.i32 * 4) + 4LL;\nif mask_pass == EXEC.u64 then\nPC = target_addr\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { target_addr, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = target_addr\nendif", - SOPKOp.S_GETREG_B32: "offset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", - SOPKOp.S_SETREG_B32: "offset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", - SOPKOp.S_SETREG_IMM32_B32: "offset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", - SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL", +MTBUFOp_PCODE = { + MTBUFOp.TBUFFER_LOAD_FORMAT_X: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format', + MTBUFOp.TBUFFER_LOAD_FORMAT_XY: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, 
OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])', + MTBUFOp.TBUFFER_LOAD_FORMAT_XYZ: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])', + MTBUFOp.TBUFFER_LOAD_FORMAT_XYZW: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])', + MTBUFOp.TBUFFER_STORE_FORMAT_X: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format', + MTBUFOp.TBUFFER_STORE_FORMAT_XY: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)', + MTBUFOp.TBUFFER_STORE_FORMAT_XYZ: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)', + MTBUFOp.TBUFFER_STORE_FORMAT_XYZW: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)', + MTBUFOp.TBUFFER_LOAD_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[31:16].b16 is preserved.", + MTBUFOp.TBUFFER_LOAD_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))", + MTBUFOp.TBUFFER_LOAD_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\n// VDATA[63:48].b16 is preserved.", + MTBUFOp.TBUFFER_LOAD_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 
16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))", + MTBUFOp.TBUFFER_STORE_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format", + MTBUFOp.TBUFFER_STORE_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))", + MTBUFOp.TBUFFER_STORE_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))", + MTBUFOp.TBUFFER_STORE_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))", } -SOPPOp_PCODE = { - SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nendfor', - SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;", - SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_VCCZ: "If VCCZ is 1 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_VCCNZ: "If VCCZ is 0 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_TRAP: '// PC passed into trap handler points to S_TRAP itself,\nPC = TBA.i64;\n// trap base address', - SOPPOp.S_CBRANCH_CDBGSYS: "if WAVE_STATUS.COND_DBG_SYS.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_CDBGUSER: "if WAVE_STATUS.COND_DBG_USER.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: "if (WAVE_STATUS.COND_DBG_SYS || WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: "if (WAVE_STATUS.COND_DBG_SYS && 
WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_SET_GPR_IDX_MODE: 'SIMM16[1] = VSRC1_REL, SIMM16[2] = VSRC2_REL and SIMM16[3] = VDST_REL.\nGet Doorbell ID 10 - Returns doorbell into EXEC, with the doorbell physical address in bits', +MUBUFOp_PCODE = { + MUBUFOp.BUFFER_LOAD_FORMAT_X: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format', + MUBUFOp.BUFFER_LOAD_FORMAT_XY: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])', + MUBUFOp.BUFFER_LOAD_FORMAT_XYZ: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])', + MUBUFOp.BUFFER_LOAD_FORMAT_XYZW: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])', + MUBUFOp.BUFFER_STORE_FORMAT_X: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format', + MUBUFOp.BUFFER_STORE_FORMAT_XY: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)', + MUBUFOp.BUFFER_STORE_FORMAT_XYZ: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)', + MUBUFOp.BUFFER_STORE_FORMAT_XYZW: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)', + MUBUFOp.BUFFER_LOAD_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[31:16].b16 is preserved.", + MUBUFOp.BUFFER_LOAD_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))", + MUBUFOp.BUFFER_LOAD_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 
= 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\n// VDATA[63:48].b16 is preserved.", + MUBUFOp.BUFFER_LOAD_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))", + MUBUFOp.BUFFER_STORE_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format", + MUBUFOp.BUFFER_STORE_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))", + MUBUFOp.BUFFER_STORE_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))", + MUBUFOp.BUFFER_STORE_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))", + MUBUFOp.BUFFER_LOAD_UBYTE: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + MUBUFOp.BUFFER_LOAD_SBYTE: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + MUBUFOp.BUFFER_LOAD_USHORT: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + MUBUFOp.BUFFER_LOAD_SSHORT: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + MUBUFOp.BUFFER_LOAD_DWORD: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', + MUBUFOp.BUFFER_LOAD_DWORDX2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + MUBUFOp.BUFFER_LOAD_DWORDX3: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + MUBUFOp.BUFFER_LOAD_DWORDX4: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + MUBUFOp.BUFFER_STORE_BYTE: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 
: 0]', + MUBUFOp.BUFFER_STORE_BYTE_D16_HI: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', + MUBUFOp.BUFFER_STORE_SHORT: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', + MUBUFOp.BUFFER_STORE_SHORT_D16_HI: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', + MUBUFOp.BUFFER_STORE_DWORD: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', + MUBUFOp.BUFFER_STORE_DWORDX2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + MUBUFOp.BUFFER_STORE_DWORDX3: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + MUBUFOp.BUFFER_STORE_DWORDX4: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + MUBUFOp.BUFFER_LOAD_UBYTE_D16: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.", + MUBUFOp.BUFFER_LOAD_UBYTE_D16_HI: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.", + MUBUFOp.BUFFER_LOAD_SBYTE_D16: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.", + MUBUFOp.BUFFER_LOAD_SBYTE_D16_HI: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.", + MUBUFOp.BUFFER_LOAD_SHORT_D16: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.', + MUBUFOp.BUFFER_LOAD_SHORT_D16_HI: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.', + MUBUFOp.BUFFER_LOAD_FORMAT_D16_HI_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[15:0].b16 is preserved.", + MUBUFOp.BUFFER_STORE_FORMAT_D16_HI_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\n// Mem access size depends on format", + MUBUFOp.BUFFER_ATOMIC_SWAP: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + MUBUFOp.BUFFER_ATOMIC_CMPSWAP: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + MUBUFOp.BUFFER_ATOMIC_ADD: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + MUBUFOp.BUFFER_ATOMIC_SUB: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + MUBUFOp.BUFFER_ATOMIC_SMIN: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + MUBUFOp.BUFFER_ATOMIC_UMIN: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + MUBUFOp.BUFFER_ATOMIC_SMAX: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + MUBUFOp.BUFFER_ATOMIC_UMAX: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + MUBUFOp.BUFFER_ATOMIC_AND: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + MUBUFOp.BUFFER_ATOMIC_OR: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + MUBUFOp.BUFFER_ATOMIC_XOR: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + MUBUFOp.BUFFER_ATOMIC_INC: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + MUBUFOp.BUFFER_ATOMIC_DEC: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + MUBUFOp.BUFFER_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp', + MUBUFOp.BUFFER_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + MUBUFOp.BUFFER_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', + MUBUFOp.BUFFER_ATOMIC_MIN_F64: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + MUBUFOp.BUFFER_ATOMIC_MAX_F64: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', + MUBUFOp.BUFFER_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', + MUBUFOp.BUFFER_ATOMIC_SWAP_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + MUBUFOp.BUFFER_ATOMIC_CMPSWAP_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + MUBUFOp.BUFFER_ATOMIC_ADD_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + MUBUFOp.BUFFER_ATOMIC_SUB_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + MUBUFOp.BUFFER_ATOMIC_SMIN_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + MUBUFOp.BUFFER_ATOMIC_UMIN_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + MUBUFOp.BUFFER_ATOMIC_SMAX_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + MUBUFOp.BUFFER_ATOMIC_UMAX_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + MUBUFOp.BUFFER_ATOMIC_AND_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + MUBUFOp.BUFFER_ATOMIC_OR_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + MUBUFOp.BUFFER_ATOMIC_XOR_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + MUBUFOp.BUFFER_ATOMIC_INC_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + MUBUFOp.BUFFER_ATOMIC_DEC_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? 
src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', +} + +SCRATCHOp_PCODE = { + SCRATCHOp.SCRATCH_LOAD_UBYTE: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + SCRATCHOp.SCRATCH_LOAD_SBYTE: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + SCRATCHOp.SCRATCH_LOAD_USHORT: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + SCRATCHOp.SCRATCH_LOAD_SSHORT: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + SCRATCHOp.SCRATCH_LOAD_DWORD: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', + SCRATCHOp.SCRATCH_LOAD_DWORDX2: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + SCRATCHOp.SCRATCH_LOAD_DWORDX3: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + SCRATCHOp.SCRATCH_LOAD_DWORDX4: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + SCRATCHOp.SCRATCH_STORE_BYTE: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', + SCRATCHOp.SCRATCH_STORE_BYTE_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', + SCRATCHOp.SCRATCH_STORE_SHORT: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', + SCRATCHOp.SCRATCH_STORE_SHORT_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', + SCRATCHOp.SCRATCH_STORE_DWORD: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', + SCRATCHOp.SCRATCH_STORE_DWORDX2: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + SCRATCHOp.SCRATCH_STORE_DWORDX3: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + SCRATCHOp.SCRATCH_STORE_DWORDX4: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + SCRATCHOp.SCRATCH_LOAD_UBYTE_D16: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.", + SCRATCHOp.SCRATCH_LOAD_UBYTE_D16_HI: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.", + SCRATCHOp.SCRATCH_LOAD_SBYTE_D16: "addr = CalcScratchAddr(ADDR.b32, 
SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.", + SCRATCHOp.SCRATCH_LOAD_SBYTE_D16_HI: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.", + SCRATCHOp.SCRATCH_LOAD_SHORT_D16: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.', + SCRATCHOp.SCRATCH_LOAD_SHORT_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.', } SMEMOp_PCODE = { @@ -256,8 +435,187 @@ SMEMOp_PCODE = { SMEMOp.S_ATOMIC_DEC_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', } +SOP1Op_PCODE = { + SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32', + SOP1Op.S_MOV_B64: 'D0.b64 = S0.b64', + SOP1Op.S_CMOV_B32: 'if SCC then\nD0.b32 = S0.b32\nendif', + SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif', + SOP1Op.S_NOT_B32: 'D0.u32 = ~S0.u32;\nSCC = D0.u32 != 0U', + SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_WQM_B32: "tmp = 0U;\ndeclare i : 6'U;\nfor i in 6'0U : 6'31U do\ntmp[i] = S0.u32[i & 6'60U +: 6'4U] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]', + SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT0_I32_B32(0x00000000) => 32\nS_BCNT0_I32_B32(0xcccccccc) => 16\nS_BCNT0_I32_B32(0xffffffff) => 0", + SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT1_I32_B32(0x00000000) => 0\nS_BCNT1_I32_B32(0xcccccccc) => 16\nS_BCNT1_I32_B32(0xffffffff) => 32", + SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 
1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_FF0_I32_B32: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'0U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FF0_I32_B32(0xaaaaaaaa) => 0\nS_FF0_I32_B32(0x55555555) => 1\nS_FF0_I32_B32(0x00000000) => 0\nS_FF0_I32_B32(0xffffffff) => 0xffffffff\nS_FF0_I32_B32(0xfffeffff) => 16", + SOP1Op.S_FF0_I32_B64: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'0U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FF1_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FF1_I32_B32(0xaaaaaaaa) => 1\nS_FF1_I32_B32(0x55555555) => 0\nS_FF1_I32_B32(0x00000000) => 0xffffffff\nS_FF1_I32_B32(0xffffffff) => 0\nS_FF1_I32_B32(0x00010000) => 16", + SOP1Op.S_FF1_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FLBIT_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FLBIT_I32_B32(0x00000000) => 0xffffffff\nS_FLBIT_I32_B32(0x0000cccc) => 16\nS_FLBIT_I32_B32(0xffff3333) => 0\nS_FLBIT_I32_B32(0x7fffffff) => 1\nS_FLBIT_I32_B32(0x80000000) => 0\nS_FLBIT_I32_B32(0xffffffff) => 0", + SOP1Op.S_FLBIT_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_FLBIT_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FLBIT_I32(0x00000000) => 0xffffffff\nS_FLBIT_I32(0x0000cccc) => 16\nS_FLBIT_I32(0xffff3333) => 16\nS_FLBIT_I32(0x7fffffff) => 1\nS_FLBIT_I32(0x80000000) => 1\nS_FLBIT_I32(0xffffffff) => 0xffffffff', + SOP1Op.S_FLBIT_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))", + SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U", + SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U", + SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U", + SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U", + SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL', + SOP1Op.S_SETPC_B64: 'PC = S0.i64', + SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64', + SOP1Op.S_RFE_B64: "WAVE_STATUS.PRIV = 1'0U;\nPC = S0.i64", + SOP1Op.S_AND_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ANDN2_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ORN2_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + 
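Reviewer note on this SOP1Op_PCODE hunk: each dict value is the scalar-ALU pseudo-code captured verbatim from the ISA PDF, so notation like 1'1U and 0ULL is AMD's sized-literal syntax, not Python. As a minimal sketch of how one family of these strings behaves (plain Python with hypothetical names; this is not tinygrad's emulator API), every S_*_SAVEEXEC_B64 entry above follows the same save/modify/flag template:

MASK64 = (1 << 64) - 1

def s_and_saveexec_b64(s0: int, exec_mask: int) -> tuple[int, int, int]:
  # pcode: saveexec = EXEC.u64; EXEC.u64 = (S0.u64 & EXEC.u64);
  #        D0.u64 = saveexec.u64; SCC = EXEC.u64 != 0ULL
  saveexec = exec_mask & MASK64        # D0 receives the old EXEC mask
  exec_mask = s0 & exec_mask & MASK64  # EXEC is replaced by the boolean op
  scc = int(exec_mask != 0)            # SCC reports whether any lane survives
  return saveexec, exec_mask, scc

The AND/OR/XOR/NAND/NOR/XNOR and N1/N2 variants differ only in the boolean expression on the EXEC line; the save-then-set-SCC structure is identical.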
SOP1Op.S_NAND_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XNOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U', + SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32\ns_mov_b32 m0, 10\ns_movrels_b32 s5, s7', + SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b64 = SGPR[addr].b64', + SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32\ns_mov_b32 m0, 10\ns_movreld_b32 s5, s7', + SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b64 = S0.b64', + SOP1Op.S_CBRANCH_JOIN: "saved_csp = S0.u32;\nif WAVE_MODE.CSP.u32 == saved_csp then\nPC += 4LL;\n// Second time to JOIN: continue with program.\nelse\nWAVE_MODE.CSP -= 3'1U;\n// First time to JOIN; jump to other FORK path.\n{ PC, EXEC } = SGPR[WAVE_MODE.CSP.u32 * 4U].b128;\n// Read 128 bits from 4 consecutive SGPRs.\nendif", + SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0\nS_ABS_I32(0x00000001) => 0x00000001\nS_ABS_I32(0x7fffffff) => 0x7fffffff\nS_ABS_I32(0x80000000) => 0x80000000 // Note this is negative!\nS_ABS_I32(0x80000001) => 0x7fffffff\nS_ABS_I32(0x80000002) => 0x7ffffffe\nS_ABS_I32(0xffffffff) => 0x00000001', + SOP1Op.S_SET_GPR_IDX_IDX: 'M0[7 : 0] = S0.u32[7 : 0].b8', + SOP1Op.S_ANDN1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ORN1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ANDN1_WREXEC_B64: 'EXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_ANDN2_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL\n// V0 holds the index value per lane\n// save exec mask for restore at the end\ns_mov_b64 s2, exec\n// exec mask of remaining (unprocessed) threads\ns_mov_b64 s4, exec\nloop:\n// get the index value for the first active lane\nv_readfirstlane_b32 s0, v0\n// find all other lanes with same index value\nv_cmpx_eq s0, v0\n // do the operation using the current EXEC mask. S0 holds the index.\n// mask out thread that was just executed\n// s_andn2_b64 s4, s4, exec\n// s_mov_b64 exec, s4\ns_andn2_wrexec_b64 s4, s4 // replaces above 2 ops\n// repeat until EXEC==0\ns_cbranch_scc1 loop\ns_mov_b64 exec, s2', + SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor\ns_bitreplicate_b64 s2, s0\ns_bitreplicate_b64 s2, s2', +} + +SOP2Op_PCODE = { + SOP2Op.S_ADD_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADDC_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_SUB_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 
1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUBB_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_ADD_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32', + SOP2Op.S_SUB_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32', + SOP2Op.S_ADDC_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADDC_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_SUBB_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUBB_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MAX_U32: 'SCC = S0.u32 >= S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_CSELECT_B32: 'D0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_CSELECT_B64: 'D0.u64 = SCC ? S0.u64 : S1.u64', + SOP2Op.S_AND_B32: 'D0.u32 = (S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_AND_B64: 'D0.u64 = (S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_OR_B32: 'D0.u32 = (S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_OR_B64: 'D0.u64 = (S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XOR_B64: 'D0.u64 = (S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ANDN2_B32: 'D0.u32 = (S0.u32 & ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_ANDN2_B64: 'D0.u64 = (S0.u64 & ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ORN2_B32: 'D0.u32 = (S0.u32 | ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_ORN2_B64: 'D0.u64 = (S0.u64 | ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NAND_B32: 'D0.u32 = ~(S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NAND_B64: 'D0.u64 = ~(S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NOR_B32: 'D0.u32 = ~(S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NOR_B64: 'D0.u64 = ~(S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XNOR_B64: 'D0.u64 = ~(S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0", + SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + SOP2Op.S_BFM_B64: 'D0.u64 = (((1ULL << S0[5 : 0].u32) - 1ULL) << S1[5 : 0].u32)', + SOP2Op.S_MUL_I32: 'D0.i32 = S0.i32 * S1.i32', + SOP2Op.S_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S1[22 : 16].u32) - 1U));\nSCC = D0.u32 != 0U', + SOP2Op.S_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32);\nSCC = D0.i32 != 0', + SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL', + SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 
16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_CBRANCH_G_FORK: "mask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\nif mask_pass == EXEC.u64 then\nPC = 64'I(S1.u64)\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { S1.u64, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = 64'I(S1.u64)\nendif", + SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0\nS_ABSDIFF_I32(0x00000002, 0x00000005) => 0x00000003\nS_ABSDIFF_I32(0xffffffff, 0x00000000) => 0x00000001\nS_ABSDIFF_I32(0x80000000, 0x00000000) => 0x80000000 // Note: result is negative!\nS_ABSDIFF_I32(0x80000000, 0x00000001) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xffffffff) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xfffffffe) => 0x7ffffffe', + SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 
1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_PACK_LL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_LH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_HH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[31 : 16].u16 }', +} + +SOPCOp_PCODE = { + SOPCOp.S_CMP_EQ_I32: 'SCC = S0.i32 == S1.i32', + SOPCOp.S_CMP_LG_I32: 'SCC = S0.i32 <> S1.i32', + SOPCOp.S_CMP_GT_I32: 'SCC = S0.i32 > S1.i32', + SOPCOp.S_CMP_GE_I32: 'SCC = S0.i32 >= S1.i32', + SOPCOp.S_CMP_LT_I32: 'SCC = S0.i32 < S1.i32', + SOPCOp.S_CMP_LE_I32: 'SCC = S0.i32 <= S1.i32', + SOPCOp.S_CMP_EQ_U32: 'SCC = S0.u32 == S1.u32', + SOPCOp.S_CMP_LG_U32: 'SCC = S0.u32 <> S1.u32', + SOPCOp.S_CMP_GT_U32: 'SCC = S0.u32 > S1.u32', + SOPCOp.S_CMP_GE_U32: 'SCC = S0.u32 >= S1.u32', + SOPCOp.S_CMP_LT_U32: 'SCC = S0.u32 < S1.u32', + SOPCOp.S_CMP_LE_U32: 'SCC = S0.u32 <= S1.u32', + SOPCOp.S_BITCMP0_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U", + SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U", + SOPCOp.S_SETVSKIP: 'VSKIP = S0.u32[S1.u32[4 : 0]]\ns_setvskip 1, 0 // Enable vskip mode.\ns_setvskip 0, 0 // Disable vskip mode.', + SOPCOp.S_SET_GPR_IDX_ON: "WAVE_MODE.GPR_IDX_EN = 1'1U;\nM0[7 : 0] = S0.u32[7 : 0].b8;\nM0[15 : 12] = SRC1.u32[3 : 0].b4;\n// this is the direct content of raw S1 field\n// Remaining bits of M0 are unmodified.", + SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64', + SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64', +} + +SOPKOp_PCODE = { + SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(S0.i16))", + SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(S0.i16))\nendif", + SOPKOp.S_CMPK_EQ_I32: "SCC = S0.i32 == 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_LG_I32: "SCC = S0.i32 != 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_GT_I32: "SCC = S0.i32 > 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_GE_I32: "SCC = S0.i32 >= 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_LT_I32: "SCC = S0.i32 < 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_LE_I32: "SCC = S0.i32 <= 32'I(signext(S1.i16))", + SOPKOp.S_CMPK_EQ_U32: "SCC = S0.u32 == 32'U(S1.u16)", + SOPKOp.S_CMPK_LG_U32: "SCC = S0.u32 != 32'U(S1.u16)", + SOPKOp.S_CMPK_GT_U32: "SCC = S0.u32 > 32'U(S1.u16)", + SOPKOp.S_CMPK_GE_U32: "SCC = S0.u32 >= 32'U(S1.u16)", + SOPKOp.S_CMPK_LT_U32: "SCC = S0.u32 < 32'U(S1.u16)", + SOPKOp.S_CMPK_LE_U32: "SCC = S0.u32 <= 32'U(S1.u16)", + SOPKOp.S_ADDK_I32: "tmp = D0.i32;\n// Save value to check sign bits for overflow later.\nD0.i32 = D0.i32 + 32'I(signext(S0.i16));\nSCC = ((tmp[31] == S0.i16[15]) && (tmp[31] != D0.i32[31]));\n// signed overflow.", + SOPKOp.S_MULK_I32: "D0.i32 = D0.i32 * 32'I(signext(S0.i16))", + SOPKOp.S_CBRANCH_I_FORK: "// Initial setup.\nmask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\ntarget_addr = PC + signext(SIMM16.i32 * 4) + 4LL;\n// Decide where to jump to.\nif mask_pass == EXEC.u64 then\nPC = target_addr\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { target_addr, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = target_addr\nendif", + SOPKOp.S_GETREG_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 
32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", + SOPKOp.S_SETREG_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\nHW_REGISTERS[hwRegId] = value.b32;\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_SETREG_IMM32_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\nHW_REGISTERS[hwRegId] = value.b32;\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL", +} + +SOPPOp_PCODE = { + SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor\ns_nop 0 // Wait 1 cycle.\ns_nop 0xf // Wait 16 cycles.', + SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.\ns_branch label // Set SIMM16 = +4 = 0x0004\ns_nop 0 // 4 bytes\nlabel:\ns_nop 0 // 4 bytes\ns_branch label // Set SIMM16 = -8 = 0xfff8", + SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCZ: "if VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCNZ: "if VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_WAITCNT: 'SIMM16[3:0] = vmcount (vector memory operations) lower bits [3:0],\nSIMM16[6:4] = export/mem-write-data count,\nSIMM16[11:8] = LGKMcnt (scalar-mem/GDS/LDS count),\nSIMM16[15:14] = vmcount (vector memory operations) upper bits [5:4].', + SOPPOp.S_SLEEP: 's_sleep 0 // Wait for 0 clocks.\ns_sleep 1 // Wait for 1-64 clocks.\ns_sleep 2 // Wait for 65-128 clocks.', + SOPPOp.S_TRAP: 'TrapID = SIMM16.u16[7 : 0];\n"Wait for all instructions to complete";\n// PC passed into trap handler points to S_TRAP itself,\n// *not* to the next instruction.\n{ TTMP[1], TTMP[0] } = { 3\'0, PCRewind[3 : 0], HT[0], TrapID[7 : 0], PC[47 : 0] };\nPC = TBA.i64;\n// trap base address\nWAVE_STATUS.PRIV = 1\'1U', + SOPPOp.S_CBRANCH_CDBGSYS: "if WAVE_STATUS.COND_DBG_SYS.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGUSER: "if WAVE_STATUS.COND_DBG_USER.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: "if (WAVE_STATUS.COND_DBG_SYS || WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: "if (WAVE_STATUS.COND_DBG_SYS && 
WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_SET_GPR_IDX_OFF: "WAVE_MODE.GPR_IDX_EN = 1'0U", + SOPPOp.S_SET_GPR_IDX_MODE: 'M0[15 : 12] = SIMM16.u16[3 : 0].b4', +} + VOP1Op_PCODE = { - VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32', + VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1', VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'I;\nif EXEC == 0x0LL then\nlane = 0;\n// Force lane 0 if all lanes are disabled\nelse\nlane = s_ff1_i32_b64(EXEC);\n// Lowest active lane\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', @@ -269,7 +627,7 @@ VOP1Op_PCODE = { VOP1Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', VOP1Op.V_CVT_RPI_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', VOP1Op.V_CVT_FLR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', - VOP1Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP1Op.V_CVT_OFF_F32_I4: "declare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", VOP1Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', VOP1Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', VOP1Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', @@ -287,49 +645,49 @@ VOP1Op_PCODE = { VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', - VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', - VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', - VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF', + VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF', + VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0', VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', - VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0', VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', - 
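Reviewer note on the VOP1Op_PCODE hunk: the regenerated strings now keep the PDF's worked special-value tables (for example V_EXP_F32(0xff800000) => 0x00000000) inline after the pseudo-code rather than dropping them. Those rows can be spot-checked from plain Python with struct-based bit casts; the snippet below is an illustration only and uses nothing from tinygrad:

import math, struct

def f32(bits: int) -> float:
  # reinterpret a 32-bit pattern as an IEEE-754 float, i.e. the pcode's .f32 view
  return struct.unpack('<f', struct.pack('<I', bits))[0]

# two rows from the V_EXP_F32 table quoted above: exp2(-INF) = 0, exp2(+INF) = +INF
assert 2.0 ** f32(0xff800000) == 0.0
assert 2.0 ** f32(0x7f800000) == math.inf

The same cast checks the V_LOG/V_RCP/V_RSQ/V_SQRT rows, which helps when eyeballing whether pdf.py attached an example table to the right opcode.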
VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF', VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', - VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", - VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", + VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN", + VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN", VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32', VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', - VOP1Op.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP1Op.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP1Op.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', - VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', - VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', + VOP1Op.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_U32(0x00000000) => 0xffffffff\nV_FFBH_U32(0x800000ff) => 0\nV_FFBH_U32(0x100000ff) => 3\nV_FFBH_U32(0x0000ffff) => 16\nV_FFBH_U32(0x00000001) => 31", + VOP1Op.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBL_B32(0x00000000) => 0xffffffff\nV_FFBL_B32(0xff000001) => 0\nV_FFBL_B32(0xff000008) => 3\nV_FFBL_B32(0xffff0000) => 16\nV_FFBL_B32(0x80000000) => 31", + VOP1Op.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_I32(0x00000000) => 0xffffffff\nV_FFBH_I32(0x40000000) => 1\nV_FFBH_I32(0x80000000) => 1\nV_FFBH_I32(0x0fffffff) => 4\nV_FFBH_I32(0xffff0000) => 16\nV_FFBH_I32(0xfffffffe) => 31\nV_FFBH_I32(0xffffffff) => 0xffffffff', + VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()', + VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()', VOP1Op.V_FRACT_F64: 'D0.f64 = 
S0.f64 + -floor(S0.f64)', - VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", - VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", + VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()", + VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()", VOP1Op.V_MOV_B64: 'D0.b64 = S0.b64', VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', - VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", - VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', - VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", - VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', - VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", - VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", - VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", + VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0", + VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF', + VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0", + VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF', + VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF", + VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()", + VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()", VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) 
&& (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', - VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", - VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", + VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN", + VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN", VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', - VOP1Op.V_SAT_PK_U8_I16: "tmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", + VOP1Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n <= 16'0 then\nreturn 8'0U\nelsif n >= 16'255 then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\ntmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp', VOP1Op.V_CVT_F32_FP8: 'if SDWA_SRC0_SEL == BYTE1.b3 then\nD0.f32 = fp8_to_f32(S0[15 : 8].fp8)\nelsif SDWA_SRC0_SEL == BYTE2.b3 then\nD0.f32 = fp8_to_f32(S0[23 : 16].fp8)\nelsif SDWA_SRC0_SEL == BYTE3.b3 then\nD0.f32 = fp8_to_f32(S0[31 : 24].fp8)\nelse\n// BYTE0 implied\nD0.f32 = fp8_to_f32(S0[7 : 0].fp8)\nendif', VOP1Op.V_CVT_F32_BF8: 'if SDWA_SRC0_SEL == BYTE1.b3 then\nD0.f32 = bf8_to_f32(S0[15 : 8].bf8)\nelsif SDWA_SRC0_SEL == BYTE2.b3 then\nD0.f32 = bf8_to_f32(S0[23 : 16].bf8)\nelsif SDWA_SRC0_SEL == BYTE3.b3 then\nD0.f32 = bf8_to_f32(S0[31 : 24].bf8)\nelse\n// BYTE0 implied\nD0.f32 = bf8_to_f32(S0[7 : 0].bf8)\nendif', @@ -338,7 +696,7 @@ VOP1Op_PCODE = { VOP1Op.V_PRNG_B32: 'in = S0.u32;\nD0.u32 = ((in << 1U) ^ (in[31] ? 
197U : 0U))', VOP1Op.V_PERMLANE16_SWAP_B32: 'for pass in 0 : 1 do\nfor lane in 0 : 15 do\ntmp = VGPR[pass * 32 + lane][SRC0.u32];\nVGPR[pass * 32 + lane][SRC0.u32] = VGPR[pass * 32 + lane + 16][VDST.u32];\nVGPR[pass * 32 + lane + 16][VDST.u32] = tmp\nendfor\nendfor', VOP1Op.V_PERMLANE32_SWAP_B32: 'for lane in 0 : 31 do\ntmp = VGPR[lane][SRC0.u32];\nVGPR[lane][SRC0.u32] = VGPR[lane + 32][VDST.u32];\nVGPR[lane + 32][VDST.u32] = tmp\nendfor', - VOP1Op.V_CVT_F32_BF16: "D0.f32 = 32'F({ S0.b16, 16'0U })", + VOP1Op.V_CVT_F32_BF16: "D0.f32 = 32'F({ S0.b16, 16'0U })\nwhere:\nSRC0 = First operand for instruction.\nVSRC1 = Second operand for instruction.\nOP = Instructions.\nAll VOPC instructions can alternatively be encoded in the VOP3A format.", } VOP2Op_PCODE = { @@ -352,7 +710,7 @@ VOP2Op_PCODE = { VOP2Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", VOP2Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", VOP2Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", - VOP2Op.V_MIN_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S1.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S0.f32\nelse\nD0.f32 = S0.f32 < S1.f32 ? S0.f32 : S1.f32\nendif", + VOP2Op.V_MIN_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S1.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S0.f32\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F32.\nD0.f32 = S0.f32 < S1.f32 ? S0.f32 : S1.f32\nendif", VOP2Op.V_MAX_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S1.f32\nelsif WAVE_MODE.IEEE then\nD0.f32 = S0.f32 >= S1.f32 ? S0.f32 : S1.f32\nelse\nD0.f32 = S0.f32 > S1.f32 ? S0.f32 : S1.f32\nendif", VOP2Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', VOP2Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', @@ -364,6 +722,7 @@ VOP2Op_PCODE = { VOP2Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', VOP2Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', VOP2Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP2Op.V_DOT2C_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', VOP2Op.V_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', VOP2Op.V_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', VOP2Op.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32", @@ -377,8 +736,8 @@ VOP2Op_PCODE = { VOP2Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', VOP2Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', VOP2Op.V_MAC_F16: "tmp = S0.f16 * S1.f16 + D0.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif", - VOP2Op.V_MADMK_F16: 'tmp = S0.f16 * SIMM16.f16 + S1.f16;', - VOP2Op.V_MADAK_F16: 'tmp = S0.f16 * S1.f16 + SIMM16.f16;', + VOP2Op.V_MADMK_F16: "tmp = S0.f16 * SIMM16.f16 + S1.f16;\nD0 = { 16'0, tmp.f16 }", + VOP2Op.V_MADAK_F16: "tmp = S0.f16 * S1.f16 + SIMM16.f16;\nD0 = { 16'0, tmp.f16 }", VOP2Op.V_ADD_U16: 'D0.u16 = S0.u16 + S1.u16', VOP2Op.V_SUB_U16: 'D0.u16 = S0.u16 - S1.u16', VOP2Op.V_SUBREV_U16: 'D0.u16 = S1.u16 - S0.u16', @@ -387,12 +746,12 @@ VOP2Op_PCODE = { VOP2Op.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)', VOP2Op.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)', VOP2Op.V_MAX_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S1.f16\nelsif WAVE_MODE.IEEE then\nD0.f16 = S0.f16 >= S1.f16 ? S0.f16 : S1.f16\nelse\nD0.f16 = S0.f16 > S1.f16 ? S0.f16 : S1.f16\nendif", - VOP2Op.V_MIN_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S1.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S0.f16\nelse\nD0.f16 = S0.f16 < S1.f16 ? S0.f16 : S1.f16\nendif", + VOP2Op.V_MIN_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S1.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S0.f16\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F16.\nD0.f16 = S0.f16 < S1.f16 ? S0.f16 : S1.f16\nendif", VOP2Op.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16', VOP2Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16', VOP2Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16', VOP2Op.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? 
S0.i16 : S1.i16', - VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()", VOP2Op.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32', VOP2Op.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32', VOP2Op.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32', @@ -403,449 +762,260 @@ VOP2Op_PCODE = { VOP2Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', VOP2Op.V_PK_FMAC_F16: 'D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16);\nD0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16)', VOP2Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', - VOP2Op.V_DOT2C_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', -} - -VOP3POp_PCODE = { - VOP3POp.V_PK_MAD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16;\nD0.b32 = tmp", - VOP3POp.V_PK_MUL_LO_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_ADD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16;\nD0.b32 = tmp", - VOP3POp.V_PK_SUB_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16;\nD0.b32 = tmp", - VOP3POp.V_PK_LSHLREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', - VOP3POp.V_PK_LSHRREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', - VOP3POp.V_PK_ASHRREV_I16: 'tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MAX_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 >= S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 >= S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp", - VOP3POp.V_PK_MIN_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 < S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 < S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp", - VOP3POp.V_PK_MAD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16;\nD0.b32 = tmp", - VOP3POp.V_PK_ADD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16;\nD0.b32 = tmp", - VOP3POp.V_PK_SUB_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16;\nD0.b32 = tmp", - VOP3POp.V_PK_MAX_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 >= S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 >= S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp", - VOP3POp.V_PK_MIN_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 < S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 < S1[31 : 16].u16 ? 
S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp", - VOP3POp.V_PK_FMA_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16);\ntmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16);\nD0.b32 = tmp", - VOP3POp.V_PK_ADD_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\nD0.b32 = tmp", - VOP3POp.V_PK_MUL_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.b32 = tmp", - VOP3POp.V_PK_MIN_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", - VOP3POp.V_PK_MAX_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", - VOP3POp.V_MAD_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = in[0] * in[1] + in[2]", - VOP3POp.V_MAD_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(in[0] * in[1] + in[2])", - VOP3POp.V_MAD_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(in[0] * in[1] + in[2])", - VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', - VOP3POp.V_DOT2_I32_I16: 'tmp = S2.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp', - VOP3POp.V_DOT2_U32_U16: 'tmp = S2.u32;\ntmp += u16_to_u32(S0[15 : 0].u16) * u16_to_u32(S1[15 : 0].u16);\ntmp += u16_to_u32(S0[31 : 16].u16) * u16_to_u32(S1[31 : 16].u16);\nD0.u32 = tmp', - VOP3POp.V_DOT4_I32_I8: 'tmp = S2.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp', - VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp', - VOP3POp.V_DOT8_I32_I4: 'tmp = S2.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += 
i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp', - VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp', - VOP3POp.V_PK_FMA_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = fma(S0[31 : 0].f32, S1[31 : 0].f32, S2[31 : 0].f32);\ntmp[63 : 32].f32 = fma(S0[63 : 32].f32, S1[63 : 32].f32, S2[63 : 32].f32);\nD0.b64 = tmp", - VOP3POp.V_PK_MUL_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 * S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 * S1[63 : 32].f32;\nD0.b64 = tmp", - VOP3POp.V_PK_ADD_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 + S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 + S1[63 : 32].f32;\nD0.b64 = tmp", - VOP3POp.V_PK_MOV_B32: 'tmp0.u32 = S0.u32[OPSEL[0].i32 * 32 + 31 : OPSEL[0].i32 * 32];\ntmp1.u32 = S1.u32[OPSEL[1].i32 * 32 + 31 : OPSEL[1].i32 * 32];\nD0.u32[31 : 0] = tmp0.u32;\nD0.u32[63 : 32] = tmp1.u32', - VOP3POp.V_DOT2_F32_BF16: "tmp = 32'F(S0[15 : 0].bf16) * 32'F(S1[15 : 0].bf16);\ntmp += 32'F(S0[31 : 16].bf16) * 32'F(S1[31 : 16].bf16);\ntmp += S2.f32;\nD0.f32 = tmp", - VOP3POp.V_PK_MINIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_minimum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_minimum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32", - VOP3POp.V_PK_MAXIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_maximum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_maximum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32", -} - -VOPCOp_PCODE = { - VOPCOp.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMPX_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. 
Store the result into the EXEC mask and\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", - VOPCOp.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMPX_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", - VOPCOp.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. 
Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask and\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
- VOPCOp.V_CMP_F_F16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_TRU_F16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_F16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_O_F16: "EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_U_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NEQ_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_TRU_F16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_F_F32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_TRU_F32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_F32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_O_F32: "EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_U_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NEQ_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_TRU_F32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_F_F64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_TRU_F64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_F64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_O_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NEQ_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NLT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_TRU_F64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_F_I16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_T_I16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_F_U16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_T_U16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_I16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NE_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_T_I16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_U16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NE_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_T_U16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_F_I32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_T_I32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_F_U32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_T_U32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_I32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NE_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_T_I32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_U32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NE_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_T_U32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_F_I64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_T_I64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_F_U64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMP_T_U64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_I64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NE_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_T_I64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_F_U64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOPCOp.V_CMPX_LT_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GT_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_NE_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOPCOp.V_CMPX_T_U64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nVDST = Destination VGPR 0- 255.",
 }
 VOP3AOp_PCODE = {
- VOP3AOp.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask and\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
- VOP3AOp.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
- VOP3AOp.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into the EXEC mask and\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
- VOP3AOp.V_CMP_F_F16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
+ VOP3AOp.V_CMP_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
+ VOP3AOp.V_CMP_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
+ VOP3AOp.V_CMP_F_F16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_F16: 'D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_F16: 'D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_GT_F16: 'D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_O_F16: "D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_U_F16: "D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
 VOP3AOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NGT_F16: 'D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_TRU_F16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_F16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NEQ_F16: 'D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NLT_F16: 'D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_TRU_F16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_F16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_O_F16: "EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_O_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
 VOP3AOp.V_CMPX_U_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
 VOP3AOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NEQ_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NLT_F16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_TRU_F16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_F_F32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_TRU_F16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_F_F32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_F32: 'D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_F32: 'D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_GT_F32: 'D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_O_F32: "D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_U_F32: "D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
 VOP3AOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NGT_F32: 'D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_TRU_F32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_F32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NEQ_F32: 'D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NLT_F32: 'D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_TRU_F32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_F32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_O_F32: "EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_O_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
 VOP3AOp.V_CMPX_U_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
 VOP3AOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NEQ_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NLT_F32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_TRU_F32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_F_F64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_TRU_F32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_F_F64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_F64: 'D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_F64: 'D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_GT_F64: 'D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_O_F64: 'D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_U_F64: 'D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NGT_F64: 'D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_TRU_F64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_F64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NEQ_F64: 'D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NLT_F64: 'D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_TRU_F64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_F64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_O_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_O_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NEQ_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NLT_F64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_TRU_F64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_F_I16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_TRU_F64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_F_I16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_I16: 'D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_I16: 'D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_GT_I16: 'D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NE_I16: 'D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_T_I16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_F_U16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_T_I16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_F_U16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_U16: 'D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_U16: 'D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
 VOP3AOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
+ VOP3AOp.V_CMP_GT_U16: 'D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NE_U16: 'D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_T_U16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_I16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_T_U16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_I16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NE_I16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_T_I16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_U16: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_T_I16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_U16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NE_U16: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_T_U16: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_F_I32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_T_U16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_F_I32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_I32: 'D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_I32: 'D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_GT_I32: 'D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NE_I32: 'D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_T_I32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_F_U32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_T_I32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_F_U32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_U32: 'D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_U32: 'D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_GT_U32: 'D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NE_U32: 'D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_T_U32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_I32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_T_U32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_I32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NE_I32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_T_I32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_U32: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_T_I32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_U32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NE_U32: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_T_U32: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_F_I64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_T_U32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_F_I64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_I64: 'D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_I64: 'D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_GT_I64: 'D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NE_I64: 'D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_T_I64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_F_U64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_T_I64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_F_U64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMP_LT_U64: 'D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_EQ_U64: 'D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_GT_U64: 'D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_NE_U64: 'D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMP_T_U64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_I64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMP_T_U64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_I64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NE_I64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_T_I64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_F_U64: "Set the per-lane condition code to 0. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
- VOP3AOp.V_CMPX_LT_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_T_I64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_F_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
+ VOP3AOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_EQ_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_GT_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_NE_U64: 'EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
+ VOP3AOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
- VOP3AOp.V_CMPX_T_U64: "Set the per-lane condition code to 1. Store the result into the EXEC mask and to VCC or a scalar register.\nEXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nVDST = Destination VGPR 0- 255.",
- VOP3AOp.V_MOV_B32: 'D0.b32 = S0.b32',
+ VOP3AOp.V_CMPX_T_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.\nwhere:\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nOP = DS instructions.\nADDR = Source LDS address VGPR 0 - 255.\nDATA0 = Source data0 VGPR 0 - 255.\nDATA1 = Source data1 VGPR 0 - 255.\nVDST = Destination VGPR 0- 255.",
+ VOP3AOp.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32',
+ VOP3AOp.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32',
+ VOP3AOp.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32',
+ VOP3AOp.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32',
+ VOP3AOp.V_FMAC_F64: 'D0.f64 = fma(S0.f64, S1.f64, D0.f64)',
+ VOP3AOp.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32',
+ VOP3AOp.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)",
+ VOP3AOp.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)",
+ VOP3AOp.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)",
+ VOP3AOp.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)",
+ VOP3AOp.V_MIN_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S1.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S0.f32\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F32.\nD0.f32 = S0.f32 < S1.f32 ? S0.f32 : S1.f32\nendif",
+ VOP3AOp.V_MAX_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S1.f32\nelsif WAVE_MODE.IEEE then\nD0.f32 = S0.f32 >= S1.f32 ? S0.f32 : S1.f32\nelse\nD0.f32 = S0.f32 > S1.f32 ? S0.f32 : S1.f32\nendif",
+ VOP3AOp.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32',
+ VOP3AOp.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32',
+ VOP3AOp.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32',
+ VOP3AOp.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? S0.u32 : S1.u32',
+ VOP3AOp.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)',
+ VOP3AOp.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)',
+ VOP3AOp.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)',
+ VOP3AOp.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)',
+ VOP3AOp.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)',
+ VOP3AOp.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)',
+ VOP3AOp.V_DOT2C_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp',
+ VOP3AOp.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16',
+ VOP3AOp.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16',
+ VOP3AOp.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16',
+ VOP3AOp.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16',
+ VOP3AOp.V_MAC_F16: "tmp = S0.f16 * S1.f16 + D0.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
+ VOP3AOp.V_ADD_U16: 'D0.u16 = S0.u16 + S1.u16',
+ VOP3AOp.V_SUB_U16: 'D0.u16 = S0.u16 - S1.u16',
+ VOP3AOp.V_SUBREV_U16: 'D0.u16 = S1.u16 - S0.u16',
+ VOP3AOp.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16',
+ VOP3AOp.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)',
+ VOP3AOp.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)',
+ VOP3AOp.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)',
+ VOP3AOp.V_MAX_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S1.f16\nelsif WAVE_MODE.IEEE then\nD0.f16 = S0.f16 >= S1.f16 ? S0.f16 : S1.f16\nelse\nD0.f16 = S0.f16 > S1.f16 ? S0.f16 : S1.f16\nendif",
+ VOP3AOp.V_MIN_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S1.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S0.f16\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F16.\nD0.f16 = S0.f16 < S1.f16 ? S0.f16 : S1.f16\nendif",
+ VOP3AOp.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16',
+ VOP3AOp.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16',
+ VOP3AOp.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16',
+ VOP3AOp.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? S0.i16 : S1.i16',
+ VOP3AOp.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
+ VOP3AOp.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32',
+ VOP3AOp.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32',
+ VOP3AOp.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32',
+ VOP3AOp.V_DOT2C_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp',
+ VOP3AOp.V_DOT2C_I32_I16: 'tmp = D0.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp',
+ VOP3AOp.V_DOT4C_I32_I8: 'tmp = D0.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp',
+ VOP3AOp.V_DOT8C_I32_I4: 'tmp = D0.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp',
+ VOP3AOp.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)',
+ VOP3AOp.V_PK_FMAC_F16: 'D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16);\nD0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16)',
+ VOP3AOp.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)',
+ VOP3AOp.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP3AOp.V_READFIRSTLANE_B32: "declare lane : 32'I;\nif EXEC == 0x0LL then\nlane = 0;\n// Force lane 0 if all lanes are disabled\nelse\nlane = s_ff1_i32_b64(EXEC);\n// Lowest active lane\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP3AOp.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP3AOp.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
@@ -857,7 +1027,7 @@ VOP3AOp_PCODE = {
VOP3AOp.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)',
VOP3AOp.V_CVT_RPI_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))',
VOP3AOp.V_CVT_FLR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))',
- VOP3AOp.V_CVT_OFF_F32_I4: "Used for interpolation in shader. Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]",
+ VOP3AOp.V_CVT_OFF_F32_I4: "declare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]",
VOP3AOp.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)',
VOP3AOp.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)',
VOP3AOp.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)',
@@ -875,88 +1045,37 @@ VOP3AOp_PCODE = {
VOP3AOp.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP3AOp.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP3AOp.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
- VOP3AOp.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)',
- VOP3AOp.V_LOG_F32: 'D0.f32 = log2(S0.f32)',
- VOP3AOp.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32',
+ VOP3AOp.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
+ VOP3AOp.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
+ VOP3AOp.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP3AOp.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
- VOP3AOp.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)',
+ VOP3AOp.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP3AOp.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP3AOp.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
- VOP3AOp.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)',
+ VOP3AOp.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP3AOp.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
- VOP3AOp.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))",
- VOP3AOp.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))",
+ VOP3AOp.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
+ VOP3AOp.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP3AOp.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP3AOp.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
- VOP3AOp.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor",
- VOP3AOp.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor",
- VOP3AOp.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor',
- VOP3AOp.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif',
- VOP3AOp.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif',
+ VOP3AOp.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_U32(0x00000000) => 0xffffffff\nV_FFBH_U32(0x800000ff) => 0\nV_FFBH_U32(0x100000ff) => 3\nV_FFBH_U32(0x0000ffff) => 16\nV_FFBH_U32(0x00000001) => 31",
+ VOP3AOp.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBL_B32(0x00000000) => 0xffffffff\nV_FFBL_B32(0xff000001) => 0\nV_FFBL_B32(0xff000008) => 3\nV_FFBL_B32(0xffff0000) => 16\nV_FFBL_B32(0x80000000) => 31",
+ VOP3AOp.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_I32(0x00000000) => 0xffffffff\nV_FFBH_I32(0x40000000) => 1\nV_FFBH_I32(0x80000000) => 1\nV_FFBH_I32(0x0fffffff) => 4\nV_FFBH_I32(0xffff0000) => 16\nV_FFBH_I32(0xfffffffe) => 31\nV_FFBH_I32(0xffffffff) => 0xffffffff',
+ VOP3AOp.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
+ VOP3AOp.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP3AOp.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
- VOP3AOp.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif",
- VOP3AOp.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif",
+ VOP3AOp.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
+ VOP3AOp.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
VOP3AOp.V_MOV_B64: 'D0.b64 = S0.b64',
VOP3AOp.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP3AOp.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP3AOp.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP3AOp.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
- VOP3AOp.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16",
- VOP3AOp.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)',
- VOP3AOp.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)",
- VOP3AOp.V_LOG_F16: 'D0.f16 = log2(S0.f16)',
- VOP3AOp.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)",
- VOP3AOp.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32',
- VOP3AOp.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32',
- VOP3AOp.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32',
- VOP3AOp.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32',
- VOP3AOp.V_FMAC_F64: 'D0.f64 = fma(S0.f64, S1.f64, D0.f64)',
- VOP3AOp.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32',
- VOP3AOp.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)",
- VOP3AOp.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)",
- VOP3AOp.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)",
- VOP3AOp.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)",
- VOP3AOp.V_MIN_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S1.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S0.f32\nelse\nD0.f32 = S0.f32 < S1.f32 ? S0.f32 : S1.f32\nendif",
- VOP3AOp.V_MAX_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S1.f32\nelsif WAVE_MODE.IEEE then\nD0.f32 = S0.f32 >= S1.f32 ? S0.f32 : S1.f32\nelse\nD0.f32 = S0.f32 > S1.f32 ? S0.f32 : S1.f32\nendif",
- VOP3AOp.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32',
- VOP3AOp.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32',
- VOP3AOp.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32',
- VOP3AOp.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? S0.u32 : S1.u32',
- VOP3AOp.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)',
- VOP3AOp.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)',
- VOP3AOp.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)',
- VOP3AOp.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)',
- VOP3AOp.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)',
- VOP3AOp.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)',
- VOP3AOp.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16',
- VOP3AOp.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16',
- VOP3AOp.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16',
- VOP3AOp.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16',
- VOP3AOp.V_MAC_F16: "tmp = S0.f16 * S1.f16 + D0.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
- VOP3AOp.V_ADD_U16: 'D0.u16 = S0.u16 + S1.u16',
- VOP3AOp.V_SUB_U16: 'D0.u16 = S0.u16 - S1.u16',
- VOP3AOp.V_SUBREV_U16: 'D0.u16 = S1.u16 - S0.u16',
- VOP3AOp.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16',
- VOP3AOp.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)',
- VOP3AOp.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)',
- VOP3AOp.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)',
- VOP3AOp.V_MAX_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S1.f16\nelsif WAVE_MODE.IEEE then\nD0.f16 = S0.f16 >= S1.f16 ? S0.f16 : S1.f16\nelse\nD0.f16 = S0.f16 > S1.f16 ? S0.f16 : S1.f16\nendif",
- VOP3AOp.V_MIN_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S1.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S0.f16\nelse\nD0.f16 = S0.f16 < S1.f16 ? S0.f16 : S1.f16\nendif",
- VOP3AOp.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16',
- VOP3AOp.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16',
- VOP3AOp.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16',
- VOP3AOp.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? S0.i16 : S1.i16',
- VOP3AOp.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))",
- VOP3AOp.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32',
- VOP3AOp.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32',
- VOP3AOp.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32',
- VOP3AOp.V_DOT2C_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp',
- VOP3AOp.V_DOT2C_I32_I16: 'tmp = D0.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp',
- VOP3AOp.V_DOT4C_I32_I8: 'tmp = D0.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp',
- VOP3AOp.V_DOT8C_I32_I4: 'tmp = D0.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp',
- VOP3AOp.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)',
- VOP3AOp.V_PK_FMAC_F16: 'D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16);\nD0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16)',
- VOP3AOp.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)',
+ VOP3AOp.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
+ VOP3AOp.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
+ VOP3AOp.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
+ VOP3AOp.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
+ VOP3AOp.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP3AOp.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32",
VOP3AOp.V_MAD_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24) + S2.u32",
VOP3AOp.V_CUBEID_F32: '// Set D0.f = cubemap face ID ({0.0, 1.0, ..., 5.0}).\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = 5.0F\nelse\nD0.f32 = 4.0F\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = 3.0F\nelse\nD0.f32 = 2.0F\nendif\nelse\nif S0.f32 < 0.0F then\nD0.f32 = 1.0F\nelse\nD0.f32 = 0.0F\nendif\nendif',
@@ -980,23 +1099,23 @@ VOP3AOp_PCODE = {
VOP3AOp.V_MED3_F32: "if (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)) || isNAN(64'F(S2.f32))) then\nD0.f32 = v_min3_f32(S0.f32, S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S0.f32 then\nD0.f32 = v_max_f32(S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S1.f32 then\nD0.f32 = v_max_f32(S0.f32, S2.f32)\nelse\nD0.f32 = v_max_f32(S0.f32, S1.f32)\nendif",
VOP3AOp.V_MED3_I32: 'if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32 then\nD0.i32 = v_max_i32(S1.i32, S2.i32)\nelsif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32 then\nD0.i32 = v_max_i32(S0.i32, S2.i32)\nelse\nD0.i32 = v_max_i32(S0.i32, S1.i32)\nendif',
VOP3AOp.V_MED3_U32: 'if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32 then\nD0.u32 = v_max_u32(S1.u32, S2.u32)\nelsif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32 then\nD0.u32 = v_max_u32(S0.u32, S2.u32)\nelse\nD0.u32 = v_max_u32(S0.u32, S1.u32)\nendif',
- VOP3AOp.V_SAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp",
+ VOP3AOp.V_SAD_U8: "ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp",
VOP3AOp.V_SAD_HI_U8: "D0.u32 = (32'U(v_sad_u8(S0, S1, 0U)) << 16U) + S2.u32",
- VOP3AOp.V_SAD_U16: '// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp',
- VOP3AOp.V_SAD_U32: '// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32',
+ VOP3AOp.V_SAD_U16: 'ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp',
+ VOP3AOp.V_SAD_U32: 'ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32',
VOP3AOp.V_CVT_PK_U8_F32: "tmp = (S2.u32 & 32'U(~(0xff << (S1.u32[1 : 0].u32 * 8U))));\ntmp = (tmp | ((32'U(f32_to_u8(S0.f32)) & 255U) << (S1.u32[1 : 0].u32 * 8U)));\nD0.u32 = tmp",
VOP3AOp.V_DIV_FIXUP_F32: "sign_out = (sign(S1.f32) ^ sign(S2.f32));\nif isNAN(64'F(S2.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S2.f32)))\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif ((64'F(S1.f32) == 0.0) && (64'F(S2.f32) == 0.0)) then\n// 0/0\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(abs(S1.f32)) == +INF) && (64'F(abs(S2.f32)) == +INF)) then\n// inf/inf\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(S1.f32) == 0.0) || (64'F(abs(S2.f32)) == +INF)) then\n// x/0, or inf/y\nD0.f32 = sign_out ? -INF.f32 : +INF.f32\nelsif ((64'F(abs(S1.f32)) == +INF) || (64'F(S2.f32) == 0.0)) then\n// x/inf, 0/y\nD0.f32 = sign_out ? -0.0F : 0.0F\nelsif exponent(S2.f32) - exponent(S1.f32) < -150 then\nD0.f32 = sign_out ? -UNDERFLOW_F32 : UNDERFLOW_F32\nelsif exponent(S1.f32) == 255 then\nD0.f32 = sign_out ? -OVERFLOW_F32 : OVERFLOW_F32\nelse\nD0.f32 = sign_out ? -abs(S0.f32) : abs(S0.f32)\nendif",
VOP3AOp.V_DIV_FIXUP_F64: "sign_out = (sign(S1.f64) ^ sign(S2.f64));\nif isNAN(S2.f64) then\nD0.f64 = cvtToQuietNAN(S2.f64)\nelsif isNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif ((S1.f64 == 0.0) && (S2.f64 == 0.0)) then\n// 0/0\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((abs(S1.f64) == +INF) && (abs(S2.f64) == +INF)) then\n// inf/inf\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((S1.f64 == 0.0) || (abs(S2.f64) == +INF)) then\n// x/0, or inf/y\nD0.f64 = sign_out ? -INF : +INF\nelsif ((abs(S1.f64) == +INF) || (S2.f64 == 0.0)) then\n// x/inf, 0/y\nD0.f64 = sign_out ? -0.0 : 0.0\nelsif exponent(S2.f64) - exponent(S1.f64) < -1075 then\nD0.f64 = sign_out ? -UNDERFLOW_F64 : UNDERFLOW_F64\nelsif exponent(S1.f64) == 2047 then\nD0.f64 = sign_out ? -OVERFLOW_F64 : OVERFLOW_F64\nelse\nD0.f64 = sign_out ? -abs(S0.f64) : abs(S0.f64)\nendif",
VOP3AOp.V_DIV_FMAS_F32: 'if VCC.u64[laneId] then\nD0.f32 = 2.0F ** 32 * fma(S0.f32, S1.f32, S2.f32)\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif',
VOP3AOp.V_DIV_FMAS_F64: 'if VCC.u64[laneId] then\nD0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)\nelse\nD0.f64 = fma(S0.f64, S1.f64, S2.f64)\nendif',
- VOP3AOp.V_MSAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp",
+ VOP3AOp.V_MSAD_U8: "ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp",
VOP3AOp.V_QSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64",
VOP3AOp.V_MQSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64",
VOP3AOp.V_MQSAD_U32_U8: "tmp[127 : 96] = 32'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32));\ntmp[95 : 64] = 32'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32));\ntmp[63 : 32] = 32'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32));\ntmp[31 : 0] = 32'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32));\nD0.b128 = tmp.b128",
VOP3AOp.V_MAD_LEGACY_F16: "tmp = S0.f16 * S1.f16 + S2.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
VOP3AOp.V_MAD_LEGACY_U16: "tmp = S0.u16 * S1.u16 + S2.u16;\nif OPSEL.u4[3] then\nD0 = { tmp.u16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.u16 }\nendif",
VOP3AOp.V_MAD_LEGACY_I16: "tmp = S0.i16 * S1.i16 + S2.i16;\nif OPSEL.u4[3] then\nD0 = { tmp.i16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.i16 }\nendif",
- VOP3AOp.V_PERM_B32: 'D0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])',
+ VOP3AOp.V_PERM_B32: "BYTE_PERMUTE = lambda(data, sel) (\ndeclare in : 8'B[8];\nfor i in 0 : 7 do\nin[i] = data[i * 8 + 7 : i * 8].b8\nendfor;\nif sel.u32 >= 13U then\nreturn 8'0xff\nelsif sel.u32 == 12U then\nreturn 8'0x0\nelsif sel.u32 == 11U then\nreturn in[7][7].b8 * 8'0xff\nelsif sel.u32 == 10U then\nreturn in[5][7].b8 * 8'0xff\nelsif sel.u32 == 9U then\nreturn in[3][7].b8 * 8'0xff\nelsif sel.u32 == 8U then\nreturn in[1][7].b8 * 8'0xff\nelse\nreturn in[sel]\nendif);\nD0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])",
VOP3AOp.V_FMA_LEGACY_F16: "tmp = fma(S0.f16, S1.f16, S2.f16);\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
VOP3AOp.V_DIV_FIXUP_LEGACY_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\ntmp = cvtToQuietNAN(64'F(S2.f16))\nelsif isNAN(64'F(S1.f16)) then\ntmp = cvtToQuietNAN(64'F(S1.f16))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\ntmp = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\ntmp = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\ntmp = sign_out ? -INF : +INF\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\ntmp = sign_out ? -0.0 : 0.0\nelse\ntmp = sign_out ? -abs(S0.f16) : abs(S0.f16)\nendif;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
VOP3AOp.V_CVT_PKACCUM_U8_F32: "byte = S1.u32[1 : 0];\nbit = byte.u32 * 8U;\nD0.u32[bit + 7U : bit] = 32'U(f32_to_u8(S0.f32))",
@@ -1024,56 +1143,19 @@ VOP3AOp_PCODE = {
VOP3AOp.V_FMA_F16: 'D0.f16 = fma(S0.f16, S1.f16, S2.f16)',
VOP3AOp.V_DIV_FIXUP_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S2.f16)))\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\nD0.f16 = sign_out ? -INF.f16 : +INF.f16\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\nD0.f16 = sign_out ? -16'0.0 : 16'0.0\nelse\nD0.f16 = sign_out ? -abs(S0.f16) : abs(S0.f16)\nendif",
VOP3AOp.V_LSHL_ADD_U64: 'D0.u64 = (S0.u64 << S1.u32[2 : 0].u32) + S2.u64',
- VOP3AOp.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64',
- VOP3AOp.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64',
- VOP3AOp.V_MIN_F64: 'if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S1.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = S0.f64 < S1.f64 ? S0.f64 : S1.f64\nendif',
- VOP3AOp.V_MAX_F64: 'if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S1.f64\nelsif WAVE_MODE.IEEE then\nD0.f64 = S0.f64 >= S1.f64 ? S0.f64 : S1.f64\nelse\nD0.f64 = S0.f64 > S1.f64 ? S0.f64 : S1.f64\nendif',
- VOP3AOp.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32',
- VOP3AOp.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32',
- VOP3AOp.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)",
- VOP3AOp.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)",
- VOP3AOp.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32',
- VOP3AOp.V_READLANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nD0.b32 = VGPR[lane][SRC0.u32]',
- VOP3AOp.V_WRITELANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nVGPR[lane][VDST.u32] = S0.b32',
- VOP3AOp.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp",
- VOP3AOp.V_MBCNT_LO_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp',
- VOP3AOp.V_MBCNT_HI_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp',
- VOP3AOp.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)',
- VOP3AOp.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)',
- VOP3AOp.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)',
- VOP3AOp.V_TRIG_PREOP_F64: "shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)",
- VOP3AOp.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)',
- VOP3AOp.V_CVT_PKNORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);",
- VOP3AOp.V_CVT_PKNORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);",
- VOP3AOp.V_CVT_PKRTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);',
- VOP3AOp.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);",
- VOP3AOp.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);",
- VOP3AOp.V_CVT_PKNORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);",
- VOP3AOp.V_CVT_PKNORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);",
- VOP3AOp.V_ADD_I32: 'D0.i32 = S0.i32 + S1.i32',
- VOP3AOp.V_SUB_I32: 'D0.i32 = S0.i32 - S1.i32',
- VOP3AOp.V_ADD_I16: 'D0.i16 = S0.i16 + S1.i16',
- VOP3AOp.V_SUB_I16: 'D0.i16 = S0.i16 - S1.i16',
- VOP3AOp.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16',
- VOP3AOp.V_MUL_LEGACY_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif",
- VOP3AOp.V_CVT_PK_FP8_F32: 'prev_mode = ROUND_MODE;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[15:0] are preserved\nendif;',
- VOP3AOp.V_CVT_PK_BF8_F32: 'prev_mode = ROUND_MODE;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 :
0].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[15:0] are preserved\nendif;', - VOP3AOp.V_CVT_SR_FP8_F32: "prev_mode = ROUND_MODE;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 12].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].fp8 = f32_to_fp8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].fp8 = f32_to_fp8(tmp.f32)\nendif;", - VOP3AOp.V_CVT_SR_BF8_F32: "prev_mode = ROUND_MODE;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 11].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].bf8 = f32_to_bf8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].bf8 = f32_to_bf8(tmp.f32)\nendif;", - VOP3AOp.V_DOT2C_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', - VOP3AOp.V_BITOP3_B16: "tmp = 16'0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 16'U(~S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 16'U(~S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 16'U(~S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 16'U(~S0.b16 & S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 16'U(S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 16'U(S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 16'U(S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 16'U(S0.b16 & S1.b16 & S2.b16) : 16'0U));", - VOP3AOp.V_BITOP3_B32: "tmp = 0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 32'U(~S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 32'U(~S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 32'U(~S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 32'U(~S0.b32 & S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 32'U(S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 32'U(S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 32'U(S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 32'U(S0.b32 & S1.b32 & S2.b32) : 0U));", + VOP3AOp.V_BITOP3_B16: "TTBL = { INST.OMOD[1 : 0], INST.ABS[2 : 0], INST.NEG[2 : 0] };\ntmp = 16'0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 16'U(~S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 16'U(~S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 16'U(~S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 16'U(~S0.b16 & S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 
16'U(S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 16'U(S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 16'U(S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 16'U(S0.b16 & S1.b16 & S2.b16) : 16'0U));\nD.b16 = tmp.b16\n{ OMOD[1:0], ABS[2:0], NEG[2:0] }\nD0[i] = TTBL[{S0[i], S1[i], S2[i]}]", + VOP3AOp.V_BITOP3_B32: "TTBL = { INST.OMOD[1 : 0], INST.ABS[2 : 0], INST.NEG[2 : 0] };\ntmp = 0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 32'U(~S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 32'U(~S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 32'U(~S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 32'U(~S0.b32 & S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 32'U(S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 32'U(S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 32'U(S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 32'U(S0.b32 & S1.b32 & S2.b32) : 0U));\nD.b32 = tmp.b32\n{ OMOD[1:0], ABS[2:0], NEG[2:0] }\nD0[i] = TTBL[{S0[i], S1[i], S2[i]}]", VOP3AOp.V_CVT_SCALEF32_PK_FP8_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_fp8_scale(S0.f32, scale.u8);\ntmp1 = f32_to_fp8_scale(S1.f32, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", VOP3AOp.V_CVT_SCALEF32_PK_BF8_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_bf8_scale(S0.f32, scale.u8);\ntmp1 = f32_to_bf8_scale(S1.f32, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", VOP3AOp.V_CVT_SCALEF32_SR_FP8_F32: "scale = 32'U(exponent(S2.f32));\ntmp = f32_to_fp8_sr_scale(S0.f32, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved", VOP3AOp.V_CVT_SCALEF32_SR_BF8_F32: "scale = 32'U(exponent(S2.f32));\ntmp = f32_to_bf8_sr_scale(S0.f32, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].bf8 = tmp;\n// Other destination bits are preserved", - VOP3AOp.V_CVT_SCALEF32_PK_F32_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1", - VOP3AOp.V_CVT_SCALEF32_PK_F32_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1", - VOP3AOp.V_CVT_SCALEF32_F32_FP8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8;\ntmp = fp8_to_f32_scale(src, scale.u8);", - VOP3AOp.V_CVT_SCALEF32_F32_BF8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8;\ntmp = bf8_to_f32_scale(src, scale.u8);", + VOP3AOp.V_CVT_SCALEF32_PK_F32_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = fp8_to_f32_scale(src[7 : 0].fp8, scale.u8);\ntmp1 = fp8_to_f32_scale(src[15 : 8].fp8, scale.u8);\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1", + VOP3AOp.V_CVT_SCALEF32_PK_F32_BF8: "scale = 
32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = bf8_to_f32_scale(src[7 : 0].bf8, scale.u8);\ntmp1 = bf8_to_f32_scale(src[15 : 8].bf8, scale.u8);\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1", + VOP3AOp.V_CVT_SCALEF32_F32_FP8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8;\ntmp = fp8_to_f32_scale(src, scale.u8);\nD0 = tmp.b32", + VOP3AOp.V_CVT_SCALEF32_F32_BF8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8;\ntmp = bf8_to_f32_scale(src, scale.u8);\nD0 = tmp.b32", VOP3AOp.V_CVT_SCALEF32_PK_FP4_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_fp4_scale(S0.f32, scale.u8);\ntmp1 = f32_to_fp4_scale(S1.f32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", - VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = f32_to_fp4_sr_scale(S0[31 : 0].f32, randomVal, scale.u8);\ntmp1 = f32_to_fp4_sr_scale(S0[63 : 32].f32, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", - VOP3AOp.V_CVT_SCALEF32_PK_F32_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1", + VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = f32_to_fp4_sr_scale(S0[31 : 0].f32, randomVal, scale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32));\ntmp1 = f32_to_fp4_sr_scale(S0[63 : 32].f32, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_F32_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\ntmp0 = fp4_to_f32_scale(src[3 : 0].fp4, scale.u8);\ntmp1 = fp4_to_f32_scale(src[7 : 4].fp4, scale.u8);\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1", VOP3AOp.V_CVT_SCALEF32_PK_FP8_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_fp8_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_fp8_scale(S0[31 : 16].f16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", VOP3AOp.V_CVT_SCALEF32_PK_BF8_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_bf8_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_bf8_scale(S0[31 : 16].f16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved", VOP3AOp.V_CVT_SCALEF32_SR_FP8_F16: "scale = 32'U(exponent(S2.f32));\ntmp = f16_to_fp8_sr_scale(S0.f16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved", @@ -1082,41 +1164,78 @@ VOP3AOp_PCODE = { VOP3AOp.V_CVT_SCALEF32_PK_BF8_BF16: "scale = 32'U(exponent(S1.f32));\ntmp0 = bf16_to_bf8_scale(S0[15 : 0].bf16, scale.u8);\ntmp1 = bf16_to_bf8_scale(S0[31 : 16].bf16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are 
preserved", VOP3AOp.V_CVT_SCALEF32_SR_FP8_BF16: "scale = 32'U(exponent(S2.f32));\ntmp = bf16_to_fp8_sr_scale(S0.bf16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved", VOP3AOp.V_CVT_SCALEF32_SR_BF8_BF16: "scale = 32'U(exponent(S2.f32));\ntmp = bf16_to_bf8_sr_scale(S0.bf16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].bf8 = tmp;\n// Other destination bits are preserved", - VOP3AOp.V_CVT_SCALEF32_PK_F16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1", - VOP3AOp.V_CVT_SCALEF32_PK_F16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1", - VOP3AOp.V_CVT_SCALEF32_F16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8;\ntmp = fp8_to_f16_scale(src, scale.u8);\n// OPSEL[3] controls destination hi/lo", - VOP3AOp.V_CVT_SCALEF32_F16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8;\ntmp = bf8_to_f16_scale(src, scale.u8);\n// OPSEL[3] controls destination hi/lo", + VOP3AOp.V_CVT_SCALEF32_PK_F16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = fp8_to_f16_scale(src[7 : 0].fp8, scale.u8);\ntmp1 = fp8_to_f16_scale(src[15 : 8].fp8, scale.u8);\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1", + VOP3AOp.V_CVT_SCALEF32_PK_F16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = bf8_to_f16_scale(src[7 : 0].bf8, scale.u8);\ntmp1 = bf8_to_f16_scale(src[15 : 8].bf8, scale.u8);\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1", + VOP3AOp.V_CVT_SCALEF32_F16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8;\ntmp = fp8_to_f16_scale(src, scale.u8);\n// OPSEL[3] controls destination hi/lo\nD0 = tmp.b32", + VOP3AOp.V_CVT_SCALEF32_F16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8;\ntmp = bf8_to_f16_scale(src, scale.u8);\n// OPSEL[3] controls destination hi/lo\nD0 = tmp.b32", VOP3AOp.V_CVT_SCALEF32_PK_FP4_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_fp4_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_fp4_scale(S0[31 : 16].f16, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", VOP3AOp.V_CVT_SCALEF32_PK_FP4_BF16: "scale = 32'U(exponent(S1.f32));\ntmp0 = bf16_to_fp4_scale(S0[15 : 0].bf16, scale.u8);\ntmp1 = bf16_to_fp4_scale(S0[31 : 16].bf16, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", - VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = f16_to_fp4_sr_scale(S0[15 : 0].f16, randomVal, scale.u8);\ntmp1 = f16_to_fp4_sr_scale(S0[31 : 16].f16, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination 
bits are preserved", - VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = bf16_to_fp4_sr_scale(S0[15 : 0].bf16, randomVal, scale.u8);\ntmp1 = bf16_to_fp4_sr_scale(S0[31 : 16].bf16, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", - VOP3AOp.V_CVT_SCALEF32_PK_F16_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1", - VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\nD0[15 : 0].bf16 = tmp0;\nD0[31 : 16].bf16 = tmp1", - VOP3AOp.V_CVT_SCALEF32_2XPK16_FP6_F32: "scale = 32'U(exponent(S2.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 15 do\n// Note that S0 and S1 inputs are interleaved in the packed result.\ntmp[dOffset + 5 : dOffset].fp6 = f32_to_fp6_scale(S0[sOffset + 31 : sOffset].f32, scale.u8);\ntmp[dOffset + 11 : dOffset + 6].fp6 = f32_to_fp6_scale(S1[sOffset + 31 : sOffset].f32, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_2XPK16_BF6_F32: "scale = 32'U(exponent(S2.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 15 do\n// Note that S0 and S1 inputs are interleaved in the packed result.\ntmp[dOffset + 5 : dOffset].bf6 = f32_to_bf6_scale(S0[sOffset + 31 : sOffset].f32, scale.u8);\ntmp[dOffset + 11 : dOffset + 6].bf6 = f32_to_bf6_scale(S1[sOffset + 31 : sOffset].f32, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].fp6 = f32_to_fp6_sr_scale(S0[sOffset + 31 : sOffset].f32, randomVal,\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = f32_to_bf6_sr_scale(S0[sOffset + 31 : sOffset].f32, randomVal,\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_PK32_F32_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 1024'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 31 : dOffset].f32 = fp6_to_f32_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[1023 : 0] = tmp.b1024", - VOP3AOp.V_CVT_SCALEF32_PK32_F32_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 1024'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 31 : dOffset].f32 = bf6_to_f32_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[1023 : 0] = tmp.b1024", - VOP3AOp.V_CVT_SCALEF32_PK32_FP6_BF16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].fp6 = bf16_to_fp6_scale(S0[sOffset + 15 : sOffset].bf16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_PK32_BF6_F16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = f16_to_bf6_scale(S0[sOffset + 15 : sOffset].f16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_PK32_BF6_BF16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = bf16_to_bf6_scale(S0[sOffset + 15 : sOffset].bf16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 
192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].fp6 = f16_to_fp6_sr_scale(S0[sOffset + 15 : sOffset].f16, randomVal,\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].fp6 = bf16_to_fp6_sr_scale(S0[sOffset + 15 : sOffset].bf16, randomVal,\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = f16_to_bf6_sr_scale(S0[sOffset + 15 : sOffset].f16, randomVal,\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 5 : dOffset].bf6 = bf16_to_bf6_sr_scale(S0[sOffset + 15 : sOffset].bf16, randomVal,\nendfor;\nD0[191 : 0] = tmp.b192", - VOP3AOp.V_CVT_SCALEF32_PK32_F16_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 15 : dOffset].f16 = fp6_to_f16_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512", - VOP3AOp.V_CVT_SCALEF32_PK32_BF16_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 15 : dOffset].bf16 = fp6_to_bf16_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512", - VOP3AOp.V_CVT_SCALEF32_PK32_F16_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 15 : dOffset].f16 = bf6_to_f16_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512", - VOP3AOp.V_CVT_SCALEF32_PK32_BF16_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ntmp[dOffset + 15 : dOffset].bf16 = bf6_to_bf16_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512", - VOP3AOp.V_ASHR_PK_I8_I32: "declare tmp : 16'B;\ntmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32);\ntmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32);\nD0[15 : 0] = tmp", - VOP3AOp.V_ASHR_PK_U8_I32: "declare tmp : 16'B;\ntmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32);\ntmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32);\nD0[15 : 0] = tmp", - VOP3AOp.V_CVT_PK_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', - VOP3AOp.V_CVT_PK_BF16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].bf16 = f32_to_bf16(S0.f32);\ntmp[31 : 16].bf16 = f32_to_bf16(S1.f32);', - VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[15 : 0].bf16 = tmp0.bf16;\nD0[31 : 16].bf16 = tmp1.bf16", - VOP3AOp.V_CVT_SCALEF32_PK_BF16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\nD0[15 : 0].bf16 = tmp0.bf16;\nD0[31 : 16].bf16 = tmp1.bf16", - VOP3AOp.V_CVT_SR_F16_F32: "prev_mode = ROUND_MODE;\nif OPSEL[3].u2 == 2'0U then\nVGPR[laneId][VDST.u32][15 : 0].f16 = 16'F(f32_to_f16_SR(S0.f32, S1.u32))\nelse\nVGPR[laneId][VDST.u32][31 : 16].f16 = 16'F(f32_to_f16_sr(S0.f32, S1.u32))\nendif;", - VOP3AOp.V_CVT_SR_BF16_F32: "prev_mode = ROUND_MODE;\nif OPSEL[3].u2 == 2'0U then\nVGPR[laneId][VDST.u32][15 : 0].bf16 = 16'BF(f32_to_bf16_SR(S0.f32, S1.u32))\nelse\nVGPR[laneId][VDST.u32][31 : 16].bf16 = 16'BF(f32_to_bf16_sr(S0.f32, S1.u32))\nendif;", + VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_F16: 
"scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = f16_to_fp4_sr_scale(S0[15 : 0].f16, randomVal, scale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32));\ntmp1 = f16_to_fp4_sr_scale(S0[31 : 16].f16, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = bf16_to_fp4_sr_scale(S0[15 : 0].bf16, randomVal, scale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32));\ntmp1 = bf16_to_fp4_sr_scale(S0[31 : 16].bf16, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved", + VOP3AOp.V_CVT_SCALEF32_PK_F16_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\ntmp0 = fp4_to_f16_scale(src[3 : 0].fp4, scale.u8);\ntmp1 = fp4_to_f16_scale(src[7 : 4].fp4, scale.u8);\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1", + VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\ntmp0 = fp4_to_bf16_scale(src[3 : 0].fp4, scale.u8);\ntmp1 = fp4_to_bf16_scale(src[7 : 4].fp4, scale.u8);\nD0[15 : 0].bf16 = tmp0;\nD0[31 : 16].bf16 = tmp1", + VOP3AOp.V_CVT_SCALEF32_2XPK16_FP6_F32: "scale = 32'U(exponent(S2.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 15 do\ndOffset = pass * 12;\nsOffset = pass * 32;\n// Note that S0 and S1 inputs are interleaved in the packed result.\ntmp[dOffset + 5 : dOffset].fp6 = f32_to_fp6_scale(S0[sOffset + 31 : sOffset].f32, scale.u8);\ntmp[dOffset + 11 : dOffset + 6].fp6 = f32_to_fp6_scale(S1[sOffset + 31 : sOffset].f32, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_2XPK16_BF6_F32: "scale = 32'U(exponent(S2.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 15 do\ndOffset = pass * 12;\nsOffset = pass * 32;\n// Note that S0 and S1 inputs are interleaved in the packed result.\ntmp[dOffset + 5 : dOffset].bf6 = f32_to_bf6_scale(S0[sOffset + 31 : sOffset].f32, scale.u8);\ntmp[dOffset + 11 : dOffset + 6].bf6 = f32_to_bf6_scale(S1[sOffset + 31 : sOffset].f32, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 32;\ntmp[dOffset + 5 : dOffset].fp6 = f32_to_fp6_sr_scale(S0[sOffset + 31 : sOffset].f32, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 32;\ntmp[dOffset + 5 : dOffset].bf6 = f32_to_bf6_sr_scale(S0[sOffset + 31 : sOffset].f32, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_PK32_F32_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 1024'B;\nfor pass in 0 : 31 do\ndOffset = pass * 32;\nsOffset = pass * 6;\ntmp[dOffset + 31 : dOffset].f32 = fp6_to_f32_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[1023 : 0] = tmp.b1024", + VOP3AOp.V_CVT_SCALEF32_PK32_F32_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 1024'B;\nfor pass in 0 : 31 do\ndOffset = pass * 32;\nsOffset = pass * 
6;\ntmp[dOffset + 31 : dOffset].f32 = bf6_to_f32_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[1023 : 0] = tmp.b1024", + VOP3AOp.V_CVT_SCALEF32_PK32_FP6_F16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].fp6 = f16_to_fp6_scale(S0[sOffset + 15 : sOffset].f16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_PK32_FP6_BF16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].fp6 = bf16_to_fp6_scale(S0[sOffset + 15 : sOffset].bf16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_PK32_BF6_F16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].bf6 = f16_to_bf6_scale(S0[sOffset + 15 : sOffset].f16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_PK32_BF6_BF16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].bf6 = bf16_to_bf6_scale(S0[sOffset + 15 : sOffset].bf16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].fp6 = f16_to_fp6_sr_scale(S0[sOffset + 15 : sOffset].f16, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].fp6 = bf16_to_fp6_sr_scale(S0[sOffset + 15 : sOffset].bf16, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].bf6 = f16_to_bf6_sr_scale(S0[sOffset + 15 : sOffset].f16, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].bf6 = bf16_to_bf6_sr_scale(S0[sOffset + 15 : sOffset].bf16, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192", + VOP3AOp.V_CVT_SCALEF32_PK32_F16_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ndOffset = pass * 16;\nsOffset = pass * 6;\ntmp[dOffset + 15 : dOffset].f16 = fp6_to_f16_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512", + VOP3AOp.V_CVT_SCALEF32_PK32_BF16_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ndOffset = pass * 16;\nsOffset = pass * 6;\ntmp[dOffset + 15 : dOffset].bf16 = fp6_to_bf16_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512", + VOP3AOp.V_CVT_SCALEF32_PK32_F16_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ndOffset = pass * 16;\nsOffset = pass * 6;\ntmp[dOffset + 15 : 
dOffset].f16 = bf6_to_f16_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512", + VOP3AOp.V_CVT_SCALEF32_PK32_BF16_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ndOffset = pass * 16;\nsOffset = pass * 6;\ntmp[dOffset + 15 : dOffset].bf16 = bf6_to_bf16_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512", + VOP3AOp.V_ASHR_PK_I8_I32: "SAT8 = lambda(n) (\nif n <= -128 then\nreturn 8'0x80\nelsif n >= 127 then\nreturn 8'0x7f\nelse\nreturn n[7 : 0].b8\nendif);\ndeclare tmp : 16'B;\ntmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32);\ntmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32);\nD0[15 : 0] = tmp", + VOP3AOp.V_ASHR_PK_U8_I32: "SAT8 = lambda(n) (\nif n <= 0 then\nreturn 8'0x0\nelsif n >= 255 then\nreturn 8'0xff\nelse\nreturn n[7 : 0].b8\nendif);\ndeclare tmp : 16'B;\ntmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32);\ntmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32);\nD0[15 : 0] = tmp", + VOP3AOp.V_CVT_PK_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode', + VOP3AOp.V_CVT_PK_BF16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ntmp[15 : 0].bf16 = f32_to_bf16(S0.f32);\ntmp[31 : 16].bf16 = f32_to_bf16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode', + VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = fp8_to_bf16_scale(src[7 : 0].fp8, scale);\ntmp1 = fp8_to_bf16_scale(src[15 : 8].fp8, scale);\nD0[15 : 0].bf16 = tmp0.bf16;\nD0[31 : 16].bf16 = tmp1.bf16", + VOP3AOp.V_CVT_SCALEF32_PK_BF16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = bf8_to_bf16_scale(src[7 : 0].bf8, scale);\ntmp1 = bf8_to_bf16_scale(src[15 : 8].bf8, scale);\nD0[15 : 0].bf16 = tmp0.bf16;\nD0[31 : 16].bf16 = tmp1.bf16", + VOP3AOp.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64', + VOP3AOp.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64', + VOP3AOp.V_MIN_F64: "if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S1.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S0.f64\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F64.\nD0.f64 = S0.f64 < S1.f64 ? S0.f64 : S1.f64\nendif", + VOP3AOp.V_MAX_F64: 'if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S1.f64\nelsif WAVE_MODE.IEEE then\nD0.f64 = S0.f64 >= S1.f64 ? S0.f64 : S1.f64\nelse\nD0.f64 = S0.f64 > S1.f64 ? 
S0.f64 : S1.f64\nendif', + VOP3AOp.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32\nldexp()', + VOP3AOp.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32', + VOP3AOp.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + VOP3AOp.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + VOP3AOp.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32\nldexp()', + VOP3AOp.V_READLANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nD0.b32 = VGPR[lane][SRC0.u32]', + VOP3AOp.V_WRITELANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nVGPR[lane][VDST.u32] = S0.b32', + VOP3AOp.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp", + VOP3AOp.V_MBCNT_LO_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp", + VOP3AOp.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp\nv_mbcnt_lo_u32_b32 v0, -1, 0\nv_mbcnt_hi_u32_b32 v0, -1, v0\n// v0 now contains laneId\nv_mbcnt_lo_u32_b32 v0, vcc_lo, 0\nv_mbcnt_hi_u32_b32 v0, vcc_hi, v0 // Note vcc_hi is passed in for second instruction\n// v0 now contains position among lanes with VCC=1", + VOP3AOp.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)', + VOP3AOp.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)', + VOP3AOp.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)', + VOP3AOp.V_TRIG_PREOP_F64: "shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\n// into the whole part of the number.\n// Only whole part of result is kept.\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)", + VOP3AOp.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + VOP3AOp.V_CVT_PKNORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);\nD0 = tmp.b32", + VOP3AOp.V_CVT_PKNORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);\nD0 = tmp.b32", + VOP3AOp.V_CVT_PKRTZ_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_TOWARD_ZERO;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode;\n// Round-toward-zero regardless of current round mode setting in hardware.', + VOP3AOp.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);\nD0 = tmp.b32", + VOP3AOp.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);\nD0 = tmp.b32", + VOP3AOp.V_CVT_PKNORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);\nD0 = tmp.b32", + VOP3AOp.V_CVT_PKNORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);\nD0 = tmp.b32", + VOP3AOp.V_ADD_I32: 'D0.i32 = S0.i32 + S1.i32', + VOP3AOp.V_SUB_I32: 'D0.i32 = S0.i32 - S1.i32', + VOP3AOp.V_ADD_I16: 
'D0.i16 = S0.i16 + S1.i16', + VOP3AOp.V_SUB_I16: 'D0.i16 = S0.i16 - S1.i16', + VOP3AOp.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16', + VOP3AOp.V_MUL_LEGACY_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP3AOp.V_CVT_PK_FP8_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[15:0] are preserved\nendif;\nROUND_MODE = prev_mode', + VOP3AOp.V_CVT_PK_BF8_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[15:0] are preserved\nendif;\nROUND_MODE = prev_mode', + VOP3AOp.V_CVT_SR_FP8_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 12].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].fp8 = f32_to_fp8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].fp8 = f32_to_fp8(tmp.f32)\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode", + VOP3AOp.V_CVT_SR_BF8_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 11].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].bf8 = f32_to_bf8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].bf8 = f32_to_bf8(tmp.f32)\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode", + VOP3AOp.V_CVT_SR_F16_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u2 == 2'0U then\nVGPR[laneId][VDST.u32][15 : 0].f16 = 16'F(f32_to_f16_SR(S0.f32, S1.u32))\nelse\nVGPR[laneId][VDST.u32][31 : 16].f16 = 16'F(f32_to_f16_sr(S0.f32, S1.u32))\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode", + VOP3AOp.V_CVT_SR_BF16_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u2 == 2'0U then\nVGPR[laneId][VDST.u32][15 : 0].bf16 = 16'BF(f32_to_bf16_SR(S0.f32, S1.u32))\nelse\nVGPR[laneId][VDST.u32][31 : 16].bf16 = 16'BF(f32_to_bf16_sr(S0.f32, S1.u32))\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode", VOP3AOp.V_MINIMUM3_F32: "D0.f32 = 32'F(v_minimum_f32(v_minimum_f32(S0.f32, S1.f32), S2.f32))", VOP3AOp.V_MAXIMUM3_F32: "D0.f32 = 32'F(v_maximum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32))", } @@ -1134,288 +1253,310 @@ VOP3BOp_PCODE = { VOP3BOp.V_MAD_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))", } -DSOp_PCODE = { - DSOp.DS_ADD_U32: 'addr = 
CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_SUB_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_RSUB_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_INC_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', - DSOp.DS_DEC_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', - DSOp.DS_MIN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - DSOp.DS_MAX_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - DSOp.DS_MIN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - DSOp.DS_MAX_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - DSOp.DS_AND_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_OR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_XOR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_MSKOR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_WRITE_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]', - DSOp.DS_WRITE2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]', - DSOp.DS_WRITE2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]', - DSOp.DS_CMPST_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', - DSOp.DS_CMPST_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', - DSOp.DS_MIN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', - DSOp.DS_MAX_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? 
src : tmp;\nRETURN_DATA.f32 = tmp', - DSOp.DS_ADD_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', - DSOp.DS_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', - DSOp.DS_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', - DSOp.DS_WRITE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32", - DSOp.DS_WRITE_B8: 'MEM[ADDR].b8 = DATA[7 : 0]', - DSOp.DS_WRITE_B16: 'MEM[ADDR].b16 = DATA[15 : 0]', - DSOp.DS_ADD_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_SUB_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_RSUB_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_INC_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', - DSOp.DS_DEC_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', - DSOp.DS_MIN_RTN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - DSOp.DS_MAX_RTN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - DSOp.DS_MIN_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - DSOp.DS_MAX_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', - DSOp.DS_AND_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_OR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_XOR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_MSKOR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_WRXCHG_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', - DSOp.DS_WRXCHG2_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', - DSOp.DS_WRXCHG2ST64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', - DSOp.DS_CMPST_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', - DSOp.DS_CMPST_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', - DSOp.DS_MIN_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', - DSOp.DS_MAX_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', - DSOp.DS_WRAP_RTN_B32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = tmp >= DATA.u32 ? 
tmp - DATA.u32 : tmp + DATA2.u32;\nRETURN_DATA = tmp', - DSOp.DS_ADD_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', - DSOp.DS_READ_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32', - DSOp.DS_READ2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4U].b32', - DSOp.DS_READ2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256U].b32', - DSOp.DS_READ_I8: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i8))", - DSOp.DS_READ_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", - DSOp.DS_READ_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))", - DSOp.DS_READ_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", - DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;', - DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\nif EXEC[i].u1 then\ndst_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", - DSOp.DS_BPERMUTE_B32: "Note that EXEC mask is applied to both VGPR read and write. If src_lane selects a disabled thread then zero is\n// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\nsrc_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. 
Some source\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", - DSOp.DS_ADD_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_SUB_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_RSUB_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_INC_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', - DSOp.DS_DEC_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', - DSOp.DS_MIN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - DSOp.DS_MAX_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - DSOp.DS_MIN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - DSOp.DS_MAX_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - DSOp.DS_AND_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_OR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_XOR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_MSKOR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_WRITE_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]', - DSOp.DS_WRITE2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]', - DSOp.DS_WRITE2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]', - DSOp.DS_CMPST_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', - DSOp.DS_CMPST_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', - DSOp.DS_MIN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', - DSOp.DS_MAX_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', - DSOp.DS_WRITE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]', - DSOp.DS_WRITE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]', - DSOp.DS_READ_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - DSOp.DS_READ_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - DSOp.DS_READ_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", - DSOp.DS_READ_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", - DSOp.DS_READ_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;', - DSOp.DS_READ_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;', - DSOp.DS_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', - DSOp.DS_ADD_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_SUB_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_RSUB_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_INC_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', - DSOp.DS_DEC_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', - DSOp.DS_MIN_RTN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - DSOp.DS_MAX_RTN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - DSOp.DS_MIN_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - DSOp.DS_MAX_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', - DSOp.DS_AND_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_OR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_XOR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_MSKOR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_WRXCHG_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', - DSOp.DS_WRXCHG2_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', - DSOp.DS_WRXCHG2ST64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', - DSOp.DS_CMPST_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', - DSOp.DS_CMPST_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp', - DSOp.DS_MIN_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', - DSOp.DS_MAX_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', - DSOp.DS_READ_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32', - DSOp.DS_READ2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8U + 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8U + 4U].b32', - DSOp.DS_READ2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512U + 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512U + 4U].b32', - DSOp.DS_ADD_RTN_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', - DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", - DSOp.DS_GWS_SEMA_RELEASE_ALL: '// Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\n// Incr the state counter of the resource', - DSOp.DS_GWS_INIT: '// Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\n// Get the value to use in init\nindex = find_first_valid(vector mask)\nvalue = DATA[thread: index]\n// Set the state of the resource', - DSOp.DS_GWS_SEMA_V: '//Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\n//Incr the state counter of the resource', - DSOp.DS_GWS_SEMA_BR: '//Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\nindex = find first valid (vector mask)\ncount = DATA[thread: index];\n//Add count to the resource state counter', - DSOp.DS_GWS_SEMA_P: '//Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + offset0[5:0];\nstate[rid].counter -= 1;', - DSOp.DS_GWS_BARRIER: '//Determine the GWS resource to work on\nrid[5:0] = gds_base[5:0] + OFFSET0[5:0];\nindex = find first valid (vector mask);\nvalue = DATA[thread: index];\n// Input Decision Machine\nthread[rid].flag = state[rid].flag;\nstate[rid].flag = !state.flag;\nstate[rid].counter = value;\nelse\nstate[rid].counter -= 1;\n// Release Machine\nendif;', - DSOp.DS_READ_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32", - DSOp.DS_PK_ADD_RTN_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', - DSOp.DS_PK_ADD_RTN_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', - DSOp.DS_WRITE_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]', - DSOp.DS_WRITE_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + 
OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]', - DSOp.DS_READ_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32', - DSOp.DS_READ_B128: "addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32\nOFFSET = Unsigned immediate byte offset.\nOFFEN = Send offset either as VADDR or as zero..\nIDXEN = Send index either as VADDR or as zero.\nVADDR = VGPR address source.\nVDATA = Destination vector GPR.\nSOFFSET = Byte offset added to the memory address of an SGPR.\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on 
format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]\naddr = CalcBufferAddr(VADDR.b32, 
SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\n// Mem access size depends on format\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp\ntmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp", +VOP3POp_PCODE = { + VOP3POp.V_PK_MAD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MUL_LO_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ADD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_SUB_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_LSHLREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_LSHRREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ASHRREV_I16: 'tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 >= S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 >= S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MIN_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 < S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 < S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp", + VOP3POp.V_PK_MAD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_ADD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_SUB_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_MAX_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 >= S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 >= S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_MIN_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 < S1[15 : 0].u16 ? 
S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 < S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp", + VOP3POp.V_PK_FMA_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16);\ntmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_ADD_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\nD0.b32 = tmp", + VOP3POp.V_PK_MUL_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.b32 = tmp", + VOP3POp.V_PK_MIN_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_MAX_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp", + VOP3POp.V_DOT2_F32_BF16: "tmp = 32'F(S0[15 : 0].bf16) * 32'F(S1[15 : 0].bf16);\ntmp += 32'F(S0[31 : 16].bf16) * 32'F(S1[31 : 16].bf16);\ntmp += S2.f32;\nD0.f32 = tmp", + VOP3POp.V_PK_MINIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_minimum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_minimum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32", + VOP3POp.V_PK_MAXIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_maximum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_maximum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32", + VOP3POp.V_MAD_MIX_F32: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = in[0] * in[1] + in[2]", + VOP3POp.V_MAD_MIXLO_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(in[0] * in[1] + in[2])", + VOP3POp.V_MAD_MIXHI_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(in[0] * in[1] + in[2])", + VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOP3POp.V_DOT2_I32_I16: 'tmp = S2.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp', + VOP3POp.V_DOT2_U32_U16: 'tmp = S2.u32;\ntmp += u16_to_u32(S0[15 : 0].u16) * u16_to_u32(S1[15 : 0].u16);\ntmp += u16_to_u32(S0[31 : 16].u16) * u16_to_u32(S1[31 : 16].u16);\nD0.u32 = tmp', + VOP3POp.V_DOT4_I32_I8: 'tmp = S2.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * 
i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp', + VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp', + VOP3POp.V_DOT8_I32_I4: 'tmp = S2.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp', + VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp', + VOP3POp.V_MFMA_F32_16X16X128_F8F6F4: 'D = A (16x128) * B (128x16) + C (16x16)\nA, B: (16*128) elements * 8 bits/element = 16384 bits, divide by 64 lanes * 32 bits/register-lane = 8\nregisters C, D: (16*16) elements * 32 bits/element = 8192 bits, divide by 64 lanes * 32 bits/register-\nlane = 4 registers', + VOP3POp.V_MFMA_F32_32X32X64_F8F6F4: 'D = A (32x64) * B (64x32) + C (32x32)\nA, B: (32*64) elements * 8 bits/element = 16384 bits, divide by 64 lanes * 32 bits/register-lane = 8\nregisters C, D: (32*32) elements * 32 bits/element = 32768 bits, divide by 64 lanes * 32 bits/register-\nlane = 16 registers', + VOP3POp.V_PK_FMA_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = fma(S0[31 : 0].f32, S1[31 : 0].f32, S2[31 : 0].f32);\ntmp[63 : 32].f32 = fma(S0[63 : 32].f32, S1[63 : 32].f32, S2[63 : 32].f32);\nD0.b64 = tmp", + VOP3POp.V_PK_MUL_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 * S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 * S1[63 : 32].f32;\nD0.b64 = tmp", + VOP3POp.V_PK_ADD_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 + S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 + S1[63 : 32].f32;\nD0.b64 = tmp", + VOP3POp.V_PK_MOV_B32: 'tmp0.u32 = S0.u32[OPSEL[0].i32 * 32 + 31 : OPSEL[0].i32 * 32];\ntmp1.u32 = S1.u32[OPSEL[1].i32 * 32 + 31 : OPSEL[1].i32 * 32];\nD0.u32[31 : 0] = tmp0.u32;\nD0.u32[63 : 32] = tmp1.u32\nv_pk_mov_b32 v0, v2, v4 op_sel:[0,1] // evaluates v0 <- v2 and v1 <- v5.\nv_pk_mov_b32 v0, s6, s6 op_sel:[0,1] // 64-bit move from scalar s[6:7].', + VOP3POp.V_MFMA_F32_16X16X32_BF16: 'D = A (16x32) * B (32x16) + C (16x16)', + VOP3POp.V_MFMA_I32_16X16X64_I8: 'D = A (16x64) * B (64x16) + C (16x16)', + VOP3POp.V_MFMA_F32_32X32X16_BF16: 'D = A (32x16) * B (16x32) + C (32x32)', + VOP3POp.V_MFMA_I32_32X32X32_I8: 'D = A (32x32) * B (32x32) + C (32x32)', + VOP3POp.V_SMFMAC_F32_16X16X64_BF16: 'D = A (sparse 16x64) * B (64x16) + D (16x16)', + VOP3POp.V_SMFMAC_I32_16X16X128_I8: 'D = A (sparse 16x128) * B (128x16) + D 
(16x16)', + VOP3POp.V_SMFMAC_F32_16X16X128_BF8_BF8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_16X16X128_BF8_FP8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_16X16X128_FP8_BF8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)', + VOP3POp.V_MFMA_F32_32X32X1_2B_F32: 'D = A (32x1) * B (1x32) + C (32x32)', + VOP3POp.V_MFMA_F32_16X16X1_4B_F32: 'D = A (16x1) * B (1x16) + C (16x16)', + VOP3POp.V_MFMA_F32_4X4X1_16B_F32: 'D = A (4x1) * B (1x4) + C (4x4)', + VOP3POp.V_SMFMAC_F32_16X16X128_FP8_FP8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)', + VOP3POp.V_MFMA_F32_32X32X2_F32: 'D = A (32x2) * B (2x32) + C (32x32)', + VOP3POp.V_MFMA_F32_16X16X4_F32: 'D = A (16x4) * B (4x16) + C (16x16)', + VOP3POp.V_SMFMAC_F32_32X32X32_BF16: 'D = A (sparse 32x32) * B (32x32) + D (32x32)', + VOP3POp.V_SMFMAC_I32_32X32X64_I8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)', + VOP3POp.V_MFMA_F32_32X32X4_2B_F16: 'D = A (32x4) * B (4x32) + C (32x32)', + VOP3POp.V_MFMA_F32_16X16X4_4B_F16: 'D = A (16x4) * B (4x16) + C (16x16)', + VOP3POp.V_MFMA_F32_4X4X4_16B_F16: 'D = A (4x4) * B (4x4) + C (4x4)', + VOP3POp.V_SMFMAC_F32_32X32X64_BF8_BF8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)', + VOP3POp.V_MFMA_F32_32X32X8_F16: 'D = A (32x8) * B (8x32) + C (32x32)', + VOP3POp.V_MFMA_F32_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)', + VOP3POp.V_SMFMAC_F32_32X32X64_BF8_FP8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)', + VOP3POp.V_SMFMAC_F32_32X32X64_FP8_BF8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)', + VOP3POp.V_MFMA_I32_32X32X4_2B_I8: 'D = A (32x4) * B (4x32) + C (32x32)', + VOP3POp.V_MFMA_I32_16X16X4_4B_I8: 'D = A (16x4) * B (4x16) + C (16x16)', + VOP3POp.V_MFMA_I32_4X4X4_16B_I8: 'D = A (4x4) * B (4x4) + C (4x4)', + VOP3POp.V_SMFMAC_F32_32X32X64_FP8_FP8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)', + VOP3POp.V_MFMA_F32_16X16X32_F16: 'D = A (16x32) * B (32x16) + C (16x16)', + VOP3POp.V_MFMA_F32_32X32X16_F16: 'D = A (32x16) * B (16x32) + C (32x32)', + VOP3POp.V_MFMA_I32_32X32X16_I8: 'D = A (32x16) * B (16x32) + C (32x32)', + VOP3POp.V_MFMA_I32_16X16X32_I8: 'D = A (16x32) * B (32x16) + C (16x16)', + VOP3POp.V_SMFMAC_F32_16X16X64_F16: 'D = A (sparse 16x64) * B (64x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_32X32X32_F16: 'D = A (sparse 32x32) * B (32x32) + D (32x32)', + VOP3POp.V_MFMA_F32_32X32X4_2B_BF16: 'D = A (32x4) * B (4x32) + C (32x32)', + VOP3POp.V_MFMA_F32_16X16X4_4B_BF16: 'D = A (16x4) * B (4x16) + C (16x16)', + VOP3POp.V_MFMA_F32_4X4X4_16B_BF16: 'D = A (4x4) * B (4x4) + C (4x4)', + VOP3POp.V_MFMA_F32_32X32X8_BF16: 'D = A (32x8) * B (8x32) + C (32x32)', + VOP3POp.V_MFMA_F32_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)', + VOP3POp.V_SMFMAC_F32_16X16X32_F16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_32X32X16_F16: 'D = A (sparse 32x16) * B (16x32) + D (32x32)', + VOP3POp.V_SMFMAC_F32_16X16X32_BF16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_32X32X16_BF16: 'D = A (sparse 32x16) * B (16x32) + D (32x32)', + VOP3POp.V_SMFMAC_I32_16X16X64_I8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)', + VOP3POp.V_SMFMAC_I32_32X32X32_I8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)', + VOP3POp.V_MFMA_F64_16X16X4_F64: 'D = A (16x4) * B (4x16) + C (16x16)', + VOP3POp.V_MFMA_F64_4X4X4_4B_F64: 'D = A (4x4) * B (4x4) + C (4x4)', + VOP3POp.V_MFMA_F32_16X16X32_BF8_BF8: 'D = A (16x32) * B (32x16) + C (16x16)', + VOP3POp.V_MFMA_F32_16X16X32_BF8_FP8: 'D = A (16x32) * B (32x16) + C (16x16)', + 
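The register counts quoted in the V_MFMA_F32_16X16X128_F8F6F4 and V_MFMA_F32_32X32X64_F8F6F4 entries above all come from a single formula: tile elements times bits per element, divided across the wavefront's 64 lanes of 32-bit register-lanes. A minimal sketch of that arithmetic (the helper name is invented for illustration, not part of the autogen):

def mfma_vgpr_count(rows:int, cols:int, bits_per_element:int) -> int:
  # operand bits for the tile, spread over 64 lanes * 32 bits per register-lane
  return (rows * cols * bits_per_element) // (64 * 32)

assert mfma_vgpr_count(16, 128, 8) == 8   # A/B of V_MFMA_F32_16X16X128_F8F6F4
assert mfma_vgpr_count(16, 16, 32) == 4   # C/D of V_MFMA_F32_16X16X128_F8F6F4
assert mfma_vgpr_count(32, 32, 32) == 16  # C/D of V_MFMA_F32_32X32X64_F8F6F4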
VOP3POp.V_MFMA_F32_16X16X32_FP8_BF8: 'D = A (16x32) * B (32x16) + C (16x16)', + VOP3POp.V_MFMA_F32_16X16X32_FP8_FP8: 'D = A (16x32) * B (32x16) + C (16x16)', + VOP3POp.V_MFMA_F32_32X32X16_BF8_BF8: 'D = A (32x16) * B (16x32) + C (32x32)', + VOP3POp.V_MFMA_F32_32X32X16_BF8_FP8: 'D = A (32x16) * B (16x32) + C (32x32)', + VOP3POp.V_MFMA_F32_32X32X16_FP8_BF8: 'D = A (32x16) * B (16x32) + C (32x32)', + VOP3POp.V_MFMA_F32_32X32X16_FP8_FP8: 'D = A (32x16) * B (16x32) + C (32x32)', + VOP3POp.V_SMFMAC_F32_16X16X64_BF8_BF8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_16X16X64_BF8_FP8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_16X16X64_FP8_BF8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_16X16X64_FP8_FP8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)', + VOP3POp.V_SMFMAC_F32_32X32X32_BF8_BF8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)', + VOP3POp.V_SMFMAC_F32_32X32X32_BF8_FP8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)', + VOP3POp.V_SMFMAC_F32_32X32X32_FP8_BF8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)', + VOP3POp.V_SMFMAC_F32_32X32X32_FP8_FP8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)', } -FLATOp_PCODE = { - FLATOp.FLAT_LOAD_UBYTE: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", - FLATOp.FLAT_LOAD_SBYTE: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", - FLATOp.FLAT_LOAD_USHORT: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", - FLATOp.FLAT_LOAD_SSHORT: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", - FLATOp.FLAT_LOAD_DWORD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', - FLATOp.FLAT_LOAD_DWORDX2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', - FLATOp.FLAT_LOAD_DWORDX3: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', - FLATOp.FLAT_LOAD_DWORDX4: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', - FLATOp.FLAT_STORE_BYTE: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', - FLATOp.FLAT_STORE_BYTE_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', - FLATOp.FLAT_STORE_SHORT: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', - FLATOp.FLAT_STORE_SHORT_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', - FLATOp.FLAT_STORE_DWORD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', - FLATOp.FLAT_STORE_DWORDX2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', - FLATOp.FLAT_STORE_DWORDX3: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', - FLATOp.FLAT_STORE_DWORDX4: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', - FLATOp.FLAT_LOAD_UBYTE_D16: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });", - 
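The FLAT atomic pseudocode deleted below all follows the same read-modify-write shape: snapshot memory, apply the op, and return the pre-op value. As a reference point, a minimal Python emulation of the FLAT_ATOMIC_CMPSWAP entry (mem is a plain dict standing in for MEM; purely illustrative, not part of the autogen):

def flat_atomic_cmpswap(mem:dict[int,int], addr:int, data:int) -> int:
  tmp = mem.get(addr, 0)                                # tmp = MEM[addr].u32
  src, cmp = data & 0xffffffff, (data >> 32) & 0xffffffff  # src = DATA[31:0], cmp = DATA[63:32]
  mem[addr] = src if tmp == cmp else tmp                # store src only on compare match
  return tmp                                            # RETURN_DATA.u32 = pre-op value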
FLATOp.FLAT_LOAD_UBYTE_D16_HI: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });", - FLATOp.FLAT_LOAD_SBYTE_D16: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));", - FLATOp.FLAT_LOAD_SBYTE_D16_HI: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));", - FLATOp.FLAT_LOAD_SHORT_D16: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;', - FLATOp.FLAT_LOAD_SHORT_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;', - FLATOp.FLAT_ATOMIC_SWAP: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', - FLATOp.FLAT_ATOMIC_CMPSWAP: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - FLATOp.FLAT_ATOMIC_ADD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', - FLATOp.FLAT_ATOMIC_SUB: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', - FLATOp.FLAT_ATOMIC_SMIN: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - FLATOp.FLAT_ATOMIC_UMIN: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - FLATOp.FLAT_ATOMIC_SMAX: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - FLATOp.FLAT_ATOMIC_UMAX: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - FLATOp.FLAT_ATOMIC_AND: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', - FLATOp.FLAT_ATOMIC_OR: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', - FLATOp.FLAT_ATOMIC_XOR: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', - FLATOp.FLAT_ATOMIC_INC: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', - FLATOp.FLAT_ATOMIC_DEC: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', - FLATOp.FLAT_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp', - FLATOp.FLAT_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', - FLATOp.FLAT_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', - FLATOp.FLAT_ATOMIC_MIN_F64: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', - FLATOp.FLAT_ATOMIC_MAX_F64: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', - FLATOp.FLAT_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', - FLATOp.FLAT_ATOMIC_SWAP_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', - FLATOp.FLAT_ATOMIC_CMPSWAP_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - FLATOp.FLAT_ATOMIC_ADD_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', - FLATOp.FLAT_ATOMIC_SUB_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', - FLATOp.FLAT_ATOMIC_SMIN_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - FLATOp.FLAT_ATOMIC_UMIN_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - FLATOp.FLAT_ATOMIC_SMAX_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - FLATOp.FLAT_ATOMIC_UMAX_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - FLATOp.FLAT_ATOMIC_AND_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', - FLATOp.FLAT_ATOMIC_OR_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', - FLATOp.FLAT_ATOMIC_XOR_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', - FLATOp.FLAT_ATOMIC_INC_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', - FLATOp.FLAT_ATOMIC_DEC_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', +VOPCOp_PCODE = { + VOPCOp.V_CMP_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 
5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOPCOp.V_CMP_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOPCOp.V_CMP_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 
5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result", + VOPCOp.V_CMP_F_F16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F16: 'D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F16: 'D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F16: 'D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F16: "D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F16: "D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F16: 'D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F16: 'D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F16: 'D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_TRU_F16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_F16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_O_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_U_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = 
!(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_TRU_F16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_F32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F32: 'D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F32: 'D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F32: 'D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F32: "D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F32: "D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F32: 'D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F32: 'D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F32: 'D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_TRU_F32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_F32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_O_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_U_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 >= 
S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_TRU_F32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_F64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F64: 'D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F64: 'D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F64: 'D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F64: 'D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_U_F64: 'D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F64: 'D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F64: 'D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F64: 'D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_TRU_F64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_F64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC 
encoding.', + VOPCOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_O_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_TRU_F64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_I16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I16: 'D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I16: 'D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I16: 'D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I16: 'D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U16: 'D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U16: 'D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U16: 'D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U16: 'D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_I16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <> 
S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_I16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_U16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_U16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_I32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I32: 'D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I32: 'D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I32: 'D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I32: 'D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U32: 'D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U32: 'D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U32: 'D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U32: 'D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_I32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_I32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_U32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = 
D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_U32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_I64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I64: 'D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I64: 'D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I64: 'D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I64: 'D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U64: 'D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U64: 'D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U64: 'D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U64: 'D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_I64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_I64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_EQ_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', 
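(Note: the V_CMP/V_CMPX entries added above all follow one template — each lane evaluates a predicate, the result bits are packed into a 64-bit mask written to D0 (VCC, or an SGPR pair in VOP3), and the CMPX variants additionally copy that mask into EXEC. A minimal Python sketch of that template, not part of the generated files; helper and variable names are illustrative:

def v_cmp(pred, s0, s1, exec_mask, cmpx=False):
  d0 = 0
  for lane in range(64):                            # wave64: one result bit per lane
    if not (exec_mask >> lane) & 1: continue        # inactive lanes don't execute
    if pred(s0[lane], s1[lane]): d0 |= 1 << lane    # D0.u64[laneId] = S0 <op> S1
  if cmpx: exec_mask = d0                           # EXEC.u64[laneId] = D0.u64[laneId]
  return d0, exec_mask

# e.g. V_CMPX_LT_U32 with all 64 lanes active: only lanes where S0.u32 < S1.u32
# remain active in EXEC afterwards.
d0, exec_mask = v_cmp(lambda a, b: a < b, list(range(64)), [32]*64, (1 << 64) - 1, cmpx=True)
assert d0 == exec_mask == (1 << 32) - 1

The sketch simplifies the per-lane EXEC write: since inactive lanes contribute no result bits, assigning the whole mask at once is equivalent here.)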
+ VOPCOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMPX_T_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.\nwhere:\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nOP = DS instructions.\nADDR = Source LDS address VGPR 0 - 255.\nDATA0 = Source data0 VGPR 0 - 255.\nDATA1 = Source data1 VGPR 0 - 255.\nVDST = Destination VGPR 0- 255.", } -GLOBALOp_PCODE = { - GLOBALOp.GLOBAL_LOAD_UBYTE: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", - GLOBALOp.GLOBAL_LOAD_SBYTE: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", - GLOBALOp.GLOBAL_LOAD_USHORT: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", - GLOBALOp.GLOBAL_LOAD_SSHORT: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", - GLOBALOp.GLOBAL_LOAD_DWORD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', - GLOBALOp.GLOBAL_LOAD_DWORDX2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', - GLOBALOp.GLOBAL_LOAD_DWORDX3: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', - GLOBALOp.GLOBAL_LOAD_DWORDX4: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', - GLOBALOp.GLOBAL_STORE_BYTE: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', - GLOBALOp.GLOBAL_STORE_BYTE_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', - GLOBALOp.GLOBAL_STORE_SHORT: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', - GLOBALOp.GLOBAL_STORE_SHORT_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', - GLOBALOp.GLOBAL_STORE_DWORD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', - GLOBALOp.GLOBAL_STORE_DWORDX2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', - GLOBALOp.GLOBAL_STORE_DWORDX3: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', - GLOBALOp.GLOBAL_STORE_DWORDX4: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', - GLOBALOp.GLOBAL_LOAD_UBYTE_D16: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });", - GLOBALOp.GLOBAL_LOAD_UBYTE_D16_HI: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });", - GLOBALOp.GLOBAL_LOAD_SBYTE_D16: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 
16'I(signext(MEM[addr].i8));", - GLOBALOp.GLOBAL_LOAD_SBYTE_D16_HI: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));", - GLOBALOp.GLOBAL_LOAD_SHORT_D16: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;', - GLOBALOp.GLOBAL_LOAD_SHORT_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;', - GLOBALOp.GLOBAL_ATOMIC_SWAP: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_CMPSWAP: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_ADD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_SUB: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_SMIN: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_UMIN: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_SMAX: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_UMAX: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_AND: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_OR: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_XOR: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_INC: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_DEC: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', - GLOBALOp.GLOBAL_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp', - GLOBALOp.GLOBAL_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', - GLOBALOp.GLOBAL_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp', - GLOBALOp.GLOBAL_ATOMIC_MIN_F64: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_MAX_F64: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp', - GLOBALOp.GLOBAL_ATOMIC_SWAP_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_ADD_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_SUB_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_SMIN_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_UMIN_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_SMAX_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_UMAX_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_AND_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_OR_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_XOR_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_INC_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', - GLOBALOp.GLOBAL_ATOMIC_DEC_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? 
src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', -} - -SCRATCHOp_PCODE = { - SCRATCHOp.SCRATCH_LOAD_UBYTE: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", - SCRATCHOp.SCRATCH_LOAD_SBYTE: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", - SCRATCHOp.SCRATCH_LOAD_USHORT: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", - SCRATCHOp.SCRATCH_LOAD_SSHORT: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", - SCRATCHOp.SCRATCH_LOAD_DWORD: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32', - SCRATCHOp.SCRATCH_LOAD_DWORDX2: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', - SCRATCHOp.SCRATCH_LOAD_DWORDX3: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', - SCRATCHOp.SCRATCH_LOAD_DWORDX4: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', - SCRATCHOp.SCRATCH_STORE_BYTE: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]', - SCRATCHOp.SCRATCH_STORE_BYTE_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]', - SCRATCHOp.SCRATCH_STORE_SHORT: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]', - SCRATCHOp.SCRATCH_STORE_SHORT_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]', - SCRATCHOp.SCRATCH_STORE_DWORD: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]', - SCRATCHOp.SCRATCH_STORE_DWORDX2: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', - SCRATCHOp.SCRATCH_STORE_DWORDX3: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', - SCRATCHOp.SCRATCH_STORE_DWORDX4: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', - SCRATCHOp.SCRATCH_LOAD_UBYTE_D16: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });", - SCRATCHOp.SCRATCH_LOAD_UBYTE_D16_HI: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });", - SCRATCHOp.SCRATCH_LOAD_SBYTE_D16: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));", - SCRATCHOp.SCRATCH_LOAD_SBYTE_D16_HI: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));", - SCRATCHOp.SCRATCH_LOAD_SHORT_D16: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;', - SCRATCHOp.SCRATCH_LOAD_SHORT_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;', -} - -PSEUDOCODE_STRINGS = { - SOP1Op: SOP1Op_PCODE, - SOP2Op: SOP2Op_PCODE, - SOPCOp: SOPCOp_PCODE, - SOPKOp: 
SOPKOp_PCODE, - SOPPOp: SOPPOp_PCODE, - SMEMOp: SMEMOp_PCODE, - VOP1Op: VOP1Op_PCODE, - VOP2Op: VOP2Op_PCODE, - VOP3POp: VOP3POp_PCODE, - VOPCOp: VOPCOp_PCODE, - VOP3AOp: VOP3AOp_PCODE, - VOP3BOp: VOP3BOp_PCODE, - DSOp: DSOp_PCODE, - FLATOp: FLATOp_PCODE, - GLOBALOp: GLOBALOp_PCODE, - SCRATCHOp: SCRATCHOp_PCODE, -} \ No newline at end of file +PSEUDOCODE_STRINGS = {DSOp: DSOp_PCODE, FLATOp: FLATOp_PCODE, GLOBALOp: GLOBALOp_PCODE, MTBUFOp: MTBUFOp_PCODE, MUBUFOp: MUBUFOp_PCODE, SCRATCHOp: SCRATCHOp_PCODE, SMEMOp: SMEMOp_PCODE, SOP1Op: SOP1Op_PCODE, SOP2Op: SOP2Op_PCODE, SOPCOp: SOPCOp_PCODE, SOPKOp: SOPKOp_PCODE, SOPPOp: SOPPOp_PCODE, VOP1Op: VOP1Op_PCODE, VOP2Op: VOP2Op_PCODE, VOP3AOp: VOP3AOp_PCODE, VOP3BOp: VOP3BOp_PCODE, VOP3POp: VOP3POp_PCODE, VOPCOp: VOPCOp_PCODE} \ No newline at end of file diff --git a/extra/assembly/amd/autogen/rdna3/enum.py b/extra/assembly/amd/autogen/rdna3/enum.py index e7f350fa2d..60aecd92ce 100644 --- a/extra/assembly/amd/autogen/rdna3/enum.py +++ b/extra/assembly/amd/autogen/rdna3/enum.py @@ -1,34 +1,97 @@ -# autogenerated from AMD RDNA3.5 ISA PDF by pdf.py - do not edit +# autogenerated from AMD ISA PDF by pdf.py - do not edit from enum import IntEnum -class SrcEnum(IntEnum): - VCC_LO = 106 - VCC_HI = 107 - NULL = 124 - M0 = 125 - EXEC_LO = 126 - EXEC_HI = 127 - ZERO = 128 - DPP8 = 233 - DPP8FI = 234 - SHARED_BASE = 235 - SHARED_LIMIT = 236 - PRIVATE_BASE = 237 - PRIVATE_LIMIT = 238 - POS_HALF = 240 - NEG_HALF = 241 - POS_ONE = 242 - NEG_ONE = 243 - POS_TWO = 244 - NEG_TWO = 245 - POS_FOUR = 246 - NEG_FOUR = 247 - INV_2PI = 248 - DPP16 = 250 - VCCZ = 251 - EXECZ = 252 - SCC = 253 - LDS_DIRECT = 254 +class BufFmt(IntEnum): + BUF_FMT_8_UNORM = 1 + BUF_FMT_8_SNORM = 2 + BUF_FMT_8_USCALED = 3 + BUF_FMT_8_SSCALED = 4 + BUF_FMT_8_UINT = 5 + BUF_FMT_8_SINT = 6 + BUF_FMT_16_UNORM = 7 + BUF_FMT_16_SNORM = 8 + BUF_FMT_16_USCALED = 9 + BUF_FMT_16_SSCALED = 10 + BUF_FMT_16_UINT = 11 + BUF_FMT_16_SINT = 12 + BUF_FMT_16_FLOAT = 13 + BUF_FMT_8_8_UNORM = 14 + BUF_FMT_8_8_SNORM = 15 + BUF_FMT_8_8_USCALED = 16 + BUF_FMT_8_8_SSCALED = 17 + BUF_FMT_8_8_UINT = 18 + BUF_FMT_8_8_SINT = 19 + BUF_FMT_32_UINT = 20 + BUF_FMT_32_SINT = 21 + BUF_FMT_32_FLOAT = 22 + BUF_FMT_16_16_UNORM = 23 + BUF_FMT_16_16_SNORM = 24 + BUF_FMT_16_16_USCALED = 25 + BUF_FMT_16_16_SSCALED = 26 + BUF_FMT_16_16_UINT = 27 + BUF_FMT_16_16_SINT = 28 + BUF_FMT_16_16_FLOAT = 29 + BUF_FMT_10_11_11_FLOAT = 30 + BUF_FMT_11_11_10_FLOAT = 31 + BUF_FMT_10_10_10_2_UNORM = 32 + BUF_FMT_10_10_10_2_SNORM = 33 + BUF_FMT_10_10_10_2_UINT = 34 + BUF_FMT_10_10_10_2_SINT = 35 + BUF_FMT_2_10_10_10_UNORM = 36 + BUF_FMT_2_10_10_10_SNORM = 37 + BUF_FMT_2_10_10_10_USCALED = 38 + BUF_FMT_2_10_10_10_SSCALED = 39 + BUF_FMT_2_10_10_10_UINT = 40 + BUF_FMT_2_10_10_10_SINT = 41 + BUF_FMT_8_8_8_8_UNORM = 42 + BUF_FMT_8_8_8_8_SNORM = 43 + BUF_FMT_8_8_8_8_USCALED = 44 + BUF_FMT_8_8_8_8_SSCALED = 45 + BUF_FMT_8_8_8_8_UINT = 46 + BUF_FMT_8_8_8_8_SINT = 47 + BUF_FMT_32_32_UINT = 48 + BUF_FMT_32_32_SINT = 49 + BUF_FMT_32_32_FLOAT = 50 + BUF_FMT_16_16_16_16_UNORM = 51 + BUF_FMT_16_16_16_16_SNORM = 52 + BUF_FMT_16_16_16_16_USCALED = 53 + BUF_FMT_16_16_16_16_SSCALED = 54 + BUF_FMT_16_16_16_16_UINT = 55 + BUF_FMT_16_16_16_16_SINT = 56 + BUF_FMT_16_16_16_16_FLOAT = 57 + BUF_FMT_32_32_32_UINT = 58 + BUF_FMT_32_32_32_SINT = 59 + BUF_FMT_32_32_32_FLOAT = 60 + BUF_FMT_32_32_32_32_UINT = 61 + BUF_FMT_8_SRGB = 64 + BUF_FMT_8_8_SRGB = 65 + BUF_FMT_8_8_8_8_SRGB = 66 + BUF_FMT_5_9_9_9_FLOAT = 67 + BUF_FMT_5_6_5_UNORM = 68 + BUF_FMT_1_5_5_5_UNORM = 69 + 
BUF_FMT_5_5_5_1_UNORM = 70 + BUF_FMT_4_4_4_4_UNORM = 71 + BUF_FMT_4_4_UNORM = 72 + BUF_FMT_1_UNORM = 73 + BUF_FMT_1_REVERSED_UNORM = 74 + BUF_FMT_32_FLOAT_CLAMP = 75 + BUF_FMT_8_24_UNORM = 76 + BUF_FMT_8_24_UINT = 77 + BUF_FMT_24_8_UNORM = 78 + BUF_FMT_24_8_UINT = 79 + BUF_FMT_X24_8_32_UINT = 80 + BUF_FMT_X24_8_32_FLOAT = 81 + BUF_FMT_GB_GR_UNORM = 82 + BUF_FMT_GB_GR_SNORM = 83 + BUF_FMT_GB_GR_UINT = 84 + BUF_FMT_GB_GR_SRGB = 85 + BUF_FMT_BG_RG_UNORM = 86 + BUF_FMT_BG_RG_SNORM = 87 + BUF_FMT_BG_RG_UINT = 88 + BUF_FMT_BG_RG_SRGB = 89 + BUF_FMT_BC1_UNORM = 109 + BUF_FMT_BC1_SRGB = 110 + BUF_FMT_BC2_UNORM = 111 class DSOp(IntEnum): DS_ADD_U32 = 0 @@ -1372,7 +1435,6 @@ class VOP3POp(IntEnum): V_WMMA_I32_16X16X16_IU4 = 69 class VOP3SDOp(IntEnum): - DWORD = 1 V_ADD_CO_CI_U32 = 288 V_SUB_CO_CI_U32 = 289 V_SUBREV_CO_CI_U32 = 290 @@ -1594,68 +1656,3 @@ class VOPDOp(IntEnum): V_DUAL_ADD_NC_U32 = 16 V_DUAL_LSHLREV_B32 = 17 V_DUAL_AND_B32 = 18 - -class BufFmt(IntEnum): - BUF_FMT_8_UNORM = 1 - BUF_FMT_8_SNORM = 2 - BUF_FMT_8_USCALED = 3 - BUF_FMT_8_SSCALED = 4 - BUF_FMT_8_UINT = 5 - BUF_FMT_8_SINT = 6 - BUF_FMT_16_UNORM = 7 - BUF_FMT_16_SNORM = 8 - BUF_FMT_16_USCALED = 9 - BUF_FMT_16_SSCALED = 10 - BUF_FMT_16_UINT = 11 - BUF_FMT_16_SINT = 12 - BUF_FMT_16_FLOAT = 13 - BUF_FMT_8_8_UNORM = 14 - BUF_FMT_8_8_SNORM = 15 - BUF_FMT_8_8_USCALED = 16 - BUF_FMT_8_8_SSCALED = 17 - BUF_FMT_8_8_UINT = 18 - BUF_FMT_8_8_SINT = 19 - BUF_FMT_32_UINT = 20 - BUF_FMT_32_SINT = 21 - BUF_FMT_32_FLOAT = 22 - BUF_FMT_16_16_UNORM = 23 - BUF_FMT_16_16_SNORM = 24 - BUF_FMT_16_16_USCALED = 25 - BUF_FMT_16_16_SSCALED = 26 - BUF_FMT_16_16_UINT = 27 - BUF_FMT_16_16_SINT = 28 - BUF_FMT_16_16_FLOAT = 29 - BUF_FMT_10_11_11_FLOAT = 30 - BUF_FMT_11_11_10_FLOAT = 31 - BUF_FMT_10_10_10_2_UNORM = 32 - BUF_FMT_10_10_10_2_SNORM = 33 - BUF_FMT_10_10_10_2_UINT = 34 - BUF_FMT_10_10_10_2_SINT = 35 - BUF_FMT_2_10_10_10_UNORM = 36 - BUF_FMT_2_10_10_10_SNORM = 37 - BUF_FMT_2_10_10_10_USCALED = 38 - BUF_FMT_2_10_10_10_SSCALED = 39 - BUF_FMT_2_10_10_10_UINT = 40 - BUF_FMT_2_10_10_10_SINT = 41 - BUF_FMT_8_8_8_8_UNORM = 42 - BUF_FMT_8_8_8_8_SNORM = 43 - BUF_FMT_8_8_8_8_USCALED = 44 - BUF_FMT_8_8_8_8_SSCALED = 45 - BUF_FMT_8_8_8_8_UINT = 46 - BUF_FMT_8_8_8_8_SINT = 47 - BUF_FMT_32_32_UINT = 48 - BUF_FMT_32_32_SINT = 49 - BUF_FMT_32_32_FLOAT = 50 - BUF_FMT_16_16_16_16_UNORM = 51 - BUF_FMT_16_16_16_16_SNORM = 52 - BUF_FMT_16_16_16_16_USCALED = 53 - BUF_FMT_16_16_16_16_SSCALED = 54 - BUF_FMT_16_16_16_16_UINT = 55 - BUF_FMT_16_16_16_16_SINT = 56 - BUF_FMT_16_16_16_16_FLOAT = 57 - BUF_FMT_32_32_32_UINT = 58 - BUF_FMT_32_32_32_SINT = 59 - BUF_FMT_32_32_32_FLOAT = 60 - BUF_FMT_32_32_32_32_UINT = 61 - BUF_FMT_32_32_32_32_SINT = 62 - BUF_FMT_32_32_32_32_FLOAT = 63 diff --git a/extra/assembly/amd/autogen/rdna3/ins.py b/extra/assembly/amd/autogen/rdna3/ins.py index 0c486bb4c7..742904165a 100644 --- a/extra/assembly/amd/autogen/rdna3/ins.py +++ b/extra/assembly/amd/autogen/rdna3/ins.py @@ -1,12 +1,11 @@ -# autogenerated from AMD RDNA3.5 ISA PDF by pdf.py - do not edit +# autogenerated from AMD ISA PDF by pdf.py - do not edit # ruff: noqa: F401,F403 from typing import Annotated -from extra.assembly.amd.dsl import bits, BitField, Inst32, Inst64, Inst96, SGPR, VGPR, TTMP as TTMP, s as s, v as v, ttmp as ttmp, SSrc, Src, SImm, Imm, VDSTYEnc, SGPRField, VGPRField +from extra.assembly.amd.dsl import * from extra.assembly.amd.autogen.rdna3.enum import * import functools -# instruction formats -class DPP16(Inst64): +class DPP16(Inst): src0:Src = bits[39:32] dpp_ctrl = 
bits[48:40] fi = bits[50] @@ -18,7 +17,7 @@ class DPP16(Inst64): bank_mask = bits[59:56] row_mask = bits[63:60] -class DPP8(Inst64): +class DPP8(Inst): src0:Src = bits[39:32] lane_sel0 = bits[42:40] lane_sel1 = bits[45:43] @@ -29,7 +28,7 @@ class DPP8(Inst64): lane_sel6 = bits[60:58] lane_sel7 = bits[63:61] -class DS(Inst64): +class DS(Inst): encoding = bits[31:26] == 0b110110 op:Annotated[BitField, DSOp] = bits[25:18] vdst:VGPRField = bits[63:56] @@ -40,18 +39,18 @@ class DS(Inst64): offset1 = bits[15:8] gds = bits[17] -class EXP(Inst64): +class EXP(Inst): encoding = bits[31:26] == 0b111110 + vsrc0:VGPRField = bits[39:32] + vsrc1:VGPRField = bits[47:40] + vsrc2:VGPRField = bits[55:48] + vsrc3:VGPRField = bits[63:56] en = bits[3:0] target = bits[9:4] - vsrc0 = bits[39:32] - vsrc1:VGPRField = bits[47:40] - vsrc2 = bits[55:48] - vsrc3 = bits[63:56] done = bits[11] row = bits[13] -class FLAT(Inst64): +class FLAT(Inst): encoding = bits[31:26] == 0b110111 op:Annotated[BitField, FLATOp] = bits[24:18] vdst:VGPRField = bits[63:56] @@ -60,12 +59,12 @@ class FLAT(Inst64): saddr:SSrc = bits[54:48] offset:Imm = bits[12:0] seg = bits[17:16] - dlc = bits[13] glc = bits[14] + dlc = bits[13] slc = bits[15] sve = bits[55] -class LDSDIR(Inst32): +class LDSDIR(Inst): encoding = bits[31:24] == 0b11001110 op = bits[21:20] vdst:VGPRField = bits[7:0] @@ -73,29 +72,29 @@ class LDSDIR(Inst32): attr_chan = bits[9:8] wait_va = bits[19:16] -class MIMG(Inst64): +class MIMG(Inst): encoding = bits[31:26] == 0b111100 op:Annotated[BitField, MIMGOp] = bits[25:18] vdata:VGPRField = bits[47:40] vaddr:VGPRField = bits[39:32] srsrc:SGPRField = bits[52:48] - ssamp = bits[62:58] + ssamp:SGPRField = bits[62:58] dmask = bits[11:8] dim = bits[4:2] - unrm = bits[7] - dlc = bits[13] glc = bits[14] + dlc = bits[13] slc = bits[12] + tfe = bits[53] + unrm = bits[7] nsa = bits[0] r128 = bits[15] a16 = bits[16] d16 = bits[17] - tfe = bits[53] lwe = bits[54] addr1 = bits[71:64] addr2 = bits[79:72] -class MTBUF(Inst64): +class MTBUF(Inst): encoding = bits[31:26] == 0b111010 op:Annotated[BitField, MTBUFOp] = bits[18:15] vdata:VGPRField = bits[47:40] @@ -111,7 +110,7 @@ class MTBUF(Inst64): slc = bits[12] tfe = bits[53] -class MUBUF(Inst64): +class MUBUF(Inst): encoding = bits[31:26] == 0b111000 op:Annotated[BitField, MUBUFOp] = bits[25:18] vdata:VGPRField = bits[47:40] @@ -126,7 +125,7 @@ class MUBUF(Inst64): slc = bits[12] tfe = bits[53] -class SMEM(Inst64): +class SMEM(Inst): encoding = bits[31:26] == 0b111101 op:Annotated[BitField, SMEMOp] = bits[25:18] sdata:SGPRField = bits[12:6] @@ -136,62 +135,63 @@ class SMEM(Inst64): glc = bits[14] dlc = bits[13] -class SOP1(Inst32): +class SOP1(Inst): encoding = bits[31:23] == 0b101111101 op:Annotated[BitField, SOP1Op] = bits[15:8] sdst:SGPRField = bits[22:16] ssrc0:SSrc = bits[7:0] -class SOP2(Inst32): +class SOP2(Inst): encoding = bits[31:30] == 0b10 op:Annotated[BitField, SOP2Op] = bits[29:23] sdst:SGPRField = bits[22:16] ssrc0:SSrc = bits[7:0] ssrc1:SSrc = bits[15:8] -class SOPC(Inst32): +class SOPC(Inst): encoding = bits[31:23] == 0b101111110 op:Annotated[BitField, SOPCOp] = bits[22:16] ssrc0:SSrc = bits[7:0] ssrc1:SSrc = bits[15:8] -class SOPK(Inst32): +class SOPK(Inst): encoding = bits[31:28] == 0b1011 op:Annotated[BitField, SOPKOp] = bits[27:23] sdst:SGPRField = bits[22:16] simm16:SImm = bits[15:0] -class SOPP(Inst32): +class SOPP(Inst): encoding = bits[31:23] == 0b101111111 op:Annotated[BitField, SOPPOp] = bits[22:16] simm16:SImm = bits[15:0] -class VINTERP(Inst64): +class VINTERP(Inst): 
encoding = bits[31:24] == 0b11001101 op:Annotated[BitField, VINTERPOp] = bits[22:16] vdst:VGPRField = bits[7:0] src0:Src = bits[40:32] + src0:Src = bits[40:32] src1:Src = bits[49:41] src2:Src = bits[58:50] - waitexp = bits[10:8] + neg = bits[63:61] clmp = bits[15] opsel = bits[14:11] - neg = bits[63:61] + waitexp = bits[10:8] -class VOP1(Inst32): - encoding = bits[31:25] == 0b111111 +class VOP1(Inst): + encoding = bits[31:25] == 0b0111111 op:Annotated[BitField, VOP1Op] = bits[16:9] vdst:VGPRField = bits[24:17] src0:Src = bits[8:0] -class VOP2(Inst32): - encoding = bits[31] == 0 +class VOP2(Inst): + encoding = bits[31] == 0b0 op:Annotated[BitField, VOP2Op] = bits[30:25] vdst:VGPRField = bits[24:17] src0:Src = bits[8:0] vsrc1:VGPRField = bits[16:9] -class VOP3(Inst64): +class VOP3(Inst): encoding = bits[31:26] == 0b110101 op:Annotated[BitField, VOP3Op] = bits[25:16] vdst:VGPRField = bits[7:0] @@ -204,9 +204,8 @@ class VOP3(Inst64): clmp = bits[15] opsel = bits[14:11] -class VOP3P(Inst64): +class VOP3P(Inst): encoding = bits[31:24] == 0b11001100 - _defaults = {'opsel_hi': 3, 'opsel_hi2': 1} op:Annotated[BitField, VOP3POp] = bits[22:16] vdst:VGPRField = bits[7:0] src0:Src = bits[40:32] @@ -214,12 +213,12 @@ class VOP3P(Inst64): src2:Src = bits[58:50] neg = bits[63:61] neg_hi = bits[10:8] + clmp = bits[15] opsel = bits[13:11] opsel_hi = bits[60:59] - clmp = bits[15] opsel_hi2 = bits[14] -class VOP3SD(Inst64): +class VOP3SD(Inst): encoding = bits[31:26] == 0b110101 op:Annotated[BitField, VOP3SDOp] = bits[25:16] vdst:VGPRField = bits[7:0] @@ -227,26 +226,26 @@ class VOP3SD(Inst64): src0:Src = bits[40:32] src1:Src = bits[49:41] src2:Src = bits[58:50] - clmp = bits[15] omod = bits[60:59] neg = bits[63:61] + clmp = bits[15] -class VOPC(Inst32): - encoding = bits[31:25] == 0b111110 +class VOPC(Inst): + encoding = bits[31:25] == 0b0111110 op:Annotated[BitField, VOPCOp] = bits[24:17] src0:Src = bits[8:0] vsrc1:VGPRField = bits[16:9] -class VOPD(Inst64): +class VOPD(Inst): encoding = bits[31:26] == 0b110010 opx:Annotated[BitField, VOPDOp] = bits[25:22] opy:Annotated[BitField, VOPDOp] = bits[21:17] - vdstx:VGPRField = bits[63:56] + vdstx = bits[63:56] vdsty:VDSTYEnc = bits[55:49] srcx0:Src = bits[8:0] - vsrcx1:VGPRField = bits[16:9] srcy0:Src = bits[40:32] - vsrcy1:VGPRField = bits[48:41] + vsrcx1 = bits[16:9] + vsrcy1 = bits[48:41] # instruction helpers ds_add_u32 = functools.partial(DS, DSOp.DS_ADD_U32) @@ -1077,16 +1076,16 @@ v_add_nc_u32_e32 = functools.partial(VOP2, VOP2Op.V_ADD_NC_U32) v_sub_nc_u32_e32 = functools.partial(VOP2, VOP2Op.V_SUB_NC_U32) v_subrev_nc_u32_e32 = functools.partial(VOP2, VOP2Op.V_SUBREV_NC_U32) v_fmac_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAC_F32) -def v_fmamk_f32_e32(vdst, src0, K, vsrc1): return VOP2(VOP2Op.V_FMAMK_F32, vdst, src0, vsrc1, literal=K) -def v_fmaak_f32_e32(vdst, src0, vsrc1, K): return VOP2(VOP2Op.V_FMAAK_F32, vdst, src0, vsrc1, literal=K) +v_fmamk_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAMK_F32) +v_fmaak_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAAK_F32) v_cvt_pk_rtz_f16_f32_e32 = functools.partial(VOP2, VOP2Op.V_CVT_PK_RTZ_F16_F32) v_add_f16_e32 = functools.partial(VOP2, VOP2Op.V_ADD_F16) v_sub_f16_e32 = functools.partial(VOP2, VOP2Op.V_SUB_F16) v_subrev_f16_e32 = functools.partial(VOP2, VOP2Op.V_SUBREV_F16) v_mul_f16_e32 = functools.partial(VOP2, VOP2Op.V_MUL_F16) v_fmac_f16_e32 = functools.partial(VOP2, VOP2Op.V_FMAC_F16) -def v_fmamk_f16_e32(vdst, src0, K, vsrc1): return VOP2(VOP2Op.V_FMAMK_F16, vdst, src0, vsrc1, literal=K) -def 
v_fmaak_f16_e32(vdst, src0, vsrc1, K): return VOP2(VOP2Op.V_FMAAK_F16, vdst, src0, vsrc1, literal=K) +v_fmamk_f16_e32 = functools.partial(VOP2, VOP2Op.V_FMAMK_F16) +v_fmaak_f16_e32 = functools.partial(VOP2, VOP2Op.V_FMAAK_F16) v_max_f16_e32 = functools.partial(VOP2, VOP2Op.V_MAX_F16) v_min_f16_e32 = functools.partial(VOP2, VOP2Op.V_MIN_F16) v_ldexp_f16_e32 = functools.partial(VOP2, VOP2Op.V_LDEXP_F16) @@ -1554,7 +1553,6 @@ v_wmma_f16_16x16x16_f16 = functools.partial(VOP3P, VOP3POp.V_WMMA_F16_16X16X16_F v_wmma_bf16_16x16x16_bf16 = functools.partial(VOP3P, VOP3POp.V_WMMA_BF16_16X16X16_BF16) v_wmma_i32_16x16x16_iu8 = functools.partial(VOP3P, VOP3POp.V_WMMA_I32_16X16X16_IU8) v_wmma_i32_16x16x16_iu4 = functools.partial(VOP3P, VOP3POp.V_WMMA_I32_16X16X16_IU4) -dword = functools.partial(VOP3SD, VOP3SDOp.DWORD) v_add_co_ci_u32 = functools.partial(VOP3SD, VOP3SDOp.V_ADD_CO_CI_U32) v_sub_co_ci_u32 = functools.partial(VOP3SD, VOP3SDOp.V_SUB_CO_CI_U32) v_subrev_co_ci_u32 = functools.partial(VOP3SD, VOP3SDOp.V_SUBREV_CO_CI_U32) @@ -1771,31 +1769,4 @@ v_dual_dot2acc_f32_f16 = functools.partial(VOPD, VOPDOp.V_DUAL_DOT2ACC_F32_F16) v_dual_dot2acc_f32_bf16 = functools.partial(VOPD, VOPDOp.V_DUAL_DOT2ACC_F32_BF16) v_dual_add_nc_u32 = functools.partial(VOPD, VOPDOp.V_DUAL_ADD_NC_U32) v_dual_lshlrev_b32 = functools.partial(VOPD, VOPDOp.V_DUAL_LSHLREV_B32) -v_dual_and_b32 = functools.partial(VOPD, VOPDOp.V_DUAL_AND_B32) - -VCC_LO = SrcEnum.VCC_LO -VCC_HI = SrcEnum.VCC_HI -NULL = SrcEnum.NULL -M0 = SrcEnum.M0 -EXEC_LO = SrcEnum.EXEC_LO -EXEC_HI = SrcEnum.EXEC_HI -ZERO = SrcEnum.ZERO -DPP8FI = SrcEnum.DPP8FI -SHARED_BASE = SrcEnum.SHARED_BASE -SHARED_LIMIT = SrcEnum.SHARED_LIMIT -PRIVATE_BASE = SrcEnum.PRIVATE_BASE -PRIVATE_LIMIT = SrcEnum.PRIVATE_LIMIT -POS_HALF = SrcEnum.POS_HALF -NEG_HALF = SrcEnum.NEG_HALF -POS_ONE = SrcEnum.POS_ONE -NEG_ONE = SrcEnum.NEG_ONE -POS_TWO = SrcEnum.POS_TWO -NEG_TWO = SrcEnum.NEG_TWO -POS_FOUR = SrcEnum.POS_FOUR -NEG_FOUR = SrcEnum.NEG_FOUR -INV_2PI = SrcEnum.INV_2PI -VCCZ = SrcEnum.VCCZ -EXECZ = SrcEnum.EXECZ -SCC = SrcEnum.SCC -LDS_DIRECT = SrcEnum.LDS_DIRECT -OFF = NULL +v_dual_and_b32 = functools.partial(VOPD, VOPDOp.V_DUAL_AND_B32) \ No newline at end of file diff --git a/extra/assembly/amd/autogen/rdna3/str_pcode.py b/extra/assembly/amd/autogen/rdna3/str_pcode.py index ef93a8cae7..936f9518db 100644 --- a/extra/assembly/amd/autogen/rdna3/str_pcode.py +++ b/extra/assembly/amd/autogen/rdna3/str_pcode.py @@ -1,1077 +1,7 @@ # autogenerated by pdf.py - do not edit -# to regenerate: python -m extra.assembly.amd.pdf --arch rdna3 +# to regenerate: python -m extra.assembly.amd.pdf # ruff: noqa: E501 -from extra.assembly.amd.autogen.rdna3.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp - -SOP1Op_PCODE = { - SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32', - SOP1Op.S_MOV_B64: 'D0.b64 = S0.b64', - SOP1Op.S_CMOV_B32: 'if SCC then\nD0.b32 = S0.b32\nendif', - SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif', - SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', - SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]', - SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_CTZ_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// 
Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_CLZ_I32_U64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', - SOP1Op.S_CLS_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', - SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))", - SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", - SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U", - SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U", - SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U", - SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U", - SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor', - SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0', - SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", - SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", - SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", - SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", - SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U', - SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL', - SOP1Op.S_WQM_B32: "tmp = 0U;\ndeclare i : 6'U;\nfor i in 6'0U : 6'31U do\ntmp[i] = S0.u32[i & 6'60U +: 6'4U] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U", - SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL", - SOP1Op.S_NOT_B32: 'D0.u32 = ~S0.u32;\nSCC = D0.u32 != 0U', - SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL', - SOP1Op.S_AND_SAVEEXEC_B32: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_OR_SAVEEXEC_B32: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | EXEC.u32);\nD0.u32 = 
saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_OR_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_XOR_SAVEEXEC_B32: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_XOR_SAVEEXEC_B64: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_NAND_SAVEEXEC_B32: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_NAND_SAVEEXEC_B64: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_NOR_SAVEEXEC_B32: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_NOR_SAVEEXEC_B64: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_XNOR_SAVEEXEC_B32: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_XNOR_SAVEEXEC_B64: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_AND_NOT0_SAVEEXEC_B32: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = 
saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_NOT0_SAVEEXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_OR_NOT0_SAVEEXEC_B32: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_OR_NOT0_SAVEEXEC_B64: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_AND_NOT1_SAVEEXEC_B32: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_NOT1_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_OR_NOT1_SAVEEXEC_B32: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_OR_NOT1_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_AND_NOT0_WREXEC_B32: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_NOT0_WREXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. 
This instruction is\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_AND_NOT1_WREXEC_B32: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_NOT1_WREXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = SGPR[addr].b32', - SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b64 = SGPR[addr].b64', - SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b32 = S0.b32', - SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b64 = S0.b64', - SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', - SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL', - SOP1Op.S_SETPC_B64: 'PC = S0.i64', - SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64', - SOP1Op.S_RFE_B64: 'PC = S0.i64', - SOP1Op.S_SENDMSG_RTN_B32: 'If SDST is VCC then VCCZ is undefined.', - SOP1Op.S_SENDMSG_RTN_B64: 'If SDST is VCC then VCCZ is undefined.', - SOP1Op.S_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', - SOP1Op.S_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', - SOP1Op.S_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', - SOP1Op.S_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", - SOP1Op.S_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', - SOP1Op.S_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', - SOP1Op.S_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', - SOP1Op.S_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', - SOP1Op.S_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', - SOP1Op.S_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', - SOP1Op.S_CVT_HI_F32_F16: 'D0.f32 = f16_to_f32(S0[31 : 16].f16)', - SOP1Op.S_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", - SOP1Op.S_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", - SOP1Op.S_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', - SOP1Op.S_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", -} - -SOP2Op_PCODE = { - SOP2Op.S_ADD_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_SUB_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 
1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_ADD_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', - SOP2Op.S_SUB_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', - SOP2Op.S_ADDC_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_SUBB_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0', - SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0", - SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL', - SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', - SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', - SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', - SOP2Op.S_MAX_U32: 'SCC = S0.u32 >= S1.u32;\nD0.u32 = SCC ? 
S0.u32 : S1.u32', - SOP2Op.S_AND_B32: 'D0.u32 = (S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_AND_B64: 'D0.u64 = (S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_OR_B32: 'D0.u32 = (S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_OR_B64: 'D0.u64 = (S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_XOR_B64: 'D0.u64 = (S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_NAND_B32: 'D0.u32 = ~(S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_NAND_B64: 'D0.u64 = ~(S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_NOR_B32: 'D0.u32 = ~(S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_NOR_B64: 'D0.u64 = ~(S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_XNOR_B64: 'D0.u64 = ~(S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_AND_NOT1_B32: 'D0.u32 = (S0.u32 & ~S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_AND_NOT1_B64: 'D0.u64 = (S0.u64 & ~S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_OR_NOT1_B32: 'D0.u32 = (S0.u32 | ~S1.u32);\nSCC = D0.u32 != 0U', - SOP2Op.S_OR_NOT1_B64: 'D0.u64 = (S0.u64 | ~S1.u64);\nSCC = D0.u64 != 0ULL', - SOP2Op.S_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S1[22 : 16].u32) - 1U));\nSCC = D0.u32 != 0U', - SOP2Op.S_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32);\nSCC = D0.i32 != 0', - SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL', - SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC = D0.i64 != 0LL', - SOP2Op.S_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', - SOP2Op.S_BFM_B64: 'D0.u64 = (((1ULL << S0[5 : 0].u32) - 1ULL) << S1[5 : 0].u32)', - SOP2Op.S_MUL_I32: 'D0.i32 = S0.i32 * S1.i32', - SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", - SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", - SOP2Op.S_CSELECT_B32: 'D0.u32 = SCC ? S0.u32 : S1.u32', - SOP2Op.S_CSELECT_B64: 'D0.u64 = SCC ? 
S0.u64 : S1.u64', - SOP2Op.S_PACK_LL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[15 : 0].u16 }', - SOP2Op.S_PACK_LH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[15 : 0].u16 }', - SOP2Op.S_PACK_HH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[31 : 16].u16 }', - SOP2Op.S_PACK_HL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[31 : 16].u16 }', - SOP2Op.S_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', - SOP2Op.S_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', - SOP2Op.S_MIN_F32: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - SOP2Op.S_MAX_F32: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - SOP2Op.S_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', - SOP2Op.S_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', - SOP2Op.S_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', - SOP2Op.S_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', - SOP2Op.S_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', - SOP2Op.S_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', - SOP2Op.S_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', - SOP2Op.S_MIN_F16: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - SOP2Op.S_MAX_F16: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) 
then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - SOP2Op.S_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', - SOP2Op.S_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', -} - -SOPCOp_PCODE = { - SOPCOp.S_CMP_EQ_I32: 'SCC = S0.i32 == S1.i32', - SOPCOp.S_CMP_LG_I32: 'SCC = S0.i32 <> S1.i32', - SOPCOp.S_CMP_GT_I32: 'SCC = S0.i32 > S1.i32', - SOPCOp.S_CMP_GE_I32: 'SCC = S0.i32 >= S1.i32', - SOPCOp.S_CMP_LT_I32: 'SCC = S0.i32 < S1.i32', - SOPCOp.S_CMP_LE_I32: 'SCC = S0.i32 <= S1.i32', - SOPCOp.S_CMP_EQ_U32: 'SCC = S0.u32 == S1.u32', - SOPCOp.S_CMP_LG_U32: 'SCC = S0.u32 <> S1.u32', - SOPCOp.S_CMP_GT_U32: 'SCC = S0.u32 > S1.u32', - SOPCOp.S_CMP_GE_U32: 'SCC = S0.u32 >= S1.u32', - SOPCOp.S_CMP_LT_U32: 'SCC = S0.u32 < S1.u32', - SOPCOp.S_CMP_LE_U32: 'SCC = S0.u32 <= S1.u32', - SOPCOp.S_BITCMP0_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'0U", - SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U", - SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U", - SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U", - SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64', - SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64', - SOPCOp.S_CMP_LT_F32: 'SCC = S0.f32 < S1.f32', - SOPCOp.S_CMP_LT_F16: 'SCC = S0.f16 < S1.f16', - SOPCOp.S_CMP_EQ_F32: 'SCC = S0.f32 == S1.f32', - SOPCOp.S_CMP_EQ_F16: 'SCC = S0.f16 == S1.f16', - SOPCOp.S_CMP_LE_F32: 'SCC = S0.f32 <= S1.f32', - SOPCOp.S_CMP_LE_F16: 'SCC = S0.f16 <= S1.f16', - SOPCOp.S_CMP_GT_F32: 'SCC = S0.f32 > S1.f32', - SOPCOp.S_CMP_GT_F16: 'SCC = S0.f16 > S1.f16', - SOPCOp.S_CMP_LG_F32: 'SCC = S0.f32 <> S1.f32', - SOPCOp.S_CMP_LG_F16: 'SCC = S0.f16 <> S1.f16', - SOPCOp.S_CMP_GE_F32: 'SCC = S0.f32 >= S1.f32', - SOPCOp.S_CMP_GE_F16: 'SCC = S0.f16 >= S1.f16', - SOPCOp.S_CMP_O_F32: "SCC = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", - SOPCOp.S_CMP_O_F16: "SCC = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", - SOPCOp.S_CMP_U_F32: "SCC = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", - SOPCOp.S_CMP_U_F16: "SCC = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", - SOPCOp.S_CMP_NGE_F32: 'SCC = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', - SOPCOp.S_CMP_NGE_F16: 'SCC = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', - SOPCOp.S_CMP_NLG_F32: 'SCC = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', - SOPCOp.S_CMP_NLG_F16: 'SCC = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', - SOPCOp.S_CMP_NGT_F32: 'SCC = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', - SOPCOp.S_CMP_NGT_F16: 'SCC = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', - SOPCOp.S_CMP_NLE_F32: 'SCC = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', - SOPCOp.S_CMP_NLE_F16: 'SCC = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', - SOPCOp.S_CMP_NEQ_F32: 'SCC = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', - 
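# A rough Python sketch (illustrative only, not part of the autogen output) of the
# S_BFE_U32 pseudocode above: the bitfield offset lives in S1[4:0], the width in
# S1[22:16], and SCC is set when the extracted field is nonzero.
def s_bfe_u32(s0: int, s1: int) -> tuple[int, int]:
  offset, width = s1 & 0x1f, (s1 >> 16) & 0x7f
  d0 = (s0 >> offset) & ((1 << width) - 1) & 0xffffffff
  return d0, int(d0 != 0)  # (D0, SCC)

assert s_bfe_u32(0xdeadbeef, (8 << 16) | 8) == (0xbe, 1)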
SOPCOp.S_CMP_NEQ_F16: 'SCC = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', - SOPCOp.S_CMP_NLT_F32: 'SCC = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', - SOPCOp.S_CMP_NLT_F16: 'SCC = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', -} - -SOPKOp_PCODE = { - SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(SIMM16.i16))", - SOPKOp.S_VERSION: '// Do nothing - for use by tools only', - SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(SIMM16.i16))\nendif", - SOPKOp.S_CMPK_EQ_I32: "SCC = 64'I(S0.i32) == signext(SIMM16.i16)", - SOPKOp.S_CMPK_LG_I32: "SCC = 64'I(S0.i32) != signext(SIMM16.i16)", - SOPKOp.S_CMPK_GT_I32: "SCC = 64'I(S0.i32) > signext(SIMM16.i16)", - SOPKOp.S_CMPK_GE_I32: "SCC = 64'I(S0.i32) >= signext(SIMM16.i16)", - SOPKOp.S_CMPK_LT_I32: "SCC = 64'I(S0.i32) < signext(SIMM16.i16)", - SOPKOp.S_CMPK_LE_I32: "SCC = 64'I(S0.i32) <= signext(SIMM16.i16)", - SOPKOp.S_CMPK_EQ_U32: "SCC = S0.u32 == 32'U(SIMM16.u16)", - SOPKOp.S_CMPK_LG_U32: "SCC = S0.u32 != 32'U(SIMM16.u16)", - SOPKOp.S_CMPK_GT_U32: "SCC = S0.u32 > 32'U(SIMM16.u16)", - SOPKOp.S_CMPK_GE_U32: "SCC = S0.u32 >= 32'U(SIMM16.u16)", - SOPKOp.S_CMPK_LT_U32: "SCC = S0.u32 < 32'U(SIMM16.u16)", - SOPKOp.S_CMPK_LE_U32: "SCC = S0.u32 <= 32'U(SIMM16.u16)", - SOPKOp.S_ADDK_I32: "tmp = D0.i32;\nD0.i32 = 32'I(64'I(D0.i32) + signext(SIMM16.i16));\nSCC = ((tmp[31] == SIMM16.i16[15]) && (tmp[31] != D0.i32[31]));", - SOPKOp.S_MULK_I32: "D0.i32 = 32'I(64'I(D0.i32) * signext(SIMM16.i16))", - SOPKOp.S_GETREG_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", - SOPKOp.S_SETREG_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask & 32'I(writeableBitMask(hwRegId.u32, WAVE_STATUS.PRIV)));\n// Mask of bits we are allowed to modify\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", - SOPKOp.S_SETREG_IMM32_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask & 32'I(writeableBitMask(hwRegId.u32, WAVE_STATUS.PRIV)));\n// Mask of bits we are allowed to modify\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", - SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL", - SOPKOp.S_WAITCNT_VSCNT: 'vscnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', - SOPKOp.S_WAITCNT_VMCNT: 'vmcnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', - SOPKOp.S_WAITCNT_EXPCNT: 'expcnt <= S0.u[2:0] + S1.u[2:0].\n// Comparison is 3 bits, no clamping is applied for add overflow', - SOPKOp.S_WAITCNT_LGKMCNT: 'lgkmcnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', -} - -SOPPOp_PCODE = { - SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nendfor', - SOPPOp.S_SETHALT: 'When halt type control is set to 1 (FATAL HALT bit select): Set FATAL_HALT bit to value of SIMM16[0]; 1 =\nfatal_halt, 0 = 
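# The SOPK entries above all funnel SIMM16 through signext(); a minimal sketch of
# that behavior (the helper names here are made up, not from the autogen files):
def signext16(simm16: int) -> int:
  return (simm16 & 0xffff) - 0x10000 if simm16 & 0x8000 else simm16 & 0xffff

def s_cmpk_lt_i32(s0: int, simm16: int) -> int:  # returns SCC
  s0 = (s0 & 0xffffffff) - (1 << 32) if s0 & 0x80000000 else s0 & 0xffffffff
  return int(s0 < signext16(simm16))

assert s_cmpk_lt_i32(0xffffffff, 0x0000) == 1  # -1 < 0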
clear FATAL_HALT bit. Setting the fatal_halt flag halts the shader in or outside of the trap', - SOPPOp.S_DELAY_ALU: 'instruction may be omitted. For wave64 the compiler may not know the status of the EXEC mask and hence\n// 1 cycle delay here\n// 2 cycles delay here', - SOPPOp.S_TRAP: '// PC passed into trap handler points to S_TRAP itself,\nPC = TBA.i64;\n// trap base address', - SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;", - SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_VCCZ: "If VCCZ is 1 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_VCCNZ: "If VCCZ is 0 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_CDBGSYS: "if WAVE_STATUS.COND_DBG_SYS.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_CDBGUSER: "if WAVE_STATUS.COND_DBG_USER.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: "if (WAVE_STATUS.COND_DBG_SYS || WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: "if (WAVE_STATUS.COND_DBG_SYS && WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", -} - -SMEMOp_PCODE = { - SMEMOp.S_LOAD_B32: 'SDATA[31 : 0] = MEM[ADDR].b32', - SMEMOp.S_LOAD_B64: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32', - SMEMOp.S_LOAD_B128: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32', - SMEMOp.S_LOAD_B256: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32', - SMEMOp.S_LOAD_B512: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32;\nSDATA[287 : 256] = MEM[ADDR + 32U].b32;\nSDATA[319 : 288] = MEM[ADDR + 36U].b32;\nSDATA[351 : 320] = MEM[ADDR + 40U].b32;\nSDATA[383 : 352] = MEM[ADDR + 44U].b32;\nSDATA[415 : 384] = MEM[ADDR + 48U].b32;\nSDATA[447 : 416] = MEM[ADDR + 52U].b32;\nSDATA[479 : 448] = MEM[ADDR + 56U].b32;\nSDATA[511 : 480] = MEM[ADDR + 60U].b32', - SMEMOp.S_BUFFER_LOAD_B32: 'SDATA[31 : 0] = MEM[ADDR].b32', - SMEMOp.S_BUFFER_LOAD_B64: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32', - SMEMOp.S_BUFFER_LOAD_B128: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 
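# The branch pseudocode above is shared by S_BRANCH and every S_CBRANCH_* variant:
# SIMM16 is a signed dword count, and the +4 steps over the 4-byte instruction.
# A small sketch (the function name is illustrative):
def branch_target(pc: int, simm16: int) -> int:
  off = (simm16 & 0xffff) - 0x10000 if simm16 & 0x8000 else simm16 & 0xffff
  return pc + off * 4 + 4

assert branch_target(0x1000, 0xffff) == 0x1000  # offset -1 dword: branch to itself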
32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32', - SMEMOp.S_BUFFER_LOAD_B256: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32', - SMEMOp.S_BUFFER_LOAD_B512: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32;\nSDATA[287 : 256] = MEM[ADDR + 32U].b32;\nSDATA[319 : 288] = MEM[ADDR + 36U].b32;\nSDATA[351 : 320] = MEM[ADDR + 40U].b32;\nSDATA[383 : 352] = MEM[ADDR + 44U].b32;\nSDATA[415 : 384] = MEM[ADDR + 48U].b32;\nSDATA[447 : 416] = MEM[ADDR + 52U].b32;\nSDATA[479 : 448] = MEM[ADDR + 56U].b32;\nSDATA[511 : 480] = MEM[ADDR + 60U].b32', -} - -VOP1Op_PCODE = { - VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32', - VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", - VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', - VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', - VOP1Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', - VOP1Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', - VOP1Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', - VOP1Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', - VOP1Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', - VOP1Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', - VOP1Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', - VOP1Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', - VOP1Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. 
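# All of the S_LOAD_B*/S_BUFFER_LOAD_B* pseudocode above follows one pattern:
# read nbits/32 consecutive little-endian dwords starting at ADDR. A compact
# sketch under that assumption (the helper name is made up):
import struct

def s_load(mem: bytes, addr: int, nbits: int) -> list[int]:
  return list(struct.unpack_from(f'<{nbits // 32}I', mem, addr))

assert s_load(bytes(range(16)), 4, 64) == [0x07060504, 0x0b0a0908]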
Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", - VOP1Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', - VOP1Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', - VOP1Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', - VOP1Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', - VOP1Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', - VOP1Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', - VOP1Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', - VOP1Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', - VOP1Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', - VOP1Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', - VOP1Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', - VOP1Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', - VOP1Op.V_MOV_B16: 'D0.b16 = S0.b16', - VOP1Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', - VOP1Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', - VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', - VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", - VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', - VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', - VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', - VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', - VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', - VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', - VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', - VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', - VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', - VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', - VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", - VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", - VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32', - VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', - VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', - VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', - VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', - VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', - VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", - VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", - VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from 
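# V_RNDNE_F32/F64 above implement round-to-nearest-even via floor(); a direct
# Python transcription of that pseudocode (sketch only):
import math

def v_rndne(x: float) -> float:
  d = float(math.floor(x + 0.5))
  if math.floor(x) % 2 == 0 and x - math.floor(x) == 0.5:  # isEven(floor(x)) && fract(x)==0.5
    d -= 1.0
  return d

assert v_rndne(2.5) == 2.0 and v_rndne(3.5) == 4.0 and v_rndne(-2.5) == -2.0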
instruction\nVGPR[laneId][addr].b32 = S0.b32', - VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = VGPR[laneId][addr].b32', - VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', - VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', - VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', - VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', - VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', - VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', - VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", - VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', - VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", - VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', - VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", - VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", - VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", - VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", - VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", - VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', - VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", - VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', - VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", - VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", - VOP1Op.V_SAT_PK_U8_I16: 'D0.b16 = { SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16) }', - VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', - VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', - VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp', - VOP1Op.V_SWAP_B16: 'tmp = D0.b16;\nD0.b16 = S0.b16;\nS0.b16 = tmp', - VOP1Op.V_PERMLANE64_B32: "declare tmp : 32'B[64];\ndeclare lane : 32'U;\nif WAVE32 then\n// Supported in wave64 ONLY; treated as scalar NOP in wave32\nelse\nfor lane in 0U : 63U do\n// Copy original S0 in case D==S0\ntmp[lane] = VGPR[lane][SRC0.u32]\nendfor;\nfor lane in 0U : 63U do\naltlane = { ~lane[5], lane[4 : 0] };\n// 0<->32, ..., 31<->63\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altlane]\nendif\nendfor\nendif", - VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\ntmp = VGPR[laneId][addrd].b32;', - VOP1Op.V_NOT_B16: 'D0.u16 = ~S0.u16', - VOP1Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", - VOP1Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", -} - -VOP2Op_PCODE = { - VOP2Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? 
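# V_READFIRSTLANE_B32 above picks the lowest active lane, falling back to lane 0
# when EXEC is all zeros. The lowest-set-bit trick below is one way to express
# that in Python (a sketch; the list-of-ints VGPR model is an assumption):
def read_first_lane(vgpr: list[int], exec_mask: int) -> int:
  lane = (exec_mask & -exec_mask).bit_length() - 1 if exec_mask else 0
  return vgpr[lane]

assert read_first_lane([10, 11, 12, 13], 0b1100) == 12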
S1.u32 : S0.u32', - VOP2Op.V_DOT2ACC_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', - VOP2Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', - VOP2Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', - VOP2Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', - VOP2Op.V_FMAC_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, D0.f32)\nendif", - VOP2Op.V_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", - VOP2Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', - VOP2Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", - VOP2Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", - VOP2Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", - VOP2Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", - VOP2Op.V_MIN_F32: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - VOP2Op.V_MAX_F32: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - VOP2Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', - VOP2Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', - VOP2Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', - VOP2Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? S0.u32 : S1.u32', - VOP2Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', - VOP2Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', - VOP2Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', - VOP2Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', - VOP2Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', - VOP2Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', - VOP2Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', - VOP2Op.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 
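# The long V_MIN_F32/V_MAX_F32 pseudocode above boils down to: drop NaN inputs,
# and order -0.0 below +0.0 (which IEEE comparison does not). A sketch of the
# non-IEEE (WAVE_MODE.IEEE == 0) branch only:
import math

def v_min_f32(s0: float, s1: float) -> float:
  if math.isnan(s1): return s0
  if math.isnan(s0): return s1
  if s0 == 0.0 and s1 == 0.0:  # LT_NEG_ZERO: -0.0 < +0.0 is TRUE here
    return s0 if math.copysign(1.0, s0) < 0 else s1
  return s0 if s0 < s1 else s1

assert math.copysign(1.0, v_min_f32(0.0, -0.0)) == -1.0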
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", - VOP2Op.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", - VOP2Op.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", - VOP2Op.V_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', - VOP2Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', - VOP2Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', - VOP2Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', - VOP2Op.V_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', - VOP2Op.V_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', - VOP2Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', - VOP2Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', - VOP2Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', - VOP2Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', - VOP2Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', - VOP2Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', - VOP2Op.V_FMAMK_F16: 'D0.f16 = fma(S0.f16, SIMM32.f16, S1.f16)', - VOP2Op.V_FMAAK_F16: 'D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16)', - VOP2Op.V_MAX_F16: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - VOP2Op.V_MIN_F16: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", - VOP2Op.V_PK_FMAC_F16: 'D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16);\nD0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16)', -} - -VOP3Op_PCODE = { - VOP3Op.V_CMP_F_F16: "Set the per-lane condition code to 0. 
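# The V_ADD_CO_CI_U32 pseudocode above is a plain 32-bit add-with-carry where the
# carry-out is the unsigned overflow bit; one lane's view in Python (sketch):
def v_add_co_ci_u32(s0: int, s1: int, carry_in: int) -> tuple[int, int]:
  tmp = (s0 & 0xffffffff) + (s1 & 0xffffffff) + (carry_in & 1)
  return tmp & 0xffffffff, int(tmp >= 1 << 32)  # (D0, VCC bit for this lane)

assert v_add_co_ci_u32(0xffffffff, 0, 1) == (0, 1)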
Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_T_F16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_F_F32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. 
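# The "not the same operation as <" comments above matter exactly when a NaN is
# involved: V_CMP_NGE is the negation of >=, not the < relation. In Python terms:
import math

assert (not (math.nan >= 1.0)) is True  # NGE with a NaN input -> 1
assert (math.nan < 1.0) is False        # LT with a NaN input  -> 0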
Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_T_F32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_F_F64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. 
Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_T_F64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_F_I32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_T_I32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_F_U32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_T_U32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_F_I64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_T_I64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_F_U64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_T_U64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. 
Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMPX_F_F16: "EXEC.u64[laneId] = 1'0U", - VOP3Op.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', - VOP3Op.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
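# V_CMP_CLASS_F32 above tests S0's float class against a 10-bit mask in S1. A
# loose Python approximation (Python floats cannot distinguish signaling from
# quiet NaNs, so NaN is mapped to the quiet-NaN bit here, and the f32 denormal
# threshold 2**-126 is an assumption of this sketch):
import math

def v_cmp_class_f32(s0: float, s1: int) -> bool:
  if math.isnan(s0): bit = 1
  elif math.isinf(s0): bit = 2 if s0 < 0 else 9
  elif s0 == 0.0: bit = 5 if math.copysign(1.0, s0) < 0 else 6
  elif abs(s0) < 2**-126: bit = 4 if s0 < 0 else 7  # denormal range
  else: bit = 3 if s0 < 0 else 8  # normal value
  return bool(s1 >> bit & 1)

assert v_cmp_class_f32(-0.0, 1 << 5) and v_cmp_class_f32(math.inf, 1 << 9)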
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f16 == S1.f16', - VOP3Op.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', - VOP3Op.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', - VOP3Op.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', - VOP3Op.V_CMPX_GE_F16: 'EXEC.u64[laneId] = S0.f16 >= S1.f16', - VOP3Op.V_CMPX_O_F16: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", - VOP3Op.V_CMPX_U_F16: "EXEC.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", - VOP3Op.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', - VOP3Op.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', - VOP3Op.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', - VOP3Op.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', - VOP3Op.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', - VOP3Op.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', - VOP3Op.V_CMPX_T_F16: "EXEC.u64[laneId] = 1'1U", - VOP3Op.V_CMPX_F_F32: "EXEC.u64[laneId] = 1'0U", - VOP3Op.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', - VOP3Op.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f32 == S1.f32', - VOP3Op.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', - VOP3Op.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', - VOP3Op.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', - VOP3Op.V_CMPX_GE_F32: 'EXEC.u64[laneId] = S0.f32 >= S1.f32', - VOP3Op.V_CMPX_O_F32: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", - VOP3Op.V_CMPX_U_F32: "EXEC.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", - VOP3Op.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', - VOP3Op.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', - VOP3Op.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', - VOP3Op.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', - VOP3Op.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', - VOP3Op.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', - VOP3Op.V_CMPX_T_F32: "EXEC.u64[laneId] = 1'1U", - VOP3Op.V_CMPX_F_F64: "EXEC.u64[laneId] = 1'0U", - VOP3Op.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', - VOP3Op.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f64 == S1.f64', - VOP3Op.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', - VOP3Op.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', - VOP3Op.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', - VOP3Op.V_CMPX_GE_F64: 'EXEC.u64[laneId] = S0.f64 >= S1.f64', - VOP3Op.V_CMPX_O_F64: 'EXEC.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64))', - VOP3Op.V_CMPX_U_F64: 'EXEC.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64))', - VOP3Op.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <', - VOP3Op.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==', - VOP3Op.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=', - VOP3Op.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >', - VOP3Op.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', - VOP3Op.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', - VOP3Op.V_CMPX_T_F64: "EXEC.u64[laneId] = 1'1U", - VOP3Op.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', - VOP3Op.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i16 == S1.i16', - VOP3Op.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', - VOP3Op.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', - VOP3Op.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', - VOP3Op.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', - VOP3Op.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', - VOP3Op.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u16 == S1.u16', - VOP3Op.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', - VOP3Op.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', - VOP3Op.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', - VOP3Op.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', - VOP3Op.V_CMPX_F_I32: "EXEC.u64[laneId] = 1'0U", - VOP3Op.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', - VOP3Op.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i32 == S1.i32', - VOP3Op.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', - VOP3Op.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', - VOP3Op.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', - VOP3Op.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', - VOP3Op.V_CMPX_T_I32: "EXEC.u64[laneId] = 1'1U", - VOP3Op.V_CMPX_F_U32: "EXEC.u64[laneId] = 1'0U", - VOP3Op.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', - VOP3Op.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.u32 == S1.u32', - VOP3Op.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', - VOP3Op.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', - VOP3Op.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', - VOP3Op.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', - VOP3Op.V_CMPX_T_U32: "EXEC.u64[laneId] = 1'1U", - VOP3Op.V_CMPX_F_I64: "EXEC.u64[laneId] = 1'0U", - VOP3Op.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', - VOP3Op.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i64 == S1.i64', - VOP3Op.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', - VOP3Op.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', - VOP3Op.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', - VOP3Op.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', - VOP3Op.V_CMPX_T_I64: "EXEC.u64[laneId] = 1'1U", - VOP3Op.V_CMPX_F_U64: "EXEC.u64[laneId] = 1'0U", - VOP3Op.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64', - VOP3Op.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u64 == S1.u64', - VOP3Op.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', - VOP3Op.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', - VOP3Op.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', - VOP3Op.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', - VOP3Op.V_CMPX_T_U64: "EXEC.u64[laneId] = 1'1U", - VOP3Op.V_CMPX_CLASS_F16: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", - VOP3Op.V_CMPX_CLASS_F32: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 
5 : 6]\nendif;\nEXEC.u64[laneId] = result", - VOP3Op.V_CMPX_CLASS_F64: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", - VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32', - VOP3Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", - VOP3Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', - VOP3Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', - VOP3Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', - VOP3Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', - VOP3Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', - VOP3Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', - VOP3Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', - VOP3Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', - VOP3Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', - VOP3Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', - VOP3Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. 
Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", - VOP3Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', - VOP3Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', - VOP3Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', - VOP3Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', - VOP3Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', - VOP3Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', - VOP3Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', - VOP3Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', - VOP3Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', - VOP3Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', - VOP3Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', - VOP3Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', - VOP3Op.V_MOV_B16: 'D0.b16 = S0.b16', - VOP3Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', - VOP3Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', - VOP3Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', - VOP3Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", - VOP3Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', - VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', - VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', - VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', - VOP3Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', - VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', - VOP3Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', - VOP3Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', - VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', - VOP3Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', - VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", - VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", - VOP3Op.V_NOT_B32: 'D0.u32 = ~S0.u32', - VOP3Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', - VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', - VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', - VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', - VOP3Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', - VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", - VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", - VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from 
instruction\nVGPR[laneId][addr].b32 = S0.b32', - VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = VGPR[laneId][addr].b32', - VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', - VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', - VOP3Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', - VOP3Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', - VOP3Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', - VOP3Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', - VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", - VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', - VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", - VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', - VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", - VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", - VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", - VOP3Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", - VOP3Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", - VOP3Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', - VOP3Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", - VOP3Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', - VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", - VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", - VOP3Op.V_SAT_PK_U8_I16: 'D0.b16 = { SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16) }', - VOP3Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', - VOP3Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', - VOP3Op.V_NOT_B16: 'D0.u16 = ~S0.u16', - VOP3Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", - VOP3Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", - VOP3Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? 
S1.u32 : S0.u32', - VOP3Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', - VOP3Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', - VOP3Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', - VOP3Op.V_FMAC_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, D0.f32)\nendif", - VOP3Op.V_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", - VOP3Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', - VOP3Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", - VOP3Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", - VOP3Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", - VOP3Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", - VOP3Op.V_MIN_F32: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - VOP3Op.V_MAX_F32: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - VOP3Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', - VOP3Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', - VOP3Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', - VOP3Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? 
S0.u32 : S1.u32', - VOP3Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', - VOP3Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', - VOP3Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', - VOP3Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', - VOP3Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', - VOP3Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', - VOP3Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', - VOP3Op.V_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', - VOP3Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', - VOP3Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', - VOP3Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', - VOP3Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', - VOP3Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', - VOP3Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', - VOP3Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', - VOP3Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', - VOP3Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', - VOP3Op.V_MAX_F16: "// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - VOP3Op.V_MIN_F16: "// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE", - VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", - VOP3Op.V_FMA_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif", - VOP3Op.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32", - VOP3Op.V_MAD_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24) + S2.u32", - VOP3Op.V_CUBEID_F32: '// Set D0.f = cubemap face ID ({0.0, 1.0, ..., 5.0}).\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = 5.0F\nelse\nD0.f32 = 4.0F\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = 3.0F\nelse\nD0.f32 = 2.0F\nendif\nelse\nif S0.f32 < 0.0F then\nD0.f32 = 1.0F\nelse\nD0.f32 = 0.0F\nendif\nendif', - 
VOP3Op.V_CUBESC_F32: '// D0.f = cubemap S coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = -S0.f32\nelse\nD0.f32 = S0.f32\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S0.f32\nelse\nif S0.f32 < 0.0F then\nD0.f32 = S2.f32\nelse\nD0.f32 = -S2.f32\nendif\nendif', - VOP3Op.V_CUBETC_F32: '// D0.f = cubemap T coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = -S1.f32\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = -S2.f32\nelse\nD0.f32 = S2.f32\nendif\nelse\nD0.f32 = -S1.f32\nendif', - VOP3Op.V_CUBEMA_F32: '// D0.f = 2.0 * cubemap major axis.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = S2.f32 * 2.0F\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S1.f32 * 2.0F\nelse\nD0.f32 = S0.f32 * 2.0F\nendif', - VOP3Op.V_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S2[4 : 0].u32) - 1U))', - VOP3Op.V_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32)', - VOP3Op.V_BFI_B32: 'D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32))', - VOP3Op.V_FMA_F32: 'D0.f32 = fma(S0.f32, S1.f32, S2.f32)', - VOP3Op.V_FMA_F64: 'D0.f64 = fma(S0.f64, S1.f64, S2.f64)', - VOP3Op.V_LERP_U8: 'tmp = ((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1U << 24U);\ntmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1U << 16U);\ntmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1U << 8U);\ntmp += ((S0.u32[7 : 0] + S1.u32[7 : 0] + S2.u32[0].u8) >> 1U);\nD0.u32 = tmp.u32', - VOP3Op.V_ALIGNBIT_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> S2.u32[4 : 0].u32) & 0xffffffffLL)", - VOP3Op.V_ALIGNBYTE_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> (S2.u32[1 : 0].u32 * 8U)) & 0xffffffffLL)", - VOP3Op.V_MULLIT_F32: "if ((S1.f32 == -MAX_FLOAT_F32) || (64'F(S1.f32) == -INF) || isNAN(64'F(S1.f32)) || (S2.f32 <= 0.0F) ||\nisNAN(64'F(S2.f32))) then\nD0.f32 = -MAX_FLOAT_F32\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", - VOP3Op.V_MIN3_F32: 'D0.f32 = v_min_f32(v_min_f32(S0.f32, S1.f32), S2.f32)', - VOP3Op.V_MIN3_I32: 'D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', - VOP3Op.V_MIN3_U32: 'D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', - VOP3Op.V_MAX3_F32: 'D0.f32 = v_max_f32(v_max_f32(S0.f32, S1.f32), S2.f32)', - VOP3Op.V_MAX3_I32: 'D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', - VOP3Op.V_MAX3_U32: 'D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', - VOP3Op.V_MED3_F32: "if (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)) || isNAN(64'F(S2.f32))) then\nD0.f32 = v_min3_f32(S0.f32, S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S0.f32 then\nD0.f32 = v_max_f32(S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S1.f32 then\nD0.f32 = v_max_f32(S0.f32, S2.f32)\nelse\nD0.f32 = v_max_f32(S0.f32, S1.f32)\nendif", - VOP3Op.V_MED3_I32: 'if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32 then\nD0.i32 = v_max_i32(S1.i32, S2.i32)\nelsif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32 then\nD0.i32 = v_max_i32(S0.i32, S2.i32)\nelse\nD0.i32 = v_max_i32(S0.i32, S1.i32)\nendif', - VOP3Op.V_MED3_U32: 'if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32 then\nD0.u32 = v_max_u32(S1.u32, 
S2.u32)\nelsif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32 then\nD0.u32 = v_max_u32(S0.u32, S2.u32)\nelse\nD0.u32 = v_max_u32(S0.u32, S1.u32)\nendif', - VOP3Op.V_SAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", - VOP3Op.V_SAD_HI_U8: "D0.u32 = (32'U(v_sad_u8(S0, S1, 0U)) << 16U) + S2.u32", - VOP3Op.V_SAD_U16: '// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp', - VOP3Op.V_SAD_U32: '// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32', - VOP3Op.V_CVT_PK_U8_F32: "tmp = (S2.u32 & 32'U(~(0xff << (S1.u32[1 : 0].u32 * 8U))));\ntmp = (tmp | ((32'U(f32_to_u8(S0.f32)) & 255U) << (S1.u32[1 : 0].u32 * 8U)));\nD0.u32 = tmp", - VOP3Op.V_DIV_FIXUP_F32: "sign_out = (sign(S1.f32) ^ sign(S2.f32));\nif isNAN(64'F(S2.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S2.f32)))\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif ((64'F(S1.f32) == 0.0) && (64'F(S2.f32) == 0.0)) then\n// 0/0\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(abs(S1.f32)) == +INF) && (64'F(abs(S2.f32)) == +INF)) then\n// inf/inf\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(S1.f32) == 0.0) || (64'F(abs(S2.f32)) == +INF)) then\n// x/0, or inf/y\nD0.f32 = sign_out ? -INF.f32 : +INF.f32\nelsif ((64'F(abs(S1.f32)) == +INF) || (64'F(S2.f32) == 0.0)) then\n// x/inf, 0/y\nD0.f32 = sign_out ? -0.0F : 0.0F\nelsif exponent(S2.f32) - exponent(S1.f32) < -150 then\nD0.f32 = sign_out ? -UNDERFLOW_F32 : UNDERFLOW_F32\nelsif exponent(S1.f32) == 255 then\nD0.f32 = sign_out ? -OVERFLOW_F32 : OVERFLOW_F32\nelse\nD0.f32 = sign_out ? -abs(S0.f32) : abs(S0.f32)\nendif", - VOP3Op.V_DIV_FIXUP_F64: "sign_out = (sign(S1.f64) ^ sign(S2.f64));\nif isNAN(S2.f64) then\nD0.f64 = cvtToQuietNAN(S2.f64)\nelsif isNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif ((S1.f64 == 0.0) && (S2.f64 == 0.0)) then\n// 0/0\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((abs(S1.f64) == +INF) && (abs(S2.f64) == +INF)) then\n// inf/inf\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((S1.f64 == 0.0) || (abs(S2.f64) == +INF)) then\n// x/0, or inf/y\nD0.f64 = sign_out ? -INF : +INF\nelsif ((abs(S1.f64) == +INF) || (S2.f64 == 0.0)) then\n// x/inf, 0/y\nD0.f64 = sign_out ? -0.0 : 0.0\nelsif exponent(S2.f64) - exponent(S1.f64) < -1075 then\nD0.f64 = sign_out ? -UNDERFLOW_F64 : UNDERFLOW_F64\nelsif exponent(S1.f64) == 2047 then\nD0.f64 = sign_out ? -OVERFLOW_F64 : OVERFLOW_F64\nelse\nD0.f64 = sign_out ? -abs(S0.f64) : abs(S0.f64)\nendif", - VOP3Op.V_DIV_FMAS_F32: 'if VCC.u64[laneId] then\nD0.f32 = 2.0F ** 32 * fma(S0.f32, S1.f32, S2.f32)\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif', - VOP3Op.V_DIV_FMAS_F64: 'if VCC.u64[laneId] then\nD0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)\nelse\nD0.f64 = fma(S0.f64, S1.f64, S2.f64)\nendif', - VOP3Op.V_MSAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 
0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", - VOP3Op.V_QSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", - VOP3Op.V_MQSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", - VOP3Op.V_MQSAD_U32_U8: "tmp[127 : 96] = 32'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32));\ntmp[95 : 64] = 32'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32));\ntmp[63 : 32] = 32'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32));\ntmp[31 : 0] = 32'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32));\nD0.b128 = tmp.b128", - VOP3Op.V_XOR3_B32: 'D0.u32 = (S0.u32 ^ S1.u32 ^ S2.u32)', - VOP3Op.V_MAD_U16: 'D0.u16 = S0.u16 * S1.u16 + S2.u16', - VOP3Op.V_PERM_B32: 'D0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])', - VOP3Op.V_XAD_U32: 'D0.u32 = (S0.u32 ^ S1.u32) + S2.u32', - VOP3Op.V_LSHL_ADD_U32: 'D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32', - VOP3Op.V_ADD_LSHL_U32: 'D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32)', - VOP3Op.V_FMA_F16: 'D0.f16 = fma(S0.f16, S1.f16, S2.f16)', - VOP3Op.V_MIN3_F16: 'D0.f16 = v_min_f16(v_min_f16(S0.f16, S1.f16), S2.f16)', - VOP3Op.V_MIN3_I16: 'D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16)', - VOP3Op.V_MIN3_U16: 'D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16)', - VOP3Op.V_MAX3_F16: 'D0.f16 = v_max_f16(v_max_f16(S0.f16, S1.f16), S2.f16)', - VOP3Op.V_MAX3_I16: 'D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16)', - VOP3Op.V_MAX3_U16: 'D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16)', - VOP3Op.V_MED3_F16: "if (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)) || isNAN(64'F(S2.f16))) then\nD0.f16 = v_min3_f16(S0.f16, S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S0.f16 then\nD0.f16 = v_max_f16(S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S1.f16 then\nD0.f16 = v_max_f16(S0.f16, S2.f16)\nelse\nD0.f16 = v_max_f16(S0.f16, S1.f16)\nendif", - VOP3Op.V_MED3_I16: 'if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16 then\nD0.i16 = v_max_i16(S1.i16, S2.i16)\nelsif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16 then\nD0.i16 = v_max_i16(S0.i16, S2.i16)\nelse\nD0.i16 = v_max_i16(S0.i16, S1.i16)\nendif', - VOP3Op.V_MED3_U16: 'if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16 then\nD0.u16 = v_max_u16(S1.u16, S2.u16)\nelsif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16 then\nD0.u16 = v_max_u16(S0.u16, S2.u16)\nelse\nD0.u16 = v_max_u16(S0.u16, S1.u16)\nendif', - VOP3Op.V_MAD_I16: 'D0.i16 = S0.i16 * S1.i16 + S2.i16', - VOP3Op.V_DIV_FIXUP_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S2.f16)))\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == 
+INF)) then\n// inf/inf\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\nD0.f16 = sign_out ? -INF.f16 : +INF.f16\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\nD0.f16 = sign_out ? -16'0.0 : 16'0.0\nelse\nD0.f16 = sign_out ? -abs(S0.f16) : abs(S0.f16)\nendif", - VOP3Op.V_ADD3_U32: 'D0.u32 = S0.u32 + S1.u32 + S2.u32', - VOP3Op.V_LSHL_OR_B32: 'D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32)', - VOP3Op.V_AND_OR_B32: 'D0.u32 = ((S0.u32 & S1.u32) | S2.u32)', - VOP3Op.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)', - VOP3Op.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32", - VOP3Op.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32", - VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor", - VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor", - VOP3Op.V_CNDMASK_B16: 'D0.u16 = VCC.u64[laneId] ? S1.u16 : S0.u16', - VOP3Op.V_MAXMIN_F32: 'D0.f32 = v_min_f32(v_max_f32(S0.f32, S1.f32), S2.f32)', - VOP3Op.V_MINMAX_F32: 'D0.f32 = v_max_f32(v_min_f32(S0.f32, S1.f32), S2.f32)', - VOP3Op.V_MAXMIN_F16: 'D0.f16 = v_min_f16(v_max_f16(S0.f16, S1.f16), S2.f16)', - VOP3Op.V_MINMAX_F16: 'D0.f16 = v_max_f16(v_min_f16(S0.f16, S1.f16), S2.f16)', - VOP3Op.V_MAXMIN_U32: 'D0.u32 = v_min_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', - VOP3Op.V_MINMAX_U32: 'D0.u32 = v_max_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', - VOP3Op.V_MAXMIN_I32: 'D0.i32 = v_min_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', - VOP3Op.V_MINMAX_I32: 'D0.i32 = v_max_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', - VOP3Op.V_DOT2_F16_F16: 'tmp = S2.f16;\ntmp += S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp += S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.f16 = tmp', - VOP3Op.V_DOT2_BF16_BF16: 'tmp = S2.bf16;\ntmp += S0[15 : 0].bf16 * S1[15 : 0].bf16;\ntmp += S0[31 : 16].bf16 * S1[31 : 16].bf16;\nD0.bf16 = tmp', - VOP3Op.V_ADD_NC_U16: 'D0.u16 = S0.u16 + S1.u16', - VOP3Op.V_SUB_NC_U16: 'D0.u16 = S0.u16 - S1.u16', - VOP3Op.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16', - VOP3Op.V_CVT_PK_I16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_i16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_i16_f32(S0.f32));", - VOP3Op.V_CVT_PK_U16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_u16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_u16_f32(S0.f32));", - VOP3Op.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16', - VOP3Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16', - VOP3Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16', - VOP3Op.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? 
S0.i16 : S1.i16', - VOP3Op.V_ADD_NC_I16: 'D0.i16 = S0.i16 + S1.i16', - VOP3Op.V_SUB_NC_I16: 'D0.i16 = S0.i16 - S1.i16', - VOP3Op.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16', - VOP3Op.V_CVT_PK_NORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);", - VOP3Op.V_CVT_PK_NORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);", - VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32', - VOP3Op.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', - VOP3Op.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp", - VOP3Op.V_MBCNT_LO_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', - VOP3Op.V_MBCNT_HI_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', - VOP3Op.V_CVT_PK_NORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);", - VOP3Op.V_CVT_PK_NORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);", - VOP3Op.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);", - VOP3Op.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);", - VOP3Op.V_SUB_NC_I32: 'D0.i32 = S0.i32 - S1.i32', - VOP3Op.V_ADD_NC_I32: 'D0.i32 = S0.i32 + S1.i32', - VOP3Op.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64', - VOP3Op.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64', - VOP3Op.V_MIN_F64: '// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif LT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nelse\nif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif LT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE', - VOP3Op.V_MAX_F64: '// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif GT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nelse\nif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif GT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE', - VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32', - VOP3Op.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32', - VOP3Op.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", - VOP3Op.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", - VOP3Op.V_TRIG_PREOP_F64: 
"shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)", - VOP3Op.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)', - VOP3Op.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)', - VOP3Op.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)', - VOP3Op.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)', - VOP3Op.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)', - VOP3Op.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)', - VOP3Op.V_READLANE_B32: "declare lane : 32'U;\nif WAVE32 then\nlane = S1.u32[4 : 0].u32;\n// Lane select for wave32\nelse\nlane = S1.u32[5 : 0].u32;\n// Lane select for wave64\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", - VOP3Op.V_WRITELANE_B32: "declare lane : 32'U;\nif WAVE32 then\nlane = S1.u32[4 : 0].u32;\n// Lane select for wave32\nelse\nlane = S1.u32[5 : 0].u32;\n// Lane select for wave64\nendif;\nVGPR[lane][VDST.u32] = S0.b32", - VOP3Op.V_AND_B16: 'D0.u16 = (S0.u16 & S1.u16)', - VOP3Op.V_OR_B16: 'D0.u16 = (S0.u16 | S1.u16)', - VOP3Op.V_XOR_B16: 'D0.u16 = (S0.u16 ^ S1.u16)', -} - -VOP3SDOp_PCODE = { - VOP3SDOp.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", - VOP3SDOp.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", - VOP3SDOp.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", - VOP3SDOp.V_DIV_SCALE_F32: "VCC = 0x0LL;\nif ((64'F(S2.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\nD0.f32 = NAN.f32\nelsif exponent(S2.f32) - exponent(S1.f32) >= 96 then\n// N/D near MAX_FLOAT_F32\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif S1.f32 == DENORM.f32 then\nD0.f32 = ldexp(S0.f32, 64)\nelsif ((1.0 / 64'F(S1.f32) == DENORM.f64) && (S2.f32 / S1.f32 == DENORM.f32)) then\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif 1.0 / 64'F(S1.f32) == DENORM.f64 then\nD0.f32 = ldexp(S0.f32, -64)\nelsif S2.f32 / S1.f32 == DENORM.f32 then\nVCC = 0x1LL;\nif S0.f32 == S2.f32 then\n// Only scale the numerator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif exponent(S2.f32) <= 23 then\n// Numerator is tiny\nD0.f32 = ldexp(S0.f32, 64)\nendif", - VOP3SDOp.V_DIV_SCALE_F64: 'VCC = 0x0LL;\nif ((S2.f64 == 0.0) || (S1.f64 == 0.0)) then\nD0.f64 = NAN.f64\nelsif exponent(S2.f64) - exponent(S1.f64) >= 768 then\n// N/D near MAX_FLOAT_F64\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, 128)\nelsif ((1.0 / S1.f64 == DENORM.f64) && (S2.f64 / S1.f64 == DENORM.f64)) then\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif 1.0 / S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, -128)\nelsif S2.f64 / S1.f64 == DENORM.f64 then\nVCC = 0x1LL;\nif S0.f64 == S2.f64 then\n// Only scale the numerator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif exponent(S2.f64) <= 53 then\n// Numerator is tiny\nD0.f64 = ldexp(S0.f64, 128)\nendif', - VOP3SDOp.V_MAD_U64_U32: "{ D1.u1, D0.u64 } = 65'B(65'U(S0.u32) * 65'U(S1.u32) + 65'U(S2.u64))", - VOP3SDOp.V_MAD_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))", - VOP3SDOp.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", - VOP3SDOp.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", - VOP3SDOp.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", -} - -VOP3POp_PCODE = { - VOP3POp.V_PK_MAD_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MUL_LO_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_ADD_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_SUB_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_LSHLREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', - VOP3POp.V_PK_LSHRREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', - VOP3POp.V_PK_ASHRREV_I16: 'tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MAX_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 >= S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 >= S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MIN_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 < S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 < S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MAD_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_ADD_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_SUB_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MAX_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 >= S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 >= S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MIN_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 < S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 < S1[15 : 0].u16 ? 
S0[15 : 0].u16 : S1[15 : 0].u16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_FMA_F16: "declare tmp : 32'B;\ntmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16);\ntmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16);\nD0.b32 = tmp", - VOP3POp.V_PK_ADD_F16: 'tmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MUL_F16: 'tmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MIN_F16: 'tmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16);\ntmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16);\nD0.b32 = tmp.b32', - VOP3POp.V_PK_MAX_F16: 'tmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16);\ntmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16);\nD0.b32 = tmp.b32', - VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', - VOP3POp.V_DOT4_I32_IU8: "declare A : 32'I[4];\ndeclare B : 32'I[4];\nfor i in 0 : 3 do\nA8 = S0[i * 8 + 7 : i * 8];\nB8 = S1[i * 8 + 7 : i * 8];\nendfor;\nC = S2.i32;\ntmp = C.i32;\nD0.i32 = tmp", - VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp', - VOP3POp.V_DOT8_I32_IU4: "declare A : 32'I[8];\ndeclare B : 32'I[8];\nfor i in 0 : 7 do\nA4 = S0[i * 4 + 3 : i * 4];\nB4 = S1[i * 4 + 3 : i * 4];\nendfor;\nC = S2.i32;\ntmp = C.i32;\nD0.i32 = tmp", - VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp', - VOP3POp.V_DOT2_F32_BF16: 'tmp = S2.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', - VOP3POp.V_FMA_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])", - VOP3POp.V_FMA_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))", - VOP3POp.V_FMA_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))", - VOP3POp.V_WMMA_F32_16X16X16_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval 
"D0.f32(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', - VOP3POp.V_WMMA_F32_16X16X16_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', - VOP3POp.V_WMMA_F16_16X16X16_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f16(16x16)";\nEXEC = saved_exec', - VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.bf16(16x16)";\nEXEC = saved_exec', - VOP3POp.V_WMMA_I32_16X16X16_IU8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(16x16) + S2.i32(16x16)";\nEXEC = saved_exec', - VOP3POp.V_WMMA_I32_16X16X16_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(16x16) + S2.i32(16x16)";\nEXEC = saved_exec', -} - -VOPCOp_PCODE = { - VOPCOp.V_CMP_F_F16: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. 
Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_T_F16: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_F_F32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_T_F32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_F_F64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_T_F64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_F_I32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_T_I32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_F_U32: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_T_U32: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_F_I64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_T_I64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_F_U64: "Set the per-lane condition code to 0. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', - VOPCOp.V_CMP_T_U64: "Set the per-lane condition code to 1. Store the result into VCC or a scalar register.\nD0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. 
Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOPCOp.V_CMPX_F_F16: "EXEC.u64[laneId] = 1'0U", - VOPCOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', - VOPCOp.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f16 == S1.f16', - VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', - VOPCOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', - VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', - VOPCOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = S0.f16 >= S1.f16', - VOPCOp.V_CMPX_O_F16: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", - VOPCOp.V_CMPX_U_F16: "EXEC.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", - VOPCOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', - VOPCOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', - VOPCOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', - VOPCOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', - VOPCOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', - VOPCOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', - VOPCOp.V_CMPX_T_F16: "EXEC.u64[laneId] = 1'1U", - VOPCOp.V_CMPX_F_F32: "EXEC.u64[laneId] = 1'0U", - VOPCOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', - VOPCOp.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f32 == S1.f32', - VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', - VOPCOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', - VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', - VOPCOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = S0.f32 >= S1.f32', - VOPCOp.V_CMPX_O_F32: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", - VOPCOp.V_CMPX_U_F32: "EXEC.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", - VOPCOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', - VOPCOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', - VOPCOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', - VOPCOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', - VOPCOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', - VOPCOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', - VOPCOp.V_CMPX_T_F32: "EXEC.u64[laneId] = 1'1U", - VOPCOp.V_CMPX_F_F64: "EXEC.u64[laneId] = 1'0U", - VOPCOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', - VOPCOp.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f64 == S1.f64', - VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', - VOPCOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', - VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', - VOPCOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = S0.f64 >= S1.f64', - VOPCOp.V_CMPX_O_F64: 'EXEC.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64))', - VOPCOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64))', - VOPCOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <', - VOPCOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==', - VOPCOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=', - VOPCOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >', - VOPCOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', - VOPCOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', - VOPCOp.V_CMPX_T_F64: "EXEC.u64[laneId] = 1'1U", - VOPCOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', - VOPCOp.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i16 == S1.i16', - VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', - VOPCOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', - VOPCOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', - VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', - VOPCOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', - VOPCOp.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u16 == S1.u16', - VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', - VOPCOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', - VOPCOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', - VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', - VOPCOp.V_CMPX_F_I32: "EXEC.u64[laneId] = 1'0U", - VOPCOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', - VOPCOp.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i32 == S1.i32', - VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', - VOPCOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', - VOPCOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', - VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', - VOPCOp.V_CMPX_T_I32: "EXEC.u64[laneId] = 1'1U", - VOPCOp.V_CMPX_F_U32: "EXEC.u64[laneId] = 1'0U", - VOPCOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', - VOPCOp.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.u32 == S1.u32', - VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', - VOPCOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', - VOPCOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', - VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', - VOPCOp.V_CMPX_T_U32: "EXEC.u64[laneId] = 1'1U", - VOPCOp.V_CMPX_F_I64: "EXEC.u64[laneId] = 1'0U", - VOPCOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', - VOPCOp.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i64 == S1.i64', - VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', - VOPCOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', - VOPCOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', - VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', - VOPCOp.V_CMPX_T_I64: "EXEC.u64[laneId] = 1'1U", - VOPCOp.V_CMPX_F_U64: "EXEC.u64[laneId] = 1'0U", - VOPCOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64', - VOPCOp.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u64 == S1.u64', - VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', - VOPCOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', - VOPCOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', - VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', - VOPCOp.V_CMPX_T_U64: "EXEC.u64[laneId] = 1'1U", - VOPCOp.V_CMPX_CLASS_F16: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", - VOPCOp.V_CMPX_CLASS_F32: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 
5 : 6]\nendif;\nEXEC.u64[laneId] = result", - VOPCOp.V_CMPX_CLASS_F64: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", -} +from extra.assembly.amd.autogen.rdna3.enum import DSOp, FLATOp, GLOBALOp, MIMGOp, MTBUFOp, MUBUFOp, SCRATCHOp, SMEMOp, SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, VINTERPOp, VOP1Op, VOP2Op, VOP3Op, VOP3POp, VOP3SDOp, VOPCOp DSOp_PCODE = { DSOp.DS_ADD_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', @@ -1090,8 +20,8 @@ DSOp_PCODE = { DSOp.DS_STORE_B32: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0]', DSOp.DS_STORE_2ADDR_B32: 'MEM[ADDR + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]', DSOp.DS_STORE_2ADDR_STRIDE64_B32: 'MEM[ADDR + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]', - DSOp.DS_CMPSTORE_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', - DSOp.DS_CMPSTORE_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_CMPSTORE_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp', + DSOp.DS_CMPSTORE_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp\nsrc\ncmp', DSOp.DS_MIN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', DSOp.DS_MAX_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', DSOp.DS_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', @@ -1113,12 +43,12 @@ DSOp_PCODE = { DSOp.DS_STOREXCHG_RTN_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', DSOp.DS_STOREXCHG_2ADDR_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', - DSOp.DS_CMPSTORE_RTN_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.b32 = tmp', - DSOp.DS_CMPSTORE_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp', + DSOp.DS_CMPSTORE_RTN_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp', + DSOp.DS_CMPSTORE_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp\nsrc\ncmp', DSOp.DS_MIN_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', DSOp.DS_MAX_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', DSOp.DS_WRAP_RTN_B32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = tmp >= DATA.u32 ? tmp - DATA.u32 : tmp + DATA2.u32;\nRETURN_DATA = tmp', - DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;', + DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\n}\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset[15]) {\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n}\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? 
thread_in[j] : 0;\n}\n}', DSOp.DS_LOAD_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32', DSOp.DS_LOAD_2ADDR_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 4U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET1.u32 * 4U].b32', DSOp.DS_LOAD_2ADDR_STRIDE64_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 256U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET1.u32 * 256U].b32', @@ -1126,8 +56,8 @@ DSOp_PCODE = { DSOp.DS_LOAD_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", DSOp.DS_LOAD_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))", DSOp.DS_LOAD_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", - DSOp.DS_CONSUME: 'addr = M0.base + offset; // offset by LDS HWBASE, limit to M.size\nrtnval = LDS(addr);\nGPR[VDST] = rtnval; // return to all valid threads', - DSOp.DS_APPEND: 'addr = M0.base + offset; // offset by LDS HWBASE, limit to M.size\nrtnval = LDS(addr);\nGPR[VDST] = rtnval; // return to all valid threads', + DSOp.DS_CONSUME: 'addr = M0.base + offset; // offset by LDS HWBASE, limit to M.size\nrtnval = LDS(addr);\nLDS(addr) = LDS(addr) - countbits(valid mask);\nGPR[VDST] = rtnval; // return to all valid threads', + DSOp.DS_APPEND: 'addr = M0.base + offset; // offset by LDS HWBASE, limit to M.size\nrtnval = LDS(addr);\nLDS(addr) = LDS(addr) + countbits(valid mask);\nGPR[VDST] = rtnval; // return to all valid threads', DSOp.DS_ADD_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', DSOp.DS_SUB_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', DSOp.DS_RSUB_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 = DATA.u64 - MEM[ADDR].u64;\nRETURN_DATA.u64 = tmp', @@ -1144,8 +74,8 @@ DSOp_PCODE = { DSOp.DS_STORE_B64: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32]', DSOp.DS_STORE_2ADDR_B64: 'MEM[ADDR + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]', DSOp.DS_STORE_2ADDR_STRIDE64_B64: 'MEM[ADDR + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]', - DSOp.DS_CMPSTORE_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', - DSOp.DS_CMPSTORE_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_CMPSTORE_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp', + DSOp.DS_CMPSTORE_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp\nsrc\ncmp', DSOp.DS_MIN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', DSOp.DS_MAX_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src > tmp ? 
src : tmp;\nRETURN_DATA.f64 = tmp', DSOp.DS_ADD_RTN_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', @@ -1164,30 +94,30 @@ DSOp_PCODE = { DSOp.DS_STOREXCHG_RTN_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', DSOp.DS_STOREXCHG_2ADDR_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', - DSOp.DS_CMPSTORE_RTN_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', - DSOp.DS_CMPSTORE_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp', + DSOp.DS_CMPSTORE_RTN_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp', + DSOp.DS_CMPSTORE_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp\nsrc\ncmp', DSOp.DS_MIN_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', DSOp.DS_MAX_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp', DSOp.DS_LOAD_B64: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4U].b32', DSOp.DS_LOAD_2ADDR_B64: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET0.u32 * 8U + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[ADDR + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[ADDR + OFFSET1.u32 * 8U + 4U].b32', DSOp.DS_LOAD_2ADDR_STRIDE64_B64: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET0.u32 * 512U + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[ADDR + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[ADDR + OFFSET1.u32 * 512U + 4U].b32', DSOp.DS_ADD_RTN_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', - DSOp.DS_ADD_GS_REG_RTN: 'if OFFSET0[5:2] > 7\n// 64-bit GS register access\naddr = (OFFSET0[5:2] - 8) * 2 + 8;\nVDST[0] = GS_REGS(addr + 0);\nVDST[1] = GS_REGS(addr + 1);\n{GS_REGS(addr + 1), GS_REGS(addr)} += DATA0[0]; // source is 32 bit\nelse\naddr = OFFSET0[5:2];\nVDST[0] = GS_REGS(addr);\nGS_REGS(addr) += DATA0[0];\noffset[5:2] Register\noffset[5:2] Register', - DSOp.DS_SUB_GS_REG_RTN: 'if OFFSET0[5:2] > 7\n// 64-bit GS register access\naddr = (OFFSET0[5:2] - 8) * 2 + 8;\nVDST[0] = GS_REGS(addr + 0);\nVDST[1] = GS_REGS(addr + 1);\n{GS_REGS(addr + 1), GS_REGS(addr)} -= DATA0[0]; // source is 32 bit\nelse\naddr = OFFSET0[5:2];\nVDST[0] = GS_REGS(addr);\nGS_REGS(addr) -= DATA0[0];\noffset[5:2] Register\noffset[5:2] Register', - DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] 
then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", + DSOp.DS_ADD_GS_REG_RTN: 'if OFFSET0[5:2] > 7\n// 64-bit GS register access\naddr = (OFFSET0[5:2] - 8) * 2 + 8;\nVDST[0] = GS_REGS(addr + 0);\nVDST[1] = GS_REGS(addr + 1);\n{GS_REGS(addr + 1), GS_REGS(addr)} += DATA0[0]; // source is 32 bit\nelse\naddr = OFFSET0[5:2];\nVDST[0] = GS_REGS(addr);\nGS_REGS(addr) += DATA0[0];\nendif.', + DSOp.DS_SUB_GS_REG_RTN: 'if OFFSET0[5:2] > 7\n// 64-bit GS register access\naddr = (OFFSET0[5:2] - 8) * 2 + 8;\nVDST[0] = GS_REGS(addr + 0);\nVDST[1] = GS_REGS(addr + 1);\n{GS_REGS(addr + 1), GS_REGS(addr)} -= DATA0[0]; // source is 32 bit\nelse\naddr = OFFSET0[5:2];\nVDST[0] = GS_REGS(addr);\nGS_REGS(addr) -= DATA0[0];\nendif.', + DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nADDR0 = ((ADDR + offset.u32) & 0xfff8U);\nADDR1 = ADDR0 + 4U;\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", DSOp.DS_STORE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]', DSOp.DS_STORE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]', - DSOp.DS_LOAD_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - DSOp.DS_LOAD_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - DSOp.DS_LOAD_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", - DSOp.DS_LOAD_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", - DSOp.DS_LOAD_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;', - DSOp.DS_LOAD_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;', - DSOp.DS_BVH_STACK_RTN_B32: 'The LDS stack address is computed using values packed into ADDR and part of OFFSET1. ADDR carries the\nstack address for the lane. 
OFFSET1[5:4] contains stack_size[1:0] -- this value is constant for all lanes and is\n(stack_base, stack_index) = DECODE_ADDR(ADDR, OFFSET1);\nlast_node_ptr = DATA0;\n// First 3 passes: push data onto stack\nfor i = 0..2 do\nif DATA_VALID(DATA1[i])\nMEM[stack_base + stack_index] = DATA1[i];\nelsif DATA1[i] == last_node_ptr\nendif\nendfor\n// Fourth pass: return data or pop\nif DATA_VALID(DATA1[3])\nVGPR_RTN = DATA1[3]\nelse\nVGPR_RTN = MEM[stack_base + stack_index];\nMEM[stack_base + stack_index] = INVALID_NODE;\nendif\nif data == INVALID_NODE\nelsif last_node_ptr != INVALID_NODE && data == last_node_ptr\n// Match last_node_ptr\nelse\nendif', + DSOp.DS_LOAD_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// RETURN_DATA[31:16] is preserved.", + DSOp.DS_LOAD_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// RETURN_DATA[15:0] is preserved.", + DSOp.DS_LOAD_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));\n// RETURN_DATA[31:16] is preserved.", + DSOp.DS_LOAD_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));\n// RETURN_DATA[15:0] is preserved.", + DSOp.DS_LOAD_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;\n// RETURN_DATA[31:16] is preserved.', + DSOp.DS_LOAD_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;\n// RETURN_DATA[15:0] is preserved.', + DSOp.DS_BVH_STACK_RTN_B32: '(stack_base, stack_index) = DECODE_ADDR(ADDR, OFFSET1);\nlast_node_ptr = DATA0;\n// First 3 passes: push data onto stack\nfor i = 0..2 do\nif DATA_VALID(DATA1[i])\nMEM[stack_base + stack_index] = DATA1[i];\nIncrement stack_index\nelsif DATA1[i] == last_node_ptr\n// Treat all further data as invalid as well.\nbreak\nendif\nendfor\n// Fourth pass: return data or pop\nif DATA_VALID(DATA1[3])\nVGPR_RTN = DATA1[3]\nelse\nVGPR_RTN = MEM[stack_base + stack_index];\nMEM[stack_base + stack_index] = INVALID_NODE;\nDecrement stack_index\nendif\nADDR = ENCODE_ADDR(stack_base, stack_index).\nfunction DATA_VALID(data):\nif data == INVALID_NODE\nreturn false\nelsif last_node_ptr != INVALID_NODE && data == last_node_ptr\n// Match last_node_ptr\nreturn false\nelse\nreturn true\nendif\nendfunction.', DSOp.DS_STORE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32", DSOp.DS_LOAD_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32", - DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", - DSOp.DS_BPERMUTE_B32: "Note that EXEC mask is applied to both VGPR read and write. If src_lane selects a disabled thread then zero is\n// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 
63 : 31 do\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\n// NOTE: destination lane is MOD 32 regardless of wave size.\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xF, OFFSET = 0\nVGPR[VDST] = { B, D, 0, C }\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xA, OFFSET = 0\nVGPR[VDST] = { -, D, -, 0 }", + DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\n// NOTE: destination lane is MOD 32 regardless of wave size.\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : WAVE64 ? 
63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xF, OFFSET = 0\nVGPR[VDST] = { A, A, D, B }\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xA, OFFSET = 0\nVGPR[VDST] = { -, 0, -, B }", DSOp.DS_STORE_B96: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET.u32 + 8U].b32 = DATA[95 : 64]', DSOp.DS_STORE_B128: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[ADDR + OFFSET.u32 + 12U].b32 = DATA[127 : 96]', DSOp.DS_LOAD_B96: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[ADDR + OFFSET.u32 + 8U].b32', @@ -1209,14 +139,16 @@ FLATOp_PCODE = { FLATOp.FLAT_STORE_B64: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32]', FLATOp.FLAT_STORE_B96: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64]', FLATOp.FLAT_STORE_B128: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64];\nMEM[ADDR + 12U].b32 = VDATA[127 : 96]', - FLATOp.FLAT_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - FLATOp.FLAT_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", - FLATOp.FLAT_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;', - FLATOp.FLAT_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - FLATOp.FLAT_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", - FLATOp.FLAT_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;', + FLATOp.FLAT_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// VDATA[31:16] is preserved.", + FLATOp.FLAT_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));\n// VDATA[31:16] is preserved.", + FLATOp.FLAT_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;\n// VDATA[31:16] is preserved.', + FLATOp.FLAT_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// VDATA[15:0] is preserved.", + FLATOp.FLAT_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));\n// VDATA[15:0] is preserved.", + FLATOp.FLAT_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;\n// VDATA[15:0] is preserved.', FLATOp.FLAT_STORE_D16_HI_B8: 'MEM[ADDR].b8 = VDATA[23 : 16]', FLATOp.FLAT_STORE_D16_HI_B16: 'MEM[ADDR].b16 = VDATA[31 : 16]', + FLATOp.GLOBAL_LOAD_ADDTID_B32: "RETURN_DATA.u32 = MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32", + FLATOp.GLOBAL_STORE_ADDTID_B32: "MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32 = DATA.u32", FLATOp.FLAT_ATOMIC_SWAP_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', FLATOp.FLAT_ATOMIC_CMPSWAP_B32: 'tmp = MEM[ADDR].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[ADDR].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', FLATOp.FLAT_ATOMIC_ADD_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', @@ -1247,8 +179,6 @@ FLATOp_PCODE = { FLATOp.FLAT_ATOMIC_MIN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp', FLATOp.FLAT_ATOMIC_MAX_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? 
src : tmp;\nRETURN_DATA.f32 = tmp', FLATOp.FLAT_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', - FLATOp.GLOBAL_LOAD_ADDTID_B32: "RETURN_DATA.u32 = MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32", - FLATOp.GLOBAL_STORE_ADDTID_B32: "MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32 = DATA.u32", } GLOBALOp_PCODE = { @@ -1266,12 +196,12 @@ GLOBALOp_PCODE = { GLOBALOp.GLOBAL_STORE_B64: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32]', GLOBALOp.GLOBAL_STORE_B96: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64]', GLOBALOp.GLOBAL_STORE_B128: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64];\nMEM[ADDR + 12U].b32 = VDATA[127 : 96]', - GLOBALOp.GLOBAL_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - GLOBALOp.GLOBAL_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", - GLOBALOp.GLOBAL_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;', - GLOBALOp.GLOBAL_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - GLOBALOp.GLOBAL_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", - GLOBALOp.GLOBAL_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;', + GLOBALOp.GLOBAL_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// VDATA[31:16] is preserved.", + GLOBALOp.GLOBAL_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));\n// VDATA[31:16] is preserved.", + GLOBALOp.GLOBAL_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;\n// VDATA[31:16] is preserved.', + GLOBALOp.GLOBAL_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// VDATA[15:0] is preserved.", + GLOBALOp.GLOBAL_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));\n// VDATA[15:0] is preserved.", + GLOBALOp.GLOBAL_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;\n// VDATA[15:0] is preserved.', GLOBALOp.GLOBAL_STORE_D16_HI_B8: 'MEM[ADDR].b8 = VDATA[23 : 16]', GLOBALOp.GLOBAL_STORE_D16_HI_B16: 'MEM[ADDR].b16 = VDATA[31 : 16]', GLOBALOp.GLOBAL_LOAD_ADDTID_B32: "RETURN_DATA.u32 = MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32", @@ -1309,6 +239,116 @@ GLOBALOp_PCODE = { GLOBALOp.GLOBAL_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', } +MIMGOp_PCODE = { + MIMGOp.IMAGE_ATOMIC_SWAP: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + MIMGOp.IMAGE_ATOMIC_CMPSWAP: 'tmp = MEM[ADDR].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[ADDR].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + MIMGOp.IMAGE_ATOMIC_ADD: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + MIMGOp.IMAGE_ATOMIC_SUB: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + MIMGOp.IMAGE_ATOMIC_SMIN: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + MIMGOp.IMAGE_ATOMIC_UMIN: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + MIMGOp.IMAGE_ATOMIC_SMAX: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + MIMGOp.IMAGE_ATOMIC_UMAX: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + MIMGOp.IMAGE_ATOMIC_AND: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + MIMGOp.IMAGE_ATOMIC_OR: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + MIMGOp.IMAGE_ATOMIC_XOR: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + MIMGOp.IMAGE_ATOMIC_INC: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + MIMGOp.IMAGE_ATOMIC_DEC: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + MIMGOp.IMAGE_GET_LOD: 'VDATA[0] = clampedLOD;\nVDATA[1] = rawLOD.', +} + +MTBUFOp_PCODE = { + MTBUFOp.TBUFFER_LOAD_FORMAT_X: 'VDATA[31 : 0].b32 = ConvertFromFormat(MEM[TADDR.X]);\n// Mem access size depends on format', + MTBUFOp.TBUFFER_LOAD_FORMAT_XY: 'VDATA[31 : 0].b32 = ConvertFromFormat(MEM[TADDR.X]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[TADDR.Y])', + MTBUFOp.TBUFFER_LOAD_FORMAT_XYZ: 'VDATA[31 : 0].b32 = ConvertFromFormat(MEM[TADDR.X]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[TADDR.Y]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[TADDR.Z])', + MTBUFOp.TBUFFER_LOAD_FORMAT_XYZW: 'VDATA[31 : 0].b32 = ConvertFromFormat(MEM[TADDR.X]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[TADDR.Y]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[TADDR.Z]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[TADDR.W])', + MTBUFOp.TBUFFER_STORE_FORMAT_X: 'MEM[TADDR.X] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format', + MTBUFOp.TBUFFER_STORE_FORMAT_XY: 'MEM[TADDR.X] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(VDATA[63 : 32].b32)', + MTBUFOp.TBUFFER_STORE_FORMAT_XYZ: 'MEM[TADDR.X] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[TADDR.Z] = ConvertToFormat(VDATA[95 : 64].b32)', + MTBUFOp.TBUFFER_STORE_FORMAT_XYZW: 'MEM[TADDR.X] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[TADDR.Z] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[TADDR.W] = ConvertToFormat(VDATA[127 : 96].b32)', + MTBUFOp.TBUFFER_LOAD_D16_FORMAT_X: "VDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\n// VDATA[31:16].b16 is preserved.", + MTBUFOp.TBUFFER_LOAD_D16_FORMAT_XY: "VDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Y]))", + MTBUFOp.TBUFFER_LOAD_D16_FORMAT_XYZ: "VDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Y]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Z]));\n// VDATA[63:48].b16 is preserved.", + MTBUFOp.TBUFFER_LOAD_D16_FORMAT_XYZW: "VDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Y]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Z]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[TADDR.W]))", + MTBUFOp.TBUFFER_STORE_D16_FORMAT_X: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format", + 
MTBUFOp.TBUFFER_STORE_D16_FORMAT_XY: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(32'B(VDATA[31 : 16].b16))", + MTBUFOp.TBUFFER_STORE_D16_FORMAT_XYZ: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[TADDR.Z] = ConvertToFormat(32'B(VDATA[47 : 32].b16))", + MTBUFOp.TBUFFER_STORE_D16_FORMAT_XYZW: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[TADDR.Z] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[TADDR.W] = ConvertToFormat(32'B(VDATA[63 : 48].b16))", +} + +MUBUFOp_PCODE = { + MUBUFOp.BUFFER_LOAD_FORMAT_X: 'VDATA[31 : 0].b32 = ConvertFromFormat(MEM[TADDR.X]);\n// Mem access size depends on format', + MUBUFOp.BUFFER_LOAD_FORMAT_XY: 'VDATA[31 : 0].b32 = ConvertFromFormat(MEM[TADDR.X]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[TADDR.Y])', + MUBUFOp.BUFFER_LOAD_FORMAT_XYZ: 'VDATA[31 : 0].b32 = ConvertFromFormat(MEM[TADDR.X]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[TADDR.Y]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[TADDR.Z])', + MUBUFOp.BUFFER_LOAD_FORMAT_XYZW: 'VDATA[31 : 0].b32 = ConvertFromFormat(MEM[TADDR.X]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[TADDR.Y]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[TADDR.Z]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[TADDR.W])', + MUBUFOp.BUFFER_STORE_FORMAT_X: 'MEM[TADDR.X] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format', + MUBUFOp.BUFFER_STORE_FORMAT_XY: 'MEM[TADDR.X] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(VDATA[63 : 32].b32)', + MUBUFOp.BUFFER_STORE_FORMAT_XYZ: 'MEM[TADDR.X] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[TADDR.Z] = ConvertToFormat(VDATA[95 : 64].b32)', + MUBUFOp.BUFFER_STORE_FORMAT_XYZW: 'MEM[TADDR.X] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[TADDR.Z] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[TADDR.W] = ConvertToFormat(VDATA[127 : 96].b32)', + MUBUFOp.BUFFER_LOAD_D16_FORMAT_X: "VDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\n// VDATA[31:16].b16 is preserved.", + MUBUFOp.BUFFER_LOAD_D16_FORMAT_XY: "VDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Y]))", + MUBUFOp.BUFFER_LOAD_D16_FORMAT_XYZ: "VDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Y]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Z]));\n// VDATA[63:48].b16 is preserved.", + MUBUFOp.BUFFER_LOAD_D16_FORMAT_XYZW: "VDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Y]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[TADDR.Z]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[TADDR.W]))", + MUBUFOp.BUFFER_STORE_D16_FORMAT_X: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends 
on format", + MUBUFOp.BUFFER_STORE_D16_FORMAT_XY: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(32'B(VDATA[31 : 16].b16))", + MUBUFOp.BUFFER_STORE_D16_FORMAT_XYZ: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[TADDR.Z] = ConvertToFormat(32'B(VDATA[47 : 32].b16))", + MUBUFOp.BUFFER_STORE_D16_FORMAT_XYZW: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[TADDR.Y] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[TADDR.Z] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[TADDR.W] = ConvertToFormat(32'B(VDATA[63 : 48].b16))", + MUBUFOp.BUFFER_LOAD_U8: "VDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + MUBUFOp.BUFFER_LOAD_I8: "VDATA.i32 = 32'I(signext(MEM[ADDR].i8))", + MUBUFOp.BUFFER_LOAD_U16: "VDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + MUBUFOp.BUFFER_LOAD_I16: "VDATA.i32 = 32'I(signext(MEM[ADDR].i16))", + MUBUFOp.BUFFER_LOAD_B32: 'VDATA[31 : 0] = MEM[ADDR].b32', + MUBUFOp.BUFFER_LOAD_B64: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32', + MUBUFOp.BUFFER_LOAD_B96: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32;\nVDATA[95 : 64] = MEM[ADDR + 8U].b32', + MUBUFOp.BUFFER_LOAD_B128: 'VDATA[31 : 0] = MEM[ADDR].b32;\nVDATA[63 : 32] = MEM[ADDR + 4U].b32;\nVDATA[95 : 64] = MEM[ADDR + 8U].b32;\nVDATA[127 : 96] = MEM[ADDR + 12U].b32', + MUBUFOp.BUFFER_STORE_B8: 'MEM[ADDR].b8 = VDATA[7 : 0]', + MUBUFOp.BUFFER_STORE_B16: 'MEM[ADDR].b16 = VDATA[15 : 0]', + MUBUFOp.BUFFER_STORE_B32: 'MEM[ADDR].b32 = VDATA[31 : 0]', + MUBUFOp.BUFFER_STORE_B64: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32]', + MUBUFOp.BUFFER_STORE_B96: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64]', + MUBUFOp.BUFFER_STORE_B128: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64];\nMEM[ADDR + 12U].b32 = VDATA[127 : 96]', + MUBUFOp.BUFFER_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// VDATA[31:16] is preserved.", + MUBUFOp.BUFFER_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));\n// VDATA[31:16] is preserved.", + MUBUFOp.BUFFER_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;\n// VDATA[31:16] is preserved.', + MUBUFOp.BUFFER_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// VDATA[15:0] is preserved.", + MUBUFOp.BUFFER_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));\n// VDATA[15:0] is preserved.", + MUBUFOp.BUFFER_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;\n// VDATA[15:0] is preserved.', + MUBUFOp.BUFFER_STORE_D16_HI_B8: 'MEM[ADDR].b8 = VDATA[23 : 16]', + MUBUFOp.BUFFER_STORE_D16_HI_B16: 'MEM[ADDR].b16 = VDATA[31 : 16]', + MUBUFOp.BUFFER_LOAD_D16_HI_FORMAT_X: "VDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[TADDR.X]));\n// Mem access size depends on format\n// VDATA[15:0].b16 is preserved.", + MUBUFOp.BUFFER_STORE_D16_HI_FORMAT_X: "MEM[TADDR.X] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\n// Mem access size depends on format", + MUBUFOp.BUFFER_ATOMIC_SWAP_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + MUBUFOp.BUFFER_ATOMIC_CMPSWAP_B32: 'tmp = MEM[ADDR].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[ADDR].u32 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_ADD_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_SUB_U32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_CSUB_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value",
+ MUBUFOp.BUFFER_ATOMIC_MIN_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MIN_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MAX_I32: 'tmp = MEM[ADDR].i32;\nsrc = DATA.i32;\nMEM[ADDR].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MAX_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_AND_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_OR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_XOR_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_INC_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_DEC_U32: 'tmp = MEM[ADDR].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_SWAP_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_CMPSWAP_B64: 'tmp = MEM[ADDR].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[ADDR].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_ADD_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_SUB_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MIN_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MIN_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MAX_I64: 'tmp = MEM[ADDR].i64;\nsrc = DATA.i64;\nMEM[ADDR].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MAX_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_AND_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_OR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_XOR_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_INC_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_DEC_U64: 'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_CMPSWAP_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA[31 : 0].f32;\ncmp = DATA[63 : 32].f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MIN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_MAX_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
+ MUBUFOp.BUFFER_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp',
+}
+
 SCRATCHOp_PCODE = {
 SCRATCHOp.SCRATCH_LOAD_U8: "VDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })",
 SCRATCHOp.SCRATCH_LOAD_I8: "VDATA.i32 = 32'I(signext(MEM[ADDR].i8))",
@@ -1324,31 +364,1094 @@ SCRATCHOp_PCODE = {
 SCRATCHOp.SCRATCH_STORE_B64: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32]',
 SCRATCHOp.SCRATCH_STORE_B96: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64]',
 SCRATCHOp.SCRATCH_STORE_B128: 'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64];\nMEM[ADDR + 12U].b32 = VDATA[127 : 96]',
- SCRATCHOp.SCRATCH_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });",
- SCRATCHOp.SCRATCH_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));",
- SCRATCHOp.SCRATCH_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;',
- SCRATCHOp.SCRATCH_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });",
- SCRATCHOp.SCRATCH_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));",
- SCRATCHOp.SCRATCH_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;',
+ SCRATCHOp.SCRATCH_LOAD_D16_U8: "VDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// VDATA[31:16] is preserved.",
+ SCRATCHOp.SCRATCH_LOAD_D16_I8: "VDATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));\n// VDATA[31:16] is preserved.",
+ SCRATCHOp.SCRATCH_LOAD_D16_B16: 'VDATA[15 : 0].b16 = MEM[ADDR].b16;\n// VDATA[31:16] is preserved.',
+ SCRATCHOp.SCRATCH_LOAD_D16_HI_U8: "VDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// VDATA[15:0] is preserved.",
+ SCRATCHOp.SCRATCH_LOAD_D16_HI_I8: "VDATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));\n// VDATA[15:0] is preserved.",
+ SCRATCHOp.SCRATCH_LOAD_D16_HI_B16: 'VDATA[31 : 16].b16 = MEM[ADDR].b16;\n// VDATA[15:0] is preserved.',
 SCRATCHOp.SCRATCH_STORE_D16_HI_B8: 'MEM[ADDR].b8 = VDATA[23 : 16]',
 SCRATCHOp.SCRATCH_STORE_D16_HI_B16: 'MEM[ADDR].b16 = VDATA[31 : 16]',
 }
-PSEUDOCODE_STRINGS = {
- SOP1Op: SOP1Op_PCODE,
- SOP2Op: SOP2Op_PCODE,
- SOPCOp: SOPCOp_PCODE,
- SOPKOp: SOPKOp_PCODE,
- SOPPOp: SOPPOp_PCODE,
- SMEMOp: SMEMOp_PCODE,
- VOP1Op: VOP1Op_PCODE,
- VOP2Op: VOP2Op_PCODE,
- VOP3Op: VOP3Op_PCODE,
- VOP3SDOp: VOP3SDOp_PCODE,
- VOP3POp: VOP3POp_PCODE,
- VOPCOp: VOPCOp_PCODE,
- DSOp: DSOp_PCODE,
- FLATOp: FLATOp_PCODE,
- GLOBALOp: GLOBALOp_PCODE,
- SCRATCHOp: SCRATCHOp_PCODE,
-}
\ No newline at end of file
+SMEMOp_PCODE = {
+ SMEMOp.S_LOAD_B32: 'SDATA[31 : 0] = MEM[ADDR].b32',
+ SMEMOp.S_LOAD_B64: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32',
+ SMEMOp.S_LOAD_B128: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32',
+ SMEMOp.S_LOAD_B256: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32',
+ SMEMOp.S_LOAD_B512: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32;\nSDATA[287 : 256] = MEM[ADDR + 32U].b32;\nSDATA[319 : 288] = MEM[ADDR + 36U].b32;\nSDATA[351 : 320] = MEM[ADDR + 40U].b32;\nSDATA[383 : 352] = MEM[ADDR + 44U].b32;\nSDATA[415 : 384] = MEM[ADDR + 48U].b32;\nSDATA[447 : 416] = MEM[ADDR + 52U].b32;\nSDATA[479 : 448] = MEM[ADDR + 56U].b32;\nSDATA[511 : 480] = MEM[ADDR + 60U].b32',
+ SMEMOp.S_BUFFER_LOAD_B32: 'SDATA[31 : 0] = MEM[ADDR].b32',
+ SMEMOp.S_BUFFER_LOAD_B64: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32',
+ SMEMOp.S_BUFFER_LOAD_B128: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32',
+ SMEMOp.S_BUFFER_LOAD_B256: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32',
+ SMEMOp.S_BUFFER_LOAD_B512: 'SDATA[31 : 0] = MEM[ADDR].b32;\nSDATA[63 : 32] = MEM[ADDR + 4U].b32;\nSDATA[95 : 64] = MEM[ADDR + 8U].b32;\nSDATA[127 : 96] = MEM[ADDR + 12U].b32;\nSDATA[159 : 128] = MEM[ADDR + 16U].b32;\nSDATA[191 : 160] = MEM[ADDR + 20U].b32;\nSDATA[223 : 192] = MEM[ADDR + 24U].b32;\nSDATA[255 : 224] = MEM[ADDR + 28U].b32;\nSDATA[287 : 256] = MEM[ADDR + 32U].b32;\nSDATA[319 : 288] = MEM[ADDR + 36U].b32;\nSDATA[351 : 320] = MEM[ADDR + 40U].b32;\nSDATA[383 : 352] = MEM[ADDR + 44U].b32;\nSDATA[415 : 384] = MEM[ADDR + 48U].b32;\nSDATA[447 : 416] = MEM[ADDR + 52U].b32;\nSDATA[479 : 448] = MEM[ADDR + 56U].b32;\nSDATA[511 : 480] = MEM[ADDR + 60U].b32',
+}
+
+SOP1Op_PCODE = {
+ SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32',
+ SOP1Op.S_MOV_B64: 'D0.b64 = S0.b64',
+ SOP1Op.S_CMOV_B32: 'if SCC then\nD0.b32 = S0.b32\nendif',
+ SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif',
+ SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
+ SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]',
+ SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CTZ_I32_B32(0xaaaaaaaa) => 1\nS_CTZ_I32_B32(0x55555555) => 0\nS_CTZ_I32_B32(0x00000000) => 0xffffffff\nS_CTZ_I32_B32(0xffffffff) => 0\nS_CTZ_I32_B32(0x00010000) => 16",
+ SOP1Op.S_CTZ_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
+ SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CLZ_I32_U32(0x00000000) => 0xffffffff\nS_CLZ_I32_U32(0x0000cccc) => 16\nS_CLZ_I32_U32(0xffff3333) => 0\nS_CLZ_I32_U32(0x7fffffff) => 1\nS_CLZ_I32_U32(0x80000000) => 0\nS_CLZ_I32_U32(0xffffffff) => 0",
+ SOP1Op.S_CLZ_I32_U64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
+ SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp =
i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CLS_I32(0x00000000) => 0xffffffff\nS_CLS_I32(0x0000cccc) => 16\nS_CLS_I32(0xffff3333) => 16\nS_CLS_I32(0x7fffffff) => 1\nS_CLS_I32(0x80000000) => 1\nS_CLS_I32(0xffffffff) => 0xffffffff', + SOP1Op.S_CLS_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))", + SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U", + SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U", + SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U", + SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U", + SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor\ns_bitreplicate_b64 s2, s0\ns_bitreplicate_b64 s2, s2', + SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0\nS_ABS_I32(0x00000001) => 0x00000001\nS_ABS_I32(0x7fffffff) => 0x7fffffff\nS_ABS_I32(0x80000000) => 0x80000000 // Note this is negative!\nS_ABS_I32(0x80000001) => 0x7fffffff\nS_ABS_I32(0x80000002) => 0x7ffffffe\nS_ABS_I32(0xffffffff) => 0x00000001', + SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT0_I32_B32(0x00000000) => 32\nS_BCNT0_I32_B32(0xcccccccc) => 16\nS_BCNT0_I32_B32(0xffffffff) => 0", + SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT1_I32_B32(0x00000000) => 0\nS_BCNT1_I32_B32(0xcccccccc) => 16\nS_BCNT1_I32_B32(0xffffffff) => 32", + SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 
1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U', + SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_WQM_B32: "tmp = 0U;\ndeclare i : 6'U;\nfor i in 6'0U : 6'31U do\ntmp[i] = S0.u32[i & 6'60U +: 6'4U] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL", + SOP1Op.S_NOT_B32: 'D0.u32 = ~S0.u32;\nSCC = D0.u32 != 0U', + SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL', + SOP1Op.S_AND_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XOR_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_XOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NAND_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_NAND_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NOR_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_NOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XNOR_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_XNOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT0_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT0_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_NOT0_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_NOT0_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT1_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_NOT1_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_NOT1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT0_WREXEC_B32: 'EXEC.u32 = (~S0.u32 
& EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT0_WREXEC_B64: 'EXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT1_WREXEC_B32: 'EXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT1_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL\n// V0 holds the index value per lane\n// save exec mask for restore at the end\ns_mov_b64 s2, exec\n// exec mask of remaining (unprocessed) threads\ns_mov_b64 s4, exec\nloop:\n// get the index value for the first active lane\nv_readfirstlane_b32 s0, v0\n// find all other lanes with same index value\nv_cmpx_eq s0, v0\n // do the operation using the current EXEC mask. S0 holds the index.\n// mask out thread that was just executed\n// s_andn2_b64 s4, s4, exec\n// s_mov_b64 exec, s4\ns_andn2_wrexec_b64 s4, s4 // replaces above 2 ops\n// repeat until EXEC==0\ns_cbranch_scc1 loop\ns_mov_b64 exec, s2', + SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32\ns_mov_b32 m0, 10\ns_movrels_b32 s5, s7', + SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b64 = SGPR[addr].b64', + SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32\ns_mov_b32 m0, 10\ns_movreld_b32 s5, s7', + SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b64 = S0.b64', + SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nSGPR[addrd].b32 = SGPR[addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\ns_movrelsd_2_b32 s5, s7', + SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL', + SOP1Op.S_SETPC_B64: 'PC = S0.i64', + SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64', + SOP1Op.S_RFE_B64: "WAVE_STATUS.PRIV = 1'0U;\nPC = S0.i64", + SOP1Op.S_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + SOP1Op.S_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + SOP1Op.S_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + SOP1Op.S_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + SOP1Op.S_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + SOP1Op.S_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + SOP1Op.S_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + SOP1Op.S_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + SOP1Op.S_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + SOP1Op.S_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + SOP1Op.S_CVT_HI_F32_F16: 'D0.f32 = f16_to_f32(S0[31 : 16].f16)', + SOP1Op.S_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + SOP1Op.S_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + SOP1Op.S_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + SOP1Op.S_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", +} + +SOP2Op_PCODE = { + SOP2Op.S_ADD_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 
1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADDC_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_SUB_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUBB_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_ADD_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32', + SOP2Op.S_SUB_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32', + SOP2Op.S_ADDC_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADDC_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_SUBB_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUBB_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0\nS_ABSDIFF_I32(0x00000002, 0x00000005) => 0x00000003\nS_ABSDIFF_I32(0xffffffff, 0x00000000) => 0x00000001\nS_ABSDIFF_I32(0x80000000, 0x00000000) => 0x80000000 // Note: result is negative!\nS_ABSDIFF_I32(0x80000000, 0x00000001) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xffffffff) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xfffffffe) => 0x7ffffffe', + SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0", + SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', + SOP2Op.S_MAX_U32: 'SCC = S0.u32 >= S1.u32;\nD0.u32 = SCC ? 
S0.u32 : S1.u32', + SOP2Op.S_AND_B32: 'D0.u32 = (S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_AND_B64: 'D0.u64 = (S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_OR_B32: 'D0.u32 = (S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_OR_B64: 'D0.u64 = (S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XOR_B64: 'D0.u64 = (S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NAND_B32: 'D0.u32 = ~(S0.u32 & S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NAND_B64: 'D0.u64 = ~(S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_NOR_B32: 'D0.u32 = ~(S0.u32 | S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_NOR_B64: 'D0.u64 = ~(S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_XNOR_B64: 'D0.u64 = ~(S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_AND_NOT1_B32: 'D0.u32 = (S0.u32 & ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_AND_NOT1_B64: 'D0.u64 = (S0.u64 & ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_OR_NOT1_B32: 'D0.u32 = (S0.u32 | ~S1.u32);\nSCC = D0.u32 != 0U', + SOP2Op.S_OR_NOT1_B64: 'D0.u64 = (S0.u64 | ~S1.u64);\nSCC = D0.u64 != 0ULL', + SOP2Op.S_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S1[22 : 16].u32) - 1U));\nSCC = D0.u32 != 0U', + SOP2Op.S_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32);\nSCC = D0.i32 != 0', + SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL', + SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC = D0.i64 != 0LL', + SOP2Op.S_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + SOP2Op.S_BFM_B64: 'D0.u64 = (((1ULL << S0[5 : 0].u32) - 1ULL) << S1[5 : 0].u32)', + SOP2Op.S_MUL_I32: 'D0.i32 = S0.i32 * S1.i32', + SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + SOP2Op.S_CSELECT_B32: 'D0.u32 = SCC ? S0.u32 : S1.u32', + SOP2Op.S_CSELECT_B64: 'D0.u64 = SCC ? 
S0.u64 : S1.u64', + SOP2Op.S_PACK_LL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_LH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[15 : 0].u16 }', + SOP2Op.S_PACK_HH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[31 : 16].u16 }', + SOP2Op.S_PACK_HL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[31 : 16].u16 }', + SOP2Op.S_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + SOP2Op.S_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + SOP2Op.S_MIN_F32: "LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + SOP2Op.S_MAX_F32: "GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + SOP2Op.S_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + SOP2Op.S_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', + SOP2Op.S_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', + SOP2Op.S_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + SOP2Op.S_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_TOWARD_ZERO;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode;\n// Round-toward-zero regardless of current round mode setting in hardware.', + SOP2Op.S_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + SOP2Op.S_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + SOP2Op.S_MIN_F16: "LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = 
S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + SOP2Op.S_MAX_F16: "GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + SOP2Op.S_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + SOP2Op.S_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', +} + +SOPCOp_PCODE = { + SOPCOp.S_CMP_EQ_I32: 'SCC = S0.i32 == S1.i32', + SOPCOp.S_CMP_LG_I32: 'SCC = S0.i32 <> S1.i32', + SOPCOp.S_CMP_GT_I32: 'SCC = S0.i32 > S1.i32', + SOPCOp.S_CMP_GE_I32: 'SCC = S0.i32 >= S1.i32', + SOPCOp.S_CMP_LT_I32: 'SCC = S0.i32 < S1.i32', + SOPCOp.S_CMP_LE_I32: 'SCC = S0.i32 <= S1.i32', + SOPCOp.S_CMP_EQ_U32: 'SCC = S0.u32 == S1.u32', + SOPCOp.S_CMP_LG_U32: 'SCC = S0.u32 <> S1.u32', + SOPCOp.S_CMP_GT_U32: 'SCC = S0.u32 > S1.u32', + SOPCOp.S_CMP_GE_U32: 'SCC = S0.u32 >= S1.u32', + SOPCOp.S_CMP_LT_U32: 'SCC = S0.u32 < S1.u32', + SOPCOp.S_CMP_LE_U32: 'SCC = S0.u32 <= S1.u32', + SOPCOp.S_BITCMP0_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U", + SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U", + SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U", + SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64', + SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64', + SOPCOp.S_CMP_LT_F32: 'SCC = S0.f32 < S1.f32', + SOPCOp.S_CMP_EQ_F32: 'SCC = S0.f32 == S1.f32', + SOPCOp.S_CMP_LE_F32: 'SCC = S0.f32 <= S1.f32', + SOPCOp.S_CMP_GT_F32: 'SCC = S0.f32 > S1.f32', + SOPCOp.S_CMP_LG_F32: 'SCC = S0.f32 <> S1.f32', + SOPCOp.S_CMP_GE_F32: 'SCC = S0.f32 >= S1.f32', + SOPCOp.S_CMP_O_F32: "SCC = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + SOPCOp.S_CMP_U_F32: "SCC = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + SOPCOp.S_CMP_NGE_F32: 'SCC = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', + SOPCOp.S_CMP_NLG_F32: 'SCC = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + SOPCOp.S_CMP_NGT_F32: 'SCC = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + SOPCOp.S_CMP_NLE_F32: 'SCC = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + SOPCOp.S_CMP_NEQ_F32: 'SCC = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + SOPCOp.S_CMP_NLT_F32: 'SCC = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + SOPCOp.S_CMP_LT_F16: 'SCC = S0.f16 
< S1.f16', + SOPCOp.S_CMP_EQ_F16: 'SCC = S0.f16 == S1.f16', + SOPCOp.S_CMP_LE_F16: 'SCC = S0.f16 <= S1.f16', + SOPCOp.S_CMP_GT_F16: 'SCC = S0.f16 > S1.f16', + SOPCOp.S_CMP_LG_F16: 'SCC = S0.f16 <> S1.f16', + SOPCOp.S_CMP_GE_F16: 'SCC = S0.f16 >= S1.f16', + SOPCOp.S_CMP_O_F16: "SCC = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + SOPCOp.S_CMP_U_F16: "SCC = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + SOPCOp.S_CMP_NGE_F16: 'SCC = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + SOPCOp.S_CMP_NLG_F16: 'SCC = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + SOPCOp.S_CMP_NGT_F16: 'SCC = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + SOPCOp.S_CMP_NLE_F16: 'SCC = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + SOPCOp.S_CMP_NEQ_F16: 'SCC = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + SOPCOp.S_CMP_NLT_F16: 'SCC = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', +} + +SOPKOp_PCODE = { + SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(SIMM16.i16))", + SOPKOp.S_VERSION: 'nop();\n// Do nothing - for use by tools only', + SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(SIMM16.i16))\nendif", + SOPKOp.S_CMPK_EQ_I32: "SCC = 64'I(S0.i32) == signext(SIMM16.i16)", + SOPKOp.S_CMPK_LG_I32: "SCC = 64'I(S0.i32) != signext(SIMM16.i16)", + SOPKOp.S_CMPK_GT_I32: "SCC = 64'I(S0.i32) > signext(SIMM16.i16)", + SOPKOp.S_CMPK_GE_I32: "SCC = 64'I(S0.i32) >= signext(SIMM16.i16)", + SOPKOp.S_CMPK_LT_I32: "SCC = 64'I(S0.i32) < signext(SIMM16.i16)", + SOPKOp.S_CMPK_LE_I32: "SCC = 64'I(S0.i32) <= signext(SIMM16.i16)", + SOPKOp.S_CMPK_EQ_U32: "SCC = S0.u32 == 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_LG_U32: "SCC = S0.u32 != 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_GT_U32: "SCC = S0.u32 > 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_GE_U32: "SCC = S0.u32 >= 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_LT_U32: "SCC = S0.u32 < 32'U(SIMM16.u16)", + SOPKOp.S_CMPK_LE_U32: "SCC = S0.u32 <= 32'U(SIMM16.u16)", + SOPKOp.S_ADDK_I32: "tmp = D0.i32;\n// save value so we can check sign bits for overflow later.\nD0.i32 = 32'I(64'I(D0.i32) + signext(SIMM16.i16));\nSCC = ((tmp[31] == SIMM16.i16[15]) && (tmp[31] != D0.i32[31]));\n// signed overflow.", + SOPKOp.S_MULK_I32: "D0.i32 = 32'I(64'I(D0.i32) * signext(SIMM16.i16))", + SOPKOp.S_GETREG_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", + SOPKOp.S_SETREG_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask & 32'I(writeableBitMask(hwRegId.u32, WAVE_STATUS.PRIV)));\n// Mask of bits we are allowed to modify\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\nHW_REGISTERS[hwRegId] = value.b32;\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_SETREG_IMM32_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask & 32'I(writeableBitMask(hwRegId.u32, WAVE_STATUS.PRIV)));\n// Mask of bits we are allowed to modify\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & 
~mask));\nHW_REGISTERS[hwRegId] = value.b32;\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL", + SOPKOp.S_WAITCNT_VSCNT: 'vscnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', + SOPKOp.S_WAITCNT_VMCNT: 'vmcnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', + SOPKOp.S_WAITCNT_EXPCNT: 'expcnt <= S0.u[2:0] + S1.u[2:0].\n// Comparison is 3 bits, no clamping is applied for add overflow', + SOPKOp.S_WAITCNT_LGKMCNT: 'lgkmcnt <= S0.u[5:0] + S1.u[5:0].\n// Comparison is 6 bits, no clamping is applied for add overflow', +} + +SOPPOp_PCODE = { + SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor\ns_nop 0 // Wait 1 cycle.\ns_nop 0xf // Wait 16 cycles.', + SOPPOp.S_SLEEP: 's_sleep 0 // Wait for 0 clocks.\ns_sleep 1 // Wait for 1-64 clocks.\ns_sleep 2 // Wait for 65-128 clocks.', + SOPPOp.S_DELAY_ALU: 'v_mov_b32 v3, v0\nv_lshlrev_b32 v30, 1, v31\nv_lshlrev_b32 v24, 1, v25\ns_delay_alu instid0(INSTID_VALU_DEP_3) | instskip(INSTSKIP_SKIP_1) | instid1(INSTID_VALU_DEP_1)\n// 1 cycle delay here\nv_add_f32 v0, v1, v3\nv_sub_f32 v11, v9, v9\n// 2 cycles delay here\nv_mul_f32 v10, v13, v11', + SOPPOp.S_WAITCNT: 'expcnt <= WaitEXPCNT\nlgkmcnt <= WaitLGKMCNT\nvmcnt <= WaitVMCNT', + SOPPOp.S_TRAP: 'TrapID = SIMM16.u16[7 : 0];\n"Wait for all instructions to complete";\n// PC passed into trap handler points to S_TRAP itself,\n// *not* to the next instruction.\n{ TTMP[1], TTMP[0] } = { 7\'0, HT[0], TrapID[7 : 0], PC[47 : 0] };\nPC = TBA.i64;\n// trap base address\nWAVE_STATUS.PRIV = 1\'1U', + SOPPOp.S_CODE_END: '...\ns_endpgm // last real instruction in shader buffer\ns_code_end // 1\ns_code_end // 2\ns_code_end // 3\ns_code_end // 4\ns_code_end // done!', + SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.\ns_branch label // Set SIMM16 = +4 = 0x0004\ns_nop 0 // 4 bytes\nlabel:\ns_nop 0 // 4 bytes\ns_branch label // Set SIMM16 = -8 = 0xfff8", + SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCZ: "if VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCNZ: "if VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS: "if WAVE_STATUS.COND_DBG_SYS.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGUSER: "if WAVE_STATUS.COND_DBG_USER.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: "if (WAVE_STATUS.COND_DBG_SYS || WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: "if (WAVE_STATUS.COND_DBG_SYS && WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_SETPRIO: 'Priority = {SysUserPrio[1:0], WaveAge[3:0]}\nSysUserPrio = 
MIN(3, SysPrio[1:0] + UserPrio[1:0]).',
+}
+
+VINTERPOp_PCODE = {
+ VINTERPOp.V_INTERP_P10_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f32, S1.f32, VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f32)\ns_mov_b32 m0, s0 // assume s0 contains newprim mask\nlds_param_load v0, attr0 // v0 is a temporary register\nv_interp_p10_f32 v3, v0, v1, v0 // v1 contains i coordinate\nv_interp_p2_f32 v3, v0, v2, v3 // v2 contains j coordinate',
+ VINTERPOp.V_INTERP_P2_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f32, S1.f32, S2.f32)',
+ VINTERPOp.V_INTERP_P10_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f16), S1.f32, 32'F(VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f16))",
+ VINTERPOp.V_INTERP_P2_F16_F32: "D0.f16 = 16'F(fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f16), S1.f32, S2.f32))",
+ VINTERPOp.V_INTERP_P10_RTZ_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f16), S1.f32, 32'F(VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f16))",
+ VINTERPOp.V_INTERP_P2_RTZ_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f16), S1.f32, S2.f32)",
+}
+
+VOP1Op_PCODE = {
+ VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
+ VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
+ VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
+ VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
+ VOP1Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)',
+ VOP1Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)',
+ VOP1Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)',
+ VOP1Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)',
+ VOP1Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)',
+ VOP1Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)',
+ VOP1Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))',
+ VOP1Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))',
+ VOP1Op.V_CVT_OFF_F32_I4: "declare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]",
+ VOP1Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)',
+ VOP1Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)',
+ VOP1Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)',
+ VOP1Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)',
+ VOP1Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)',
+ VOP1Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)',
+ VOP1Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)',
+ VOP1Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)',
+ VOP1Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)',
+ VOP1Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif',
+ VOP1Op.V_RNDNE_F64: 'D0.f64 =
floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP1Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP1Op.V_MOV_B16: 'D0.b16 = S0.b16', + VOP1Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP1Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF', + VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF', + VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0', + VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0', + VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF', + VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN", + VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN", + VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = 
i;\nbreak\nendif\nendfor\nV_CLZ_I32_U32(0x00000000) => 0xffffffff\nV_CLZ_I32_U32(0x800000ff) => 0\nV_CLZ_I32_U32(0x100000ff) => 3\nV_CLZ_I32_U32(0x0000ffff) => 16\nV_CLZ_I32_U32(0x00000001) => 31", + VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CTZ_I32_B32(0x00000000) => 0xffffffff\nV_CTZ_I32_B32(0xff000001) => 0\nV_CTZ_I32_B32(0xff000008) => 3\nV_CTZ_I32_B32(0xffff0000) => 16\nV_CTZ_I32_B32(0x80000000) => 31", + VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CLS_I32(0x00000000) => 0xffffffff\nV_CLS_I32(0x40000000) => 1\nV_CLS_I32(0x80000000) => 1\nV_CLS_I32(0x0fffffff) => 4\nV_CLS_I32(0xffff0000) => 16\nV_CLS_I32(0xfffffffe) => 31\nV_CLS_I32(0xffffffff) => 0xffffffff', + VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()', + VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()', + VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()", + VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()", + VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32\ns_mov_b32 m0, 10\nv_movreld_b32 v5, v7', + VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32\ns_mov_b32 m0, 10\nv_movrels_b32 v5, v7', + VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, 10\nv_movrelsd_b32 v5, v7', + VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\nv_movrelsd_2_b32 v5, v7', + VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', + VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', + VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0", + VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF', + VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = 
-INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0", + VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF', + VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF", + VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()", + VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()", + VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", + VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', + VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN", + VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN", + VOP1Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n.i32 <= 0 then\nreturn 8'0U\nelsif n >= 16'I(0xff) then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\nD0.b16 = { SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16) }", + VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', + VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', + VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp', + VOP1Op.V_SWAP_B16: 'tmp = D0.b16;\nD0.b16 = S0.b16;\nS0.b16 = tmp', + VOP1Op.V_PERMLANE64_B32: "declare tmp : 32'B[64];\ndeclare lane : 32'U;\nif WAVE32 then\n// Supported in wave64 ONLY; treated as scalar NOP in wave32\ns_nop(16'0U)\nelse\nfor lane in 0U : 63U do\n// Copy original S0 in case D==S0\ntmp[lane] = VGPR[lane][SRC0.u32]\nendfor;\nfor lane in 0U : 63U do\naltlane = { ~lane[5], lane[4 : 0] };\n// 0<->32, ..., 31<->63\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altlane]\nendif\nendfor\nendif", + VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\ntmp = VGPR[laneId][addrd].b32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32;\nVGPR[laneId][addrs].b32 = tmp\ns_mov_b32 m0, ((20 << 16) | 
10)\nv_swaprel_b32 v5, v7',
+ VOP1Op.V_NOT_B16: 'D0.u16 = ~S0.u16',
+ VOP1Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))",
+ VOP1Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }",
+}
+
+VOP2Op_PCODE = {
+ VOP2Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32',
+ VOP2Op.V_DOT2ACC_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp',
+ VOP2Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32',
+ VOP2Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32',
+ VOP2Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32',
+ VOP2Op.V_FMAC_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, D0.f32)\nendif",
+ VOP2Op.V_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif",
+ VOP2Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32',
+ VOP2Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)",
+ VOP2Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)",
+ VOP2Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)",
+ VOP2Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)",
+ VOP2Op.V_MIN_F32: "LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.",
+ VOP2Op.V_MAX_F32: "GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.",
+ VOP2Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32',
+ VOP2Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ?
S0.i32 : S1.i32', + VOP2Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', + VOP2Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? S0.u32 : S1.u32', + VOP2Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOP2Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', + VOP2Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', + VOP2Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', + VOP2Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', + VOP2Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP2Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', + VOP2Op.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP2Op.V_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', + VOP2Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', + VOP2Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', + VOP2Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOP2Op.V_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', + VOP2Op.V_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', + VOP2Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_TOWARD_ZERO;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode;\n// Round-toward-zero regardless of current round mode setting in hardware.', + VOP2Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + VOP2Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + VOP2Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', + VOP2Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + VOP2Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', + VOP2Op.V_FMAMK_F16: 'D0.f16 = fma(S0.f16, SIMM32.f16, S1.f16)', + VOP2Op.V_FMAAK_F16: 'D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16)', + VOP2Op.V_MAX_F16: "GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + VOP2Op.V_MIN_F16: "LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif 
isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()", + VOP2Op.V_PK_FMAC_F16: 'D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16);\nD0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16)', +} + +VOP3Op_PCODE = { + VOP3Op.V_CMP_F_F16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_F16: 'D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F16: 'D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F16: 'D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F16: "D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_U_F16: "D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F16: 'D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F16: 'D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F16: 'D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_F16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_F32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_F32: 'D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F32: 'D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F32: 'D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F32: "D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + 
VOP3Op.V_CMP_U_F32: "D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F32: 'D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F32: 'D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F32: 'D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_F32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_F64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_F64: 'D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F64: 'D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F64: 'D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F64: 'D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_U_F64: 'D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F64: 'D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F64: 'D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F64: 'D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_F64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_I16: 'D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I16: 'D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I16: 'D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I16: 'D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_U16: 'D0.u64[laneId] = S0.u16 < 
S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U16: 'D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U16: 'D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U16: 'D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_F_I32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_I32: 'D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I32: 'D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I32: 'D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I32: 'D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_I32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_U32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_U32: 'D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U32: 'D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U32: 'D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U32: 'D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_U32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_I64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_I64: 'D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I64: 'D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I64: 'D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I64: 'D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_I64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_F_U64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_LT_U64: 'D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U64: 'D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U64: 'D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U64: 'D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_T_U64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = 
S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMPX_F_F16: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', + VOP3Op.V_CMPX_EQ_F16: 'EXEC.u64[laneId] = S0.f16 == S1.f16', + VOP3Op.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', + VOP3Op.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', + VOP3Op.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', + VOP3Op.V_CMPX_GE_F16: 'EXEC.u64[laneId] = S0.f16 >= S1.f16', + VOP3Op.V_CMPX_O_F16: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + VOP3Op.V_CMPX_U_F16: "EXEC.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + VOP3Op.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + VOP3Op.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_T_F16: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_F32: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', + VOP3Op.V_CMPX_EQ_F32: 'EXEC.u64[laneId] = S0.f32 == S1.f32', + VOP3Op.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', + VOP3Op.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', + VOP3Op.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', + VOP3Op.V_CMPX_GE_F32: 'EXEC.u64[laneId] = S0.f32 >= S1.f32', + VOP3Op.V_CMPX_O_F32: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + VOP3Op.V_CMPX_U_F32: "EXEC.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + VOP3Op.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same 
operation as <', + VOP3Op.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_T_F32: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_F64: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', + VOP3Op.V_CMPX_EQ_F64: 'EXEC.u64[laneId] = S0.f64 == S1.f64', + VOP3Op.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', + VOP3Op.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', + VOP3Op.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', + VOP3Op.V_CMPX_GE_F64: 'EXEC.u64[laneId] = S0.f64 >= S1.f64', + VOP3Op.V_CMPX_O_F64: 'EXEC.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64))', + VOP3Op.V_CMPX_U_F64: 'EXEC.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64))', + VOP3Op.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <', + VOP3Op.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==', + VOP3Op.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=', + VOP3Op.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >', + VOP3Op.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', + VOP3Op.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', + VOP3Op.V_CMPX_T_F64: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', + VOP3Op.V_CMPX_EQ_I16: 'EXEC.u64[laneId] = S0.i16 == S1.i16', + VOP3Op.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', + VOP3Op.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', + VOP3Op.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', + VOP3Op.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', + VOP3Op.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', + VOP3Op.V_CMPX_EQ_U16: 'EXEC.u64[laneId] = S0.u16 == S1.u16', + VOP3Op.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', + VOP3Op.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', + VOP3Op.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', + VOP3Op.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', + VOP3Op.V_CMPX_F_I32: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', + VOP3Op.V_CMPX_EQ_I32: 'EXEC.u64[laneId] = S0.i32 == S1.i32', + VOP3Op.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', + VOP3Op.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', + VOP3Op.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', + VOP3Op.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', + VOP3Op.V_CMPX_T_I32: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_U32: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', + VOP3Op.V_CMPX_EQ_U32: 'EXEC.u64[laneId] = S0.u32 == S1.u32', + VOP3Op.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', + 
VOP3Op.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', + VOP3Op.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', + VOP3Op.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', + VOP3Op.V_CMPX_T_U32: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_I64: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', + VOP3Op.V_CMPX_EQ_I64: 'EXEC.u64[laneId] = S0.i64 == S1.i64', + VOP3Op.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', + VOP3Op.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', + VOP3Op.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', + VOP3Op.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', + VOP3Op.V_CMPX_T_I64: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_F_U64: "EXEC.u64[laneId] = 1'0U", + VOP3Op.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64', + VOP3Op.V_CMPX_EQ_U64: 'EXEC.u64[laneId] = S0.u64 == S1.u64', + VOP3Op.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', + VOP3Op.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', + VOP3Op.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', + VOP3Op.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', + VOP3Op.V_CMPX_T_U64: "EXEC.u64[laneId] = 1'1U", + VOP3Op.V_CMPX_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CMPX_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CMPX_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? 
S1.u32 : S0.u32', + VOP3Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + VOP3Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + VOP3Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', + VOP3Op.V_FMAC_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, D0.f32)\nendif", + VOP3Op.V_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP3Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + VOP3Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)", + VOP3Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)", + VOP3Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)", + VOP3Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)", + VOP3Op.V_MIN_F32: "LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif LT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + VOP3Op.V_MAX_F32: "GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nelse\nif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif GT_NEG_ZERO(S0.f32, S1.f32) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + VOP3Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32', + VOP3Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32', + VOP3Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32', + VOP3Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? 
S0.u32 : S1.u32', + VOP3Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOP3Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)', + VOP3Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)', + VOP3Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', + VOP3Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)', + VOP3Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)', + VOP3Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)', + VOP3Op.V_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', + VOP3Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', + VOP3Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', + VOP3Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOP3Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_TOWARD_ZERO;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode;\n// Round-toward-zero regardless of current round mode setting in hardware.', + VOP3Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', + VOP3Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', + VOP3Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', + VOP3Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', + VOP3Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', + VOP3Op.V_MAX_F16: "GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + VOP3Op.V_MIN_F16: "LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.", + VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()", + VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1', + VOP3Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 
32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP3Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', + VOP3Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', + VOP3Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + VOP3Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + VOP3Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + VOP3Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + VOP3Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + VOP3Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + VOP3Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', + VOP3Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', + VOP3Op.V_CVT_OFF_F32_I4: "declare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP3Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', + VOP3Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', + VOP3Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', + VOP3Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', + VOP3Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', + VOP3Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', + VOP3Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', + VOP3Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', + VOP3Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', + VOP3Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', + VOP3Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP3Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP3Op.V_MOV_B16: 'D0.b16 = S0.b16', + VOP3Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP3Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP3Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP3Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP3Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF', + VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF', + VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0', + VOP3Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) 
=> 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0', + VOP3Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP3Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF', + VOP3Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN", + VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN", + VOP3Op.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP3Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CLZ_I32_U32(0x00000000) => 0xffffffff\nV_CLZ_I32_U32(0x800000ff) => 0\nV_CLZ_I32_U32(0x100000ff) => 3\nV_CLZ_I32_U32(0x0000ffff) => 16\nV_CLZ_I32_U32(0x00000001) => 31", + VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CTZ_I32_B32(0x00000000) => 0xffffffff\nV_CTZ_I32_B32(0xff000001) => 0\nV_CTZ_I32_B32(0xff000008) => 3\nV_CTZ_I32_B32(0xffff0000) => 16\nV_CTZ_I32_B32(0x80000000) => 31", + VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CLS_I32(0x00000000) => 0xffffffff\nV_CLS_I32(0x40000000) => 1\nV_CLS_I32(0x80000000) => 1\nV_CLS_I32(0x0fffffff) => 4\nV_CLS_I32(0xffff0000) => 16\nV_CLS_I32(0xfffffffe) => 31\nV_CLS_I32(0xffffffff) => 0xffffffff', + VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()', + VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()', + VOP3Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()", + VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()", + VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32\ns_mov_b32 m0, 10\nv_movreld_b32 v5, v7', + VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += 
M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32\ns_mov_b32 m0, 10\nv_movrels_b32 v5, v7', + VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, 10\nv_movrelsd_b32 v5, v7', + VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\nv_movrelsd_2_b32 v5, v7', + VOP3Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', + VOP3Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', + VOP3Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP3Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0", + VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF', + VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0", + VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF', + VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF", + VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()", + VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()", + VOP3Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + VOP3Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + VOP3Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + VOP3Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", + VOP3Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', + VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN", + VOP3Op.V_COS_F16: 
"D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN", + VOP3Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n.i32 <= 0 then\nreturn 8'0U\nelsif n >= 16'I(0xff) then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\nD0.b16 = { SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16) }", + VOP3Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', + VOP3Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', + VOP3Op.V_NOT_B16: 'D0.u16 = ~S0.u16', + VOP3Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + VOP3Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", + VOP3Op.V_FMA_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif", + VOP3Op.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32", + VOP3Op.V_MAD_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24) + S2.u32", + VOP3Op.V_CUBEID_F32: '// Set D0.f = cubemap face ID ({0.0, 1.0, ..., 5.0}).\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = 5.0F\nelse\nD0.f32 = 4.0F\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = 3.0F\nelse\nD0.f32 = 2.0F\nendif\nelse\nif S0.f32 < 0.0F then\nD0.f32 = 1.0F\nelse\nD0.f32 = 0.0F\nendif\nendif', + VOP3Op.V_CUBESC_F32: '// D0.f = cubemap S coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = -S0.f32\nelse\nD0.f32 = S0.f32\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S0.f32\nelse\nif S0.f32 < 0.0F then\nD0.f32 = S2.f32\nelse\nD0.f32 = -S2.f32\nendif\nendif', + VOP3Op.V_CUBETC_F32: '// D0.f = cubemap T coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = -S1.f32\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = -S2.f32\nelse\nD0.f32 = S2.f32\nendif\nelse\nD0.f32 = -S1.f32\nendif', + VOP3Op.V_CUBEMA_F32: '// D0.f = 2.0 * cubemap major axis.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = S2.f32 * 2.0F\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S1.f32 * 2.0F\nelse\nD0.f32 = S0.f32 * 2.0F\nendif', + VOP3Op.V_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S2[4 : 0].u32) - 1U))', + VOP3Op.V_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32)', + VOP3Op.V_BFI_B32: 'D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32))', + VOP3Op.V_FMA_F32: 'D0.f32 = fma(S0.f32, S1.f32, S2.f32)', + VOP3Op.V_FMA_F64: 'D0.f64 = fma(S0.f64, S1.f64, S2.f64)', + VOP3Op.V_LERP_U8: 'tmp = ((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1U << 24U);\ntmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1U << 16U);\ntmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1U << 8U);\ntmp += ((S0.u32[7 : 0] + S1.u32[7 : 0] + S2.u32[0].u8) >> 
1U);\nD0.u32 = tmp.u32', + VOP3Op.V_ALIGNBIT_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> S2.u32[4 : 0].u32) & 0xffffffffLL)", + VOP3Op.V_ALIGNBYTE_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> (S2.u32[1 : 0].u32 * 8U)) & 0xffffffffLL)", + VOP3Op.V_MULLIT_F32: "if ((S1.f32 == -MAX_FLOAT_F32) || (64'F(S1.f32) == -INF) || isNAN(64'F(S1.f32)) || (S2.f32 <= 0.0F) ||\nisNAN(64'F(S2.f32))) then\nD0.f32 = -MAX_FLOAT_F32\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOP3Op.V_MIN3_F32: 'D0.f32 = v_min_f32(v_min_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MIN3_I32: 'D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MIN3_U32: 'D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MAX3_F32: 'D0.f32 = v_max_f32(v_max_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MAX3_I32: 'D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MAX3_U32: 'D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MED3_F32: "if (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)) || isNAN(64'F(S2.f32))) then\nD0.f32 = v_min3_f32(S0.f32, S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S0.f32 then\nD0.f32 = v_max_f32(S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S1.f32 then\nD0.f32 = v_max_f32(S0.f32, S2.f32)\nelse\nD0.f32 = v_max_f32(S0.f32, S1.f32)\nendif", + VOP3Op.V_MED3_I32: 'if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32 then\nD0.i32 = v_max_i32(S1.i32, S2.i32)\nelsif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32 then\nD0.i32 = v_max_i32(S0.i32, S2.i32)\nelse\nD0.i32 = v_max_i32(S0.i32, S1.i32)\nendif', + VOP3Op.V_MED3_U32: 'if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32 then\nD0.u32 = v_max_u32(S1.u32, S2.u32)\nelsif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32 then\nD0.u32 = v_max_u32(S0.u32, S2.u32)\nelse\nD0.u32 = v_max_u32(S0.u32, S1.u32)\nendif', + VOP3Op.V_SAD_U8: "ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3Op.V_SAD_HI_U8: "D0.u32 = (32'U(v_sad_u8(S0, S1, 0U)) << 16U) + S2.u32", + VOP3Op.V_SAD_U16: 'ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp', + VOP3Op.V_SAD_U32: 'ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32', + VOP3Op.V_CVT_PK_U8_F32: "tmp = (S2.u32 & 32'U(~(0xff << (S1.u32[1 : 0].u32 * 8U))));\ntmp = (tmp | ((32'U(f32_to_u8(S0.f32)) & 255U) << (S1.u32[1 : 0].u32 * 8U)));\nD0.u32 = tmp", + VOP3Op.V_DIV_FIXUP_F32: "sign_out = (sign(S1.f32) ^ sign(S2.f32));\nif isNAN(64'F(S2.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S2.f32)))\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif ((64'F(S1.f32) == 0.0) && (64'F(S2.f32) == 0.0)) then\n// 0/0\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(abs(S1.f32)) == +INF) && (64'F(abs(S2.f32)) == +INF)) then\n// inf/inf\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(S1.f32) == 0.0) || (64'F(abs(S2.f32)) == +INF)) then\n// x/0, or inf/y\nD0.f32 = sign_out ? -INF.f32 : +INF.f32\nelsif ((64'F(abs(S1.f32)) == +INF) || (64'F(S2.f32) == 0.0)) then\n// x/inf, 0/y\nD0.f32 = sign_out ? -0.0F : 0.0F\nelsif exponent(S2.f32) - exponent(S1.f32) < -150 then\nD0.f32 = sign_out ? 
-UNDERFLOW_F32 : UNDERFLOW_F32\nelsif exponent(S1.f32) == 255 then\nD0.f32 = sign_out ? -OVERFLOW_F32 : OVERFLOW_F32\nelse\nD0.f32 = sign_out ? -abs(S0.f32) : abs(S0.f32)\nendif", + VOP3Op.V_DIV_FIXUP_F64: "sign_out = (sign(S1.f64) ^ sign(S2.f64));\nif isNAN(S2.f64) then\nD0.f64 = cvtToQuietNAN(S2.f64)\nelsif isNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif ((S1.f64 == 0.0) && (S2.f64 == 0.0)) then\n// 0/0\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((abs(S1.f64) == +INF) && (abs(S2.f64) == +INF)) then\n// inf/inf\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((S1.f64 == 0.0) || (abs(S2.f64) == +INF)) then\n// x/0, or inf/y\nD0.f64 = sign_out ? -INF : +INF\nelsif ((abs(S1.f64) == +INF) || (S2.f64 == 0.0)) then\n// x/inf, 0/y\nD0.f64 = sign_out ? -0.0 : 0.0\nelsif exponent(S2.f64) - exponent(S1.f64) < -1075 then\nD0.f64 = sign_out ? -UNDERFLOW_F64 : UNDERFLOW_F64\nelsif exponent(S1.f64) == 2047 then\nD0.f64 = sign_out ? -OVERFLOW_F64 : OVERFLOW_F64\nelse\nD0.f64 = sign_out ? -abs(S0.f64) : abs(S0.f64)\nendif", + VOP3Op.V_DIV_FMAS_F32: 'if VCC.u64[laneId] then\nD0.f32 = 2.0F ** 32 * fma(S0.f32, S1.f32, S2.f32)\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif', + VOP3Op.V_DIV_FMAS_F64: 'if VCC.u64[laneId] then\nD0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)\nelse\nD0.f64 = fma(S0.f64, S1.f64, S2.f64)\nendif', + VOP3Op.V_MSAD_U8: "ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3Op.V_QSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", + VOP3Op.V_MQSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", + VOP3Op.V_MQSAD_U32_U8: "tmp[127 : 96] = 32'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32));\ntmp[95 : 64] = 32'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32));\ntmp[63 : 32] = 32'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32));\ntmp[31 : 0] = 32'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32));\nD0.b128 = tmp.b128", + VOP3Op.V_XOR3_B32: 'D0.u32 = (S0.u32 ^ S1.u32 ^ S2.u32)', + VOP3Op.V_MAD_U16: 'D0.u16 = S0.u16 * S1.u16 + S2.u16', + VOP3Op.V_PERM_B32: "BYTE_PERMUTE = lambda(data, sel) (\ndeclare in : 8'B[8];\nfor i in 0 : 7 do\nin[i] = data[i * 8 + 7 : i * 8].b8\nendfor;\nif sel.u32 >= 13U then\nreturn 8'0xff\nelsif sel.u32 == 12U then\nreturn 8'0x0\nelsif sel.u32 == 11U then\nreturn in[7][7].b8 * 8'0xff\nelsif sel.u32 == 10U then\nreturn in[5][7].b8 * 8'0xff\nelsif sel.u32 == 9U then\nreturn in[3][7].b8 * 8'0xff\nelsif sel.u32 == 8U then\nreturn in[1][7].b8 * 8'0xff\nelse\nreturn in[sel]\nendif);\nD0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 
16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])", + VOP3Op.V_XAD_U32: 'D0.u32 = (S0.u32 ^ S1.u32) + S2.u32', + VOP3Op.V_LSHL_ADD_U32: 'D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32', + VOP3Op.V_ADD_LSHL_U32: 'D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32)', + VOP3Op.V_FMA_F16: 'D0.f16 = fma(S0.f16, S1.f16, S2.f16)', + VOP3Op.V_MIN3_F16: 'D0.f16 = v_min_f16(v_min_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MIN3_I16: 'D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16)', + VOP3Op.V_MIN3_U16: 'D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16)', + VOP3Op.V_MAX3_F16: 'D0.f16 = v_max_f16(v_max_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MAX3_I16: 'D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16)', + VOP3Op.V_MAX3_U16: 'D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16)', + VOP3Op.V_MED3_F16: "if (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)) || isNAN(64'F(S2.f16))) then\nD0.f16 = v_min3_f16(S0.f16, S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S0.f16 then\nD0.f16 = v_max_f16(S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S1.f16 then\nD0.f16 = v_max_f16(S0.f16, S2.f16)\nelse\nD0.f16 = v_max_f16(S0.f16, S1.f16)\nendif", + VOP3Op.V_MED3_I16: 'if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16 then\nD0.i16 = v_max_i16(S1.i16, S2.i16)\nelsif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16 then\nD0.i16 = v_max_i16(S0.i16, S2.i16)\nelse\nD0.i16 = v_max_i16(S0.i16, S1.i16)\nendif', + VOP3Op.V_MED3_U16: 'if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16 then\nD0.u16 = v_max_u16(S1.u16, S2.u16)\nelsif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16 then\nD0.u16 = v_max_u16(S0.u16, S2.u16)\nelse\nD0.u16 = v_max_u16(S0.u16, S1.u16)\nendif', + VOP3Op.V_MAD_I16: 'D0.i16 = S0.i16 * S1.i16 + S2.i16', + VOP3Op.V_DIV_FIXUP_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S2.f16)))\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\nD0.f16 = sign_out ? -INF.f16 : +INF.f16\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\nD0.f16 = sign_out ? -16'0.0 : 16'0.0\nelse\nD0.f16 = sign_out ? -abs(S0.f16) : abs(S0.f16)\nendif", + VOP3Op.V_ADD3_U32: 'D0.u32 = S0.u32 + S1.u32 + S2.u32', + VOP3Op.V_LSHL_OR_B32: 'D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32)', + VOP3Op.V_AND_OR_B32: 'D0.u32 = ((S0.u32 & S1.u32) | S2.u32)', + VOP3Op.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)', + VOP3Op.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32", + VOP3Op.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32", + VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 
1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor\nv_mov_b32 s0, 0x87654321;\nv_mov_b32 s1, 0x0fedcba9;\nv_permlane16_b32 v1, v0, s0, s1;\n// ROW 0:\n// v1.lane[0] <- v0.lane[1]\n// v1.lane[1] <- v0.lane[2]\n// ...\n// v1.lane[14] <- v0.lane[15]\n// v1.lane[15] <- v0.lane[0]\n//\n// ROW 1:\n// v1.lane[16] <- v0.lane[17]\n// v1.lane[17] <- v0.lane[18]\n// ...\n// v1.lane[30] <- v0.lane[31]\n// v1.lane[31] <- v0.lane[16]", + VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor\n// Note for this to work, source and destination VGPRs must be different.\n// For this rotation, lane 15 gets data from lane 16, lane 31 gets data from lane 0.\n// These are the only two lanes that need to use v_permlanex16_b32.\n// Enable only the threads that get data from their own row.\nv_mov_b32 exec_lo, 0x7fff7fff; // Lanes getting data from their own row\nv_mov_b32 s0, 0x87654321;\nv_mov_b32 s1, 0x0fedcba9;\nv_permlane16_b32 v1, v0, s0, s1 fi; // FI bit needed for lanes 14 and 30\n// ROW 0:\n// v1.lane[0] <- v0.lane[1]\n// v1.lane[1] <- v0.lane[2]\n// ...\n// v1.lane[14] <- v0.lane[15] (needs FI to read)\n// v1.lane[15] unset\n//\n// ROW 1:\n// v1.lane[16] <- v0.lane[17]\n// v1.lane[17] <- v0.lane[18]\n// ...\n// v1.lane[30] <- v0.lane[31] (needs FI to read)\n// v1.lane[31] unset\n// Enable only the threads that get data from the other row.\nv_mov_b32 exec_lo, 0x80008000; // Lanes getting data from the other row\nv_permlanex16_b32 v1, v0, s0, s1 fi; // FI bit needed for lanes 15 and 31\n// v1.lane[15] <- v0.lane[16]\n// v1.lane[31] <- v0.lane[0]", + VOP3Op.V_CNDMASK_B16: 'D0.u16 = VCC.u64[laneId] ? 
S1.u16 : S0.u16', + VOP3Op.V_MAXMIN_F32: 'D0.f32 = v_min_f32(v_max_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MINMAX_F32: 'D0.f32 = v_max_f32(v_min_f32(S0.f32, S1.f32), S2.f32)', + VOP3Op.V_MAXMIN_F16: 'D0.f16 = v_min_f16(v_max_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MINMAX_F16: 'D0.f16 = v_max_f16(v_min_f16(S0.f16, S1.f16), S2.f16)', + VOP3Op.V_MAXMIN_U32: 'D0.u32 = v_min_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MINMAX_U32: 'D0.u32 = v_max_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', + VOP3Op.V_MAXMIN_I32: 'D0.i32 = v_min_i32(v_max_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_MINMAX_I32: 'D0.i32 = v_max_i32(v_min_i32(S0.i32, S1.i32), S2.i32)', + VOP3Op.V_DOT2_F16_F16: 'tmp = S2.f16;\ntmp += S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp += S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.f16 = tmp', + VOP3Op.V_DOT2_BF16_BF16: 'tmp = S2.bf16;\ntmp += S0[15 : 0].bf16 * S1[15 : 0].bf16;\ntmp += S0[31 : 16].bf16 * S1[31 : 16].bf16;\nD0.bf16 = tmp', + VOP3Op.V_ADD_NC_U16: 'D0.u16 = S0.u16 + S1.u16', + VOP3Op.V_SUB_NC_U16: 'D0.u16 = S0.u16 - S1.u16', + VOP3Op.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16', + VOP3Op.V_CVT_PK_I16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_i16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_i16_f32(S0.f32));\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_U16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_u16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_u16_f32(S0.f32));\nD0 = tmp.b32", + VOP3Op.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16', + VOP3Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16', + VOP3Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16', + VOP3Op.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? S0.i16 : S1.i16', + VOP3Op.V_ADD_NC_I16: 'D0.i16 = S0.i16 + S1.i16', + VOP3Op.V_SUB_NC_I16: 'D0.i16 = S0.i16 - S1.i16', + VOP3Op.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16', + VOP3Op.V_CVT_PK_NORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_NORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);\nD0 = tmp.b32", + VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32\nldexp()', + VOP3Op.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', + VOP3Op.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp", + VOP3Op.V_MBCNT_LO_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp", + VOP3Op.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 
1U : 0U\nendfor;\nD0.u32 = tmp\nv_mbcnt_lo_u32_b32 v0, -1, 0\nv_mbcnt_hi_u32_b32 v0, -1, v0\n// v0 now contains laneId\nv_mbcnt_lo_u32_b32 v0, vcc_lo, 0\nv_mbcnt_hi_u32_b32 v0, vcc_hi, v0 // Note vcc_hi is passed in for second instruction\n// v0 now contains position among lanes with VCC=1", + VOP3Op.V_CVT_PK_NORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_NORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);\nD0 = tmp.b32", + VOP3Op.V_SUB_NC_I32: 'D0.i32 = S0.i32 - S1.i32', + VOP3Op.V_ADD_NC_I32: 'D0.i32 = S0.i32 + S1.i32', + VOP3Op.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64', + VOP3Op.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64', + VOP3Op.V_MIN_F64: 'LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((abs(a) == 0.0) && (abs(b) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif LT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nelse\nif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif LT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.', + VOP3Op.V_MAX_F64: 'GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((abs(a) == 0.0) && (abs(b) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif GT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nelse\nif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif GT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.', + VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32\nldexp()', + VOP3Op.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32', + VOP3Op.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", + VOP3Op.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", + VOP3Op.V_TRIG_PREOP_F64: "shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\n// into the whole part of the number.\n// Only whole part of result is kept.\nresult = 64'F((1201'B(2.0 / 
PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)", + VOP3Op.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)', + VOP3Op.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)', + VOP3Op.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)', + VOP3Op.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)', + VOP3Op.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)', + VOP3Op.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)', + VOP3Op.V_READLANE_B32: "declare lane : 32'U;\nif WAVE32 then\nlane = S1.u32[4 : 0].u32;\n// Lane select for wave32\nelse\nlane = S1.u32[5 : 0].u32;\n// Lane select for wave64\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP3Op.V_WRITELANE_B32: "declare lane : 32'U;\nif WAVE32 then\nlane = S1.u32[4 : 0].u32;\n// Lane select for wave32\nelse\nlane = S1.u32[5 : 0].u32;\n// Lane select for wave64\nendif;\nVGPR[lane][VDST.u32] = S0.b32", + VOP3Op.V_AND_B16: 'D0.u16 = (S0.u16 & S1.u16)', + VOP3Op.V_OR_B16: 'D0.u16 = (S0.u16 | S1.u16)', + VOP3Op.V_XOR_B16: 'D0.u16 = (S0.u16 ^ S1.u16)', +} + +VOP3POp_PCODE = { + VOP3POp.V_PK_MAD_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MUL_LO_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ADD_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_SUB_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_LSHLREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_LSHRREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ASHRREV_I16: 'tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 >= S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 >= S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MIN_I16: 'tmp[31 : 16].i16 = S0[31 : 16].i16 < S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\ntmp[15 : 0].i16 = S0[15 : 0].i16 < S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAD_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_ADD_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_SUB_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 >= S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 >= S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MIN_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 < S1[31 : 16].u16 ? 
S0[31 : 16].u16 : S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 < S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_FMA_F16: "declare tmp : 32'B;\ntmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16);\ntmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16);\nD0.b32 = tmp", + VOP3POp.V_PK_ADD_F16: 'tmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MUL_F16: 'tmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MIN_F16: 'tmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16);\ntmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16);\nD0.b32 = tmp.b32', + VOP3POp.V_PK_MAX_F16: 'tmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16);\ntmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16);\nD0.b32 = tmp.b32', + VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOP3POp.V_DOT4_I32_IU8: "declare A : 32'I[4];\ndeclare B : 32'I[4];\n// Figure out whether inputs are signed/unsigned.\nfor i in 0 : 3 do\nA8 = S0[i * 8 + 7 : i * 8];\nB8 = S1[i * 8 + 7 : i * 8];\nA[i] = NEG[0].u1 ? 32'I(signext(A8.i8)) : 32'I(32'U(A8.u8));\nB[i] = NEG[1].u1 ? 32'I(signext(B8.i8)) : 32'I(32'U(B8.u8))\nendfor;\nC = S2.i32;\n// Signed multiplier/adder. Extend unsigned inputs with leading 0.\ntmp = C.i32;\ntmp += A[0] * B[0];\ntmp += A[1] * B[1];\ntmp += A[2] * B[2];\ntmp += A[3] * B[3];\nD0.i32 = tmp", + VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp', + VOP3POp.V_DOT8_I32_IU4: "declare A : 32'I[8];\ndeclare B : 32'I[8];\n// Figure out whether inputs are signed/unsigned.\nfor i in 0 : 7 do\nA4 = S0[i * 4 + 3 : i * 4];\nB4 = S1[i * 4 + 3 : i * 4];\nA[i] = NEG[0].u1 ? 32'I(signext(A4.i4)) : 32'I(32'U(A4.u4));\nB[i] = NEG[1].u1 ? 32'I(signext(B4.i4)) : 32'I(32'U(B4.u4))\nendfor;\nC = S2.i32;\n// Signed multiplier/adder. 
Extend unsigned inputs with leading 0.\ntmp = C.i32;\ntmp += A[0] * B[0];\ntmp += A[1] * B[1];\ntmp += A[2] * B[2];\ntmp += A[3] * B[3];\ntmp += A[4] * B[4];\ntmp += A[5] * B[5];\ntmp += A[6] * B[6];\ntmp += A[7] * B[7];\nD0.i32 = tmp", + VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp', + VOP3POp.V_DOT2_F32_BF16: 'tmp = S2.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', + VOP3POp.V_FMA_MIX_F32: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])", + VOP3POp.V_FMA_MIXLO_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))", + VOP3POp.V_FMA_MIXHI_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))", + VOP3POp.V_WMMA_F32_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F32_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_F16_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f16(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.bf16(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_I32_16X16X16_IU8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(16x16) + S2.i32(16x16)";\nEXEC = saved_exec', + VOP3POp.V_WMMA_I32_16X16X16_IU4: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(16x16) + S2.i32(16x16)";\nEXEC = saved_exec', +} + +VOP3SDOp_PCODE = { + VOP3SDOp.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_DIV_SCALE_F32: "VCC = 0x0LL;\nif ((64'F(S2.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\nD0.f32 = NAN.f32\nelsif exponent(S2.f32) - exponent(S1.f32) >= 96 then\n// N/D near MAX_FLOAT_F32\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif S1.f32 == DENORM.f32 then\nD0.f32 = ldexp(S0.f32, 64)\nelsif ((1.0 / 64'F(S1.f32) == DENORM.f64) && (S2.f32 / S1.f32 == DENORM.f32)) then\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif 1.0 / 64'F(S1.f32) == DENORM.f64 then\nD0.f32 = ldexp(S0.f32, -64)\nelsif S2.f32 / S1.f32 == DENORM.f32 then\nVCC = 0x1LL;\nif S0.f32 == S2.f32 then\n// Only scale the numerator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif exponent(S2.f32) <= 23 then\n// Numerator is tiny\nD0.f32 = ldexp(S0.f32, 64)\nendif", + VOP3SDOp.V_DIV_SCALE_F64: 'VCC = 0x0LL;\nif ((S2.f64 == 0.0) || (S1.f64 == 0.0)) then\nD0.f64 = NAN.f64\nelsif exponent(S2.f64) - exponent(S1.f64) >= 768 then\n// N/D near MAX_FLOAT_F64\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, 128)\nelsif ((1.0 / S1.f64 == DENORM.f64) && (S2.f64 / S1.f64 == DENORM.f64)) then\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif 1.0 / S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, -128)\nelsif S2.f64 / S1.f64 == DENORM.f64 then\nVCC = 0x1LL;\nif S0.f64 == S2.f64 then\n// Only scale the numerator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif exponent(S2.f64) <= 53 then\n// Numerator is tiny\nD0.f64 = ldexp(S0.f64, 128)\nendif', + VOP3SDOp.V_MAD_U64_U32: "{ D1.u1, D0.u64 } = 65'B(65'U(S0.u32) * 65'U(S1.u32) + 65'U(S2.u64))", + VOP3SDOp.V_MAD_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))", + VOP3SDOp.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + VOP3SDOp.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 
1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", +} + +VOPCOp_PCODE = { + VOPCOp.V_CMP_F_F16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F16: 'D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F16: 'D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F16: 'D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F16: "D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F16: "D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F16: 'D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F16: 'D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F16: 'D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_F16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_F32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F32: 'D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F32: 'D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F32: 'D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F32: "D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_U_F32: "D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F32: 'D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F32: 'D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not 
the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F32: 'D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_F32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_F64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_F64: 'D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_F64: 'D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_F64: 'D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_O_F64: 'D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_U_F64: 'D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NGT_F64: 'D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NEQ_F64: 'D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NLT_F64: 'D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_F64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I16: 'D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I16: 'D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I16: 'D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I16: 'D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LT_U16: 'D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U16: 'D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U16: 'D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U16: 'D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_F_I32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I32: 'D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I32: 'D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 
= VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I32: 'D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I32: 'D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U32: 'D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U32: 'D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U32: 'D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U32: 'D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_I64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_I64: 'D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_I64: 'D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_I64: 'D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_I64: 'D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_I64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_F_U64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_LT_U64: 'D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_EQ_U64: 'D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GT_U64: 'D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_NE_U64: 'D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', + VOPCOp.V_CMP_T_U64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 
5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMP_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOPCOp.V_CMPX_F_F16: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', + VOPCOp.V_CMPX_EQ_F16: 'EXEC.u64[laneId] = S0.f16 == S1.f16', + VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', + VOPCOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', + VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', + VOPCOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = S0.f16 >= S1.f16', + VOPCOp.V_CMPX_O_F16: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + VOPCOp.V_CMPX_U_F16: "EXEC.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + VOPCOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_T_F16: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_F32: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', + VOPCOp.V_CMPX_EQ_F32: 'EXEC.u64[laneId] = S0.f32 == S1.f32', + VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', + VOPCOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', + VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', + VOPCOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = S0.f32 >= S1.f32', + VOPCOp.V_CMPX_O_F32: "EXEC.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", + VOPCOp.V_CMPX_U_F32: "EXEC.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", + VOPCOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_T_F32: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_F64: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', + 
VOPCOp.V_CMPX_EQ_F64: 'EXEC.u64[laneId] = S0.f64 == S1.f64', + VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', + VOPCOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', + VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', + VOPCOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = S0.f64 >= S1.f64', + VOPCOp.V_CMPX_O_F64: 'EXEC.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64))', + VOPCOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64))', + VOPCOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <', + VOPCOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==', + VOPCOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=', + VOPCOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >', + VOPCOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', + VOPCOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', + VOPCOp.V_CMPX_T_F64: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', + VOPCOp.V_CMPX_EQ_I16: 'EXEC.u64[laneId] = S0.i16 == S1.i16', + VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', + VOPCOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', + VOPCOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', + VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', + VOPCOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', + VOPCOp.V_CMPX_EQ_U16: 'EXEC.u64[laneId] = S0.u16 == S1.u16', + VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', + VOPCOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', + VOPCOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', + VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', + VOPCOp.V_CMPX_F_I32: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', + VOPCOp.V_CMPX_EQ_I32: 'EXEC.u64[laneId] = S0.i32 == S1.i32', + VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', + VOPCOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', + VOPCOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', + VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', + VOPCOp.V_CMPX_T_I32: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_U32: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', + VOPCOp.V_CMPX_EQ_U32: 'EXEC.u64[laneId] = S0.u32 == S1.u32', + VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', + VOPCOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', + VOPCOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', + VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', + VOPCOp.V_CMPX_T_U32: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_I64: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', + VOPCOp.V_CMPX_EQ_I64: 'EXEC.u64[laneId] = S0.i64 == S1.i64', + VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', + VOPCOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', + VOPCOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', + VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', + VOPCOp.V_CMPX_T_I64: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_F_U64: "EXEC.u64[laneId] = 1'0U", + VOPCOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < 
S1.u64', + VOPCOp.V_CMPX_EQ_U64: 'EXEC.u64[laneId] = S0.u64 == S1.u64', + VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', + VOPCOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', + VOPCOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', + VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', + VOPCOp.V_CMPX_T_U64: "EXEC.u64[laneId] = 1'1U", + VOPCOp.V_CMPX_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOPCOp.V_CMPX_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOPCOp.V_CMPX_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 
5 : 6]\nendif;\nEXEC.u64[laneId] = result", +} + +PSEUDOCODE_STRINGS = {DSOp: DSOp_PCODE, FLATOp: FLATOp_PCODE, GLOBALOp: GLOBALOp_PCODE, MIMGOp: MIMGOp_PCODE, MTBUFOp: MTBUFOp_PCODE, MUBUFOp: MUBUFOp_PCODE, SCRATCHOp: SCRATCHOp_PCODE, SMEMOp: SMEMOp_PCODE, SOP1Op: SOP1Op_PCODE, SOP2Op: SOP2Op_PCODE, SOPCOp: SOPCOp_PCODE, SOPKOp: SOPKOp_PCODE, SOPPOp: SOPPOp_PCODE, VINTERPOp: VINTERPOp_PCODE, VOP1Op: VOP1Op_PCODE, VOP2Op: VOP2Op_PCODE, VOP3Op: VOP3Op_PCODE, VOP3POp: VOP3POp_PCODE, VOP3SDOp: VOP3SDOp_PCODE, VOPCOp: VOPCOp_PCODE} \ No newline at end of file diff --git a/extra/assembly/amd/autogen/rdna4/enum.py b/extra/assembly/amd/autogen/rdna4/enum.py index cf5a3e8796..1c5e921131 100644 --- a/extra/assembly/amd/autogen/rdna4/enum.py +++ b/extra/assembly/amd/autogen/rdna4/enum.py @@ -1,34 +1,100 @@ -# autogenerated from AMD RDNA4 ISA PDF by pdf.py - do not edit +# autogenerated from AMD ISA PDF by pdf.py - do not edit from enum import IntEnum -class SrcEnum(IntEnum): - VCC_LO = 106 - VCC_HI = 107 - NULL = 124 - M0 = 125 - EXEC_LO = 126 - EXEC_HI = 127 - ZERO = 128 - DPP8 = 233 - DPP8FI = 234 - SHARED_BASE = 235 - SHARED_LIMIT = 236 - PRIVATE_BASE = 237 - PRIVATE_LIMIT = 238 - POS_HALF = 240 - NEG_HALF = 241 - POS_ONE = 242 - NEG_ONE = 243 - POS_TWO = 244 - NEG_TWO = 245 - POS_FOUR = 246 - NEG_FOUR = 247 - INV_2PI = 248 - DPP16 = 250 - VCCZ = 251 - EXECZ = 252 - SCC = 253 - LDS_DIRECT = 254 +class BufFmt(IntEnum): + BUF_FMT_8_UNORM = 1 + BUF_FMT_8_SNORM = 2 + BUF_FMT_8_USCALED = 3 + BUF_FMT_8_SSCALED = 4 + BUF_FMT_8_UINT = 5 + BUF_FMT_8_SINT = 6 + BUF_FMT_16_UNORM = 7 + BUF_FMT_16_SNORM = 8 + BUF_FMT_16_USCALED = 9 + BUF_FMT_16_SSCALED = 10 + BUF_FMT_16_UINT = 11 + BUF_FMT_16_SINT = 12 + BUF_FMT_16_FLOAT = 13 + BUF_FMT_8_8_UNORM = 14 + BUF_FMT_8_8_SNORM = 15 + BUF_FMT_8_8_USCALED = 16 + BUF_FMT_8_8_SSCALED = 17 + BUF_FMT_8_8_UINT = 18 + BUF_FMT_8_8_SINT = 19 + BUF_FMT_32_UINT = 20 + BUF_FMT_32_SINT = 21 + BUF_FMT_32_FLOAT = 22 + BUF_FMT_16_16_UNORM = 23 + BUF_FMT_16_16_SNORM = 24 + BUF_FMT_16_16_USCALED = 25 + BUF_FMT_16_16_SSCALED = 26 + BUF_FMT_16_16_UINT = 27 + BUF_FMT_16_16_SINT = 28 + BUF_FMT_16_16_FLOAT = 29 + BUF_FMT_10_11_11_FLOAT = 30 + BUF_FMT_11_11_10_FLOAT = 31 + BUF_FMT_10_10_10_2_UNORM = 32 + BUF_FMT_10_10_10_2_SNORM = 33 + BUF_FMT_10_10_10_2_UINT = 34 + BUF_FMT_10_10_10_2_SINT = 35 + BUF_FMT_2_10_10_10_UNORM = 36 + BUF_FMT_2_10_10_10_SNORM = 37 + BUF_FMT_2_10_10_10_USCALED = 38 + BUF_FMT_2_10_10_10_SSCALED = 39 + BUF_FMT_2_10_10_10_UINT = 40 + BUF_FMT_2_10_10_10_SINT = 41 + BUF_FMT_8_8_8_8_UNORM = 42 + BUF_FMT_8_8_8_8_SNORM = 43 + BUF_FMT_8_8_8_8_USCALED = 44 + BUF_FMT_8_8_8_8_SSCALED = 45 + BUF_FMT_8_8_8_8_UINT = 46 + BUF_FMT_8_8_8_8_SINT = 47 + BUF_FMT_32_32_UINT = 48 + BUF_FMT_32_32_SINT = 49 + BUF_FMT_32_32_FLOAT = 50 + BUF_FMT_16_16_16_16_UNORM = 51 + BUF_FMT_16_16_16_16_SNORM = 52 + BUF_FMT_16_16_16_16_USCALED = 53 + BUF_FMT_16_16_16_16_SSCALED = 54 + BUF_FMT_16_16_16_16_UINT = 55 + BUF_FMT_16_16_16_16_SINT = 56 + BUF_FMT_16_16_16_16_FLOAT = 57 + BUF_FMT_32_32_32_UINT = 58 + BUF_FMT_32_32_32_SINT = 59 + BUF_FMT_32_32_32_FLOAT = 60 + BUF_FMT_32_32_32_32_UINT = 61 + BUF_FMT_32_32_32_32_SINT = 62 + BUF_FMT_32_32_32_32_FLOAT = 63 + BUF_FMT_8_SRGB = 64 + BUF_FMT_8_8_SRGB = 65 + BUF_FMT_8_8_8_8_SRGB = 66 + BUF_FMT_5_9_9_9_FLOAT = 67 + BUF_FMT_5_6_5_UNORM = 68 + BUF_FMT_1_5_5_5_UNORM = 69 + BUF_FMT_5_5_5_1_UNORM = 70 + BUF_FMT_4_4_4_4_UNORM = 71 + BUF_FMT_4_4_UNORM = 72 + BUF_FMT_1_UNORM = 73 + BUF_FMT_1_REVERSED_UNORM = 74 + BUF_FMT_32_FLOAT_CLAMP = 75 + BUF_FMT_8_24_UNORM = 76 
+ BUF_FMT_8_24_UINT = 77 + BUF_FMT_24_8_UNORM = 78 + BUF_FMT_24_8_UINT = 79 + BUF_FMT_X24_8_32_UINT = 80 + BUF_FMT_X24_8_32_FLOAT = 81 + BUF_FMT_GB_GR_UNORM = 82 + BUF_FMT_GB_GR_SNORM = 83 + BUF_FMT_GB_GR_UINT = 84 + BUF_FMT_GB_GR_SRGB = 85 + BUF_FMT_BG_RG_UNORM = 86 + BUF_FMT_BG_RG_SNORM = 87 + BUF_FMT_BG_RG_UINT = 88 + BUF_FMT_BG_RG_SRGB = 89 + BUF_FMT_BC1_UNORM = 109 + BUF_FMT_BC1_SRGB = 110 + BUF_FMT_BC2_UNORM = 111 + BUF_FMT_BC2_SRGB = 112 class DSOp(IntEnum): DS_ADD_U32 = 0 @@ -1347,7 +1413,6 @@ class VOP3POp(IntEnum): V_SWMMAC_F32_16X16X32_BF8_BF8 = 90 class VOP3SDOp(IntEnum): - DWORD = 1 V_ADD_CO_CI_U32 = 288 V_SUB_CO_CI_U32 = 289 V_SUBREV_CO_CI_U32 = 290 @@ -1627,52 +1692,3 @@ class VSCRATCHOp(IntEnum): SCRATCH_STORE_D16_HI_B16 = 37 SCRATCH_LOAD_BLOCK = 83 SCRATCH_STORE_BLOCK = 84 - -class BufFmt(IntEnum): - BUF_FMT_8_UNORM = 1 - BUF_FMT_8_SNORM = 2 - BUF_FMT_8_USCALED = 3 - BUF_FMT_8_SSCALED = 4 - BUF_FMT_8_UINT = 5 - BUF_FMT_8_SINT = 6 - BUF_FMT_16_UNORM = 7 - BUF_FMT_16_SNORM = 8 - BUF_FMT_16_USCALED = 9 - BUF_FMT_16_SSCALED = 10 - BUF_FMT_16_UINT = 11 - BUF_FMT_16_SINT = 12 - BUF_FMT_16_FLOAT = 13 - BUF_FMT_8_8_UNORM = 14 - BUF_FMT_8_8_SNORM = 15 - BUF_FMT_8_8_USCALED = 16 - BUF_FMT_8_8_SSCALED = 17 - BUF_FMT_8_8_UINT = 18 - BUF_FMT_8_8_SINT = 19 - BUF_FMT_32_UINT = 20 - BUF_FMT_32_SINT = 21 - BUF_FMT_32_FLOAT = 22 - BUF_FMT_16_16_UNORM = 23 - BUF_FMT_10_10_10_2_UNORM = 32 - BUF_FMT_10_10_10_2_SNORM = 33 - BUF_FMT_10_10_10_2_UINT = 34 - BUF_FMT_10_10_10_2_SINT = 35 - BUF_FMT_2_10_10_10_UNORM = 36 - BUF_FMT_2_10_10_10_SNORM = 37 - BUF_FMT_2_10_10_10_USCALED = 38 - BUF_FMT_2_10_10_10_SSCALED = 39 - BUF_FMT_2_10_10_10_UINT = 40 - BUF_FMT_2_10_10_10_SINT = 41 - BUF_FMT_8_8_8_8_UNORM = 42 - BUF_FMT_8_8_8_8_SNORM = 43 - BUF_FMT_8_8_8_8_USCALED = 44 - BUF_FMT_8_8_8_8_SSCALED = 45 - BUF_FMT_8_8_8_8_UINT = 46 - BUF_FMT_8_8_8_8_SINT = 47 - BUF_FMT_32_32_UINT = 48 - BUF_FMT_32_32_SINT = 49 - BUF_FMT_32_32_FLOAT = 50 - BUF_FMT_16_16_16_16_UNORM = 51 - BUF_FMT_16_16_16_16_SNORM = 52 - BUF_FMT_16_16_16_16_USCALED = 53 - BUF_FMT_16_16_16_16_SSCALED = 54 - BUF_FMT_16_16_16_16_UINT = 55 diff --git a/extra/assembly/amd/autogen/rdna4/ins.py b/extra/assembly/amd/autogen/rdna4/ins.py index b0c3ac80f9..99da6270f1 100644 --- a/extra/assembly/amd/autogen/rdna4/ins.py +++ b/extra/assembly/amd/autogen/rdna4/ins.py @@ -1,12 +1,11 @@ -# autogenerated from AMD RDNA4 ISA PDF by pdf.py - do not edit +# autogenerated from AMD ISA PDF by pdf.py - do not edit # ruff: noqa: F401,F403 from typing import Annotated -from extra.assembly.amd.dsl import bits, BitField, Inst32, Inst64, Inst96, SGPR, VGPR, TTMP as TTMP, s as s, v as v, ttmp as ttmp, SSrc, Src, SImm, Imm, VDSTYEnc, SGPRField, VGPRField +from extra.assembly.amd.dsl import * from extra.assembly.amd.autogen.rdna4.enum import * import functools -# instruction formats -class DPP16(Inst64): +class DPP16(Inst): src0:Src = bits[39:32] dpp_ctrl = bits[48:40] fi = bits[50] @@ -18,7 +17,7 @@ class DPP16(Inst64): bank_mask = bits[59:56] row_mask = bits[63:60] -class DPP8(Inst64): +class DPP8(Inst): src0:Src = bits[39:32] lane_sel0 = bits[42:40] lane_sel1 = bits[45:43] @@ -29,7 +28,17 @@ class DPP8(Inst64): lane_sel6 = bits[60:58] lane_sel7 = bits[63:61] -class SMEM(Inst64): +class DS(Inst): + encoding = bits[31:26] == 0b110110 + op:Annotated[BitField, DSOp] = bits[25:18] + vdst:VGPRField = bits[63:56] + addr:VGPRField = bits[39:32] + data0:VGPRField = bits[47:40] + data1:VGPRField = bits[55:48] + offset0 = bits[7:0] + offset1 = bits[15:8] + +class SMEM(Inst): 
encoding = bits[31:26] == 0b111101 op:Annotated[BitField, SMEMOp] = bits[18:13] sdata:SGPRField = bits[12:6] @@ -39,153 +48,116 @@ class SMEM(Inst64): th = bits[24:23] ioffset = bits[55:32] -class SOP1(Inst32): +class SOP1(Inst): encoding = bits[31:23] == 0b101111101 op:Annotated[BitField, SOP1Op] = bits[15:8] sdst:SGPRField = bits[22:16] ssrc0:SSrc = bits[7:0] -class SOP2(Inst32): +class SOP2(Inst): encoding = bits[31:30] == 0b10 op:Annotated[BitField, SOP2Op] = bits[29:23] sdst:SGPRField = bits[22:16] ssrc0:SSrc = bits[7:0] ssrc1:SSrc = bits[15:8] -class SOPC(Inst32): +class SOPC(Inst): encoding = bits[31:23] == 0b101111110 op:Annotated[BitField, SOPCOp] = bits[22:16] ssrc0:SSrc = bits[7:0] ssrc1:SSrc = bits[15:8] -class SOPK(Inst32): +class SOPK(Inst): encoding = bits[31:28] == 0b1011 op:Annotated[BitField, SOPKOp] = bits[27:23] sdst:SGPRField = bits[22:16] simm16:SImm = bits[15:0] -class SOPP(Inst32): +class SOPP(Inst): encoding = bits[31:23] == 0b101111111 op:Annotated[BitField, SOPPOp] = bits[22:16] simm16:SImm = bits[15:0] -class VBUFFER(Inst96): +class VBUFFER(Inst): encoding = bits[31:26] == 0b110001 - soffset:SSrc = bits[6:0] op:Annotated[BitField, VBUFFEROp] = bits[21:14] - tfe = bits[22] vdata:VGPRField = bits[39:32] - rsrc = bits[49:41] - scope = bits[51:50] - th = bits[54:52] + vaddr:VGPRField = bits[71:64] + soffset:SSrc = bits[6:0] format = bits[61:55] offen = bits[62] idxen = bits[63] - vaddr:VGPRField = bits[71:64] + tfe = bits[22] + rsrc = bits[49:41] + scope = bits[51:50] + th = bits[54:52] ioffset = bits[95:72] -class VDS(Inst64): - encoding = bits[31:26] == 0b110110 - offset0 = bits[7:0] - offset1 = bits[15:8] - op = bits[25:18] - addr:VGPRField = bits[39:32] - data0:VGPRField = bits[47:40] - data1:VGPRField = bits[55:48] - vdst:VGPRField = bits[63:56] - -class VDSDIR(Inst64): - encoding = bits[31:24] == 0b11001101 +class VDSDIR(Inst): + encoding = bits[31:24] == 0b11001110 + op:Annotated[BitField, VDSDIROp] = bits[21:20] vdst:VGPRField = bits[7:0] - waitexp = bits[10:8] - opsel = bits[14:11] - cm = bits[15] - op:Annotated[BitField, VDSDIROp] = bits[20:16] - src0:Src = bits[40:32] - src1:Src = bits[49:41] - src2:Src = bits[58:50] - neg = bits[63:61] + attr = bits[15:10] + attr_chan = bits[9:8] + wait_va = bits[19:16] + wait_vmvsrc = bits[23] -class VEXPORT(Inst64): +class VEXPORT(Inst): encoding = bits[31:26] == 0b111110 + vsrc0:VGPRField = bits[39:32] + vsrc1:VGPRField = bits[47:40] + vsrc2:VGPRField = bits[55:48] + vsrc3:VGPRField = bits[63:56] en = bits[3:0] target = bits[9:4] done = bits[11] row = bits[13] - vsrc0 = bits[39:32] - vsrc1:VGPRField = bits[47:40] - vsrc2 = bits[55:48] - vsrc3 = bits[63:56] -class VFLAT(Inst96): - encoding = bits[31:24] == 0b11101100 - saddr:SSrc = bits[6:0] - op:Annotated[BitField, VFLATOp] = bits[20:14] - vdst:VGPRField = bits[39:32] - sve = bits[49] - scope = bits[51:50] - th = bits[54:52] - vsrc = bits[62:55] - vaddr:VGPRField = bits[71:64] - ioffset = bits[95:72] - -class VGLOBAL(Inst96): - encoding = bits[31:24] == 0b11101110 - saddr:SSrc = bits[6:0] - op:Annotated[BitField, VGLOBALOp] = bits[20:14] - vdst:VGPRField = bits[39:32] - sve = bits[49] - scope = bits[51:50] - th = bits[54:52] - vsrc = bits[62:55] - vaddr:VGPRField = bits[71:64] - ioffset = bits[95:72] - -class VIMAGE(Inst96): +class VIMAGE(Inst): encoding = bits[31:26] == 0b110100 + op:Annotated[BitField, VIMAGEOp] = bits[21:14] + vdata:VGPRField = bits[39:32] + dmask = bits[25:22] dim = bits[2:0] + tfe = bits[55] r128 = bits[4] d16 = bits[5] a16 = bits[6] - 
op:Annotated[BitField, VIMAGEOp] = bits[21:14] - dmask = bits[25:22] - vdata:VGPRField = bits[39:32] rsrc = bits[49:41] scope = bits[51:50] th = bits[54:52] - tfe = bits[55] vaddr4 = bits[56:63] vaddr0 = bits[71:64] vaddr1 = bits[79:72] vaddr2 = bits[87:80] vaddr3 = bits[95:88] -class VINTERP(Inst64): +class VINTERP(Inst): encoding = bits[31:24] == 0b11001101 op:Annotated[BitField, VINTERPOp] = bits[20:16] vdst:VGPRField = bits[7:0] src0:Src = bits[40:32] src1:Src = bits[49:41] src2:Src = bits[58:50] - waitexp = bits[10:8] - opsel = bits[14:11] neg = bits[63:61] + opsel = bits[14:11] + waitexp = bits[10:8] cm = bits[15] -class VOP1(Inst32): - encoding = bits[31:25] == 0b111111 +class VOP1(Inst): + encoding = bits[31:25] == 0b0111111 op:Annotated[BitField, VOP1Op] = bits[15:9] vdst:VGPRField = bits[24:17] src0:Src = bits[8:0] -class VOP2(Inst32): - encoding = bits[31] == 0 +class VOP2(Inst): + encoding = bits[31] == 0b0 op:Annotated[BitField, VOP2Op] = bits[30:25] vdst:VGPRField = bits[24:17] src0:Src = bits[8:0] vsrc1:VGPRField = bits[16:9] -class VOP3(Inst64): +class VOP3(Inst): encoding = bits[31:26] == 0b110101 op:Annotated[BitField, VOP3Op] = bits[25:16] vdst:VGPRField = bits[7:0] @@ -198,9 +170,8 @@ class VOP3(Inst64): opsel = bits[14:11] cm = bits[15] -class VOP3P(Inst64): +class VOP3P(Inst): encoding = bits[31:24] == 0b11001100 - _defaults = {'opsel_hi': 3, 'opsel_hi2': 1} op:Annotated[BitField, VOP3POp] = bits[22:16] vdst:VGPRField = bits[7:0] src0:Src = bits[40:32] @@ -213,7 +184,7 @@ class VOP3P(Inst64): opsel_hi2 = bits[14] cm = bits[15] -class VOP3SD(Inst64): +class VOP3SD(Inst): encoding = bits[31:26] == 0b110101 op:Annotated[BitField, VOP3SDOp] = bits[25:16] vdst:VGPRField = bits[7:0] @@ -221,38 +192,38 @@ class VOP3SD(Inst64): src0:Src = bits[40:32] src1:Src = bits[49:41] src2:Src = bits[58:50] - cm = bits[15] omod = bits[60:59] neg = bits[63:61] + cm = bits[15] -class VOPC(Inst32): - encoding = bits[31:25] == 0b111110 +class VOPC(Inst): + encoding = bits[31:25] == 0b0111110 op:Annotated[BitField, VOPCOp] = bits[24:17] src0:Src = bits[8:0] vsrc1:VGPRField = bits[16:9] -class VOPD(Inst64): +class VOPD(Inst): encoding = bits[31:26] == 0b110010 opx:Annotated[BitField, VOPDOp] = bits[25:22] opy:Annotated[BitField, VOPDOp] = bits[21:17] - vdstx:VGPRField = bits[63:56] + vdstx = bits[63:56] vdsty:VDSTYEnc = bits[55:49] srcx0:Src = bits[8:0] - vsrcx1:VGPRField = bits[16:9] srcy0:Src = bits[40:32] - vsrcy1:VGPRField = bits[48:41] + vsrcx1 = bits[16:9] + vsrcy1 = bits[48:41] -class VSAMPLE(Inst96): +class VSAMPLE(Inst): encoding = bits[31:26] == 0b111001 + op:Annotated[BitField, VSAMPLEOp] = bits[21:14] + vdata:VGPRField = bits[39:32] + dmask = bits[25:22] dim = bits[2:0] tfe = bits[3] + unrm = bits[13] r128 = bits[4] d16 = bits[5] a16 = bits[6] - unrm = bits[13] - op:Annotated[BitField, VSAMPLEOp] = bits[21:14] - dmask = bits[25:22] - vdata:VGPRField = bits[39:32] lwe = bits[40] rsrc = bits[49:41] scope = bits[51:50] @@ -263,19 +234,130 @@ class VSAMPLE(Inst96): vaddr2 = bits[87:80] vaddr3 = bits[95:88] -class VSCRATCH(Inst96): - encoding = bits[31:24] == 0b11101101 - saddr:SSrc = bits[6:0] - op:Annotated[BitField, VSCRATCHOp] = bits[20:14] - vdst:VGPRField = bits[39:32] - sve = bits[49] - scope = bits[51:50] - th = bits[54:52] - vsrc = bits[62:55] - vaddr:VGPRField = bits[71:64] - ioffset = bits[95:72] - # instruction helpers +ds_add_u32 = functools.partial(DS, DSOp.DS_ADD_U32) +ds_sub_u32 = functools.partial(DS, DSOp.DS_SUB_U32) +ds_rsub_u32 = functools.partial(DS, 
DSOp.DS_RSUB_U32) +ds_inc_u32 = functools.partial(DS, DSOp.DS_INC_U32) +ds_dec_u32 = functools.partial(DS, DSOp.DS_DEC_U32) +ds_min_i32 = functools.partial(DS, DSOp.DS_MIN_I32) +ds_max_i32 = functools.partial(DS, DSOp.DS_MAX_I32) +ds_min_u32 = functools.partial(DS, DSOp.DS_MIN_U32) +ds_max_u32 = functools.partial(DS, DSOp.DS_MAX_U32) +ds_and_b32 = functools.partial(DS, DSOp.DS_AND_B32) +ds_or_b32 = functools.partial(DS, DSOp.DS_OR_B32) +ds_xor_b32 = functools.partial(DS, DSOp.DS_XOR_B32) +ds_mskor_b32 = functools.partial(DS, DSOp.DS_MSKOR_B32) +ds_store_b32 = functools.partial(DS, DSOp.DS_STORE_B32) +ds_store_2addr_b32 = functools.partial(DS, DSOp.DS_STORE_2ADDR_B32) +ds_store_2addr_stride64_b32 = functools.partial(DS, DSOp.DS_STORE_2ADDR_STRIDE64_B32) +ds_cmpstore_b32 = functools.partial(DS, DSOp.DS_CMPSTORE_B32) +ds_min_num_f32 = functools.partial(DS, DSOp.DS_MIN_NUM_F32) +ds_max_num_f32 = functools.partial(DS, DSOp.DS_MAX_NUM_F32) +ds_nop = functools.partial(DS, DSOp.DS_NOP) +ds_add_f32 = functools.partial(DS, DSOp.DS_ADD_F32) +ds_store_b8 = functools.partial(DS, DSOp.DS_STORE_B8) +ds_store_b16 = functools.partial(DS, DSOp.DS_STORE_B16) +ds_add_rtn_u32 = functools.partial(DS, DSOp.DS_ADD_RTN_U32) +ds_sub_rtn_u32 = functools.partial(DS, DSOp.DS_SUB_RTN_U32) +ds_rsub_rtn_u32 = functools.partial(DS, DSOp.DS_RSUB_RTN_U32) +ds_inc_rtn_u32 = functools.partial(DS, DSOp.DS_INC_RTN_U32) +ds_dec_rtn_u32 = functools.partial(DS, DSOp.DS_DEC_RTN_U32) +ds_min_rtn_i32 = functools.partial(DS, DSOp.DS_MIN_RTN_I32) +ds_max_rtn_i32 = functools.partial(DS, DSOp.DS_MAX_RTN_I32) +ds_min_rtn_u32 = functools.partial(DS, DSOp.DS_MIN_RTN_U32) +ds_max_rtn_u32 = functools.partial(DS, DSOp.DS_MAX_RTN_U32) +ds_and_rtn_b32 = functools.partial(DS, DSOp.DS_AND_RTN_B32) +ds_or_rtn_b32 = functools.partial(DS, DSOp.DS_OR_RTN_B32) +ds_xor_rtn_b32 = functools.partial(DS, DSOp.DS_XOR_RTN_B32) +ds_mskor_rtn_b32 = functools.partial(DS, DSOp.DS_MSKOR_RTN_B32) +ds_storexchg_rtn_b32 = functools.partial(DS, DSOp.DS_STOREXCHG_RTN_B32) +ds_storexchg_2addr_rtn_b32 = functools.partial(DS, DSOp.DS_STOREXCHG_2ADDR_RTN_B32) +ds_storexchg_2addr_stride64_rtn_b32 = functools.partial(DS, DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32) +ds_cmpstore_rtn_b32 = functools.partial(DS, DSOp.DS_CMPSTORE_RTN_B32) +ds_min_num_rtn_f32 = functools.partial(DS, DSOp.DS_MIN_NUM_RTN_F32) +ds_max_num_rtn_f32 = functools.partial(DS, DSOp.DS_MAX_NUM_RTN_F32) +ds_swizzle_b32 = functools.partial(DS, DSOp.DS_SWIZZLE_B32) +ds_load_b32 = functools.partial(DS, DSOp.DS_LOAD_B32) +ds_load_2addr_b32 = functools.partial(DS, DSOp.DS_LOAD_2ADDR_B32) +ds_load_2addr_stride64_b32 = functools.partial(DS, DSOp.DS_LOAD_2ADDR_STRIDE64_B32) +ds_load_i8 = functools.partial(DS, DSOp.DS_LOAD_I8) +ds_load_u8 = functools.partial(DS, DSOp.DS_LOAD_U8) +ds_load_i16 = functools.partial(DS, DSOp.DS_LOAD_I16) +ds_load_u16 = functools.partial(DS, DSOp.DS_LOAD_U16) +ds_consume = functools.partial(DS, DSOp.DS_CONSUME) +ds_append = functools.partial(DS, DSOp.DS_APPEND) +ds_add_u64 = functools.partial(DS, DSOp.DS_ADD_U64) +ds_sub_u64 = functools.partial(DS, DSOp.DS_SUB_U64) +ds_rsub_u64 = functools.partial(DS, DSOp.DS_RSUB_U64) +ds_inc_u64 = functools.partial(DS, DSOp.DS_INC_U64) +ds_dec_u64 = functools.partial(DS, DSOp.DS_DEC_U64) +ds_min_i64 = functools.partial(DS, DSOp.DS_MIN_I64) +ds_max_i64 = functools.partial(DS, DSOp.DS_MAX_I64) +ds_min_u64 = functools.partial(DS, DSOp.DS_MIN_U64) +ds_max_u64 = functools.partial(DS, DSOp.DS_MAX_U64) +ds_and_b64 = functools.partial(DS, DSOp.DS_AND_B64) 
+ds_or_b64 = functools.partial(DS, DSOp.DS_OR_B64) +ds_xor_b64 = functools.partial(DS, DSOp.DS_XOR_B64) +ds_mskor_b64 = functools.partial(DS, DSOp.DS_MSKOR_B64) +ds_store_b64 = functools.partial(DS, DSOp.DS_STORE_B64) +ds_store_2addr_b64 = functools.partial(DS, DSOp.DS_STORE_2ADDR_B64) +ds_store_2addr_stride64_b64 = functools.partial(DS, DSOp.DS_STORE_2ADDR_STRIDE64_B64) +ds_cmpstore_b64 = functools.partial(DS, DSOp.DS_CMPSTORE_B64) +ds_min_num_f64 = functools.partial(DS, DSOp.DS_MIN_NUM_F64) +ds_max_num_f64 = functools.partial(DS, DSOp.DS_MAX_NUM_F64) +ds_add_rtn_u64 = functools.partial(DS, DSOp.DS_ADD_RTN_U64) +ds_sub_rtn_u64 = functools.partial(DS, DSOp.DS_SUB_RTN_U64) +ds_rsub_rtn_u64 = functools.partial(DS, DSOp.DS_RSUB_RTN_U64) +ds_inc_rtn_u64 = functools.partial(DS, DSOp.DS_INC_RTN_U64) +ds_dec_rtn_u64 = functools.partial(DS, DSOp.DS_DEC_RTN_U64) +ds_min_rtn_i64 = functools.partial(DS, DSOp.DS_MIN_RTN_I64) +ds_max_rtn_i64 = functools.partial(DS, DSOp.DS_MAX_RTN_I64) +ds_min_rtn_u64 = functools.partial(DS, DSOp.DS_MIN_RTN_U64) +ds_max_rtn_u64 = functools.partial(DS, DSOp.DS_MAX_RTN_U64) +ds_and_rtn_b64 = functools.partial(DS, DSOp.DS_AND_RTN_B64) +ds_or_rtn_b64 = functools.partial(DS, DSOp.DS_OR_RTN_B64) +ds_xor_rtn_b64 = functools.partial(DS, DSOp.DS_XOR_RTN_B64) +ds_mskor_rtn_b64 = functools.partial(DS, DSOp.DS_MSKOR_RTN_B64) +ds_storexchg_rtn_b64 = functools.partial(DS, DSOp.DS_STOREXCHG_RTN_B64) +ds_storexchg_2addr_rtn_b64 = functools.partial(DS, DSOp.DS_STOREXCHG_2ADDR_RTN_B64) +ds_storexchg_2addr_stride64_rtn_b64 = functools.partial(DS, DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64) +ds_cmpstore_rtn_b64 = functools.partial(DS, DSOp.DS_CMPSTORE_RTN_B64) +ds_min_num_rtn_f64 = functools.partial(DS, DSOp.DS_MIN_NUM_RTN_F64) +ds_max_num_rtn_f64 = functools.partial(DS, DSOp.DS_MAX_NUM_RTN_F64) +ds_load_b64 = functools.partial(DS, DSOp.DS_LOAD_B64) +ds_load_2addr_b64 = functools.partial(DS, DSOp.DS_LOAD_2ADDR_B64) +ds_load_2addr_stride64_b64 = functools.partial(DS, DSOp.DS_LOAD_2ADDR_STRIDE64_B64) +ds_add_rtn_f32 = functools.partial(DS, DSOp.DS_ADD_RTN_F32) +ds_condxchg32_rtn_b64 = functools.partial(DS, DSOp.DS_CONDXCHG32_RTN_B64) +ds_cond_sub_u32 = functools.partial(DS, DSOp.DS_COND_SUB_U32) +ds_sub_clamp_u32 = functools.partial(DS, DSOp.DS_SUB_CLAMP_U32) +ds_pk_add_f16 = functools.partial(DS, DSOp.DS_PK_ADD_F16) +ds_pk_add_bf16 = functools.partial(DS, DSOp.DS_PK_ADD_BF16) +ds_store_b8_d16_hi = functools.partial(DS, DSOp.DS_STORE_B8_D16_HI) +ds_store_b16_d16_hi = functools.partial(DS, DSOp.DS_STORE_B16_D16_HI) +ds_load_u8_d16 = functools.partial(DS, DSOp.DS_LOAD_U8_D16) +ds_load_u8_d16_hi = functools.partial(DS, DSOp.DS_LOAD_U8_D16_HI) +ds_load_i8_d16 = functools.partial(DS, DSOp.DS_LOAD_I8_D16) +ds_load_i8_d16_hi = functools.partial(DS, DSOp.DS_LOAD_I8_D16_HI) +ds_load_u16_d16 = functools.partial(DS, DSOp.DS_LOAD_U16_D16) +ds_load_u16_d16_hi = functools.partial(DS, DSOp.DS_LOAD_U16_D16_HI) +ds_cond_sub_rtn_u32 = functools.partial(DS, DSOp.DS_COND_SUB_RTN_U32) +ds_sub_clamp_rtn_u32 = functools.partial(DS, DSOp.DS_SUB_CLAMP_RTN_U32) +ds_pk_add_rtn_f16 = functools.partial(DS, DSOp.DS_PK_ADD_RTN_F16) +ds_pk_add_rtn_bf16 = functools.partial(DS, DSOp.DS_PK_ADD_RTN_BF16) +ds_store_addtid_b32 = functools.partial(DS, DSOp.DS_STORE_ADDTID_B32) +ds_load_addtid_b32 = functools.partial(DS, DSOp.DS_LOAD_ADDTID_B32) +ds_permute_b32 = functools.partial(DS, DSOp.DS_PERMUTE_B32) +ds_bpermute_b32 = functools.partial(DS, DSOp.DS_BPERMUTE_B32) +ds_bpermute_fi_b32 = functools.partial(DS, 
DSOp.DS_BPERMUTE_FI_B32) +ds_store_b96 = functools.partial(DS, DSOp.DS_STORE_B96) +ds_store_b128 = functools.partial(DS, DSOp.DS_STORE_B128) +ds_bvh_stack_push4_pop1_rtn_b32 = functools.partial(DS, DSOp.DS_BVH_STACK_PUSH4_POP1_RTN_B32) +ds_bvh_stack_push8_pop1_rtn_b32 = functools.partial(DS, DSOp.DS_BVH_STACK_PUSH8_POP1_RTN_B32) +ds_bvh_stack_push8_pop2_rtn_b64 = functools.partial(DS, DSOp.DS_BVH_STACK_PUSH8_POP2_RTN_B64) +ds_load_b96 = functools.partial(DS, DSOp.DS_LOAD_B96) +ds_load_b128 = functools.partial(DS, DSOp.DS_LOAD_B128) s_load_b32 = functools.partial(SMEM, SMEMOp.S_LOAD_B32) s_load_b64 = functools.partial(SMEM, SMEMOp.S_LOAD_B64) s_load_b128 = functools.partial(SMEM, SMEMOp.S_LOAD_B128) @@ -647,126 +729,6 @@ tbuffer_store_d16_format_xyz = functools.partial(VBUFFER, VBUFFEROp.TBUFFER_STOR tbuffer_store_d16_format_xyzw = functools.partial(VBUFFER, VBUFFEROp.TBUFFER_STORE_D16_FORMAT_XYZW) ds_param_load = functools.partial(VDSDIR, VDSDIROp.DS_PARAM_LOAD) ds_direct_load = functools.partial(VDSDIR, VDSDIROp.DS_DIRECT_LOAD) -flat_load_u8 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_U8) -flat_load_i8 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_I8) -flat_load_u16 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_U16) -flat_load_i16 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_I16) -flat_load_b32 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_B32) -flat_load_b64 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_B64) -flat_load_b96 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_B96) -flat_load_b128 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_B128) -flat_store_b8 = functools.partial(VFLAT, VFLATOp.FLAT_STORE_B8) -flat_store_b16 = functools.partial(VFLAT, VFLATOp.FLAT_STORE_B16) -flat_store_b32 = functools.partial(VFLAT, VFLATOp.FLAT_STORE_B32) -flat_store_b64 = functools.partial(VFLAT, VFLATOp.FLAT_STORE_B64) -flat_store_b96 = functools.partial(VFLAT, VFLATOp.FLAT_STORE_B96) -flat_store_b128 = functools.partial(VFLAT, VFLATOp.FLAT_STORE_B128) -flat_load_d16_u8 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_D16_U8) -flat_load_d16_i8 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_D16_I8) -flat_load_d16_b16 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_D16_B16) -flat_load_d16_hi_u8 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_D16_HI_U8) -flat_load_d16_hi_i8 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_D16_HI_I8) -flat_load_d16_hi_b16 = functools.partial(VFLAT, VFLATOp.FLAT_LOAD_D16_HI_B16) -flat_store_d16_hi_b8 = functools.partial(VFLAT, VFLATOp.FLAT_STORE_D16_HI_B8) -flat_store_d16_hi_b16 = functools.partial(VFLAT, VFLATOp.FLAT_STORE_D16_HI_B16) -flat_atomic_swap_b32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_SWAP_B32) -flat_atomic_cmpswap_b32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_CMPSWAP_B32) -flat_atomic_add_u32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_ADD_U32) -flat_atomic_sub_u32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_SUB_U32) -flat_atomic_sub_clamp_u32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_SUB_CLAMP_U32) -flat_atomic_min_i32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MIN_I32) -flat_atomic_min_u32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MIN_U32) -flat_atomic_max_i32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MAX_I32) -flat_atomic_max_u32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MAX_U32) -flat_atomic_and_b32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_AND_B32) -flat_atomic_or_b32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_OR_B32) -flat_atomic_xor_b32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_XOR_B32) -flat_atomic_inc_u32 = 
functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_INC_U32) -flat_atomic_dec_u32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_DEC_U32) -flat_atomic_swap_b64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_SWAP_B64) -flat_atomic_cmpswap_b64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_CMPSWAP_B64) -flat_atomic_add_u64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_ADD_U64) -flat_atomic_sub_u64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_SUB_U64) -flat_atomic_min_i64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MIN_I64) -flat_atomic_min_u64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MIN_U64) -flat_atomic_max_i64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MAX_I64) -flat_atomic_max_u64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MAX_U64) -flat_atomic_and_b64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_AND_B64) -flat_atomic_or_b64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_OR_B64) -flat_atomic_xor_b64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_XOR_B64) -flat_atomic_inc_u64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_INC_U64) -flat_atomic_dec_u64 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_DEC_U64) -flat_atomic_cond_sub_u32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_COND_SUB_U32) -flat_atomic_min_num_f32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MIN_NUM_F32) -flat_atomic_max_num_f32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_MAX_NUM_F32) -flat_atomic_add_f32 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_ADD_F32) -flat_atomic_pk_add_f16 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_PK_ADD_F16) -flat_atomic_pk_add_bf16 = functools.partial(VFLAT, VFLATOp.FLAT_ATOMIC_PK_ADD_BF16) -global_load_u8 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_U8) -global_load_i8 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_I8) -global_load_u16 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_U16) -global_load_i16 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_I16) -global_load_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_B32) -global_load_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_B64) -global_load_b96 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_B96) -global_load_b128 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_B128) -global_store_b8 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_B8) -global_store_b16 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_B16) -global_store_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_B32) -global_store_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_B64) -global_store_b96 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_B96) -global_store_b128 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_B128) -global_load_d16_u8 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_D16_U8) -global_load_d16_i8 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_D16_I8) -global_load_d16_b16 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_D16_B16) -global_load_d16_hi_u8 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_D16_HI_U8) -global_load_d16_hi_i8 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_D16_HI_I8) -global_load_d16_hi_b16 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_D16_HI_B16) -global_store_d16_hi_b8 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_D16_HI_B8) -global_store_d16_hi_b16 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_D16_HI_B16) -global_load_addtid_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_ADDTID_B32) -global_store_addtid_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_ADDTID_B32) -global_inv = 
functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_INV) -global_wb = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_WB) -global_atomic_swap_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_SWAP_B32) -global_atomic_cmpswap_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_CMPSWAP_B32) -global_atomic_add_u32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_ADD_U32) -global_atomic_sub_u32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_SUB_U32) -global_atomic_sub_clamp_u32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_SUB_CLAMP_U32) -global_atomic_min_i32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MIN_I32) -global_atomic_min_u32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MIN_U32) -global_atomic_max_i32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MAX_I32) -global_atomic_max_u32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MAX_U32) -global_atomic_and_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_AND_B32) -global_atomic_or_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_OR_B32) -global_atomic_xor_b32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_XOR_B32) -global_atomic_inc_u32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_INC_U32) -global_atomic_dec_u32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_DEC_U32) -global_atomic_swap_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_SWAP_B64) -global_atomic_cmpswap_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_CMPSWAP_B64) -global_atomic_add_u64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_ADD_U64) -global_atomic_sub_u64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_SUB_U64) -global_atomic_min_i64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MIN_I64) -global_atomic_min_u64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MIN_U64) -global_atomic_max_i64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MAX_I64) -global_atomic_max_u64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MAX_U64) -global_atomic_and_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_AND_B64) -global_atomic_or_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_OR_B64) -global_atomic_xor_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_XOR_B64) -global_atomic_inc_u64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_INC_U64) -global_atomic_dec_u64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_DEC_U64) -global_wbinv = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_WBINV) -global_atomic_cond_sub_u32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_COND_SUB_U32) -global_atomic_min_num_f32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MIN_NUM_F32) -global_atomic_max_num_f32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_MAX_NUM_F32) -global_load_block = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_BLOCK) -global_store_block = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_STORE_BLOCK) -global_atomic_add_f32 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_ADD_F32) -global_load_tr_b128 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_TR_B128) -global_load_tr_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_LOAD_TR_B64) -global_atomic_pk_add_f16 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_PK_ADD_F16) -global_atomic_pk_add_bf16 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_PK_ADD_BF16) -global_atomic_ordered_add_b64 = functools.partial(VGLOBAL, VGLOBALOp.GLOBAL_ATOMIC_ORDERED_ADD_B64) image_load = functools.partial(VIMAGE, VIMAGEOp.IMAGE_LOAD) image_load_mip = 
functools.partial(VIMAGE, VIMAGEOp.IMAGE_LOAD_MIP) image_load_pck = functools.partial(VIMAGE, VIMAGEOp.IMAGE_LOAD_PCK) @@ -931,8 +893,8 @@ v_add_nc_u32_e32 = functools.partial(VOP2, VOP2Op.V_ADD_NC_U32) v_sub_nc_u32_e32 = functools.partial(VOP2, VOP2Op.V_SUB_NC_U32) v_subrev_nc_u32_e32 = functools.partial(VOP2, VOP2Op.V_SUBREV_NC_U32) v_fmac_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAC_F32) -def v_fmamk_f32_e32(vdst, src0, K, vsrc1): return VOP2(VOP2Op.V_FMAMK_F32, vdst, src0, vsrc1, literal=K) -def v_fmaak_f32_e32(vdst, src0, vsrc1, K): return VOP2(VOP2Op.V_FMAAK_F32, vdst, src0, vsrc1, literal=K) +v_fmamk_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAMK_F32) +v_fmaak_f32_e32 = functools.partial(VOP2, VOP2Op.V_FMAAK_F32) v_cvt_pk_rtz_f16_f32_e32 = functools.partial(VOP2, VOP2Op.V_CVT_PK_RTZ_F16_F32) v_min_num_f16_e32 = functools.partial(VOP2, VOP2Op.V_MIN_NUM_F16) v_max_num_f16_e32 = functools.partial(VOP2, VOP2Op.V_MAX_NUM_F16) @@ -941,8 +903,8 @@ v_sub_f16_e32 = functools.partial(VOP2, VOP2Op.V_SUB_F16) v_subrev_f16_e32 = functools.partial(VOP2, VOP2Op.V_SUBREV_F16) v_mul_f16_e32 = functools.partial(VOP2, VOP2Op.V_MUL_F16) v_fmac_f16_e32 = functools.partial(VOP2, VOP2Op.V_FMAC_F16) -def v_fmamk_f16_e32(vdst, src0, K, vsrc1): return VOP2(VOP2Op.V_FMAMK_F16, vdst, src0, vsrc1, literal=K) -def v_fmaak_f16_e32(vdst, src0, vsrc1, K): return VOP2(VOP2Op.V_FMAAK_F16, vdst, src0, vsrc1, literal=K) +v_fmamk_f16_e32 = functools.partial(VOP2, VOP2Op.V_FMAMK_F16) +v_fmaak_f16_e32 = functools.partial(VOP2, VOP2Op.V_FMAAK_F16) v_ldexp_f16_e32 = functools.partial(VOP2, VOP2Op.V_LDEXP_F16) v_pk_fmac_f16_e32 = functools.partial(VOP2, VOP2Op.V_PK_FMAC_F16) v_cmp_lt_f16_e64 = functools.partial(VOP3, VOP3Op.V_CMP_LT_F16) @@ -1435,7 +1397,6 @@ v_swmmac_f32_16x16x32_fp8_fp8 = functools.partial(VOP3P, VOP3POp.V_SWMMAC_F32_16 v_swmmac_f32_16x16x32_fp8_bf8 = functools.partial(VOP3P, VOP3POp.V_SWMMAC_F32_16X16X32_FP8_BF8) v_swmmac_f32_16x16x32_bf8_fp8 = functools.partial(VOP3P, VOP3POp.V_SWMMAC_F32_16X16X32_BF8_FP8) v_swmmac_f32_16x16x32_bf8_bf8 = functools.partial(VOP3P, VOP3POp.V_SWMMAC_F32_16X16X32_BF8_BF8) -dword = functools.partial(VOP3SD, VOP3SDOp.DWORD) v_add_co_ci_u32 = functools.partial(VOP3SD, VOP3SDOp.V_ADD_CO_CI_U32) v_sub_co_ci_u32 = functools.partial(VOP3SD, VOP3SDOp.V_SUB_CO_CI_U32) v_subrev_co_ci_u32 = functools.partial(VOP3SD, VOP3SDOp.V_SUBREV_CO_CI_U32) @@ -1682,55 +1643,4 @@ image_gather4_c_cl = functools.partial(VSAMPLE, VSAMPLEOp.IMAGE_GATHER4_C_CL) image_gather4_c_l = functools.partial(VSAMPLE, VSAMPLEOp.IMAGE_GATHER4_C_L) image_gather4_c_b = functools.partial(VSAMPLE, VSAMPLEOp.IMAGE_GATHER4_C_B) image_gather4_c_b_cl = functools.partial(VSAMPLE, VSAMPLEOp.IMAGE_GATHER4_C_B_CL) -image_gather4h = functools.partial(VSAMPLE, VSAMPLEOp.IMAGE_GATHER4H) -scratch_load_u8 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_U8) -scratch_load_i8 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_I8) -scratch_load_u16 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_U16) -scratch_load_i16 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_I16) -scratch_load_b32 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_B32) -scratch_load_b64 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_B64) -scratch_load_b96 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_B96) -scratch_load_b128 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_B128) -scratch_store_b8 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_STORE_B8) -scratch_store_b16 = functools.partial(VSCRATCH, 
VSCRATCHOp.SCRATCH_STORE_B16) -scratch_store_b32 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_STORE_B32) -scratch_store_b64 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_STORE_B64) -scratch_store_b96 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_STORE_B96) -scratch_store_b128 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_STORE_B128) -scratch_load_d16_u8 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_D16_U8) -scratch_load_d16_i8 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_D16_I8) -scratch_load_d16_b16 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_D16_B16) -scratch_load_d16_hi_u8 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_D16_HI_U8) -scratch_load_d16_hi_i8 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_D16_HI_I8) -scratch_load_d16_hi_b16 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_D16_HI_B16) -scratch_store_d16_hi_b8 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_STORE_D16_HI_B8) -scratch_store_d16_hi_b16 = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_STORE_D16_HI_B16) -scratch_load_block = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_LOAD_BLOCK) -scratch_store_block = functools.partial(VSCRATCH, VSCRATCHOp.SCRATCH_STORE_BLOCK) - -VCC_LO = SrcEnum.VCC_LO -VCC_HI = SrcEnum.VCC_HI -NULL = SrcEnum.NULL -M0 = SrcEnum.M0 -EXEC_LO = SrcEnum.EXEC_LO -EXEC_HI = SrcEnum.EXEC_HI -ZERO = SrcEnum.ZERO -DPP8FI = SrcEnum.DPP8FI -SHARED_BASE = SrcEnum.SHARED_BASE -SHARED_LIMIT = SrcEnum.SHARED_LIMIT -PRIVATE_BASE = SrcEnum.PRIVATE_BASE -PRIVATE_LIMIT = SrcEnum.PRIVATE_LIMIT -POS_HALF = SrcEnum.POS_HALF -NEG_HALF = SrcEnum.NEG_HALF -POS_ONE = SrcEnum.POS_ONE -NEG_ONE = SrcEnum.NEG_ONE -POS_TWO = SrcEnum.POS_TWO -NEG_TWO = SrcEnum.NEG_TWO -POS_FOUR = SrcEnum.POS_FOUR -NEG_FOUR = SrcEnum.NEG_FOUR -INV_2PI = SrcEnum.INV_2PI -VCCZ = SrcEnum.VCCZ -EXECZ = SrcEnum.EXECZ -SCC = SrcEnum.SCC -LDS_DIRECT = SrcEnum.LDS_DIRECT -OFF = NULL +image_gather4h = functools.partial(VSAMPLE, VSAMPLEOp.IMAGE_GATHER4H) \ No newline at end of file diff --git a/extra/assembly/amd/autogen/rdna4/str_pcode.py b/extra/assembly/amd/autogen/rdna4/str_pcode.py index 7e8c5aa25e..d354184963 100644 --- a/extra/assembly/amd/autogen/rdna4/str_pcode.py +++ b/extra/assembly/amd/autogen/rdna4/str_pcode.py @@ -1,7 +1,160 @@ # autogenerated by pdf.py - do not edit -# to regenerate: python -m extra.assembly.amd.pdf --arch rdna4 +# to regenerate: python -m extra.assembly.amd.pdf # ruff: noqa: E501 -from extra.assembly.amd.autogen.rdna4.enum import SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp +from extra.assembly.amd.autogen.rdna4.enum import DSOp, SMEMOp, SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, VBUFFEROp, VFLATOp, VGLOBALOp, VIMAGEOp, VINTERPOp, VOP1Op, VOP2Op, VOP3Op, VOP3POp, VOP3SDOp, VOPCOp, VOPDOp, VSAMPLEOp, VSCRATCHOp + +DSOp_PCODE = { + DSOp.DS_ADD_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 
0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_STORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]', + DSOp.DS_STORE_2ADDR_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]', + DSOp.DS_STORE_2ADDR_STRIDE64_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]', + DSOp.DS_CMPSTORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp', + DSOp.DS_MIN_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n!sign(tmp.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + DSOp.DS_MAX_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\nsign(tmp.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + DSOp.DS_ADD_F32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_STORE_B8: 'MEM[ADDR].b8 = DATA[7 : 0]', + DSOp.DS_STORE_B16: 'MEM[ADDR].b16 = DATA[15 : 0]', + DSOp.DS_ADD_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_RSUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', + DSOp.DS_INC_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_DEC_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MIN_RTN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MAX_RTN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + DSOp.DS_MIN_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_MAX_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_AND_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_OR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_XOR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_MSKOR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', + DSOp.DS_STOREXCHG_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + DSOp.DS_STOREXCHG_2ADDR_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', + DSOp.DS_CMPSTORE_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp', + DSOp.DS_MIN_NUM_RTN_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n!sign(tmp.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + DSOp.DS_MAX_NUM_RTN_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\nsign(tmp.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\n}\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? 
thread_in[j] : 0;\n}\n} elsif (offset[15]) {\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n}\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n}', + DSOp.DS_LOAD_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32', + DSOp.DS_LOAD_2ADDR_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4U].b32', + DSOp.DS_LOAD_2ADDR_STRIDE64_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256U].b32', + DSOp.DS_LOAD_I8: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i8))", + DSOp.DS_LOAD_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + DSOp.DS_LOAD_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))", + DSOp.DS_LOAD_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + DSOp.DS_CONSUME: 'addr = offset; // offset by LDS HWBASE\nrtnval = LDS(addr);\nLDS(addr) = LDS(addr) - countbits(valid mask);\nGPR[VDST] = rtnval; // return to all valid threads', + DSOp.DS_APPEND: 'addr = offset; // offset by LDS HWBASE\nrtnval = LDS(addr);\nLDS(addr) = LDS(addr) + countbits(valid mask);\nGPR[VDST] = rtnval; // return to all valid threads', + DSOp.DS_ADD_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_STORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]', + DSOp.DS_STORE_2ADDR_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_STORE_2ADDR_STRIDE64_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]', + DSOp.DS_CMPSTORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp', + DSOp.DS_MIN_NUM_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 < tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && sign(src.f64) &&\n!sign(tmp.f64))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_NUM_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 > tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && !sign(src.f64) &&\nsign(tmp.f64))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', + DSOp.DS_ADD_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_SUB_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_RSUB_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', + DSOp.DS_INC_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_DEC_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? 
src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MIN_RTN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MAX_RTN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + DSOp.DS_MIN_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_MAX_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + DSOp.DS_AND_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_OR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_XOR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_MSKOR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', + DSOp.DS_STOREXCHG_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + DSOp.DS_STOREXCHG_2ADDR_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', + DSOp.DS_CMPSTORE_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp', + DSOp.DS_MIN_NUM_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 < tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && sign(src.f64) &&\n!sign(tmp.f64))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', + DSOp.DS_MAX_NUM_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 > tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && !sign(src.f64) &&\nsign(tmp.f64))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', + DSOp.DS_LOAD_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32', + DSOp.DS_LOAD_2ADDR_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8U + 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8U + 4U].b32', + DSOp.DS_LOAD_2ADDR_STRIDE64_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512U + 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512U + 4U].b32', + DSOp.DS_ADD_RTN_F32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nADDR0 = ((ADDR + offset.u32) & 0xfff8U);\nADDR1 = ADDR0 + 4U;\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", + DSOp.DS_COND_SUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 
tmp - src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_CLAMP_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", + DSOp.DS_PK_ADD_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + DSOp.DS_PK_ADD_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + DSOp.DS_STORE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]', + DSOp.DS_STORE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]', + DSOp.DS_LOAD_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// RETURN_DATA[31:16] is preserved.", + DSOp.DS_LOAD_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// RETURN_DATA[15:0] is preserved.", + DSOp.DS_LOAD_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));\n// RETURN_DATA[31:16] is preserved.", + DSOp.DS_LOAD_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));\n// RETURN_DATA[15:0] is preserved.", + DSOp.DS_LOAD_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;\n// RETURN_DATA[31:16] is preserved.', + DSOp.DS_LOAD_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;\n// RETURN_DATA[15:0] is preserved.', + DSOp.DS_COND_SUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? tmp - src : tmp;\nRETURN_DATA.u32 = tmp', + DSOp.DS_SUB_CLAMP_RTN_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", + DSOp.DS_PK_ADD_RTN_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + DSOp.DS_PK_ADD_RTN_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + DSOp.DS_STORE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32", + DSOp.DS_LOAD_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32", + DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. 
If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xF, OFFSET = 0\nVGPR[VDST] = { B, D, 0, C }\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xA, OFFSET = 0\nVGPR[VDST] = { -, D, -, 0 }", + DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xF, OFFSET = 0\nVGPR[VDST] = { A, A, D, B }\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xA, OFFSET = 0\nVGPR[VDST] = { -, 0, -, B }", + DSOp.DS_BPERMUTE_FI_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\n// Source VGPR is read even if src_lane is invalid in EXEC mask.\ntmp[i] = VGPR[src_lane][DATA0]\nendfor;\n// Copy data into destination VGPRs. 
Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", + DSOp.DS_STORE_B96: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]', + DSOp.DS_STORE_B128: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]', + DSOp.DS_BVH_STACK_PUSH4_POP1_RTN_B32: "declare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\nDATA_VALID = lambda(data) (\nif data == INVALID_NODE then\nreturn 1'0U\nelsif ((last_node_ptr != INVALID_NODE) && (data == last_node_ptr)) then\n// Match last_node_ptr\nreturn 1'0U\nelse\nreturn 1'1U\nendif);\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 3 passes: push data onto stack\nfor i in 0 : 2 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 1U\nelsif DATA1[i].b32 == last_node_ptr then\n// Treat all further data as invalid as well.\nbreak\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[127 : 96]) then\nRETURN_DATA[31 : 0] = DATA1[127 : 96]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;\nRETURN_ADDR[31 : 0] = 32'B(ENCODE_ADDR(stack_base, stack_index))", + DSOp.DS_BVH_STACK_PUSH8_POP1_RTN_B32: "declare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\nDATA_VALID = lambda(data) (\nif data == INVALID_NODE then\nreturn 1'0U\nelsif ((last_node_ptr != INVALID_NODE) && (data == last_node_ptr)) then\n// Match last_node_ptr\nreturn 1'0U\nelse\nreturn 1'1U\nendif);\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 7 passes: push data onto stack\nfor i in 0 : 6 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 1U\nelsif DATA1[i].b32 == last_node_ptr then\n// Treat all further data as invalid as well.\nbreak\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[255 : 224]) then\nRETURN_DATA[31 : 0] = DATA1[255 : 224]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;\nRETURN_ADDR[31 : 0] = 32'B(ENCODE_ADDR(stack_base, stack_index))", + DSOp.DS_BVH_STACK_PUSH8_POP2_RTN_B64: "declare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\nDATA_VALID = lambda(data) (\nif data == INVALID_NODE then\nreturn 1'0U\nelsif ((last_node_ptr != INVALID_NODE) && (data == last_node_ptr)) then\n// Match last_node_ptr\nreturn 1'0U\nelse\nreturn 1'1U\nendif);\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 7 passes: push data onto stack\nfor i in 0 : 6 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 
1U\nelsif DATA1[i].b32 == last_node_ptr then\n// Treat all further data as invalid as well.\nbreak\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[255 : 224]) then\nRETURN_DATA[31 : 0] = DATA1[255 : 224]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;\n// Attempt a second pop\nif DATA_VALID(MEM[stack_base.u32 + stack_index]) then\nRETURN_DATA[63 : 32] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;\nRETURN_ADDR[31 : 0] = 32'B(ENCODE_ADDR(stack_base, stack_index))", + DSOp.DS_LOAD_B96: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32', + DSOp.DS_LOAD_B128: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32', +} + +SMEMOp_PCODE = { + SMEMOp.S_LOAD_B32: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32', + SMEMOp.S_LOAD_B64: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', + SMEMOp.S_LOAD_B128: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', + SMEMOp.S_LOAD_B256: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32', + SMEMOp.S_LOAD_B512: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32', + SMEMOp.S_LOAD_B96: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32', + SMEMOp.S_LOAD_I8: "SDATA.i32 = 32'I(signext(MEM[ADDR].i8))", + SMEMOp.S_LOAD_U8: "SDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + SMEMOp.S_LOAD_I16: "SDATA.i32 = 32'I(signext(MEM[ADDR].i16))", + SMEMOp.S_LOAD_U16: "SDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + SMEMOp.S_BUFFER_LOAD_B32: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32', + SMEMOp.S_BUFFER_LOAD_B64: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', + SMEMOp.S_BUFFER_LOAD_B128: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = 
MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', + SMEMOp.S_BUFFER_LOAD_B256: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32', + SMEMOp.S_BUFFER_LOAD_B512: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32', + SMEMOp.S_BUFFER_LOAD_B96: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32', + SMEMOp.S_BUFFER_LOAD_I8: "SDATA.i32 = 32'I(signext(MEM[ADDR].i8))", + SMEMOp.S_BUFFER_LOAD_U8: "SDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", + SMEMOp.S_BUFFER_LOAD_I16: "SDATA.i32 = 32'I(signext(MEM[ADDR].i16))", + SMEMOp.S_BUFFER_LOAD_U16: "SDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", + SMEMOp.S_PREFETCH_INST: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[63 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nPrefetchScalarInst(mem_addr, length)\nendif", + SMEMOp.S_PREFETCH_INST_PC_REL: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(PC[63 : 0].i64 + 8LL + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S1.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nPrefetchScalarInst(mem_addr, length)\nendif", + SMEMOp.S_PREFETCH_DATA: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[63 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nPrefetchScalarData(mem_addr, length)\nendif", + SMEMOp.S_BUFFER_PREFETCH_DATA: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[47 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nPrefetchScalarData(mem_addr, length)\nendif", + SMEMOp.S_PREFETCH_DATA_PC_REL: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(PC[63 : 
0].i64 + 8LL + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S1.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nPrefetchScalarData(mem_addr, length)\nendif", +} SOP1Op_PCODE = { SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32', @@ -10,23 +163,23 @@ SOP1Op_PCODE = { SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif', SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]', - SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_CTZ_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_CLZ_I32_U64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp", - SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', - SOP1Op.S_CLS_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nendif\nendfor;\nD0.i32 = tmp', + SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CTZ_I32_B32(0xaaaaaaaa) => 1\nS_CTZ_I32_B32(0x55555555) => 0\nS_CTZ_I32_B32(0x00000000) => 0xffffffff\nS_CTZ_I32_B32(0xffffffff) => 0\nS_CTZ_I32_B32(0x00010000) => 16", + SOP1Op.S_CTZ_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CLZ_I32_U32(0x00000000) => 0xffffffff\nS_CLZ_I32_U32(0x0000cccc) => 16\nS_CLZ_I32_U32(0xffff3333) => 0\nS_CLZ_I32_U32(0x7fffffff) => 1\nS_CLZ_I32_U32(0x80000000) => 0\nS_CLZ_I32_U32(0xffffffff) => 0", + SOP1Op.S_CLZ_I32_U64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp", + SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CLS_I32(0x00000000) => 0xffffffff\nS_CLS_I32(0x0000cccc) => 16\nS_CLS_I32(0xffff3333) => 16\nS_CLS_I32(0x7fffffff) => 1\nS_CLS_I32(0x80000000) => 1\nS_CLS_I32(0xffffffff) => 0xffffffff', + SOP1Op.S_CLS_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp', SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))", SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U", SOP1Op.S_BITSET0_B64: 
"D0.u64[S0.u32[5 : 0]] = 1'0U", SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U", SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U", - SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor', - SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0', - SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor\ns_bitreplicate_b64 s2, s0\ns_bitreplicate_b64 s2, s2', + SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0\nS_ABS_I32(0x00000001) => 0x00000001\nS_ABS_I32(0x7fffffff) => 0x7fffffff\nS_ABS_I32(0x80000000) => 0x80000000 // Note this is negative!\nS_ABS_I32(0x80000001) => 0x7fffffff\nS_ABS_I32(0x80000002) => 0x7ffffffe\nS_ABS_I32(0xffffffff) => 0x00000001', + SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT0_I32_B32(0x00000000) => 32\nS_BCNT0_I32_B32(0xcccccccc) => 16\nS_BCNT0_I32_B32(0xffffffff) => 0", SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", - SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U", + SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT1_I32_B32(0x00000000) => 0\nS_BCNT1_I32_B32(0xcccccccc) => 16\nS_BCNT1_I32_B32(0xffffffff) => 32", SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 
1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL", SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U', SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL', @@ -34,46 +187,43 @@ SOP1Op_PCODE = { SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL", SOP1Op.S_NOT_B32: 'D0.u32 = ~S0.u32;\nSCC = D0.u32 != 0U', SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL', - SOP1Op.S_AND_SAVEEXEC_B32: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_OR_SAVEEXEC_B32: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_OR_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask, set\nSCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar destination\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_XOR_SAVEEXEC_B32: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_XOR_SAVEEXEC_B64: 'Calculate bitwise XOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_NAND_SAVEEXEC_B32: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_NAND_SAVEEXEC_B64: 'Calculate bitwise NAND on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_NOR_SAVEEXEC_B32: 'Calculate bitwise NOR on the scalar 
input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_NOR_SAVEEXEC_B64: 'Calculate bitwise NOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_XNOR_SAVEEXEC_B32: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_XNOR_SAVEEXEC_B64: 'Calculate bitwise XNOR on the scalar input and the EXEC mask, store the calculated result into the EXEC mask,\nset SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the scalar\nsaveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_AND_NOT0_SAVEEXEC_B32: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_NOT0_SAVEEXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_OR_NOT0_SAVEEXEC_B32: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_OR_NOT0_SAVEEXEC_B64: 'Calculate bitwise OR on the EXEC mask and the negation of the scalar input, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_AND_NOT1_SAVEEXEC_B32: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_NOT1_SAVEEXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nthe EXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_OR_NOT1_SAVEEXEC_B32: 
'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_OR_NOT1_SAVEEXEC_B64: 'Calculate bitwise OR on the scalar input and the negation of the EXEC mask, store the calculated result into the\nEXEC mask, set SCC iff the calculated result is nonzero and store the original value of the EXEC mask into the\nsaveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_AND_NOT0_WREXEC_B32: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_NOT0_WREXEC_B64: 'Calculate bitwise AND on the EXEC mask and the negation of the scalar input, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_AND_NOT1_WREXEC_B32: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. This instruction is\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', - SOP1Op.S_AND_NOT1_WREXEC_B64: 'Calculate bitwise AND on the scalar input and the negation of the EXEC mask, store the calculated result into\nUnlike the SAVEEXEC series of opcodes, the value written to destination SGPRs is the result of the bitwise-op\nresult. EXEC and the destination SGPRs have the same value at the end of this instruction. 
This instruction is\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL', - SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = SGPR[addr].b32', - SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b64 = SGPR[addr].b64', - SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b32 = S0.b32', - SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\nSGPR[addr].b64 = S0.b64', - SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + SOP1Op.S_AND_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XOR_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_XOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NAND_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_NAND_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_NOR_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_NOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_XNOR_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = ~(S0.u32 ^ EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_XNOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT0_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT0_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_NOT0_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (~S0.u32 | EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_NOT0_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT1_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_OR_NOT1_SAVEEXEC_B32: 'saveexec = EXEC.u32;\nEXEC.u32 = (S0.u32 | ~EXEC.u32);\nD0.u32 = saveexec.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_OR_NOT1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL', + SOP1Op.S_AND_NOT0_WREXEC_B32: 'EXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT0_WREXEC_B64: 'EXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 
0ULL', + SOP1Op.S_AND_NOT1_WREXEC_B32: 'EXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U', + SOP1Op.S_AND_NOT1_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL\n// V0 holds the index value per lane\n// save exec mask for restore at the end\ns_mov_b64 s2, exec\n// exec mask of remaining (unprocessed) threads\ns_mov_b64 s4, exec\nloop:\n// get the index value for the first active lane\nv_readfirstlane_b32 s0, v0\n// find all other lanes with same index value\nv_cmpx_eq s0, v0\n // do the operation using the current EXEC mask. S0 holds the index.\n// mask out thread that was just executed\n// s_andn2_b64 s4, s4, exec\n// s_mov_b64 exec, s4\ns_andn2_wrexec_b64 s4, s4 // replaces above 2 ops\n// repeat until EXEC==0\ns_cbranch_scc1 loop\ns_mov_b64 exec, s2', + SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32\ns_mov_b32 m0, 10\ns_movrels_b32 s5, s7', + SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b64 = SGPR[addr].b64', + SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32\ns_mov_b32 m0, 10\ns_movreld_b32 s5, s7', + SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b64 = S0.b64', + SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nSGPR[addrd].b32 = SGPR[addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\ns_movrelsd_2_b32 s5, s7', SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL', SOP1Op.S_SETPC_B64: 'PC = S0.i64', SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64', - SOP1Op.S_RFE_B64: 'PC = S0.i64', - SOP1Op.S_SENDMSG_RTN_B32: 'If SDST is VCC then VCCZ is undefined.', - SOP1Op.S_SENDMSG_RTN_B64: 'If SDST is VCC then VCCZ is undefined.', - SOP1Op.S_BARRIER_SIGNAL: "if !InWorkgroup() then\nelsif ((barrierNumber == -2) && !WAVE_STATUS.PRIV) then\nelsif barrierNumber == 0 then\nelse\nBARRIER_STATE[barrierNumber & 63].signalCnt += 7'1U\nendif;", - SOP1Op.S_BARRIER_SIGNAL_ISFIRST: "if !InWorkgroup() then\nSCC = 1'0U\nelsif ((barrierNumber == -2) && !WAVE_STATUS.PRIV) then\nSCC = 1'0U\nelsif barrierNumber == 0 then\nSCC = 1'0U\nelse\n// Set SCC if this is the first signaling event for this barrier.\nSCC = BARRIER_STATE[barrierNumber & 63].signalCnt.u32 == 0U;\nBARRIER_STATE[barrierNumber & 63].signalCnt += 7'1U\nendif;", - SOP1Op.S_GET_BARRIER_STATE: "D0.u32 = 32'U({ 9'0, BARRIER_STATE[barrierNumber & 63].signalCnt.u7, 5'0, BARRIER_STATE[barrierNumber &", - SOP1Op.S_ALLOC_VGPR: "n = ReallocVgprs(32'I(S0[8 : 0].u32));\nif n < 0 then\nSCC = 1'0U\nelse\nNUM_VGPRS = n;\nSCC = 1'1U\nendif", - SOP1Op.S_SLEEP_VAR: 'S0[6:0] determines the sleep duration. The wave sleeps for (64*(S0[6:0]-1) … 64*S0[6:0]) clocks. The exact', + SOP1Op.S_RFE_B64: "WAVE_STATUS.PRIV = 1'0U;\nPC = S0.i64", + SOP1Op.S_BARRIER_SIGNAL: ";\n// M0 cannot reference the negative barrier numbers.\nbarrierNumber = IsM0(SRC0.u32) ? 
32'I(M0[4 : 0].u32) : SRC0.i32;\nif !InWorkgroup() then\n// Must be in a workgroup to signal a barrier.\ns_nop(16'0U)\nelsif ((barrierNumber == -2) && !WAVE_STATUS.PRIV) then\n// Barrier #-2 is privileged (for traps only).\ns_nop(16'0U)\nelsif barrierNumber == 0 then\n// Barrier #0 is a NOP.\ns_nop(16'0U)\nelse\nBARRIER_STATE[barrierNumber & 63].signalCnt += 7'1U\nendif;\n// Check for barrier completion.\nCheckBarrierComplete(barrierNumber)", + SOP1Op.S_BARRIER_SIGNAL_ISFIRST: ";\n// M0 cannot reference the negative barrier numbers.\nbarrierNumber = IsM0(SRC0.u32) ? 32'I(M0[4 : 0].u32) : SRC0.i32;\nif !InWorkgroup() then\n// Must be in a workgroup to signal a barrier.\nSCC = 1'0U\nelsif ((barrierNumber == -2) && !WAVE_STATUS.PRIV) then\n// Barrier #-2 is privileged (for traps only).\nSCC = 1'0U\nelsif barrierNumber == 0 then\n// Barrier #0 is a NOP.\nSCC = 1'0U\nelse\n// Set SCC if this is the first signaling event for this barrier.\nSCC = BARRIER_STATE[barrierNumber & 63].signalCnt.u32 == 0U;\nBARRIER_STATE[barrierNumber & 63].signalCnt += 7'1U\nendif;\nCheckBarrierComplete(barrierNumber)", + SOP1Op.S_GET_BARRIER_STATE: "barrierNumber = IsM0(SRC0.u32) ? 32'I(M0[4 : 0].u32) : SRC0.i32;\nD0.u32 = 32'U({ 9'0, BARRIER_STATE[barrierNumber & 63].signalCnt.u7, 5'0, BARRIER_STATE[barrierNumber &\n63].memberCnt.u7, 3'0, BARRIER_STATE[barrierNumber & 63].valid.u1 })", + SOP1Op.S_ALLOC_VGPR: "WaitIdleExceptStoreCnt();\nn = ReallocVgprs(32'I(S0[8 : 0].u32));\n// ReallocVgprs returns the actual number of VGPRs allocated rounded to segment size.\n// ReallocVgprs returns a negative value if reallocation fails.\nif n < 0 then\nSCC = 1'0U\nelse\nNUM_VGPRS = n;\nSCC = 1'1U\nendif", SOP1Op.S_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', SOP1Op.S_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', SOP1Op.S_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', @@ -92,23 +242,23 @@ SOP1Op_PCODE = { } SOP2Op_PCODE = { - SOP2Op.S_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_ADD_CO_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', - SOP2Op.S_SUB_CO_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\nD0.i32 = tmp.i32', - SOP2Op.S_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0', + SOP2Op.S_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 
1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_ADD_CO_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32', + SOP2Op.S_SUB_CO_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32', + SOP2Op.S_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADD_CO_CI_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUB_CO_CI_U32.\nD0.u32 = tmp.u32", + SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0\nS_ABSDIFF_I32(0x00000002, 0x00000005) => 0x00000003\nS_ABSDIFF_I32(0xffffffff, 0x00000000) => 0x00000001\nS_ABSDIFF_I32(0x80000000, 0x00000000) => 0x80000000 // Note: result is negative!\nS_ABSDIFF_I32(0x80000000, 0x00000001) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xffffffff) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xfffffffe) => 0x7ffffffe', SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U', SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U', SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL', SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0", SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL', - SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", - SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", + SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32", SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32', SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32', SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? 
S0.i32 : S1.i32', @@ -152,7 +302,7 @@ SOP2Op_PCODE = { SOP2Op.S_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', SOP2Op.S_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', SOP2Op.S_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', - SOP2Op.S_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + SOP2Op.S_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_TOWARD_ZERO;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode;\n// Round-toward-zero regardless of current round mode setting in hardware.', SOP2Op.S_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', SOP2Op.S_SUB_F16: 'D0.f16 = S0.f16 - S1.f16', SOP2Op.S_MIN_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", @@ -188,92 +338,309 @@ SOPCOp_PCODE = { SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64', SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64', SOPCOp.S_CMP_LT_F32: 'SCC = S0.f32 < S1.f32', - SOPCOp.S_CMP_LT_F16: 'SCC = S0.f16 < S1.f16', SOPCOp.S_CMP_EQ_F32: 'SCC = S0.f32 == S1.f32', - SOPCOp.S_CMP_EQ_F16: 'SCC = S0.f16 == S1.f16', SOPCOp.S_CMP_LE_F32: 'SCC = S0.f32 <= S1.f32', - SOPCOp.S_CMP_LE_F16: 'SCC = S0.f16 <= S1.f16', SOPCOp.S_CMP_GT_F32: 'SCC = S0.f32 > S1.f32', - SOPCOp.S_CMP_GT_F16: 'SCC = S0.f16 > S1.f16', SOPCOp.S_CMP_LG_F32: 'SCC = S0.f32 <> S1.f32', - SOPCOp.S_CMP_LG_F16: 'SCC = S0.f16 <> S1.f16', SOPCOp.S_CMP_GE_F32: 'SCC = S0.f32 >= S1.f32', - SOPCOp.S_CMP_GE_F16: 'SCC = S0.f16 >= S1.f16', SOPCOp.S_CMP_O_F32: "SCC = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)))", - SOPCOp.S_CMP_O_F16: "SCC = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", SOPCOp.S_CMP_U_F32: "SCC = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)))", - SOPCOp.S_CMP_U_F16: "SCC = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", SOPCOp.S_CMP_NGE_F32: 'SCC = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <', - SOPCOp.S_CMP_NGE_F16: 'SCC = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', SOPCOp.S_CMP_NLG_F32: 'SCC = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==', - SOPCOp.S_CMP_NLG_F16: 'SCC = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', SOPCOp.S_CMP_NGT_F32: 'SCC = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=', - SOPCOp.S_CMP_NGT_F16: 'SCC = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', SOPCOp.S_CMP_NLE_F32: 'SCC = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >', - SOPCOp.S_CMP_NLE_F16: 'SCC = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', SOPCOp.S_CMP_NEQ_F32: 'SCC = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', - SOPCOp.S_CMP_NEQ_F16: 'SCC = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', SOPCOp.S_CMP_NLT_F32: 'SCC = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', + SOPCOp.S_CMP_LT_F16: 'SCC = S0.f16 < S1.f16', + 
SOPCOp.S_CMP_EQ_F16: 'SCC = S0.f16 == S1.f16', + SOPCOp.S_CMP_LE_F16: 'SCC = S0.f16 <= S1.f16', + SOPCOp.S_CMP_GT_F16: 'SCC = S0.f16 > S1.f16', + SOPCOp.S_CMP_LG_F16: 'SCC = S0.f16 <> S1.f16', + SOPCOp.S_CMP_GE_F16: 'SCC = S0.f16 >= S1.f16', + SOPCOp.S_CMP_O_F16: "SCC = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)))", + SOPCOp.S_CMP_U_F16: "SCC = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)))", + SOPCOp.S_CMP_NGE_F16: 'SCC = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <', + SOPCOp.S_CMP_NLG_F16: 'SCC = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==', + SOPCOp.S_CMP_NGT_F16: 'SCC = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=', + SOPCOp.S_CMP_NLE_F16: 'SCC = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >', + SOPCOp.S_CMP_NEQ_F16: 'SCC = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', SOPCOp.S_CMP_NLT_F16: 'SCC = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', } SOPKOp_PCODE = { SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(S0.i16))", - SOPKOp.S_VERSION: '// Do nothing - for use by tools only', + SOPKOp.S_VERSION: 'nop();\n// Do nothing - for use by tools only', SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(S0.i16))\nendif", - SOPKOp.S_ADDK_CO_I32: "tmp = D0.i32;\nD0.i32 = D0.i32 + 32'I(signext(S0.i16));\nSCC = ((tmp[31] == S0.i16[15]) && (tmp[31] != D0.i32[31]));", + SOPKOp.S_ADDK_CO_I32: "tmp = D0.i32;\n// Save value to check sign bits for overflow later.\nD0.i32 = D0.i32 + 32'I(signext(S0.i16));\nSCC = ((tmp[31] == S0.i16[15]) && (tmp[31] != D0.i32[31]));\n// signed overflow.", SOPKOp.S_MULK_I32: "D0.i32 = D0.i32 * 32'I(signext(S0.i16))", - SOPKOp.S_GETREG_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", - SOPKOp.S_SETREG_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", - SOPKOp.S_SETREG_IMM32_B32: "OFFSET = SIMM16[10:6]\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_GETREG_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))", + SOPKOp.S_SETREG_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((S0.u32 << offset.u32) & 
mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\nHW_REGISTERS[hwRegId] = value.b32;\n// Side-effects may trigger here if certain bits are modified", + SOPKOp.S_SETREG_IMM32_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\nHW_REGISTERS[hwRegId] = value.b32;\n// Side-effects may trigger here if certain bits are modified", SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL", } SOPPOp_PCODE = { - SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nendfor', - SOPPOp.S_SETHALT: 'When halt type control is set to 1 (FATAL HALT bit select): Set FATAL_HALT bit to value of SIMM16[0]; 1 =\nfatal_halt, 0 = clear FATAL_HALT bit. Setting the fatal_halt flag halts the shader in or outside of the trap', - SOPPOp.S_DELAY_ALU: 'instruction may be omitted. For wave64 the compiler may not know the status of the EXEC mask and hence\n// 1 cycle delay here\n// 2 cycles delay here', - SOPPOp.S_TRAP: '// PC passed into trap handler points to S_TRAP itself,\nPC = TBA.i64;\n// trap base address', - SOPPOp.S_BARRIER_WAIT: '// barrierBit 0: reserved\n// barrierBit 1: workgroup\n// barrierBit 2: trap\n// Implemented as a power-saving idle', - SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;", + SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor\ns_nop 0 // Wait 1 cycle.\ns_nop 0xf // Wait 16 cycles.', + SOPPOp.S_SLEEP: 's_sleep { duration: 0 } // Wait for 0 clocks.\ns_sleep { duration: 1 } // Wait for 1-64 clocks.\ns_sleep { duration: 2 } // Wait for 65-128 clocks.\ns_sleep { sleep_forever: 1 } // Wait until an event occurs.', + SOPPOp.S_DELAY_ALU: 'v_mov_b32 v3, v0\nv_lshlrev_b32 v30, 1, v31\nv_lshlrev_b32 v24, 1, v25\ns_delay_alu { instid0: INSTID_VALU_DEP_3, instskip: INSTSKIP_SKIP_1, instid1: INSTID_VALU_DEP_1 }\n// 1 cycle delay here\nv_add_f32 v0, v1, v3\nv_sub_f32 v11, v9, v9\n// 2 cycles delay here\nv_mul_f32 v10, v13, v11', + SOPPOp.S_TRAP: 'TrapID = SIMM16.u16[3 : 0].u8;\n"Wait for all instructions to complete";\n// PC passed into trap handler points to S_TRAP itself,\n// *not* to the next instruction.\n{ TTMP[1], TTMP[0] } = { TrapID[3 : 0], 12\'0, PC[47 : 0] };\nPC = TBA.i64;\n// trap base address\nWAVE_STATUS.PRIV = 1\'1U', + SOPPOp.S_BARRIER_WAIT: ";\n// barrierBit 0: reserved\n// barrierBit 1: workgroup\n// barrierBit 2: trap\nbarrierBit = SIMM16.i32 >= 0 ? 0 : SIMM16.i32 == -1 ? 
1 : 2;\nwhile !WAVE_BARRIER_COMPLETE[barrierBit] do\n// Implemented as a power-saving idle\ns_nop(16'0U)\nendwhile;\nWAVE_BARRIER_COMPLETE[barrierBit] = 1'0U", + SOPPOp.S_CODE_END: '...\ns_endpgm // last real instruction in shader buffer\ns_code_end // 1\ns_code_end // 2\ns_code_end // 3\ns_code_end // 4\ns_code_end // done!', + SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.\ns_branch label // Set SIMM16 = +4 = 0x0004\ns_nop 0 // 4 bytes\nlabel:\ns_nop 0 // 4 bytes\ns_branch label // Set SIMM16 = -8 = 0xfff8", SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_VCCZ: "If VCCZ is 1 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", - SOPPOp.S_CBRANCH_VCCNZ: "If VCCZ is 0 then jump to a constant offset relative to the current PC.\nif VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCZ: "if VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_CBRANCH_VCCNZ: "if VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif", + SOPPOp.S_SETPRIO: 'SysUserPrio = MIN(3, SysPrio[1:0] + UserPrio[1:0]). Priority = {SysUserPrio[1:0], WaveAge[3:0]}', } -SMEMOp_PCODE = { - SMEMOp.S_LOAD_B32: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32', - SMEMOp.S_LOAD_B64: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', - SMEMOp.S_LOAD_B128: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', - SMEMOp.S_LOAD_B256: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32', - SMEMOp.S_LOAD_B512: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32', - SMEMOp.S_LOAD_B96: 'addr = CalcGlobalAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr 
+ 8U].b32', - SMEMOp.S_LOAD_I8: "SDATA.i32 = 32'I(signext(MEM[ADDR].i8))", - SMEMOp.S_LOAD_U8: "SDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", - SMEMOp.S_LOAD_I16: "SDATA.i32 = 32'I(signext(MEM[ADDR].i16))", - SMEMOp.S_LOAD_U16: "SDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", - SMEMOp.S_BUFFER_LOAD_B32: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32', - SMEMOp.S_BUFFER_LOAD_B64: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32', - SMEMOp.S_BUFFER_LOAD_B128: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32', - SMEMOp.S_BUFFER_LOAD_B256: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32', - SMEMOp.S_BUFFER_LOAD_B512: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32', - SMEMOp.S_BUFFER_LOAD_B96: 'addr = CalcBufferAddr(sgpr_base.b64, offset.b64);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32', - SMEMOp.S_BUFFER_LOAD_I8: "SDATA.i32 = 32'I(signext(MEM[ADDR].i8))", - SMEMOp.S_BUFFER_LOAD_U8: "SDATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", - SMEMOp.S_BUFFER_LOAD_I16: "SDATA.i32 = 32'I(signext(MEM[ADDR].i16))", - SMEMOp.S_BUFFER_LOAD_U16: "SDATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", - SMEMOp.S_PREFETCH_INST: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[63 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", - SMEMOp.S_PREFETCH_INST_PC_REL: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(PC[63 : 0].i64 + 8LL + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S1.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", - SMEMOp.S_PREFETCH_DATA: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[63 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", - 
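The S_LOAD_Bxx pcode in this block is just consecutive little-endian dword reads into SDATA. A minimal emulation sketch of that addressing pattern in Python, assuming a flat `mem` bytes buffer and a simplified `calc_global_addr` standing in for CalcGlobalAddr (both names hypothetical, not from the autogen output):

import struct

def calc_global_addr(sgpr_base: int, offset: int) -> int:
    # Simplified stand-in for CalcGlobalAddr: base + offset with 64-bit wrap,
    # without the aperture/segment checks real hardware performs.
    return (sgpr_base + offset) & 0xFFFFFFFFFFFFFFFF

def s_load_b64(mem: bytes, sgpr_base: int, offset: int) -> int:
    addr = calc_global_addr(sgpr_base, offset)
    lo = struct.unpack_from("<I", mem, addr)[0]      # SDATA[31:0]  = MEM[addr].b32
    hi = struct.unpack_from("<I", mem, addr + 4)[0]  # SDATA[63:32] = MEM[addr+4].b32
    return (hi << 32) | lo

assert s_load_b64(struct.pack("<II", 0x11223344, 0x55667788), 0, 0) == 0x5566778811223344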
SMEMOp.S_BUFFER_PREFETCH_DATA: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(S0[47 : 0].i64 + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S2.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", - SMEMOp.S_PREFETCH_DATA_PC_REL: "if MODE.SCALAR_PREFETCH_EN.u1 then\nmem_addr = (64'U(PC[63 : 0].i64 + 8LL + 64'I(IOFFSET.i24)) & 0xffffffffffffff80ULL);\n// Force 128B alignment\nlength = S1.u32;\n// SGPR or M0\nlength += SDATA.u32;\n// SDATA is an immediate\nlength = (length & 31U);\n// Length restricted to 0..31\nlength = (length + 1U) * 128U;\n// Prefetch 1-32 cachelines, units of 128B\nendif", +VBUFFEROp_PCODE = { + VBUFFEROp.BUFFER_LOAD_FORMAT_X: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format', + VBUFFEROp.BUFFER_LOAD_FORMAT_XY: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])', + VBUFFEROp.BUFFER_LOAD_FORMAT_XYZ: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])', + VBUFFEROp.BUFFER_LOAD_FORMAT_XYZW: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])', + VBUFFEROp.BUFFER_STORE_FORMAT_X: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format', + VBUFFEROp.BUFFER_STORE_FORMAT_XY: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)', + VBUFFEROp.BUFFER_STORE_FORMAT_XYZ: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)', + VBUFFEROp.BUFFER_STORE_FORMAT_XYZW: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)', + VBUFFEROp.BUFFER_LOAD_D16_FORMAT_X: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[31:16].b16 is preserved.", + VBUFFEROp.BUFFER_LOAD_D16_FORMAT_XY: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 
16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))", + VBUFFEROp.BUFFER_LOAD_D16_FORMAT_XYZ: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\n// VDATA[63:48].b16 is preserved.", + VBUFFEROp.BUFFER_LOAD_D16_FORMAT_XYZW: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))", + VBUFFEROp.BUFFER_STORE_D16_FORMAT_X: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format", + VBUFFEROp.BUFFER_STORE_D16_FORMAT_XY: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))", + VBUFFEROp.BUFFER_STORE_D16_FORMAT_XYZ: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))", + VBUFFEROp.BUFFER_STORE_D16_FORMAT_XYZW: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))", + VBUFFEROp.BUFFER_LOAD_U8: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + VBUFFEROp.BUFFER_LOAD_I8: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + VBUFFEROp.BUFFER_LOAD_U16: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + VBUFFEROp.BUFFER_LOAD_I16: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + VBUFFEROp.BUFFER_LOAD_B32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32', + VBUFFEROp.BUFFER_LOAD_B64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + VBUFFEROp.BUFFER_LOAD_B96: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + VBUFFEROp.BUFFER_LOAD_B128: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + VBUFFEROp.BUFFER_STORE_B8: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b8 = VDATA[7 : 0]', + VBUFFEROp.BUFFER_STORE_B16: 'addr 
= CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b16 = VDATA[15 : 0]', + VBUFFEROp.BUFFER_STORE_B32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0]', + VBUFFEROp.BUFFER_STORE_B64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + VBUFFEROp.BUFFER_STORE_B96: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + VBUFFEROp.BUFFER_STORE_B128: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + VBUFFEROp.BUFFER_LOAD_D16_U8: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.", + VBUFFEROp.BUFFER_LOAD_D16_I8: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.", + VBUFFEROp.BUFFER_LOAD_D16_B16: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.', + VBUFFEROp.BUFFER_LOAD_D16_HI_U8: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.", + VBUFFEROp.BUFFER_LOAD_D16_HI_I8: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.", + VBUFFEROp.BUFFER_LOAD_D16_HI_B16: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.', + VBUFFEROp.BUFFER_STORE_D16_HI_B8: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b8 = VDATA[23 : 16]', + VBUFFEROp.BUFFER_STORE_D16_HI_B16: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b16 = VDATA[31 : 16]', + VBUFFEROp.BUFFER_LOAD_D16_HI_FORMAT_X: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[15:0].b16 is preserved.", + VBUFFEROp.BUFFER_STORE_D16_HI_FORMAT_X: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\n// Mem access size depends on format", + VBUFFEROp.BUFFER_ATOMIC_SWAP_B32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_CMPSWAP_B32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_ADD_U32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_SUB_U32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_SUB_CLAMP_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", + VBUFFEROp.BUFFER_ATOMIC_MIN_I32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? 
src : tmp;\nRETURN_DATA.i32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_MIN_U32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_MAX_I32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_MAX_U32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_AND_B32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_OR_B32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_XOR_B32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_INC_U32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_DEC_U32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_SWAP_B64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_CMPSWAP_B64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_ADD_U64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_SUB_U64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_MIN_I64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_MIN_U64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_MAX_I64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_MAX_U64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_AND_B64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_OR_B64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_XOR_B64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_INC_U64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_DEC_U64: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + VBUFFEROp.BUFFER_ATOMIC_COND_SUB_U32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? tmp - src : tmp;\nRETURN_DATA.u32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_MIN_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n!sign(tmp.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + VBUFFEROp.BUFFER_ATOMIC_MAX_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\nsign(tmp.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + VBUFFEROp.BUFFER_ATOMIC_ADD_F32: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + VBUFFEROp.BUFFER_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + VBUFFEROp.BUFFER_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + VBUFFEROp.TBUFFER_LOAD_FORMAT_X: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format', + VBUFFEROp.TBUFFER_LOAD_FORMAT_XY: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])', + VBUFFEROp.TBUFFER_LOAD_FORMAT_XYZ: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on 
format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])', + VBUFFEROp.TBUFFER_LOAD_FORMAT_XYZW: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])', + VBUFFEROp.TBUFFER_STORE_FORMAT_X: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format', + VBUFFEROp.TBUFFER_STORE_FORMAT_XY: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)', + VBUFFEROp.TBUFFER_STORE_FORMAT_XYZ: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)', + VBUFFEROp.TBUFFER_STORE_FORMAT_XYZW: 'addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)', + VBUFFEROp.TBUFFER_LOAD_D16_FORMAT_X: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[31:16].b16 is preserved.", + VBUFFEROp.TBUFFER_LOAD_D16_FORMAT_XY: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))", + VBUFFEROp.TBUFFER_LOAD_D16_FORMAT_XYZ: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\n// VDATA[63:48].b16 is preserved.", + VBUFFEROp.TBUFFER_LOAD_D16_FORMAT_XYZW: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))", + VBUFFEROp.TBUFFER_STORE_D16_FORMAT_X: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format", + VBUFFEROp.TBUFFER_STORE_D16_FORMAT_XY: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = 
ConvertToFormat(32'B(VDATA[31 : 16].b16))", + VBUFFEROp.TBUFFER_STORE_D16_FORMAT_XYZ: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))", + VBUFFEROp.TBUFFER_STORE_D16_FORMAT_XYZW: "addr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))", +} + +VFLATOp_PCODE = { + VFLATOp.FLAT_LOAD_U8: "addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + VFLATOp.FLAT_LOAD_I8: "addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + VFLATOp.FLAT_LOAD_U16: "addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + VFLATOp.FLAT_LOAD_I16: "addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + VFLATOp.FLAT_LOAD_B32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[31 : 0] = MEM[addr].b32', + VFLATOp.FLAT_LOAD_B64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + VFLATOp.FLAT_LOAD_B96: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + VFLATOp.FLAT_LOAD_B128: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + VFLATOp.FLAT_STORE_B8: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nMEM[addr].b8 = VDATA[7 : 0]', + VFLATOp.FLAT_STORE_B16: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nMEM[addr].b16 = VDATA[15 : 0]', + VFLATOp.FLAT_STORE_B32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nMEM[addr].b32 = VDATA[31 : 0]', + VFLATOp.FLAT_STORE_B64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + VFLATOp.FLAT_STORE_B96: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + VFLATOp.FLAT_STORE_B128: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + VFLATOp.FLAT_LOAD_D16_U8: "addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.", + VFLATOp.FLAT_LOAD_D16_I8: "addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.", + VFLATOp.FLAT_LOAD_D16_B16: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.', + VFLATOp.FLAT_LOAD_D16_HI_U8: "addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.", + 
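The D16 load pcode above writes one 16-bit half of a 32-bit VGPR and preserves the other half. A minimal sketch of that merge, assuming `vgpr` holds the register's previous 32-bit value and `val16` the 16 bits produced by the load (names hypothetical):

def d16_merge(vgpr: int, val16: int, hi: bool) -> int:
    if hi:
        # FLAT_LOAD_D16_HI_*: VDATA[31:16] = val16, VDATA[15:0] is preserved.
        return ((val16 & 0xFFFF) << 16) | (vgpr & 0x0000FFFF)
    # FLAT_LOAD_D16_*: VDATA[15:0] = val16, VDATA[31:16] is preserved.
    return (vgpr & 0xFFFF0000) | (val16 & 0xFFFF)

assert d16_merge(0xAAAABBBB, 0x1234, hi=True) == 0x1234BBBB
assert d16_merge(0xAAAABBBB, 0x1234, hi=False) == 0xAAAA1234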
VFLATOp.FLAT_LOAD_D16_HI_I8: "addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.", + VFLATOp.FLAT_LOAD_D16_HI_B16: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.', + VFLATOp.FLAT_STORE_D16_HI_B8: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nMEM[addr].b8 = VDATA[23 : 16]', + VFLATOp.FLAT_STORE_D16_HI_B16: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\nMEM[addr].b16 = VDATA[31 : 16]', + VFLATOp.FLAT_ATOMIC_SWAP_B32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + VFLATOp.FLAT_ATOMIC_CMPSWAP_B32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VFLATOp.FLAT_ATOMIC_ADD_U32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + VFLATOp.FLAT_ATOMIC_SUB_U32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + VFLATOp.FLAT_ATOMIC_SUB_CLAMP_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", + VFLATOp.FLAT_ATOMIC_MIN_I32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + VFLATOp.FLAT_ATOMIC_MIN_U32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VFLATOp.FLAT_ATOMIC_MAX_I32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + VFLATOp.FLAT_ATOMIC_MAX_U32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VFLATOp.FLAT_ATOMIC_AND_B32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + VFLATOp.FLAT_ATOMIC_OR_B32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + VFLATOp.FLAT_ATOMIC_XOR_B32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + VFLATOp.FLAT_ATOMIC_INC_U32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + VFLATOp.FLAT_ATOMIC_DEC_U32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + VFLATOp.FLAT_ATOMIC_SWAP_B64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + VFLATOp.FLAT_ATOMIC_CMPSWAP_B64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', + VFLATOp.FLAT_ATOMIC_ADD_U64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + VFLATOp.FLAT_ATOMIC_SUB_U64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + VFLATOp.FLAT_ATOMIC_MIN_I64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + VFLATOp.FLAT_ATOMIC_MIN_U64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + VFLATOp.FLAT_ATOMIC_MAX_I64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + VFLATOp.FLAT_ATOMIC_MAX_U64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + VFLATOp.FLAT_ATOMIC_AND_B64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + VFLATOp.FLAT_ATOMIC_OR_B64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + VFLATOp.FLAT_ATOMIC_XOR_B64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + VFLATOp.FLAT_ATOMIC_INC_U64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + VFLATOp.FLAT_ATOMIC_DEC_U64: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + VFLATOp.FLAT_ATOMIC_COND_SUB_U32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 
tmp - src : tmp;\nRETURN_DATA.u32 = tmp', + VFLATOp.FLAT_ATOMIC_MIN_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n!sign(tmp.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + VFLATOp.FLAT_ATOMIC_MAX_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\nsign(tmp.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + VFLATOp.FLAT_ATOMIC_ADD_F32: 'addr = CalcFlatAddr(v_addr.b64, flat_scratch.b64);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + VFLATOp.FLAT_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + VFLATOp.FLAT_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', +} + +VGLOBALOp_PCODE = { + VGLOBALOp.GLOBAL_LOAD_U8: "addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + VGLOBALOp.GLOBAL_LOAD_I8: "addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + VGLOBALOp.GLOBAL_LOAD_U16: "addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + VGLOBALOp.GLOBAL_LOAD_I16: "addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + VGLOBALOp.GLOBAL_LOAD_B32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[31 : 0] = MEM[addr].b32', + VGLOBALOp.GLOBAL_LOAD_B64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + VGLOBALOp.GLOBAL_LOAD_B96: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + VGLOBALOp.GLOBAL_LOAD_B128: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + VGLOBALOp.GLOBAL_STORE_B8: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nMEM[addr].b8 = VDATA[7 : 0]', + VGLOBALOp.GLOBAL_STORE_B16: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nMEM[addr].b16 = VDATA[15 : 0]', + VGLOBALOp.GLOBAL_STORE_B32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nMEM[addr].b32 = VDATA[31 : 0]', + VGLOBALOp.GLOBAL_STORE_B64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + VGLOBALOp.GLOBAL_STORE_B96: 'addr = CalcGlobalAddr(v_addr.b64, 
s_saddr.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + VGLOBALOp.GLOBAL_STORE_B128: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + VGLOBALOp.GLOBAL_LOAD_D16_U8: "addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.", + VGLOBALOp.GLOBAL_LOAD_D16_I8: "addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.", + VGLOBALOp.GLOBAL_LOAD_D16_B16: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.', + VGLOBALOp.GLOBAL_LOAD_D16_HI_U8: "addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.", + VGLOBALOp.GLOBAL_LOAD_D16_HI_I8: "addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.", + VGLOBALOp.GLOBAL_LOAD_D16_HI_B16: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.', + VGLOBALOp.GLOBAL_STORE_D16_HI_B8: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nMEM[addr].b8 = VDATA[23 : 16]', + VGLOBALOp.GLOBAL_STORE_D16_HI_B16: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\nMEM[addr].b16 = VDATA[31 : 16]', + VGLOBALOp.GLOBAL_LOAD_ADDTID_B32: "RETURN_DATA.u32 = MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32", + VGLOBALOp.GLOBAL_STORE_ADDTID_B32: "MEM[SGPR_ADDR[63 : 0] + INST_OFFSET[11 : 0].b64 + 64'B(laneID.i32 * 4)].u32 = DATA.u32", + VGLOBALOp.GLOBAL_ATOMIC_SWAP_B32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_CMPSWAP_B32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_ADD_U32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_SUB_U32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_SUB_CLAMP_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", + VGLOBALOp.GLOBAL_ATOMIC_MIN_I32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_MIN_U32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_MAX_I32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_MAX_U32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_AND_B32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_OR_B32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_XOR_B32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_INC_U32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_DEC_U32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_SWAP_B64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_CMPSWAP_B64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_ADD_U64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_SUB_U64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_MIN_I64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_MIN_U64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_MAX_I64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_MAX_U64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_AND_B64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_OR_B64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_XOR_B64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_INC_U64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_DEC_U64: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_COND_SUB_U32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 
tmp - src : tmp;\nRETURN_DATA.u32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_MIN_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n!sign(tmp.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + VGLOBALOp.GLOBAL_ATOMIC_MAX_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\nsign(tmp.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + VGLOBALOp.GLOBAL_LOAD_BLOCK: 'for i in 0 : 31 do\n// Algorithm can skip over "holes" both in the memory block and VGPR block.\nif M0[i].u1 then\nVGPR[laneId][VDST.u32 + i.u32] = MEM[VGPR[laneId][ADDR].i32 + i * 4].b32\nendif\nendfor', + VGLOBALOp.GLOBAL_STORE_BLOCK: 'for i in 0 : 31 do\n// Algorithm can skip over "holes" both in the memory block and VGPR block.\nif M0[i].u1 then\nMEM[VGPR[laneId][ADDR].i32 + i * 4] = VGPR[laneId][VDATA.u32 + i.u32].b32\nendif\nendfor', + VGLOBALOp.GLOBAL_ATOMIC_ADD_F32: 'addr = CalcGlobalAddr(v_addr.b64, s_saddr.b64);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + VGLOBALOp.GLOBAL_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + VGLOBALOp.GLOBAL_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + VGLOBALOp.GLOBAL_ATOMIC_ORDERED_ADD_B64: "shader_id = DATA[31 : 0];\nshader_offset = DATA[63 : 32];\nold_mem_id = MEM[ADDR].b32;\nold_mem_value = MEM[ADDR + 4U].b32;\nif old_mem_id == shader_id then\nMEM[ADDR].b32 = 32'B((old_mem_id.i32 + 1) & VGT_GS_MAX_WAVE_ID.i32);\n// increment the ordered ID in memory\nMEM[ADDR + 4U].b32 = old_mem_value + shader_offset;\n// add the streamout offset\nendif;\nRETURN_DATA[31 : 0].b32 = old_mem_id;\nRETURN_DATA[63 : 32].b32 = old_mem_value", +} + +VIMAGEOp_PCODE = { + VIMAGEOp.IMAGE_ATOMIC_SWAP: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_CMPSWAP: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_ADD_UINT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_SUB_UINT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_MIN_INT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_MIN_UINT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_MAX_INT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_MAX_UINT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_AND: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_OR: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_XOR: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_INC_UINT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_DEC_UINT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? 
src : tmp - 1U;\nRETURN_DATA.u32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_ADD_FLT: 'addr = CalcImageAddr(v_addr.b128);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', + VIMAGEOp.IMAGE_ATOMIC_MIN_FLT: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n!sign(tmp.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + VIMAGEOp.IMAGE_ATOMIC_MAX_FLT: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\nsign(tmp.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", + VIMAGEOp.IMAGE_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', + VIMAGEOp.IMAGE_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', +} + +VINTERPOp_PCODE = { + VINTERPOp.V_INTERP_P10_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f32, S1.f32, VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f32)\ns_mov_b32 m0, s0 // assume s0 contains newprim mask\nlds_param_load v0, attr0 // v0 is a temporary register\nv_interp_p10_f32 v3, v0, v1, v0 // v1 contains i coordinate\nv_interp_p2_f32 v3, v0, v2, v3 // v2 contains j coordinate', + VINTERPOp.V_INTERP_P2_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f32, S1.f32, S2.f32)', + VINTERPOp.V_INTERP_P10_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f16), S1.f32, 32'F(VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f16))", + VINTERPOp.V_INTERP_P2_F16_F32: "D0.f16 = 16'F(fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f16), S1.f32, S2.f32))", + VINTERPOp.V_INTERP_P10_RTZ_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f16), S1.f32, 32'F(VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f16))", + VINTERPOp.V_INTERP_P2_RTZ_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f16), S1.f32, S2.f32)", } VOP1Op_PCODE = { - VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32', + VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1', VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active 
lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', @@ -285,7 +652,7 @@ VOP1Op_PCODE = { VOP1Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', VOP1Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', VOP1Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', - VOP1Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP1Op.V_CVT_OFF_F32_I4: "declare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", VOP1Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', VOP1Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', VOP1Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', @@ -304,63 +671,63 @@ VOP1Op_PCODE = { VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', - VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', - VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', - VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', + VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF', + VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF', + VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0', VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', - VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', + VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0', VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', - VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', + VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF', VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', - VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", - VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", + VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = 
NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN", + VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN", VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32', VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', - VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', - VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', - VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', + VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor", + VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor", + VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor', + VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()', + VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()', VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', - VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", - VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", - VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\nVGPR[laneId][addr].b32 = S0.b32', - VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = VGPR[laneId][addr].b32', - VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', - VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', + VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()", + VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = 
mantissa(S0.f32)\nendif\nfrexp()", + VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32\ns_mov_b32 m0, 10\nv_movreld_b32 v5, v7', + VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32\ns_mov_b32 m0, 10\nv_movrels_b32 v5, v7', + VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, 10\nv_movrelsd_b32 v5, v7', + VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\nv_movrelsd_2_b32 v5, v7', VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', - VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", - VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', - VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", - VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', - VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", - VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", - VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", + VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0", + VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF', + VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0", + VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF', + VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF", + VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()", + VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()", 
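The V_CLZ_I32_U32 / V_CTZ_I32_B32 / V_CLS_I32 pseudocode above now breaks out of the scan at the first hit; without the break, D0 keeps getting overwritten and the loop reports the last match (the LSB for CLZ) instead of the first. A direct Python rendering of the corrected V_CLZ loop, illustrative only:

def v_clz_i32_u32(s0: int) -> int:
  # -1 if no bits are set, else the offset of the first 1 counting down from the MSB
  d0 = -1
  for i in range(32):  # search from MSB
    if (s0 >> (31 - i)) & 1:
      d0 = i
      break  # without this break the loop would report the LSB position instead
  return d0

assert v_clz_i32_u32(0) == -1
assert v_clz_i32_u32(1 << 31) == 0
assert v_clz_i32_u32(1) == 31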
VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', - VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", - VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", - VOP1Op.V_SAT_PK_U8_I16: "tmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", + VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN", + VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN", + VOP1Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n <= 16'0 then\nreturn 8'0U\nelsif n >= 16'255 then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\ntmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp', VOP1Op.V_SWAP_B16: 'tmp = D0.b16;\nD0.b16 = S0.b16;\nS0.b16 = tmp', - VOP1Op.V_PERMLANE64_B32: "declare tmp : 32'B[64];\ndeclare lane : 32'U;\nif WAVE32 then\n// Supported in wave64 ONLY; treated as scalar NOP in wave32\nelse\nfor lane in 0U : 63U do\n// Copy original S0 in case D==S0\ntmp[lane] = VGPR[lane][SRC0.u32]\nendfor;\nfor lane in 0U : 63U do\naltlane = { ~lane[5], lane[4 : 0] };\n// 0<->32, ..., 31<->63\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altlane]\nendif\nendfor\nendif", - VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\ntmp = VGPR[laneId][addrd].b32;', + VOP1Op.V_PERMLANE64_B32: "declare tmp : 32'B[64];\ndeclare lane : 32'U;\nif WAVE32 then\n// Supported in wave64 ONLY; treated as scalar NOP in wave32\ns_nop(16'0U)\nelse\nfor lane in 0U : 63U do\n// Copy original S0 in case D==S0\ntmp[lane] = VGPR[lane][SRC0.u32]\nendfor;\nfor lane in 0U : 63U do\naltlane = { ~lane[5], lane[4 : 0] };\n// 0<->32, ..., 31<->63\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altlane]\nendif\nendfor\nendif", + VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\ntmp = VGPR[laneId][addrd].b32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32;\nVGPR[laneId][addrs].b32 = tmp\ns_mov_b32 m0, ((20 << 16) | 10)\nv_swaprel_b32 v5, v7', VOP1Op.V_NOT_B16: 'D0.u16 = ~S0.u16', VOP1Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", VOP1Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", 
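In the V_PERMLANE64_B32 entry above, the altlane = { ~lane[5], lane[4 : 0] } concatenation reduces to XOR-ing bit 5 of the lane index, pairing each lane in the low half of a wave64 with its counterpart in the high half. A one-line sanity check (illustrative):

def permlane64_altlane(lane: int) -> int:
  # { ~lane[5], lane[4:0] }: complement bit 5, keep bits 4:0
  return lane ^ 32

# lanes 0..31 pair with 32..63 and vice versa
assert permlane64_altlane(0) == 32 and permlane64_altlane(63) == 31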
VOP1Op.V_CVT_F32_FP8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].fp8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].fp8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].fp8)\nelse\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].fp8)\nendif", VOP1Op.V_CVT_F32_BF8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].bf8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].bf8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].bf8)\nelse\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].bf8)\nendif", VOP1Op.V_CVT_PK_F32_FP8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8);\nD0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)', - VOP1Op.V_CVT_PK_F32_BF8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)', + VOP1Op.V_CVT_PK_F32_BF8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)', } VOP2Op_PCODE = { @@ -401,7 +768,7 @@ VOP2Op_PCODE = { VOP2Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', VOP2Op.V_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', VOP2Op.V_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', - VOP2Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + VOP2Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_TOWARD_ZERO;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode;\n// Round-toward-zero regardless of current round mode setting in hardware.', VOP2Op.V_MIN_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", VOP2Op.V_MAX_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 > S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && !sign(S0.f16) &&\nsign(S1.f16))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", VOP2Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', @@ -411,94 +778,94 @@ VOP2Op_PCODE = { VOP2Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', VOP2Op.V_FMAMK_F16: 'D0.f16 = fma(S0.f16, SIMM32.f16, S1.f16)', 
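The repeated "With NAN inputs this is not the same operation as ..." comments in the V_CMP_N* entries below follow from IEEE-754 unordered comparison: any ordered compare against a NaN is false, so the negated form !(a < b) is not equivalent to a >= b. The same holds for Python floats:

nan = float("nan")
# ordered comparisons against NaN are false, so negation flips the result:
assert not (nan < 1.0) and not (nan >= 1.0)  # V_CMP_NLT is true here while V_CMP_GE is false
assert not (nan == nan)                      # V_CMP_NEQ is true even for identical NaN inputs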
VOP2Op.V_FMAAK_F16: 'D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16)', - VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()", VOP2Op.V_PK_FMAC_F16: 'D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16);\nD0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16)', } VOP3Op_PCODE = { - VOP3Op.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_F16: 'D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F16: 'D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F16: 'D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_O_F16: "D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_U_F16: "D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.", VOP3Op.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F16: 'D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. 
Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F16: 'D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F16: 'D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_F32: 'D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F32: 'D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F32: 'D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_O_F32: "D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_U_F32: "D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.", VOP3Op.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F32: 'D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. 
Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F32: 'D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F32: 'D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_F64: 'D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_F64: 'D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_F64: 'D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_O_F64: 'D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_U_F64: 'D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NGT_F64: 'D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. 
Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NEQ_F64: 'D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NLT_F64: 'D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_I16: 'D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I16: 'D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I16: 'D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I16: 'D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_U16: 'D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U16: 'D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U16: 'D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U16: 'D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_I32: 'D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I32: 'D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I32: 'D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I32: 'D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_U32: 'D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U32: 'D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U32: 'D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U32: 'D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_I64: 'D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_I64: 'D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. 
Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_I64: 'D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_I64: 'D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_LT_U64: 'D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_EQ_U64: 'D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_GT_U64: 'D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.', + VOP3Op.V_CMP_NE_U64: 'D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.', VOP3Op.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.', - VOP3Op.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 
2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", - VOP3Op.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", + VOP3Op.V_CMP_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.", VOP3Op.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16', - VOP3Op.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.f16 == S1.f16', + VOP3Op.V_CMPX_EQ_F16: 'EXEC.u64[laneId] = S0.f16 == S1.f16', VOP3Op.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16', VOP3Op.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16', VOP3Op.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16', @@ -512,7 +879,7 @@ VOP3Op_PCODE = { VOP3Op.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=', VOP3Op.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=', VOP3Op.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32', - VOP3Op.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f32 == S1.f32', + VOP3Op.V_CMPX_EQ_F32: 'EXEC.u64[laneId] = S0.f32 == S1.f32', VOP3Op.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32', VOP3Op.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32', VOP3Op.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32', @@ -526,7 +893,7 @@ VOP3Op_PCODE = { VOP3Op.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=', VOP3Op.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=', VOP3Op.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64', - VOP3Op.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f64 == S1.f64', + VOP3Op.V_CMPX_EQ_F64: 'EXEC.u64[laneId] = S0.f64 == S1.f64', VOP3Op.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64', VOP3Op.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64', VOP3Op.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64', @@ -540,128 +907,44 @@ VOP3Op_PCODE = { VOP3Op.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=', VOP3Op.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=', VOP3Op.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16', - VOP3Op.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i16 == S1.i16', + VOP3Op.V_CMPX_EQ_I16: 'EXEC.u64[laneId] = S0.i16 == S1.i16', VOP3Op.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16', VOP3Op.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16', VOP3Op.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16', VOP3Op.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16', VOP3Op.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16', - VOP3Op.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u16 == S1.u16', + VOP3Op.V_CMPX_EQ_U16: 'EXEC.u64[laneId] = S0.u16 == S1.u16', VOP3Op.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16', VOP3Op.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16', VOP3Op.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16', VOP3Op.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16', VOP3Op.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32', - VOP3Op.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. 
Store the result into the EXEC\nEXEC.u64[laneId] = S0.i32 == S1.i32', + VOP3Op.V_CMPX_EQ_I32: 'EXEC.u64[laneId] = S0.i32 == S1.i32', VOP3Op.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32', VOP3Op.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32', VOP3Op.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32', VOP3Op.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32', VOP3Op.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32', - VOP3Op.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u32 == S1.u32', + VOP3Op.V_CMPX_EQ_U32: 'EXEC.u64[laneId] = S0.u32 == S1.u32', VOP3Op.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32', VOP3Op.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32', VOP3Op.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32', VOP3Op.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32', VOP3Op.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64', - VOP3Op.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i64 == S1.i64', + VOP3Op.V_CMPX_EQ_I64: 'EXEC.u64[laneId] = S0.i64 == S1.i64', VOP3Op.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64', VOP3Op.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64', VOP3Op.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64', VOP3Op.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64', VOP3Op.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64', - VOP3Op.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u64 == S1.u64', + VOP3Op.V_CMPX_EQ_U64: 'EXEC.u64[laneId] = S0.u64 == S1.u64', VOP3Op.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64', VOP3Op.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64', VOP3Op.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64', VOP3Op.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64', - VOP3Op.V_CMPX_CLASS_F16: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", - VOP3Op.V_CMPX_CLASS_F32: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 
2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", - VOP3Op.V_CMPX_CLASS_F64: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", - VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32', - VOP3Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", - VOP3Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', - VOP3Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', - VOP3Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', - VOP3Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', - VOP3Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', - VOP3Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', - VOP3Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', - VOP3Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', - VOP3Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', - VOP3Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', - VOP3Op.V_CVT_OFF_F32_I4: "Used for interpolation in shader. 
Lookup table on S0[3:0]:\ndeclare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", - VOP3Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', - VOP3Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', - VOP3Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', - VOP3Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', - VOP3Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', - VOP3Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', - VOP3Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', - VOP3Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', - VOP3Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', - VOP3Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', - VOP3Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', - VOP3Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', - VOP3Op.V_MOV_B16: 'D0.b16 = S0.b16', - VOP3Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', - VOP3Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', - VOP3Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', - VOP3Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", - VOP3Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', - VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', - VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)', - VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32', - VOP3Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', - VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', - VOP3Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', - VOP3Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', - VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', - VOP3Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', - VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))", - VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))", - VOP3Op.V_NOT_B32: 'D0.u32 = ~S0.u32', - VOP3Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', - VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nendif\nendfor", - VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nendif\nendfor', - VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif', - VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif', - VOP3Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', - VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif", - VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif", - VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from 
instruction\nVGPR[laneId][addr].b32 = S0.b32', - VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\nD0.b32 = VGPR[laneId][addr].b32', - VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', - VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction', - VOP3Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', - VOP3Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', - VOP3Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', - VOP3Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', - VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16", - VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)', - VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)", - VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)', - VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)", - VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif", - VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif", - VOP3Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", - VOP3Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", - VOP3Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', - VOP3Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", - VOP3Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', - VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))", - VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))", - VOP3Op.V_SAT_PK_U8_I16: "tmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", - VOP3Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', - VOP3Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', - VOP3Op.V_NOT_B16: 'D0.u16 = ~S0.u16', - VOP3Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", - VOP3Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", - VOP3Op.V_CVT_F32_FP8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].fp8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].fp8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].fp8)\nelse\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].fp8)\nendif", - VOP3Op.V_CVT_F32_BF8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].bf8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].bf8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].bf8)\nelse\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].bf8)\nendif", - VOP3Op.V_CVT_PK_F32_FP8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8);\nD0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)', - VOP3Op.V_CVT_PK_F32_BF8: 'tmp = OPSEL[0].u1 ? 
VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)', + VOP3Op.V_CMPX_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CMPX_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", + VOP3Op.V_CMPX_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result", VOP3Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32', VOP3Op.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64', VOP3Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', @@ -694,7 +977,7 @@ VOP3Op_PCODE = { VOP3Op.V_SUB_NC_U32: 'D0.u32 = S0.u32 - S1.u32', VOP3Op.V_SUBREV_NC_U32: 'D0.u32 = S1.u32 - S0.u32', VOP3Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', - VOP3Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);', + VOP3Op.V_CVT_PK_RTZ_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_TOWARD_ZERO;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode;\n// Round-toward-zero regardless of current round mode setting in hardware.', VOP3Op.V_MIN_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", VOP3Op.V_MAX_NUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f16)) && isNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((S0.f16 > S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && !sign(S0.f16) &&\nsign(S1.f16))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif", 
VOP3Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16', @@ -702,7 +985,91 @@ VOP3Op_PCODE = { VOP3Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16', VOP3Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16', VOP3Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)', - VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))", + VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()", + VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1', + VOP3Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]", + VOP3Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)', + VOP3Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)', + VOP3Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)', + VOP3Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)', + VOP3Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)', + VOP3Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)', + VOP3Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)', + VOP3Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)', + VOP3Op.V_CVT_NEAREST_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))', + VOP3Op.V_CVT_FLOOR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))', + VOP3Op.V_CVT_OFF_F32_I4: "declare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]", + VOP3Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)', + VOP3Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)', + VOP3Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)', + VOP3Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)', + VOP3Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)', + VOP3Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)', + VOP3Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)', + VOP3Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)', + VOP3Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)', + VOP3Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif', + VOP3Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif', + VOP3Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif', + VOP3Op.V_MOV_B16: 'D0.b16 = S0.b16', + VOP3Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)', + VOP3Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)', + VOP3Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif', + VOP3Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif", + VOP3Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif', + VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF', + VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 
0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF', + VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0', + VOP3Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception', + VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0', + VOP3Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64', + VOP3Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)', + VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF', + VOP3Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)', + VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN", + VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN", + VOP3Op.V_NOT_B32: 'D0.u32 = ~S0.u32', + VOP3Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]', + VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor", + VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor", + VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor', + VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()', + VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()', + VOP3Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)', + VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()", + VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()", + VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value 
from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32\ns_mov_b32 m0, 10\nv_movreld_b32 v5, v7', + VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32\ns_mov_b32 m0, 10\nv_movrels_b32 v5, v7', + VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, 10\nv_movrelsd_b32 v5, v7', + VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\nv_movrelsd_2_b32 v5, v7', + VOP3Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)', + VOP3Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)', + VOP3Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)', + VOP3Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)', + VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0", + VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF', + VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0", + VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF', + VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF", + VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()", + VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()", + VOP3Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif", + VOP3Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif", + VOP3Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)', + VOP3Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif", + VOP3Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)', + VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 
0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN", + VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN", + VOP3Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n <= 16'0 then\nreturn 8'0U\nelsif n >= 16'255 then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\ntmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16", + VOP3Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)', + VOP3Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)', + VOP3Op.V_NOT_B16: 'D0.u16 = ~S0.u16', + VOP3Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))", + VOP3Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }", + VOP3Op.V_CVT_F32_FP8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].fp8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].fp8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].fp8)\nelse\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].fp8)\nendif", + VOP3Op.V_CVT_F32_BF8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].bf8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].bf8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].bf8)\nelse\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].bf8)\nendif", + VOP3Op.V_CVT_PK_F32_FP8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8);\nD0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)', + VOP3Op.V_CVT_PK_F32_BF8: 'tmp = OPSEL[0].u1 ? 
VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)', VOP3Op.V_FMA_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = S2.f32\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif", VOP3Op.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32", VOP3Op.V_MAD_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24) + S2.u32", @@ -725,10 +1092,10 @@ VOP3Op_PCODE = { VOP3Op.V_MAX3_U32: 'D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', VOP3Op.V_MED3_I32: 'if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32 then\nD0.i32 = v_max_i32(S1.i32, S2.i32)\nelsif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32 then\nD0.i32 = v_max_i32(S0.i32, S2.i32)\nelse\nD0.i32 = v_max_i32(S0.i32, S1.i32)\nendif', VOP3Op.V_MED3_U32: 'if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32 then\nD0.u32 = v_max_u32(S1.u32, S2.u32)\nelsif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32 then\nD0.u32 = v_max_u32(S0.u32, S2.u32)\nelse\nD0.u32 = v_max_u32(S0.u32, S1.u32)\nendif', - VOP3Op.V_SAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3Op.V_SAD_U8: "ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", VOP3Op.V_SAD_HI_U8: "D0.u32 = (32'U(v_sad_u8(S0, S1, 0U)) << 16U) + S2.u32", - VOP3Op.V_SAD_U16: '// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp', - VOP3Op.V_SAD_U32: '// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32', + VOP3Op.V_SAD_U16: 'ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp', + VOP3Op.V_SAD_U32: 'ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32', VOP3Op.V_CVT_PK_U8_F32: "tmp = (S2.u32 & 32'U(~(0xff << (S1.u32[1 : 0].u32 * 8U))));\ntmp = (tmp | ((32'U(f32_to_u8(S0.f32)) & 255U) << (S1.u32[1 : 0].u32 * 8U)));\nD0.u32 = tmp", VOP3Op.V_DIV_FIXUP_F32: "sign_out = (sign(S1.f32) ^ sign(S2.f32));\nif isNAN(64'F(S2.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S2.f32)))\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif ((64'F(S1.f32) == 0.0) && (64'F(S2.f32) == 0.0)) then\n// 0/0\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(abs(S1.f32)) == +INF) && (64'F(abs(S2.f32)) == +INF)) then\n// inf/inf\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(S1.f32) == 0.0) || (64'F(abs(S2.f32)) == +INF)) then\n// x/0, or inf/y\nD0.f32 = sign_out ? -INF.f32 : +INF.f32\nelsif ((64'F(abs(S1.f32)) == +INF) || (64'F(S2.f32) == 0.0)) then\n// x/inf, 0/y\nD0.f32 = sign_out ? -0.0F : 0.0F\nelsif exponent(S2.f32) - exponent(S1.f32) < -150 then\nD0.f32 = sign_out ? -UNDERFLOW_F32 : UNDERFLOW_F32\nelsif exponent(S1.f32) == 255 then\nD0.f32 = sign_out ? -OVERFLOW_F32 : OVERFLOW_F32\nelse\nD0.f32 = sign_out ? 
-abs(S0.f32) : abs(S0.f32)\nendif", VOP3Op.V_DIV_FIXUP_F64: "sign_out = (sign(S1.f64) ^ sign(S2.f64));\nif isNAN(S2.f64) then\nD0.f64 = cvtToQuietNAN(S2.f64)\nelsif isNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif ((S1.f64 == 0.0) && (S2.f64 == 0.0)) then\n// 0/0\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((abs(S1.f64) == +INF) && (abs(S2.f64) == +INF)) then\n// inf/inf\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((S1.f64 == 0.0) || (abs(S2.f64) == +INF)) then\n// x/0, or inf/y\nD0.f64 = sign_out ? -INF : +INF\nelsif ((abs(S1.f64) == +INF) || (S2.f64 == 0.0)) then\n// x/inf, 0/y\nD0.f64 = sign_out ? -0.0 : 0.0\nelsif exponent(S2.f64) - exponent(S1.f64) < -1075 then\nD0.f64 = sign_out ? -UNDERFLOW_F64 : UNDERFLOW_F64\nelsif exponent(S1.f64) == 2047 then\nD0.f64 = sign_out ? -OVERFLOW_F64 : OVERFLOW_F64\nelse\nD0.f64 = sign_out ? -abs(S0.f64) : abs(S0.f64)\nendif", @@ -744,13 +1111,13 @@ VOP3Op_PCODE = { VOP3Op.V_MED3_NUM_F16: "if (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)) || isNAN(64'F(S2.f16))) then\nD0.f16 = v_min3_num_f16(S0.f16, S1.f16, S2.f16)\nelsif v_max3_num_f16(S0.f16, S1.f16, S2.f16) == S0.f16 then\nD0.f16 = v_max_num_f16(S1.f16, S2.f16)\nelsif v_max3_num_f16(S0.f16, S1.f16, S2.f16) == S1.f16 then\nD0.f16 = v_max_num_f16(S0.f16, S2.f16)\nelse\nD0.f16 = v_max_num_f16(S0.f16, S1.f16)\nendif", VOP3Op.V_DIV_FMAS_F32: 'if VCC.u64[laneId] then\nD0.f32 = 2.0F ** 32 * fma(S0.f32, S1.f32, S2.f32)\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif', VOP3Op.V_DIV_FMAS_F64: 'if VCC.u64[laneId] then\nD0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)\nelse\nD0.f64 = fma(S0.f64, S1.f64, S2.f64)\nendif', - VOP3Op.V_MSAD_U8: "// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", + VOP3Op.V_MSAD_U8: "ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 
0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp", VOP3Op.V_QSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", VOP3Op.V_MQSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64", VOP3Op.V_MQSAD_U32_U8: "tmp[127 : 96] = 32'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32));\ntmp[95 : 64] = 32'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32));\ntmp[63 : 32] = 32'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32));\ntmp[31 : 0] = 32'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32));\nD0.b128 = tmp.b128", VOP3Op.V_XOR3_B32: 'D0.u32 = (S0.u32 ^ S1.u32 ^ S2.u32)', VOP3Op.V_MAD_U16: 'D0.u16 = S0.u16 * S1.u16 + S2.u16', - VOP3Op.V_PERM_B32: 'D0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])', + VOP3Op.V_PERM_B32: "BYTE_PERMUTE = lambda(data, sel) (\ndeclare in : 8'B[8];\nfor i in 0 : 7 do\nin[i] = data[i * 8 + 7 : i * 8].b8\nendfor;\nif sel.u32 >= 13U then\nreturn 8'0xff\nelsif sel.u32 == 12U then\nreturn 8'0x0\nelsif sel.u32 == 11U then\nreturn in[7][7].b8 * 8'0xff\nelsif sel.u32 == 10U then\nreturn in[5][7].b8 * 8'0xff\nelsif sel.u32 == 9U then\nreturn in[3][7].b8 * 8'0xff\nelsif sel.u32 == 8U then\nreturn in[1][7].b8 * 8'0xff\nelse\nreturn in[sel]\nendif);\nD0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])", VOP3Op.V_XAD_U32: 'D0.u32 = (S0.u32 ^ S1.u32) + S2.u32', VOP3Op.V_LSHL_ADD_U32: 'D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32', VOP3Op.V_ADD_LSHL_U32: 'D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32)', @@ -769,8 +1136,8 @@ VOP3Op_PCODE = { VOP3Op.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)', VOP3Op.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32", VOP3Op.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32", - VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor", - VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 
1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor", + VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor\nv_mov_b32 s0, 0x87654321;\nv_mov_b32 s1, 0x0fedcba9;\nv_permlane16_b32 v1, v0, s0, s1;\n// ROW 0:\n// v1.lane[0] <- v0.lane[1]\n// v1.lane[1] <- v0.lane[2]\n// ...\n// v1.lane[14] <- v0.lane[15]\n// v1.lane[15] <- v0.lane[0]\n//\n// ROW 1:\n// v1.lane[16] <- v0.lane[17]\n// v1.lane[17] <- v0.lane[18]\n// ...\n// v1.lane[30] <- v0.lane[31]\n// v1.lane[31] <- v0.lane[16]", + VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor\n// Note for this to work, source and destination VGPRs must be different.\n// For this rotation, lane 15 gets data from lane 16, lane 31 gets data from lane 0.\n// These are the only two lanes that need to use v_permlanex16_b32.\n// Enable only the threads that get data from their own row.\nv_mov_b32 exec_lo, 0x7fff7fff; // Lanes getting data from their own row\nv_mov_b32 s0, 0x87654321;\nv_mov_b32 s1, 0x0fedcba9;\nv_permlane16_b32 v1, v0, s0, s1 fi; // FI bit needed for lanes 14 and 30\n// ROW 0:\n// v1.lane[0] <- v0.lane[1]\n// v1.lane[1] <- v0.lane[2]\n// ...\n// v1.lane[14] <- v0.lane[15] (needs FI to read)\n// v1.lane[15] unset\n//\n// ROW 1:\n// v1.lane[16] <- v0.lane[17]\n// v1.lane[17] <- v0.lane[18]\n// ...\n// v1.lane[30] <- v0.lane[31] (needs FI to read)\n// v1.lane[31] unset\n// Enable only the threads that get data from the other row.\nv_mov_b32 exec_lo, 0x80008000; // Lanes getting data from the other row\nv_permlanex16_b32 v1, v0, s0, s1 fi; // FI bit needed for lanes 15 and 31\n// v1.lane[15] <- v0.lane[16]\n// v1.lane[31] <- v0.lane[0]", VOP3Op.V_CNDMASK_B16: 'D0.u16 = VCC.u64[laneId] ? 
S1.u16 : S0.u16', VOP3Op.V_MAXMIN_U32: 'D0.u32 = v_min_u32(v_max_u32(S0.u32, S1.u32), S2.u32)', VOP3Op.V_MINMAX_U32: 'D0.u32 = v_max_u32(v_min_u32(S0.u32, S1.u32), S2.u32)', @@ -786,21 +1153,21 @@ VOP3Op_PCODE = { VOP3Op.V_MAXIMUMMINIMUM_F32: 'D0.f32 = v_minimum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32)', VOP3Op.V_MINIMUMMAXIMUM_F16: 'D0.f16 = v_maximum_f16(v_minimum_f16(S0.f16, S1.f16), S2.f16)', VOP3Op.V_MAXIMUMMINIMUM_F16: 'D0.f16 = v_minimum_f16(v_maximum_f16(S0.f16, S1.f16), S2.f16)', - VOP3Op.V_S_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)', - VOP3Op.V_S_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16);\nD0[31 : 16] = 16'0x0", - VOP3Op.V_S_LOG_F32: 'D0.f32 = log2(S0.f32)', - VOP3Op.V_S_LOG_F16: "D0.f16 = log2(S0.f16);\nD0[31 : 16] = 16'0x0", - VOP3Op.V_S_RCP_F32: 'D0.f32 = 1.0F / S0.f32', - VOP3Op.V_S_RCP_F16: "D0.f16 = 16'1.0 / S0.f16;\nD0[31 : 16] = 16'0x0", - VOP3Op.V_S_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)', - VOP3Op.V_S_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16);\nD0[31 : 16] = 16'0x0", - VOP3Op.V_S_SQRT_F32: 'D0.f32 = sqrt(S0.f32)', - VOP3Op.V_S_SQRT_F16: "D0.f16 = sqrt(S0.f16);\nD0[31 : 16] = 16'0x0", + VOP3Op.V_S_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF', + VOP3Op.V_S_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16);\nD0[31 : 16] = 16'0x0\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF", + VOP3Op.V_S_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF', + VOP3Op.V_S_LOG_F16: "D0.f16 = log2(S0.f16);\nD0[31 : 16] = 16'0x0\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF", + VOP3Op.V_S_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0', + VOP3Op.V_S_RCP_F16: "D0.f16 = 16'1.0 / S0.f16;\nD0[31 : 16] = 16'0x0\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0", + VOP3Op.V_S_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0', + VOP3Op.V_S_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16);\nD0[31 : 16] = 16'0x0\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = 
+0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0", + VOP3Op.V_S_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF', + VOP3Op.V_S_SQRT_F16: "D0.f16 = sqrt(S0.f16);\nD0[31 : 16] = 16'0x0\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF", VOP3Op.V_ADD_NC_U16: 'D0.u16 = S0.u16 + S1.u16', VOP3Op.V_SUB_NC_U16: 'D0.u16 = S0.u16 - S1.u16', VOP3Op.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16', - VOP3Op.V_CVT_PK_I16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_i16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_i16_f32(S0.f32));", - VOP3Op.V_CVT_PK_U16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_u16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_u16_f32(S0.f32));", + VOP3Op.V_CVT_PK_I16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_i16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_i16_f32(S0.f32));\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_U16_F32: "declare tmp : 32'B;\ntmp[31 : 16] = 16'B(v_cvt_u16_f32(S1.f32));\ntmp[15 : 0] = 16'B(v_cvt_u16_f32(S0.f32));\nD0 = tmp.b32", VOP3Op.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16', VOP3Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16', VOP3Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16', @@ -810,24 +1177,24 @@ VOP3Op_PCODE = { VOP3Op.V_PERMLANE16_VAR_B32: "declare tmp : 32'B[64];\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : wave32.u1 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nlane = row * 16 + i;\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[row * 16 + VGPR[lane][SRC1.u32][3 : 0].i32]\nendif\nendfor\nendfor", VOP3Op.V_PERMLANEX16_VAR_B32: "declare tmp : 32'B[64];\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : wave32.u1 ? 
1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nlane = row * 16 + i;\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altrow.i32 * 16 + VGPR[lane][SRC1.u32][3 : 0].i32]\nendif\nendfor\nendfor", VOP3Op.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16', - VOP3Op.V_CVT_PK_NORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);", - VOP3Op.V_CVT_PK_NORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);", - VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32', + VOP3Op.V_CVT_PK_NORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_NORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);\nD0 = tmp.b32", + VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32\nldexp()', VOP3Op.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)', VOP3Op.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp", - VOP3Op.V_MBCNT_LO_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', - VOP3Op.V_MBCNT_HI_U32_B32: 'MaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\nendfor;\nD0.u32 = tmp', - VOP3Op.V_CVT_PK_NORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);", - VOP3Op.V_CVT_PK_NORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);", - VOP3Op.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);", - VOP3Op.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);", + VOP3Op.V_MBCNT_LO_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp", + VOP3Op.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 
1U : 0U\nendfor;\nD0.u32 = tmp\nv_mbcnt_lo_u32_b32 v0, -1, 0\nv_mbcnt_hi_u32_b32 v0, -1, v0\n// v0 now contains laneId\nv_mbcnt_lo_u32_b32 v0, vcc_lo, 0\nv_mbcnt_hi_u32_b32 v0, vcc_hi, v0 // Note vcc_hi is passed in for second instruction\n// v0 now contains position among lanes with VCC=1", + VOP3Op.V_CVT_PK_NORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_NORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);\nD0 = tmp.b32", + VOP3Op.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);\nD0 = tmp.b32", VOP3Op.V_SUB_NC_I32: 'D0.i32 = S0.i32 - S1.i32', VOP3Op.V_ADD_NC_I32: 'D0.i32 = S0.i32 + S1.i32', - VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32', + VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32\nldexp()', VOP3Op.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32', VOP3Op.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)", VOP3Op.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)", - VOP3Op.V_TRIG_PREOP_F64: "shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)", + VOP3Op.V_TRIG_PREOP_F64: "shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\n// into the whole part of the number.\n// Only whole part of result is kept.\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)", VOP3Op.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)', VOP3Op.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)', VOP3Op.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)', @@ -844,23 +1211,10 @@ VOP3Op_PCODE = { VOP3Op.V_MAXIMUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S1.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", VOP3Op.V_MINIMUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S0.f16\nelsif 
@@ -844,23 +1211,10 @@ VOP3Op_PCODE = {
   VOP3Op.V_MAXIMUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isSignalNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isQuietNAN(64'F(S0.f32)) then\nD0.f32 = S0.f32\nelsif isQuietNAN(64'F(S1.f32)) then\nD0.f32 = S1.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif",
   VOP3Op.V_MINIMUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S1.f16\nelsif ((S0.f16 < S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && sign(S0.f16) &&\n!sign(S1.f16))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif",
   VOP3Op.V_MAXIMUM_F16: "if (isSignalNAN(64'F(S0.f16)) || isSignalNAN(64'F(S1.f16))) then\nTRAPSTS.INVALID = 1\nendif;\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S1.f16\nelsif ((S0.f16 > S1.f16) || ((abs(S0.f16) == 16'0.0) && (abs(S1.f16) == 16'0.0) && !sign(S0.f16) &&\nsign(S1.f16))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif",
-  VOP3Op.V_CVT_PK_FP8_F32: 'prev_mode = ROUND_MODE;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[15:0] are preserved\nendif;',
-  VOP3Op.V_CVT_PK_BF8_F32: 'prev_mode = ROUND_MODE;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[15:0] are preserved\nendif;',
-  VOP3Op.V_CVT_SR_FP8_F32: "prev_mode = ROUND_MODE;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 12].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].fp8 = f32_to_fp8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].fp8 = f32_to_fp8(tmp.f32)\nendif;",
-  VOP3Op.V_CVT_SR_BF8_F32: "prev_mode = ROUND_MODE;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 11].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].bf8 = f32_to_bf8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].bf8 = f32_to_bf8(tmp.f32)\nendif;",
-}
-
-VOP3SDOp_PCODE = {
-  VOP3SDOp.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32",
-  VOP3SDOp.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
-  VOP3SDOp.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
-  VOP3SDOp.V_DIV_SCALE_F32: "VCC = 0x0LL;\nif ((64'F(S2.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\nD0.f32 = NAN.f32\nelsif exponent(S2.f32) - exponent(S1.f32) >= 96 then\n// N/D near MAX_FLOAT_F32\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif S1.f32 == DENORM.f32 then\nD0.f32 = ldexp(S0.f32, 64)\nelsif ((1.0 / 64'F(S1.f32) == DENORM.f64) && (S2.f32 / S1.f32 == DENORM.f32)) then\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif 1.0 / 64'F(S1.f32) == DENORM.f64 then\nD0.f32 = ldexp(S0.f32, -64)\nelsif S2.f32 / S1.f32 == DENORM.f32 then\nVCC = 0x1LL;\nif S0.f32 == S2.f32 then\n// Only scale the numerator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif exponent(S2.f32) <= 23 then\n// Numerator is tiny\nD0.f32 = ldexp(S0.f32, 64)\nendif",
-  VOP3SDOp.V_DIV_SCALE_F64: 'VCC = 0x0LL;\nif ((S2.f64 == 0.0) || (S1.f64 == 0.0)) then\nD0.f64 = NAN.f64\nelsif exponent(S2.f64) - exponent(S1.f64) >= 768 then\n// N/D near MAX_FLOAT_F64\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, 128)\nelsif ((1.0 / S1.f64 == DENORM.f64) && (S2.f64 / S1.f64 == DENORM.f64)) then\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif 1.0 / S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, -128)\nelsif S2.f64 / S1.f64 == DENORM.f64 then\nVCC = 0x1LL;\nif S0.f64 == S2.f64 then\n// Only scale the numerator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif exponent(S2.f64) <= 53 then\n// Numerator is tiny\nD0.f64 = ldexp(S0.f64, 128)\nendif',
-  VOP3SDOp.V_MAD_CO_U64_U32: "{ D1.u1, D0.u64 } = 65'B(65'U(S0.u32) * 65'U(S1.u32) + 65'U(S2.u64))",
-  VOP3SDOp.V_MAD_CO_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))",
-  VOP3SDOp.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32",
-  VOP3SDOp.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
-  VOP3SDOp.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
+  VOP3Op.V_CVT_PK_FP8_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[15:0] are preserved\nendif;\nROUND_MODE = prev_mode',
+  VOP3Op.V_CVT_PK_BF8_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[15:0] are preserved\nendif;\nROUND_MODE = prev_mode',
+  VOP3Op.V_CVT_SR_FP8_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 12].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].fp8 = f32_to_fp8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].fp8 = f32_to_fp8(tmp.f32)\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode",
+  VOP3Op.V_CVT_SR_BF8_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 11].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].bf8 = f32_to_bf8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].bf8 = f32_to_bf8(tmp.f32)\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode",
 }
 
 VOP3POp_PCODE = {
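The updated conversions above now force ROUND_NEAREST_EVEN and restore the previous mode, and the V_CVT_SR_* variants derive their randomness from S1's top bits added below the kept mantissa. A rough Python sketch of that stochastic-rounding step only (the 3-bit kept width is a simplification of the fp8 mantissa handling, not the full conversion):

def sr_round_mantissa(mantissa23: int, rand20: int) -> int:
  # m = 23'U(mantissa + S1[31:12]): add the random bits, wrap on overflow
  bumped = (mantissa23 + rand20) & 0x7fffff
  # truncating to the top bits now rounds up with probability
  # proportional to the discarded fraction
  return bumped >> 20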
@@ -882,130 +1236,143 @@ VOP3POp_PCODE = {
   VOP3POp.V_PK_ADD_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\nD0.b32 = tmp",
   VOP3POp.V_PK_MUL_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.b32 = tmp",
   VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp',
-  VOP3POp.V_DOT4_I32_IU8: "declare A : 32'I[4];\ndeclare B : 32'I[4];\nfor i in 0 : 3 do\nA8 = S0[i * 8 + 7 : i * 8];\nB8 = S1[i * 8 + 7 : i * 8];\nendfor;\nC = S2.i32;\ntmp = C.i32;\nD0.i32 = tmp",
+  VOP3POp.V_DOT4_I32_IU8: "declare A : 32'I[4];\ndeclare B : 32'I[4];\n// Figure out whether inputs are signed/unsigned.\nfor i in 0 : 3 do\nA8 = S0[i * 8 + 7 : i * 8];\nB8 = S1[i * 8 + 7 : i * 8];\nA[i] = NEG[0].u1 ? 32'I(signext(A8.i8)) : 32'I(32'U(A8.u8));\nB[i] = NEG[1].u1 ? 32'I(signext(B8.i8)) : 32'I(32'U(B8.u8))\nendfor;\nC = S2.i32;\n// Signed multiplier/adder. Extend unsigned inputs with leading 0.\ntmp = C.i32;\ntmp += A[0] * B[0];\ntmp += A[1] * B[1];\ntmp += A[2] * B[2];\ntmp += A[3] * B[3];\nD0.i32 = tmp",
   VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp',
-  VOP3POp.V_DOT8_I32_IU4: "declare A : 32'I[8];\ndeclare B : 32'I[8];\nfor i in 0 : 7 do\nA4 = S0[i * 4 + 3 : i * 4];\nB4 = S1[i * 4 + 3 : i * 4];\nendfor;\nC = S2.i32;\ntmp = C.i32;\nD0.i32 = tmp",
+  VOP3POp.V_DOT8_I32_IU4: "declare A : 32'I[8];\ndeclare B : 32'I[8];\n// Figure out whether inputs are signed/unsigned.\nfor i in 0 : 7 do\nA4 = S0[i * 4 + 3 : i * 4];\nB4 = S1[i * 4 + 3 : i * 4];\nA[i] = NEG[0].u1 ? 32'I(signext(A4.i4)) : 32'I(32'U(A4.u4));\nB[i] = NEG[1].u1 ? 32'I(signext(B4.i4)) : 32'I(32'U(B4.u4))\nendfor;\nC = S2.i32;\n// Signed multiplier/adder. Extend unsigned inputs with leading 0.\ntmp = C.i32;\ntmp += A[0] * B[0];\ntmp += A[1] * B[1];\ntmp += A[2] * B[2];\ntmp += A[3] * B[3];\ntmp += A[4] * B[4];\ntmp += A[5] * B[5];\ntmp += A[6] * B[6];\ntmp += A[7] * B[7];\nD0.i32 = tmp",
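The restored V_DOT4_I32_IU8 body chooses sign- or zero-extension per operand from NEG[0]/NEG[1] before the signed multiply-add. The same logic in plain Python (neg_a/neg_b mirror the NEG bits):

def v_dot4_i32_iu8(s0: int, s1: int, s2: int, neg_a: bool, neg_b: bool) -> int:
  def ext(b: int, signed: bool) -> int:
    return b - 0x100 if signed and b >= 0x80 else b  # signext vs leading-0 extend
  acc = s2
  for i in range(4):
    acc += ext((s0 >> 8 * i) & 0xff, neg_a) * ext((s1 >> 8 * i) & 0xff, neg_b)
  return acc & 0xffffffff  # i32 destination wraps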
   VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp',
   VOP3POp.V_DOT2_F32_BF16: 'tmp = S2.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp',
   VOP3POp.V_PK_MIN_NUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_min_num_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_min_num_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
   VOP3POp.V_PK_MAX_NUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_max_num_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_max_num_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
   VOP3POp.V_PK_MINIMUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_minimum_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_minimum_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
   VOP3POp.V_PK_MAXIMUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_maximum_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_maximum_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
-  VOP3POp.V_FMA_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])",
-  VOP3POp.V_FMA_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
-  VOP3POp.V_FMA_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
+  VOP3POp.V_FMA_MIX_F32: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])",
+  VOP3POp.V_FMA_MIXLO_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
+  VOP3POp.V_FMA_MIXHI_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
   VOP3POp.V_DOT4_F32_FP8_BF8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].fp8) * 32'F(S1[7 : 0].bf8);\ntmp += 32'F(S0[15 : 8].fp8) * 32'F(S1[15 : 8].bf8);\ntmp += 32'F(S0[23 : 16].fp8) * 32'F(S1[23 : 16].bf8);\ntmp += 32'F(S0[31 : 24].fp8) * 32'F(S1[31 : 24].bf8);\nD0.f32 = tmp",
   VOP3POp.V_DOT4_F32_BF8_FP8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].bf8) * 32'F(S1[7 : 0].fp8);\ntmp += 32'F(S0[15 : 8].bf8) * 32'F(S1[15 : 8].fp8);\ntmp += 32'F(S0[23 : 16].bf8) * 32'F(S1[23 : 16].fp8);\ntmp += 32'F(S0[31 : 24].bf8) * 32'F(S1[31 : 24].fp8);\nD0.f32 = tmp",
   VOP3POp.V_DOT4_F32_FP8_FP8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].fp8) * 32'F(S1[7 : 0].fp8);\ntmp += 32'F(S0[15 : 8].fp8) * 32'F(S1[15 : 8].fp8);\ntmp += 32'F(S0[23 : 16].fp8) * 32'F(S1[23 : 16].fp8);\ntmp += 32'F(S0[31 : 24].fp8) * 32'F(S1[31 : 24].fp8);\nD0.f32 = tmp",
   VOP3POp.V_DOT4_F32_BF8_BF8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].bf8) * 32'F(S1[7 : 0].bf8);\ntmp += 32'F(S0[15 : 8].bf8) * 32'F(S1[15 : 8].bf8);\ntmp += 32'F(S0[23 : 16].bf8) * 32'F(S1[23 : 16].bf8);\ntmp += 32'F(S0[31 : 24].bf8) * 32'F(S1[31 : 24].bf8);\nD0.f32 = tmp",
-  VOP3POp.V_WMMA_F32_16X16X16_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_F32_16X16X16_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_F16_16X16X16_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f16(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.bf16(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_I32_16X16X16_IU8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(16x16) + S2.i32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_I32_16X16X16_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(16x16) + S2.i32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_F32_16X16X16_FP8_FP8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.fp8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_F32_16X16X16_FP8_BF8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.bf8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_F32_16X16X16_BF8_FP8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.fp8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_F32_16X16X16_BF8_BF8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.bf8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_WMMA_I32_16X16X32_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x32) * S1.iu4(32x16) + S2.i32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_F32_16X16X32_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_F32_16X16X32_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_F16_16X16X32_F16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(32x16, index set from S2) + D0.f16(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_BF16_16X16X32_BF16: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(32x16, index set from S2) + D0.bf16(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_I32_16X16X32_IU8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(32x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_I32_16X16X32_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(32x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_I32_16X16X64_IU4: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x32) * S1.iu4(64x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_F32_16X16X32_FP8_FP8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.fp8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_F32_16X16X32_FP8_BF8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.bf8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_F32_16X16X32_BF8_FP8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.fp8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
-  VOP3POp.V_SWMMAC_F32_16X16X32_BF8_BF8: 'saved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.bf8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_F32_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_F32_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_F16_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f16(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.bf16(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_I32_16X16X16_IU8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(16x16) + S2.i32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_I32_16X16X16_IU4: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(16x16) + S2.i32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_F32_16X16X16_FP8_FP8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.fp8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_F32_16X16X16_FP8_BF8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.bf8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_F32_16X16X16_BF8_FP8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.fp8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_F32_16X16X16_BF8_BF8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.bf8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_WMMA_I32_16X16X32_IU4: 'D = A (16x32) * B (32x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x32) * S1.iu4(32x16) + S2.i32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_F32_16X16X32_F16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_F32_16X16X32_BF16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_F16_16X16X32_F16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(32x16, index set from S2) + D0.f16(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_BF16_16X16X32_BF16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(32x16, index set from S2) + D0.bf16(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_I32_16X16X32_IU8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(32x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_I32_16X16X32_IU4: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(32x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_I32_16X16X64_IU4: 'D = A (sparse 16x64) * B (64x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x32) * S1.iu4(64x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_F32_16X16X32_FP8_FP8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.fp8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_F32_16X16X32_FP8_BF8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.bf8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_F32_16X16X32_BF8_FP8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.fp8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
+  VOP3POp.V_SWMMAC_F32_16X16X32_BF8_BF8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.bf8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
 }
 
+VOP3SDOp_PCODE = {
+  VOP3SDOp.V_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32",
+  VOP3SDOp.V_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
+  VOP3SDOp.V_SUBREV_CO_CI_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
+  VOP3SDOp.V_DIV_SCALE_F32: "VCC = 0x0LL;\nif ((64'F(S2.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\nD0.f32 = NAN.f32\nelsif exponent(S2.f32) - exponent(S1.f32) >= 96 then\n// N/D near MAX_FLOAT_F32\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif S1.f32 == DENORM.f32 then\nD0.f32 = ldexp(S0.f32, 64)\nelsif ((1.0 / 64'F(S1.f32) == DENORM.f64) && (S2.f32 / S1.f32 == DENORM.f32)) then\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif 1.0 / 64'F(S1.f32) == DENORM.f64 then\nD0.f32 = ldexp(S0.f32, -64)\nelsif S2.f32 / S1.f32 == DENORM.f32 then\nVCC = 0x1LL;\nif S0.f32 == S2.f32 then\n// Only scale the numerator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif exponent(S2.f32) <= 23 then\n// Numerator is tiny\nD0.f32 = ldexp(S0.f32, 64)\nendif",
+  VOP3SDOp.V_DIV_SCALE_F64: 'VCC = 0x0LL;\nif ((S2.f64 == 0.0) || (S1.f64 == 0.0)) then\nD0.f64 = NAN.f64\nelsif exponent(S2.f64) - exponent(S1.f64) >= 768 then\n// N/D near MAX_FLOAT_F64\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, 128)\nelsif ((1.0 / S1.f64 == DENORM.f64) && (S2.f64 / S1.f64 == DENORM.f64)) then\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif 1.0 / S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, -128)\nelsif S2.f64 / S1.f64 == DENORM.f64 then\nVCC = 0x1LL;\nif S0.f64 == S2.f64 then\n// Only scale the numerator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif exponent(S2.f64) <= 53 then\n// Numerator is tiny\nD0.f64 = ldexp(S0.f64, 128)\nendif',
+  VOP3SDOp.V_MAD_CO_U64_U32: "{ D1.u1, D0.u64 } = 65'B(65'U(S0.u32) * 65'U(S1.u32) + 65'U(S2.u64))",
+  VOP3SDOp.V_MAD_CO_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))",
+  VOP3SDOp.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADD_CO_CI_U32.\nD0.u32 = tmp.u32",
+  VOP3SDOp.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
+  VOP3SDOp.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
+}
+
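The relocated VOP3SDOp_PCODE entries each compute a wide intermediate and scatter a carry/borrow bit into the VCC lane mask. A Python model of V_ADD_CO_U32 across a wave (the lane lists and exec_mask are hypothetical framing):

def v_add_co_u32_wave(s0: list, s1: list, exec_mask: int):
  d, vcc = [0] * len(s0), 0
  for lane in range(len(s0)):
    if not exec_mask >> lane & 1: continue
    tmp = s0[lane] + s1[lane]            # the 64'U sum in the pcode
    vcc |= (tmp >= 1 << 32) << lane      # unsigned carry-out into VCC[lane]
    d[lane] = tmp & 0xffffffff
  return d, vcc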
 VOPCOp_PCODE = {
-  VOPCOp.V_CMP_LT_F16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_F16: 'D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_F16: 'D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_F16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_F16: 'D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_O_F16: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
-  VOPCOp.V_CMP_U_F16: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
+  VOPCOp.V_CMP_O_F16: "D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
+  VOPCOp.V_CMP_U_F16: "D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
   VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NGT_F16: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NGT_F16: 'D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NEQ_F16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NLT_F16: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NEQ_F16: 'D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NLT_F16: 'D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_LT_F32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_F32: 'D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_F32: 'D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_F32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_F32: 'D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_O_F32: "Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
-  VOPCOp.V_CMP_U_F32: "VCC or a scalar register.\nD0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
+  VOPCOp.V_CMP_O_F32: "D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
+  VOPCOp.V_CMP_U_F32: "D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
   VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NGT_F32: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NGT_F32: 'D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NEQ_F32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NLT_F32: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NEQ_F32: 'D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NLT_F32: 'D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_LT_F64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_F64: 'D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_F64: 'D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_F64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_F64: 'D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_O_F64: 'Set the per-lane condition code to 1 iff the first input is orderable to the second input. Store the result into VCC\nD0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_U_F64: 'VCC or a scalar register.\nD0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_O_F64: 'D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_U_F64: 'D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NGT_F64: 'VCC or a scalar register.\nD0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NGT_F64: 'D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NEQ_F64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NLT_F64: 'Set the per-lane condition code to 1 iff the first input is not less than the second input. Store the result into VCC\nD0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NEQ_F64: 'D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NLT_F64: 'D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_LT_I16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_I16: 'D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_I16: 'D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_I16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NE_I16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_I16: 'D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NE_I16: 'D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_LT_U16: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_U16: 'D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_U16: 'D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_U16: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NE_U16: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_U16: 'D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NE_U16: 'D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_LT_I32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_I32: 'D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_I32: 'D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_I32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NE_I32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_I32: 'D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NE_I32: 'D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_LT_U32: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_U32: 'D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_U32: 'D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_U32: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NE_U32: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_U32: 'D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NE_U32: 'D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_LT_I64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_I64: 'D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_I64: 'D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_I64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NE_I64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_I64: 'D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NE_I64: 'D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_LT_U64: 'Set the per-lane condition code to 1 iff the first input is less than the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into VCC or a\nD0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_LT_U64: 'D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_EQ_U64: 'D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_GT_U64: 'Set the per-lane condition code to 1 iff the first input is greater than the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_NE_U64: 'Set the per-lane condition code to 1 iff the first input is not equal to the second input. Store the result into VCC\nD0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_GT_U64: 'D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
+  VOPCOp.V_CMP_NE_U64: 'D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
   VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
-  VOPCOp.V_CMP_CLASS_F16: "half-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
-  VOPCOp.V_CMP_CLASS_F32: "single-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
-  VOPCOp.V_CMP_CLASS_F64: "double-precision float, and set the per-lane condition code to the result. Store the result into VCC or a scalar\nS1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
+  VOPCOp.V_CMP_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
+  VOPCOp.V_CMP_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
+  VOPCOp.V_CMP_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
   VOPCOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = S0.f16 < S1.f16',
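V_CMP_CLASS_* above classify S0 into one of ten classes (bits 0-9 of S1: signaling NAN, quiet NAN, then -inf through +inf) and return the selected bit of S1. A Python rendering of the f32 classifier (signaling vs. quiet NAN is collapsed to quiet here, which the hardware distinguishes):

import math, struct

def v_cmp_class_f32(s0: float, s1: int) -> bool:
  bits = struct.unpack("<I", struct.pack("<f", s0))[0]
  sign, exp, mant = bits >> 31, bits >> 23 & 0xff, bits & 0x7fffff
  if math.isnan(s0): idx = 1               # quiet NAN (bit 0 would be signaling)
  elif exp == 255: idx = 2 if sign else 9  # +-INF
  elif exp > 0: idx = 3 if sign else 8     # +-normal value
  elif mant != 0: idx = 4 if sign else 7   # +-denormal value
  else: idx = 5 if sign else 6             # +-0.0
  return bool(s1 >> idx & 1)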
-  VOPCOp.V_CMPX_EQ_F16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f16 == S1.f16',
+  VOPCOp.V_CMPX_EQ_F16: 'EXEC.u64[laneId] = S0.f16 == S1.f16',
   VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = S0.f16 <= S1.f16',
   VOPCOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = S0.f16 > S1.f16',
   VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = S0.f16 <> S1.f16',
@@ -1019,7 +1386,7 @@ VOPCOp_PCODE = {
   VOPCOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=',
   VOPCOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=',
   VOPCOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = S0.f32 < S1.f32',
-  VOPCOp.V_CMPX_EQ_F32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f32 == S1.f32',
+  VOPCOp.V_CMPX_EQ_F32: 'EXEC.u64[laneId] = S0.f32 == S1.f32',
   VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = S0.f32 <= S1.f32',
   VOPCOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = S0.f32 > S1.f32',
   VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = S0.f32 <> S1.f32',
@@ -1033,7 +1400,7 @@ VOPCOp_PCODE = {
   VOPCOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=',
   VOPCOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=',
   VOPCOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = S0.f64 < S1.f64',
-  VOPCOp.V_CMPX_EQ_F64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.f64 == S1.f64',
+  VOPCOp.V_CMPX_EQ_F64: 'EXEC.u64[laneId] = S0.f64 == S1.f64',
   VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = S0.f64 <= S1.f64',
   VOPCOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = S0.f64 > S1.f64',
   VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = S0.f64 <> S1.f64',
@@ -1047,183 +1414,95 @@ VOPCOp_PCODE = {
   VOPCOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=',
   VOPCOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=',
   VOPCOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = S0.i16 < S1.i16',
-  VOPCOp.V_CMPX_EQ_I16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i16 == S1.i16',
+  VOPCOp.V_CMPX_EQ_I16: 'EXEC.u64[laneId] = S0.i16 == S1.i16',
   VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = S0.i16 <= S1.i16',
   VOPCOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = S0.i16 > S1.i16',
   VOPCOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = S0.i16 <> S1.i16',
   VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = S0.i16 >= S1.i16',
   VOPCOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = S0.u16 < S1.u16',
-  VOPCOp.V_CMPX_EQ_U16: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u16 == S1.u16',
+  VOPCOp.V_CMPX_EQ_U16: 'EXEC.u64[laneId] = S0.u16 == S1.u16',
   VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = S0.u16 <= S1.u16',
   VOPCOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = S0.u16 > S1.u16',
   VOPCOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = S0.u16 <> S1.u16',
   VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = S0.u16 >= S1.u16',
   VOPCOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = S0.i32 < S1.i32',
-  VOPCOp.V_CMPX_EQ_I32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i32 == S1.i32',
+  VOPCOp.V_CMPX_EQ_I32: 'EXEC.u64[laneId] = S0.i32 == S1.i32',
   VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = S0.i32 <= S1.i32',
   VOPCOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = S0.i32 > S1.i32',
   VOPCOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = S0.i32 <> S1.i32',
   VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = S0.i32 >= S1.i32',
   VOPCOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = S0.u32 < S1.u32',
-  VOPCOp.V_CMPX_EQ_U32: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u32 == S1.u32',
+  VOPCOp.V_CMPX_EQ_U32: 'EXEC.u64[laneId] = S0.u32 == S1.u32',
   VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = S0.u32 <= S1.u32',
   VOPCOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = S0.u32 > S1.u32',
   VOPCOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = S0.u32 <> S1.u32',
   VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = S0.u32 >= S1.u32',
   VOPCOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = S0.i64 < S1.i64',
-  VOPCOp.V_CMPX_EQ_I64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.i64 == S1.i64',
+  VOPCOp.V_CMPX_EQ_I64: 'EXEC.u64[laneId] = S0.i64 == S1.i64',
   VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = S0.i64 <= S1.i64',
   VOPCOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = S0.i64 > S1.i64',
   VOPCOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = S0.i64 <> S1.i64',
   VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = S0.i64 >= S1.i64',
   VOPCOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = S0.u64 < S1.u64',
-  VOPCOp.V_CMPX_EQ_U64: 'Set the per-lane condition code to 1 iff the first input is equal to the second input. Store the result into the EXEC\nEXEC.u64[laneId] = S0.u64 == S1.u64',
+  VOPCOp.V_CMPX_EQ_U64: 'EXEC.u64[laneId] = S0.u64 == S1.u64',
   VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = S0.u64 <= S1.u64',
   VOPCOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = S0.u64 > S1.u64',
   VOPCOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = S0.u64 <> S1.u64',
   VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = S0.u64 >= S1.u64',
-  VOPCOp.V_CMPX_CLASS_F16: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result",
-  VOPCOp.V_CMPX_CLASS_F32: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result",
-  VOPCOp.V_CMPX_CLASS_F64: "S1.u[0] value is a signaling NAN.\nS1.u[1] value is a quiet NAN.\nS1.u[2] value is negative infinity.\nS1.u[3] value is a negative normal value.\nS1.u[4] value is a negative denormal value.\nS1.u[5] value is negative zero.\nS1.u[6] value is positive zero.\nS1.u[7] value is a positive denormal value.\nS1.u[8] value is a positive normal value.\nS1.u[9] value is positive infinity.\ndeclare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result",
+  VOPCOp.V_CMPX_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result",
+  VOPCOp.V_CMPX_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result",
+  VOPCOp.V_CMPX_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = result",
 }
 
-DSOp_PCODE = {
-  DSOp.DS_ADD_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
-  DSOp.DS_SUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
-  DSOp.DS_RSUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp',
-  DSOp.DS_INC_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
-  DSOp.DS_DEC_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
-  DSOp.DS_MIN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
-  DSOp.DS_MAX_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
-  DSOp.DS_MIN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
-  DSOp.DS_MAX_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
-  DSOp.DS_AND_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
-  DSOp.DS_OR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
-  DSOp.DS_XOR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
-  DSOp.DS_MSKOR_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp',
-  DSOp.DS_STORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]',
-  DSOp.DS_STORE_2ADDR_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]',
-  DSOp.DS_STORE_2ADDR_STRIDE64_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]',
-  DSOp.DS_CMPSTORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ?
src : tmp;\nRETURN_DATA.b32 = tmp', - DSOp.DS_MIN_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", - DSOp.DS_MAX_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", - DSOp.DS_ADD_F32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', - DSOp.DS_STORE_B8: 'MEM[ADDR].b8 = DATA[7 : 0]', - DSOp.DS_STORE_B16: 'MEM[ADDR].b16 = DATA[15 : 0]', - DSOp.DS_ADD_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_SUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_RSUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp', - DSOp.DS_INC_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp', - DSOp.DS_DEC_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp', - DSOp.DS_MIN_RTN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - DSOp.DS_MAX_RTN_I32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp', - DSOp.DS_MIN_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp', - DSOp.DS_MAX_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp', - DSOp.DS_AND_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_OR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_XOR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_MSKOR_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp', - DSOp.DS_STOREXCHG_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp', - DSOp.DS_STOREXCHG_2ADDR_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', - DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2', - DSOp.DS_CMPSTORE_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp', - DSOp.DS_MIN_NUM_RTN_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", - DSOp.DS_MAX_NUM_RTN_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp", - DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? 
thread_in[j] : 0;\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;', - DSOp.DS_LOAD_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32', - DSOp.DS_LOAD_2ADDR_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4U].b32', - DSOp.DS_LOAD_2ADDR_STRIDE64_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256U].b32', - DSOp.DS_LOAD_I8: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i8))", - DSOp.DS_LOAD_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })", - DSOp.DS_LOAD_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))", - DSOp.DS_LOAD_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })", - DSOp.DS_CONSUME: 'addr = offset; // offset by LDS HWBASE\nrtnval = LDS(addr);\nGPR[VDST] = rtnval; // return to all valid threads', - DSOp.DS_APPEND: 'addr = offset; // offset by LDS HWBASE\nrtnval = LDS(addr);\nGPR[VDST] = rtnval; // return to all valid threads', - DSOp.DS_ADD_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_SUB_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_RSUB_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_INC_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', - DSOp.DS_DEC_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', - DSOp.DS_MIN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - DSOp.DS_MAX_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - DSOp.DS_MIN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - DSOp.DS_MAX_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? 
src : tmp;\nRETURN_DATA.u64 = tmp', - DSOp.DS_AND_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_OR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_XOR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_MSKOR_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_STORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]', - DSOp.DS_STORE_2ADDR_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]', - DSOp.DS_STORE_2ADDR_STRIDE64_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]', - DSOp.DS_CMPSTORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp', - DSOp.DS_MIN_NUM_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 < tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && sign(src.f64) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', - DSOp.DS_MAX_NUM_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 > tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && !sign(src.f64) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', - DSOp.DS_ADD_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_SUB_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_RSUB_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp', - DSOp.DS_INC_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp', - DSOp.DS_DEC_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp', - DSOp.DS_MIN_RTN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? 
src : tmp;\nRETURN_DATA.i64 = tmp', - DSOp.DS_MAX_RTN_I64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp', - DSOp.DS_MIN_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - DSOp.DS_MAX_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp', - DSOp.DS_AND_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_OR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_XOR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_MSKOR_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp', - DSOp.DS_STOREXCHG_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp', - DSOp.DS_STOREXCHG_2ADDR_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', - DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2', - DSOp.DS_CMPSTORE_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? 
src : tmp;\nRETURN_DATA.b64 = tmp', - DSOp.DS_MIN_NUM_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 < tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && sign(src.f64) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', - DSOp.DS_MAX_NUM_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 > tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && !sign(src.f64) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp', - DSOp.DS_LOAD_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32', - DSOp.DS_LOAD_2ADDR_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8U + 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8U + 4U].b32', - DSOp.DS_LOAD_2ADDR_STRIDE64_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512U + 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512U + 4U].b32', - DSOp.DS_ADD_RTN_F32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp', - DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif", - DSOp.DS_COND_SUB_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 
tmp - src : tmp;\nRETURN_DATA.u32 = tmp', - DSOp.DS_SUB_CLAMP_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", - DSOp.DS_PK_ADD_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', - DSOp.DS_PK_ADD_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', - DSOp.DS_STORE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]', - DSOp.DS_STORE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]', - DSOp.DS_LOAD_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - DSOp.DS_LOAD_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });", - DSOp.DS_LOAD_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));", - DSOp.DS_LOAD_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));", - DSOp.DS_LOAD_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;', - DSOp.DS_LOAD_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;', - DSOp.DS_COND_SUB_RTN_U32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? tmp - src : tmp;\nRETURN_DATA.u32 = tmp', - DSOp.DS_SUB_CLAMP_RTN_U32: "declare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value", - DSOp.DS_PK_ADD_RTN_F16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].f16 = src[15 : 0].f16 + tmp[15 : 0].f16;\ndst[31 : 16].f16 = src[31 : 16].f16 + tmp[31 : 16].f16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', - DSOp.DS_PK_ADD_RTN_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32', - DSOp.DS_STORE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32", - DSOp.DS_LOAD_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32", - DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", - DSOp.DS_BPERMUTE_B32: "Note that EXEC mask is applied to both VGPR read and write. 
If src_lane selects a disabled thread then zero is\n// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", - DSOp.DS_BPERMUTE_FI_B32: "if no LDS memory is allocated to the wave. It uses LDS hardware to implement an arbitrary swizzle across\n// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\ntmp[i] = VGPR[src_lane][DATA0]\nendfor;\n// Copy data into destination VGPRs. Some source\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor", - DSOp.DS_STORE_B96: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]', - DSOp.DS_STORE_B128: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]', - DSOp.DS_BVH_STACK_PUSH4_POP1_RTN_B32: "The LDS stack address is computed using values packed into ADDR and part of OFFSET0. ADDR carries the\nstack address for the lane. OFFSET0[4:0] contains stack_size[4:0] -- this value is constant for all lanes and is\ndeclare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 3 passes: push data onto stack\nfor i in 0 : 2 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 1U\nelsif DATA1[i].b32 == last_node_ptr then\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[127 : 96]) then\nRETURN_DATA[31 : 0] = DATA1[127 : 96]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;", - DSOp.DS_BVH_STACK_PUSH8_POP1_RTN_B32: "The LDS stack address is computed using values packed into ADDR and part of OFFSET0. ADDR carries the\nstack address for the lane. 
OFFSET0[4:0] contains stack_size[4:0] -- this value is constant for all lanes and is\ndeclare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 7 passes: push data onto stack\nfor i in 0 : 6 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 1U\nelsif DATA1[i].b32 == last_node_ptr then\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[255 : 224]) then\nRETURN_DATA[31 : 0] = DATA1[255 : 224]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;", - DSOp.DS_BVH_STACK_PUSH8_POP2_RTN_B64: "The LDS stack address is computed using values packed into ADDR and part of OFFSET0. ADDR carries the\nstack address for the lane. OFFSET0[4:0] contains stack_size[4:0] -- this value is constant for all lanes and is\ndeclare stack_base : 32'B;\ndeclare stack_index : 32'U;\ndeclare DATA1 : 32'B;\ndeclare last_node_ptr : 32'B;\ndeclare INVALID_NODE : 32'B;\n// main code\n{ stack_base, stack_index } = 64'B(DECODE_ADDR(ADDR, OFFSET0));\nlast_node_ptr = DATA0.b32;\n// First 7 passes: push data onto stack\nfor i in 0 : 6 do\nif DATA_VALID(DATA1[i * 32 + 31 : i * 32]) then\nMEM[stack_base.u32 + stack_index] = DATA1[i * 32 + 31 : i * 32];\nstack_index += 1U\nelsif DATA1[i].b32 == last_node_ptr then\nendif\nendfor;\n// Last pass: return data or pop\nif DATA_VALID(DATA1[255 : 224]) then\nRETURN_DATA[31 : 0] = DATA1[255 : 224]\nelse\nRETURN_DATA[31 : 0] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;\n// Attempt a second pop\nif DATA_VALID(MEM[stack_base.u32 + stack_index]) then\nRETURN_DATA[63 : 32] = MEM[stack_base.u32 + stack_index];\nMEM[stack_base.u32 + stack_index] = INVALID_NODE;\nstack_index -= 1U\nendif;", - DSOp.DS_LOAD_B96: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32', - DSOp.DS_LOAD_B128: "addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + 
ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.u32 = 
32'U({ 16'0U, MEM[addr].u16 })\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b8 = VDATA[7 : 0]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b16 = VDATA[15 : 0]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[15 : 0].b16 = MEM[addr].b16;\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].b16 = MEM[addr].b16;\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b8 = VDATA[23 : 16]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr].b16 = VDATA[31 : 16]\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\n// Mem access size depends on format\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp\ndeclare new_value : 32'U;\nold_value = MEM[ADDR].u32;\nif old_value < DATA.u32 then\nnew_value = 0U\nelse\nnew_value = old_value - DATA.u32\nendif;\nMEM[ADDR].u32 = new_value;\nRETURN_DATA.u32 = old_value\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? 
src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[ADDR].u32 = tmp >= src ? 
tmp - src : tmp;\nRETURN_DATA.u32 = tmp\ntmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp\ntmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp\naddr = CalcBufferAddr(vgpr_a.b64, sgpr_o.b64);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp", +VOPDOp_PCODE = { + VOPDOp.V_DUAL_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)', + VOPDOp.V_DUAL_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)', + VOPDOp.V_DUAL_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)', + VOPDOp.V_DUAL_MUL_F32: 'D0.f32 = S0.f32 * S1.f32', + VOPDOp.V_DUAL_ADD_F32: 'D0.f32 = S0.f32 + S1.f32', + VOPDOp.V_DUAL_SUB_F32: 'D0.f32 = S0.f32 - S1.f32', + VOPDOp.V_DUAL_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32', + VOPDOp.V_DUAL_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif", + VOPDOp.V_DUAL_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1', + VOPDOp.V_DUAL_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? 
S1.u32 : S0.u32', + VOPDOp.V_DUAL_MAX_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + VOPDOp.V_DUAL_MIN_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 < S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && sign(S0.f32) &&\n!sign(S1.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif", + VOPDOp.V_DUAL_DOT2ACC_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp', + VOPDOp.V_DUAL_DOT2ACC_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp', + VOPDOp.V_DUAL_ADD_NC_U32: 'D0.u32 = S0.u32 + S1.u32', + VOPDOp.V_DUAL_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)', + VOPDOp.V_DUAL_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)', } -PSEUDOCODE_STRINGS = { - SOP1Op: SOP1Op_PCODE, - SOP2Op: SOP2Op_PCODE, - SOPCOp: SOPCOp_PCODE, - SOPKOp: SOPKOp_PCODE, - SOPPOp: SOPPOp_PCODE, - SMEMOp: SMEMOp_PCODE, - VOP1Op: VOP1Op_PCODE, - VOP2Op: VOP2Op_PCODE, - VOP3Op: VOP3Op_PCODE, - VOP3SDOp: VOP3SDOp_PCODE, - VOP3POp: VOP3POp_PCODE, - VOPCOp: VOPCOp_PCODE, - DSOp: DSOp_PCODE, -} \ No newline at end of file +VSAMPLEOp_PCODE = { + VSAMPLEOp.IMAGE_GET_LOD: 'VDATA[0] = clampedLOD;\nVDATA[1] = rawLOD.', +} + +VSCRATCHOp_PCODE = { + VSCRATCHOp.SCRATCH_LOAD_U8: "addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })", + VSCRATCHOp.SCRATCH_LOAD_I8: "addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))", + VSCRATCHOp.SCRATCH_LOAD_U16: "addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })", + VSCRATCHOp.SCRATCH_LOAD_I16: "addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))", + VSCRATCHOp.SCRATCH_LOAD_B32: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[31 : 0] = MEM[addr].b32', + VSCRATCHOp.SCRATCH_LOAD_B64: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32', + VSCRATCHOp.SCRATCH_LOAD_B96: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32', + VSCRATCHOp.SCRATCH_LOAD_B128: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32', + VSCRATCHOp.SCRATCH_STORE_B8: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nMEM[addr].b8 = VDATA[7 : 0]', + VSCRATCHOp.SCRATCH_STORE_B16: 
'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nMEM[addr].b16 = VDATA[15 : 0]', + VSCRATCHOp.SCRATCH_STORE_B32: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nMEM[addr].b32 = VDATA[31 : 0]', + VSCRATCHOp.SCRATCH_STORE_B64: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]', + VSCRATCHOp.SCRATCH_STORE_B96: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]', + VSCRATCHOp.SCRATCH_STORE_B128: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]', + VSCRATCHOp.SCRATCH_LOAD_D16_U8: "addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.", + VSCRATCHOp.SCRATCH_LOAD_D16_I8: "addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.", + VSCRATCHOp.SCRATCH_LOAD_D16_B16: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.', + VSCRATCHOp.SCRATCH_LOAD_D16_HI_U8: "addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.", + VSCRATCHOp.SCRATCH_LOAD_D16_HI_I8: "addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.", + VSCRATCHOp.SCRATCH_LOAD_D16_HI_B16: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.', + VSCRATCHOp.SCRATCH_STORE_D16_HI_B8: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nMEM[addr].b8 = VDATA[23 : 16]', + VSCRATCHOp.SCRATCH_STORE_D16_HI_B16: 'addr = CalcScratchAddr(v_addr_off.b64, s_saddr_off.b64);\nMEM[addr].b16 = VDATA[31 : 16]', + VSCRATCHOp.SCRATCH_LOAD_BLOCK: 'for i in 0 : 31 do\n// Algorithm can skip over "holes" both in the memory block and VGPR block.\nif M0[i].u1 then\nVGPR[laneId][VDST.u32 + i.u32] = MEM[VGPR[laneId][ADDR].i32 + i * 4].b32\nendif\nendfor', + VSCRATCHOp.SCRATCH_STORE_BLOCK: 'for i in 0 : 31 do\n// Algorithm can skip over "holes" both in the memory block and VGPR block.\nif M0[i].u1 then\nMEM[VGPR[laneId][ADDR].i32 + i * 4] = VGPR[laneId][VDATA.u32 + i.u32].b32\nendif\nendfor', +} + +PSEUDOCODE_STRINGS = {DSOp: DSOp_PCODE, SMEMOp: SMEMOp_PCODE, SOP1Op: SOP1Op_PCODE, SOP2Op: SOP2Op_PCODE, SOPCOp: SOPCOp_PCODE, SOPKOp: SOPKOp_PCODE, SOPPOp: SOPPOp_PCODE, VBUFFEROp: VBUFFEROp_PCODE, VFLATOp: VFLATOp_PCODE, VGLOBALOp: VGLOBALOp_PCODE, VIMAGEOp: VIMAGEOp_PCODE, VINTERPOp: VINTERPOp_PCODE, VOP1Op: VOP1Op_PCODE, VOP2Op: VOP2Op_PCODE, VOP3Op: VOP3Op_PCODE, VOP3POp: VOP3POp_PCODE, VOP3SDOp: VOP3SDOp_PCODE, VOPCOp: VOPCOp_PCODE, VOPDOp: VOPDOp_PCODE, VSAMPLEOp: VSAMPLEOp_PCODE, VSCRATCHOp: VSCRATCHOp_PCODE} \ No newline at end of file
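For context, the PSEUDOCODE_STRINGS table above is the interface the emulator consumes: each format's op enum maps to a dict of per-op pseudocode strings, and pcode.py compiles an entry into a Python function on first use. A minimal sketch of that lookup path, using the rdna3 tables that emu.py imports below (the op chosen here is illustrative; the compile_pseudocode signature is taken from the pcode.py hunk further down):

from extra.assembly.amd.autogen.rdna3.str_pcode import PSEUDOCODE_STRINGS
from extra.assembly.amd.autogen.rdna3.ins import VOPCOp
from extra.assembly.amd.pcode import compile_pseudocode

op = VOPCOp.V_CMPX_EQ_F32
pcode = PSEUDOCODE_STRINGS[VOPCOp][op]  # e.g. 'EXEC.u64[laneId] = S0.f32 == S1.f32'
fn = compile_pseudocode(VOPCOp.__name__, op.name, pcode)  # cached per (cls, op, pseudocode) via functools.cache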
diff --git a/extra/assembly/amd/dsl.py b/extra/assembly/amd/dsl.py index 3de0c77a8e..94a91873c3 100644 --- a/extra/assembly/amd/dsl.py +++ b/extra/assembly/amd/dsl.py @@ -9,6 +9,18 @@ from extra.assembly.amd.autogen.rdna3.enum import (VOP1Op, VOP2Op, VOP3Op, VOP3S SOPCOp, SOPKOp, SOPPOp, SMEMOp, DSOp, FLATOp, MUBUFOp, MTBUFOp, MIMGOp, VINTERPOp) from extra.assembly.amd.autogen.cdna.enum import VOP1Op as CDNA_VOP1Op, VOP2Op as CDNA_VOP2Op +# Source operand encoding - constant across all AMD ISAs +class SrcEnum(IntEnum): + VCC_LO=106; VCC_HI=107; NULL=124; M0=125; EXEC_LO=126; EXEC_HI=127; ZERO=128 + DPP8=233; DPP8FI=234; SHARED_BASE=235; SHARED_LIMIT=236; PRIVATE_BASE=237; PRIVATE_LIMIT=238 + POS_HALF=240; NEG_HALF=241; POS_ONE=242; NEG_ONE=243; POS_TWO=244; NEG_TWO=245 + POS_FOUR=246; NEG_FOUR=247; INV_2PI=248; DPP16=250; VCCZ=251; EXECZ=252; SCC=253; LDS_DIRECT=254 +VCC_LO, VCC_HI, NULL, M0, EXEC_LO, EXEC_HI, ZERO = SrcEnum.VCC_LO, SrcEnum.VCC_HI, SrcEnum.NULL, SrcEnum.M0, SrcEnum.EXEC_LO, SrcEnum.EXEC_HI, SrcEnum.ZERO +DPP8FI, SHARED_BASE, SHARED_LIMIT, PRIVATE_BASE, PRIVATE_LIMIT = SrcEnum.DPP8FI, SrcEnum.SHARED_BASE, SrcEnum.SHARED_LIMIT, SrcEnum.PRIVATE_BASE, SrcEnum.PRIVATE_LIMIT +POS_HALF, NEG_HALF, POS_ONE, NEG_ONE, POS_TWO, NEG_TWO = SrcEnum.POS_HALF, SrcEnum.NEG_HALF, SrcEnum.POS_ONE, SrcEnum.NEG_ONE, SrcEnum.POS_TWO, SrcEnum.NEG_TWO +POS_FOUR, NEG_FOUR, INV_2PI, VCCZ, EXECZ, SCC, LDS_DIRECT = SrcEnum.POS_FOUR, SrcEnum.NEG_FOUR, SrcEnum.INV_2PI, SrcEnum.VCCZ, SrcEnum.EXECZ, SrcEnum.SCC, SrcEnum.LDS_DIRECT +OFF = NULL + # Common masks and bit conversion functions MASK32, MASK64, MASK128 = 0xffffffff, 0xffffffffffffffff, (1 << 128) - 1 _struct_f, _struct_I = struct.Struct("<f"), struct.Struct("<I") + cls._sz = 12 if max_bit > 63 else 8 if max_bit > 31 else 4 if 'encoding' in cls._fields and isinstance(cls.__dict__.get('encoding'), tuple): cls._encoding = cls.__dict__['encoding'] def _or_field(self, name: str, bit: int): @@ -352,6 +373,16 @@ class Inst: field_names = [n for n in self._fields if n != 'encoding'] # Map Python-friendly names to actual field names (abs_ -> abs for Python reserved word) if 'abs_' in kwargs: kwargs['abs'] = kwargs.pop('abs_') + # If more args than fields, treat extra arg as literal (for FMAAK/FMAMK style instructions) + # FMAMK has K in middle (vdst, src0, K, vsrc1), FMAAK has K at end (vdst, src0, vsrc1, K) + args = list(args) + if len(args) > len(field_names) and literal is None: + for i, a in enumerate(args): + if isinstance(a, int) and not isinstance(a, SrcEnum) and i < len(field_names) and field_names[i] in ('vsrc1',): + literal = args.pop(i) + break + else: + literal = args.pop() # fallback: last arg is literal orig_args = dict(zip(field_names, args)) | kwargs self._values.update(orig_args) self._precompute() @@ -450,7 +481,7 @@ class Inst: return result + (lit32 & MASK32).to_bytes(4, 'little') @classmethod - def _size(cls) -> int: return 4 if issubclass(cls, Inst32) else 12 if issubclass(cls, Inst96) else 8 + def _size(cls) -> int: return cls._sz def size(self) -> int: # Literal is always 4 bytes in the binary (for 64-bit ops, it's in high 32 bits) return self._size() + (4 if self._literal is not None else 0) @@ -583,6 +614,4 @@ class Inst: def is_64bit(self) -> bool: return spec_is_64bit(self.op_name) def is_dst_16(self) -> bool: return self._spec_regs[0] == 1 and is_dtype_16(self._spec_dtype[0]) -class Inst32(Inst): pass -class Inst64(Inst): pass -class Inst96(Inst): pass +
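The SrcEnum block added to dsl.py above gives every supported ISA one shared scalar-source operand encoding, and the module-level aliases let hand-written code use the names directly. A quick sanity check, with the values read straight off the table in the hunk above:

from extra.assembly.amd.dsl import SrcEnum, EXEC_LO, OFF

assert int(SrcEnum.VCC_LO) == 106 and int(EXEC_LO) == 126  # 106-127 are named scalar registers
assert int(SrcEnum.POS_ONE) == 242 and int(SrcEnum.INV_2PI) == 248  # inline float constants occupy 240-248
assert OFF is SrcEnum.NULL  # OFF aliases the NULL (124) encoding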
diff --git a/extra/assembly/amd/emu.py b/extra/assembly/amd/emu.py index 5de7a84e88..45b3245990 100644 --- a/extra/assembly/amd/emu.py +++ b/extra/assembly/amd/emu.py @@ -7,8 +7,9 @@ from extra.assembly.amd.dsl import Inst, unwrap, FLOAT_ENC, MASK32, MASK64, _f32 from extra.assembly.amd.asm import detect_format from extra.assembly.amd.pcode import compile_pseudocode from extra.assembly.amd.autogen.rdna3.str_pcode import PSEUDOCODE_STRINGS +from extra.assembly.amd.dsl import SrcEnum from extra.assembly.amd.autogen.rdna3.ins import (SOP1, SOP2, SOPC, SOPK, SOPP, SMEM, VOP1, VOP2, VOP3, VOP3SD, VOP3P, VOPC, DS, FLAT, VOPD, - SrcEnum, SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp, VOPDOp) + SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, SMEMOp, VOP1Op, VOP2Op, VOP3Op, VOP3SDOp, VOP3POp, VOPCOp, DSOp, FLATOp, GLOBALOp, SCRATCHOp, VOPDOp) WAVE_SIZE, SGPR_COUNT, VGPR_COUNT = 32, 128, 256 VCC_LO, VCC_HI, NULL, EXEC_LO, EXEC_HI, SCC = SrcEnum.VCC_LO, SrcEnum.VCC_HI, SrcEnum.NULL, SrcEnum.EXEC_LO, SrcEnum.EXEC_HI, SrcEnum.SCC diff --git a/extra/assembly/amd/pcode.py b/extra/assembly/amd/pcode.py index a64a990bc5..2e17375ed9 100644 --- a/extra/assembly/amd/pcode.py +++ b/extra/assembly/amd/pcode.py @@ -447,6 +447,27 @@ TWO_OVER_PI_1201 = Reg(0x0145f306dc9c882a53f84eafa3ea69bb81b6c52b3278872083fca2c # COMPILER: pseudocode -> Python (minimal transforms) # ═══════════════════════════════════════════════════════════════════════════════ +def _filter_pseudocode(pseudocode: str) -> str: + """Filter raw PDF pseudocode to only include actual code lines.""" + pcode_lines, in_lambda, depth = [], 0, 0 + for line in pseudocode.split('\n'): + s = line.strip() + if not s: continue + if '=>' in s or re.match(r'^[A-Z_]+\(', s): continue # Skip example lines + if '= lambda(' in s: in_lambda += 1; continue # Skip lambda definitions + if in_lambda > 0: + if s.endswith(');'): in_lambda -= 1 + continue + # Only include lines that look like pseudocode + is_code = (any(p in s for p in ['D0.', 'D1.', 'S0.', 'S1.', 'S2.', 'SCC =', 'SCC ?', 'VCC', 'EXEC', 'tmp =', 'tmp[', 'lane =', 'PC =', + 'D0[', 'D1[', 'S0[', 'S1[', 'S2[', 'MEM[', 'RETURN_DATA', 'VADDR', 'VDATA', 'VDST', 'SADDR', 'OFFSET']) or + s.startswith(('if ', 'else', 'elsif', 'endif', 'declare ', 'for ', 'endfor', '//')) or + re.match(r'^[a-z_]+\s*=', s) or re.match(r'^[a-z_]+\[', s) or (depth > 0 and '=' in s)) + if s.startswith('if '): depth += 1 + elif s.startswith('endif'): depth = max(0, depth - 1) + if is_code: pcode_lines.append(s) + return '\n'.join(pcode_lines) + def _compile_pseudocode(pseudocode: str) -> str: """Compile pseudocode to Python. Transforms are minimal - most syntax just works.""" pseudocode = re.sub(r'\bpass\b', 'pass_', pseudocode) # 'pass' is Python keyword @@ -756,9 +777,10 @@ _PCODE_GLOBALS = { @functools.cache def compile_pseudocode(cls_name: str, op_name: str, pseudocode: str): """Compile pseudocode string to executable function.
def _compile_pseudocode(pseudocode: str) -> str: """Compile pseudocode to Python. Transforms are minimal - most syntax just works.""" pseudocode = re.sub(r'\bpass\b', 'pass_', pseudocode) # 'pass' is Python keyword @@ -756,9 +777,10 @@ _PCODE_GLOBALS = { @functools.cache def compile_pseudocode(cls_name: str, op_name: str, pseudocode: str): """Compile pseudocode string to executable function. Cached for performance.""" - code = _compile_pseudocode(pseudocode) + filtered = _filter_pseudocode(pseudocode) + code = _compile_pseudocode(filtered) code = _apply_pseudocode_fixes(op_name, code) - fn_code = _generate_function(cls_name, op_name, pseudocode, code) + fn_code = _generate_function(cls_name, op_name, filtered, code) fn_name = f"_{cls_name}_{op_name}" local_ns = {} exec(fn_code, _PCODE_GLOBALS, local_ns)
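The emu.py call path is unchanged: look the raw string up in PSEUDOCODE_STRINGS and hand it to compile_pseudocode, which now filters before compiling. A sketch with illustrative argument values, under the assumption (suggested but not shown in this hunk) that the exec'd function is returned out of local_ns:

from extra.assembly.amd.pcode import compile_pseudocode
from extra.assembly.amd.autogen.rdna3.str_pcode import PSEUDOCODE_STRINGS
from extra.assembly.amd.autogen.rdna3.enum import SOP2Op

raw = PSEUDOCODE_STRINGS[SOP2Op][SOP2Op.S_ADD_U32]  # raw text as lifted from the PDF
fn = compile_pseudocode("SOP2Op", "S_ADD_U32", raw)  # filter -> compile -> fixups -> exec, cached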
diff --git a/extra/assembly/amd/pdf.py b/extra/assembly/amd/pdf.py index ce6ecd80f8..fede8e39b4 100644 --- a/extra/assembly/amd/pdf.py +++ b/extra/assembly/amd/pdf.py @@ -1,457 +1,305 @@ -# Generate AMD ISA autogen files from PDF documentation -# Combines format/enum generation (previously in dsl.py) and pseudocode compilation (previously in pcode.py) -# Usage: python -m extra.assembly.amd.pdf [--arch rdna3|rdna4|cdna|all] -import re, functools -from pathlib import Path -from concurrent.futures import ProcessPoolExecutor +# Generic PDF text extractor - no external dependencies +import re, zlib +from tinygrad.helpers import fetch, merge_dicts PDF_URLS = { "rdna3": "https://docs.amd.com/api/khub/documents/UVVZM22UN7tMUeiW_4ShTQ/content", "rdna4": "https://docs.amd.com/api/khub/documents/uQpkEvk3pv~kfAb2x~j4uw/content", - "cdna": ["https://www.amd.com/content/dam/amd/en/documents/instinct-tech-docs/instruction-set-architectures/amd-instinct-mi300-cdna3-instruction-set-architecture.pdf", - "https://www.amd.com/content/dam/amd/en/documents/instinct-tech-docs/instruction-set-architectures/amd-instinct-cdna4-instruction-set-architecture.pdf"], + "cdna": "https://www.amd.com/content/dam/amd/en/documents/instinct-tech-docs/instruction-set-architectures/amd-instinct-cdna4-instruction-set-architecture.pdf", } -# Field type mappings and ordering -FIELD_TYPES = {'SSRC0': 'SSrc', 'SSRC1': 'SSrc', 'SOFFSET': 'SSrc', 'SADDR': 'SSrc', 'SRC0': 'Src', 'SRC1': 'Src', 'SRC2': 'Src', - 'SDST': 'SGPRField', 'SBASE': 'SGPRField', 'SDATA': 'SGPRField', 'SRSRC': 'SGPRField', 'VDST': 'VGPRField', 'VSRC1': 'VGPRField', - 'VDATA': 'VGPRField', 'VADDR': 'VGPRField', 'ADDR': 'VGPRField', 'DATA': 'VGPRField', 'DATA0': 'VGPRField', 'DATA1': 'VGPRField', - 'SIMM16': 'SImm', 'OFFSET': 'Imm', 'OPX': 'VOPDOp', 'OPY': 'VOPDOp', 'SRCX0': 'Src', 'SRCY0': 'Src', - 'VSRCX1': 'VGPRField', 'VSRCY1': 'VGPRField', 'VDSTX': 'VGPRField', 'VDSTY': 'VDSTYEnc'} -FIELD_ORDER = { - 'SOP2': ['op', 'sdst', 'ssrc0', 'ssrc1'], 'SOP1': ['op', 'sdst', 'ssrc0'], 'SOPC': ['op', 'ssrc0', 'ssrc1'], - 'SOPK': ['op', 'sdst', 'simm16'], 'SOPP': ['op', 'simm16'], 'VOP1': ['op', 'vdst', 'src0'], 'VOPC': ['op', 'src0', 'vsrc1'], - 'VOP2': ['op', 'vdst', 'src0', 'vsrc1'], 'VOP3SD': ['op', 'vdst', 'sdst', 'src0', 'src1', 'src2', 'clmp'], - 'SMEM': ['op', 'sdata', 'sbase', 'soffset', 'offset', 'glc', 'dlc'], 'DS': ['op', 'vdst', 'addr', 'data0', 'data1'], - 'VOP3': ['op', 'vdst', 'src0', 'src1', 'src2', 'omod', 'neg', 'abs', 'clmp', 'opsel'], - 'VOP3P': ['op', 'vdst', 'src0', 'src1', 'src2', 'neg', 'neg_hi', 'opsel', 'opsel_hi', 'clmp'], - 'FLAT': ['op', 'vdst', 'addr', 'data', 'saddr', 'offset', 'seg', 'dlc', 'glc', 'slc'], - 'MUBUF': ['op', 'vdata', 'vaddr', 'srsrc', 'soffset', 'offset', 'offen', 'idxen', 'glc', 'dlc', 'slc', 'tfe'], - 'MTBUF': ['op', 'vdata', 'vaddr', 'srsrc', 'soffset', 'offset', 'format', 'offen', 'idxen', 'glc', 'dlc', 'slc', 'tfe'], - 'MIMG': ['op', 'vdata', 'vaddr', 'srsrc', 'ssamp', 'dmask', 'dim', 'unrm', 'dlc', 'glc', 'slc'], - 'EXP': ['en', 'target', 'vsrc0', 'vsrc1', 'vsrc2', 'vsrc3', 'done', 'row'], - 'VINTERP': ['op', 'vdst', 'src0', 'src1', 'src2', 'waitexp', 'clmp', 'opsel', 'neg'], - 'VOPD': ['opx', 'opy', 'vdstx', 'vdsty', 'srcx0', 'vsrcx1', 'srcy0', 'vsrcy1'], - 'LDSDIR': ['op', 'vdst', 'attr', 'attr_chan', 'wait_va']} -SRC_EXTRAS = {233: 'DPP8', 234: 'DPP8FI', 250: 'DPP16', 251: 'VCCZ', 252: 'EXECZ', 254: 'LDS_DIRECT'} -FLOAT_MAP = {'0.5': 'POS_HALF', '-0.5': 'NEG_HALF', '1.0': 'POS_ONE', '-1.0': 'NEG_ONE', '2.0': 'POS_TWO', '-2.0': 'NEG_TWO', - '4.0': 'POS_FOUR', '-4.0': 'NEG_FOUR', '1/(2*PI)': 'INV_2PI', '0': 'ZERO'} -INST_PATTERN = re.compile(r'^([SVD]S?_[A-Z0-9_]+|(?:FLAT|GLOBAL|SCRATCH)_[A-Z0-9_]+)\s+(\d+)\s*$', re.M) - - # ═══════════════════════════════════════════════════════════════════════════════ -# PDF PARSING WITH PAGE CACHING +# Generic PDF extraction tools # ═══════════════════════════════════════════════════════════════════════════════ -class CachedPDF: - """PDF wrapper with page text/table caching for faster repeated access.""" - def __init__(self, pdf): - self._pdf, self._text_cache, self._table_cache = pdf, {}, {} - def __len__(self): return len(self._pdf.pages) - def text(self, i): - if i not in self._text_cache: self._text_cache[i] = self._pdf.pages[i].extract_text() or '' - return self._text_cache[i] - def tables(self, i): - if i not in self._table_cache: self._table_cache[i] = [t.extract() for t in self._pdf.pages[i].find_tables()] - return self._table_cache[i] +def extract(url: str) -> list[list[tuple[float, float, str, str]]]: + """Extract positioned text from PDF. Returns list of text elements (x, y, text, font) per page.""" + data = fetch(url).read_bytes() -def _parse_bits(s: str) -> tuple[int, int] | None: - return (int(m.group(1)), int(m.group(2) or m.group(1))) if (m := re.match(r'\[(\d+)(?::(\d+))?\]', s)) else None + # Parse xref table to locate objects + xref: dict[int, int] = {} + pos = int(re.search(rb'startxref\s+(\d+)', data).group(1)) + 4 + while data[pos:pos+7] != b'trailer': + while data[pos:pos+1] in b' \r\n': pos += 1 + line_end = data.find(b'\n', pos) + start_obj, count = map(int, data[pos:line_end].split()[:2]) + pos = line_end + 1 + for i in range(count): + if data[pos+17:pos+18] == b'n' and (off := int(data[pos:pos+10])) > 0: xref[start_obj + i] = off + pos += 20 -def _parse_fields_table(table: list, fmt: str, enums: set[str]) -> list[tuple]: - fields = [] - for row in table[1:]: - if not row or not row[0]: continue - name, bits_str = row[0].split('\n')[0].strip(), (row[1] or '').split('\n')[0].strip() - if not (bits := _parse_bits(bits_str)): continue - enc_val, hi, lo = None, bits[0], bits[1] - if name == 'ENCODING' and row[2]: - desc = row[2] - # Handle shared FLAT/GLOBAL/SCRATCH table: look for format-specific encoding - fmt_key = fmt.lstrip('V').lower().capitalize() # VFLAT -> Flat, VGLOBAL -> Global - if m := re.search(rf"{fmt_key}='b([01_]+)", desc): - enc_bits = m.group(1).replace('_', '') - elif m := re.search(r"(?:'b|Must be:\s*)([01_]+)", desc): - enc_bits = m.group(1).replace('_', '') - else: - enc_bits = None - if enc_bits: - enc_val, declared_width, actual_width = int(enc_bits, 2), hi - lo + 1, len(enc_bits) - if actual_width > declared_width: lo = hi - actual_width + 1 - ftype = f"{fmt}Op" if name == 'OP' and f"{fmt}Op" in enums else FIELD_TYPES.get(name.upper()) - fields.append((name, hi, lo, enc_val, ftype)) - return fields + def get_stream(n: int) -> bytes: + obj = data[xref[n]:data.find(b'endobj', xref[n])] + raw = obj[obj.find(b'stream\n') + 7:obj.find(b'\nendstream')] + return zlib.decompress(raw) if b'/FlateDecode' in obj
else raw -def _parse_single_pdf(url: str): - """Parse a single PDF and return (formats, enums, src_enum, doc_name, instructions).""" - import pdfplumber - from tinygrad.helpers import fetch + # Find page content streams and extract text + pages = [] + for n in sorted(xref): + if b'/Type /Page' not in data[xref[n]:xref[n]+500]: continue + if not (m := re.search(rb'/Contents (\d+) 0 R', data[xref[n]:xref[n]+500])): continue + stream = get_stream(int(m.group(1))).decode('latin-1') + elements, font = [], '' + for bt in re.finditer(r'BT(.*?)ET', stream, re.S): + x, y = 0.0, 0.0 + for m in re.finditer(r'(/F[\d.]+) [\d.]+ Tf|([\d.+-]+) ([\d.+-]+) Td|[\d.+-]+ [\d.+-]+ [\d.+-]+ [\d.+-]+ ([\d.+-]+) ([\d.+-]+) Tm|<([0-9A-Fa-f]+)>.*?Tj|\[([^\]]+)\] TJ', bt.group(1)): + if m.group(1): font = m.group(1) + elif m.group(2): x, y = x + float(m.group(2)), y + float(m.group(3)) + elif m.group(4): x, y = float(m.group(4)), float(m.group(5)) + elif m.group(6) and (t := bytes.fromhex(m.group(6)).decode('latin-1')).strip(): elements.append((x, y, t, font)) + elif m.group(7) and (t := ''.join(bytes.fromhex(h).decode('latin-1') for h in re.findall(r'<([0-9A-Fa-f]+)>', m.group(7)))).strip(): elements.append((x, y, t, font)) + pages.append(sorted(elements, key=lambda e: (-e[1], e[0]))) + return pages - pdf = CachedPDF(pdfplumber.open(fetch(url))) - total_pages = len(pdf) +def extract_tables(pages: list[list[tuple[float, float, str, str]]]) -> dict[int, tuple[str, list[list[str]]]]: + """Extract numbered tables from PDF pages. Returns {table_num: (title, rows)} where rows is list of cells per row.""" + def group_by_y(texts, key=lambda y: round(y)): + by_y: dict[int, list[tuple[float, float, str]]] = {} + for x, y, t, _ in texts: + by_y.setdefault(key(y), []).append((x, y, t)) + return by_y - # Auto-detect document type - first_page = pdf.text(0) - is_cdna4, is_cdna3 = 'CDNA4' in first_page or 'CDNA 4' in first_page, 'CDNA3' in first_page or 'MI300' in first_page - is_cdna, is_rdna4 = is_cdna3 or is_cdna4, 'RDNA4' in first_page or 'RDNA 4' in first_page - is_rdna35, is_rdna3 = 'RDNA3.5' in first_page or 'RDNA 3.5' in first_page, 'RDNA3' in first_page and 'RDNA3.5' not in first_page - doc_name = "CDNA4" if is_cdna4 else "CDNA3" if is_cdna3 else "RDNA4" if is_rdna4 else "RDNA3.5" if is_rdna35 else "RDNA3" if is_rdna3 else "Unknown" + # Find all table headers by merging text on same line + table_positions = [] + for page_idx, texts in enumerate(pages): + for items in group_by_y(texts).values(): + line = ''.join(t for _, t in sorted((x, t) for x, _, t in items)) + if m := re.search(r'Table (\d+)\. 
(.+)', line): + table_positions.append((int(m.group(1)), m.group(2).strip(), page_idx, items[0][1])) + table_positions.sort(key=lambda t: (t[2], -t[3])) - # Find Microcode Formats section (for formats/enums) - microcode_start = next((i for i in range(int(total_pages * 0.2), total_pages) - if re.search(r'\d+\.\d+\.\d+\.\s+SOP2\b|Chapter \d+\.\s+Microcode Formats', pdf.text(i))), int(total_pages * 0.9)) - # Find Instructions section (for pseudocode) - instr_start = next((i for i in range(int(total_pages * 0.1), int(total_pages * 0.5)) - if re.search(r'Chapter \d+\.\s+Instructions\b', pdf.text(i))), total_pages // 3) - instr_end = next((i for start in [int(total_pages * 0.6), int(total_pages * 0.5), instr_start] - for i in range(start, min(start + 100, total_pages)) - if re.search(r'Chapter \d+\.\s+Microcode Formats', pdf.text(i))), total_pages) - - # Parse src enum from SSRC encoding table - src_enum = dict(SRC_EXTRAS) - for i in range(microcode_start, min(microcode_start + 10, total_pages)): - text = pdf.text(i) - if 'SSRC0' in text and 'VCC_LO' in text: - for m in re.finditer(r'^(\d+)\s+(\S+)', text, re.M): - val, name = int(m.group(1)), m.group(2).rstrip('.:') - if name in FLOAT_MAP: src_enum[val] = FLOAT_MAP[name] - elif re.match(r'^[A-Z][A-Z0-9_]*$', name): src_enum[val] = name + # For each table, find rows with matching X positions + result: dict[int, tuple[str, list[list[str]]]] = {} + for num, title, start_page, header_y in table_positions: + rows, col_xs = [], None + for page_idx in range(start_page, len(pages)): + page_texts = [(x, y, t) for x, y, t, _ in pages[page_idx] if 30 < y < 760 and (page_idx > start_page or y < header_y)] + for items in sorted(group_by_y([(x, y, t, '') for x, y, t in page_texts], key=lambda y: round(y / 5)).values(), key=lambda items: -items[0][1]): + xs = tuple(sorted(round(x) for x, _, _ in items)) + if col_xs is None: + if len(xs) < 2: continue # Skip single-column rows before table starts + col_xs = xs + elif len(xs) == 1 and xs[0] in col_xs: continue # Skip continuation rows at known column positions + elif not any(c in xs for c in col_xs[:2]): break # Row missing first columns = end of table + rows.append([t for _, t in sorted((x, t) for x, _, t in items)]) + else: continue break + if rows: result[num] = (title, rows) + return result - # Parse opcode tables - full_text = '\n'.join(pdf.text(i) for i in range(microcode_start, min(microcode_start + 50, total_pages))) +# ═══════════════════════════════════════════════════════════════════════════════ +# AMD specific extraction +# ═══════════════════════════════════════════════════════════════════════════════ + +def extract_enums(tables: dict[int, tuple[str, list[list[str]]]]) -> dict[str, dict[int, str]]: + """Extract all enums from tables. Returns {enum_name: {value: name}}.""" enums: dict[str, dict[int, str]] = {} - for m in re.finditer(r'Table \d+\. (\w+) Opcodes(.*?)(?=Table \d+\.|\n\d+\.\d+\.\d+\.\s+\w+\s*\nDescription|$)', full_text, re.S): - if ops := {int(x.group(1)): x.group(2) for x in re.finditer(r'(\d+)\s+([A-Z][A-Z0-9_]+)', m.group(2))}: - enums[m.group(1) + "Op"] = ops - if vopd_m := re.search(r'Table \d+\. 
VOPD Y-Opcodes\n(.*?)(?=Table \d+\.|15\.\d)', full_text, re.S): - if ops := {int(x.group(1)): x.group(2) for x in re.finditer(r'(\d+)\s+(V_DUAL_\w+)', vopd_m.group(1))}: - enums["VOPDOp"] = ops - enum_names = set(enums.keys()) + for num, (title, rows) in tables.items(): + # Opcode enums from "XXX Opcodes" tables + if m := re.match(r'(\w+) (?:Y-)?Opcodes', title): + fmt_name = 'VOPD' if 'Y-Opcodes' in title else m.group(1) + ops: dict[int, str] = {} + for row in rows: + for i in range(0, len(row) - 1, 2): + if row[i].isdigit() and re.match(r'^[A-Z][A-Z0-9_]+$', row[i + 1]): + ops[int(row[i])] = row[i + 1] + if ops: enums[fmt_name] = ops + # BufFmt from "Data Format" tables + if 'Data Format' in title: + for row in rows: + for i in range(0, len(row) - 1, 2): + if row[i].isdigit() and re.match(r'^[\dA-Z_]+$', row[i + 1]) and 'INVALID' not in row[i + 1]: + enums.setdefault('BufFmt', {})[int(row[i])] = row[i + 1] + return enums - # Parse instruction formats - def is_fields_table(t): return t and len(t) > 1 and t[0] and 'Field' in str(t[0][0] or '') - def has_encoding(fields): return any(f[0] == 'ENCODING' for f in fields) - def has_header_before_fields(text): return (pos := text.find('Field Name')) != -1 and bool(re.search(r'\d+\.\d+\.\d+\.\s+\w+\s*\n', text[:pos])) +def extract_ins(tables: dict[int, tuple[str, list[list[str]]]]) -> tuple[dict[str, list[tuple[str, int, int]]], dict[str, str]]: + """Extract formats and encodings from 'XXX Fields' tables. Returns (formats, encodings).""" + formats: dict[str, list[tuple[str, int, int]]] = {} + encodings: dict[str, str] = {} + for num, (title, rows) in tables.items(): + if not (m := re.match(r'(\w+) Fields$', title)): continue + fmt_name = m.group(1) + fields = [] + for row in rows: + if len(row) < 2: continue + if (bits := re.match(r'\[?(\d+):(\d+)\]?$', row[1])) or (bits := re.match(r'\[(\d+)\]$', row[1])): + field_name = row[0].lower() + hi, lo = int(bits.group(1)), int(bits.group(2)) if bits.lastindex >= 2 else int(bits.group(1)) + if field_name == 'encoding' and len(row) >= 3: + enc_bits = None + if "'b" in row[2]: enc_bits = row[2].split("'b")[-1].replace('_', '') + elif (enc := re.search(r':\s*([01_]+)', row[2])): enc_bits = enc.group(1).replace('_', '') + if enc_bits: + # If encoding bits exceed field width, extend field to match (AMD docs sometimes have this) + declared_width, actual_width = hi - lo + 1, len(enc_bits) + if actual_width > declared_width: lo = hi - actual_width + 1 + encodings[fmt_name] = enc_bits + fields.append((field_name, hi, lo)) + if fields: formats[fmt_name] = fields + return formats, encodings - format_headers = [] - for i in range(50): - if microcode_start + i >= total_pages: break - text = pdf.text(microcode_start + i) - for m in re.finditer(r'\d+\.\d+\.\d+\.\s+(\w+)\s*\n?Description', text): format_headers.append((m.group(1), i, m.start())) - for m in re.finditer(r'\d+\.\d+\.\d+\.\s+(\w+)\s*\n', text): - fmt_name = m.group(1) - if is_cdna and fmt_name.isupper() and len(fmt_name) >= 2: format_headers.append((fmt_name, i, m.start())) - elif m.start() > len(text) - 200 and 'Description' not in text[m.end():] and i + 1 < 50: - next_text = pdf.text(microcode_start + i + 1).lstrip() - if next_text.startswith('Description') or (next_text.startswith('"RDNA') and 'Description' in next_text[:200]): - format_headers.append((fmt_name, i, m.start())) - # RDNA4: Look for "Table X. 
Y Fields" patterns (e.g., VIMAGE, VSAMPLE, or shared FLAT/GLOBAL/SCRATCH) - for m in re.finditer(r'Table \d+\.\s+([\w,\s]+?)\s+Fields', text): - table_name = m.group(1).strip() - # Handle shared table like "FLAT, GLOBAL and SCRATCH" - if ',' in table_name or ' and ' in table_name: - for part in re.split(r',\s*|\s+and\s+', table_name): - fmt_name = 'V' + part.strip() - if fmt_name not in [h[0] for h in format_headers]: format_headers.append((fmt_name, i, m.start())) - elif table_name.startswith('V'): - if table_name not in [h[0] for h in format_headers]: format_headers.append((table_name, i, m.start())) +def extract_pcode(pages: list[list[tuple[float, float, str, str]]], enums: dict[str, dict[int, str]]) -> dict[tuple[str, int], str]: + """Extract pseudocode for instructions. Returns {(name, opcode): pseudocode}.""" + # Build lookup from instruction name to opcode + name_to_op = {name: op for ops in enums.values() for op, name in ops.items()} - formats: dict[str, list] = {} - for fmt_name, rel_idx, header_pos in format_headers: - if fmt_name in formats: continue - page_idx = microcode_start + rel_idx - text = pdf.text(page_idx) - field_pos = text.find('Field Name', header_pos) - fields = None - for offset in range(3): - if page_idx + offset >= total_pages: break - if offset > 0 and has_header_before_fields(pdf.text(page_idx + offset)): break - for t in pdf.tables(page_idx + offset) if offset > 0 or field_pos > header_pos else []: - if is_fields_table(t) and (f := _parse_fields_table(t, fmt_name, enum_names)) and has_encoding(f): fields = f; break - if fields: break - if not fields and field_pos > header_pos: - for t in pdf.tables(page_idx): - if is_fields_table(t) and (f := _parse_fields_table(t, fmt_name, enum_names)): fields = f; break - if not fields: continue - field_names = {f[0] for f in fields} - for pg_offset in range(1, 3): - if page_idx + pg_offset >= total_pages or has_header_before_fields(pdf.text(page_idx + pg_offset)): break - for t in pdf.tables(page_idx + pg_offset): - if is_fields_table(t) and (extra := _parse_fields_table(t, fmt_name, enum_names)) and not has_encoding(extra): - for ef in extra: - if ef[0] not in field_names: fields.append(ef); field_names.add(ef[0]) - break - formats[fmt_name] = fields + # First pass: find all instruction headers across all pages + all_instructions: list[tuple[int, float, str, int]] = [] # (page_idx, y, name, opcode) + for page_idx, page in enumerate(pages): + by_y: dict[int, list[tuple[float, str]]] = {} + for x, y, t, _ in page: + by_y.setdefault(round(y), []).append((x, t)) + for y, items in sorted(by_y.items(), reverse=True): + left = [(x, t) for x, t in items if 55 < x < 65] + right = [(x, t) for x, t in items if 535 < x < 550] + if left and right and left[0][1] in name_to_op and right[0][1].isdigit(): + all_instructions.append((page_idx, y, left[0][1], int(right[0][1]))) - # Fix known PDF errors (RDNA-specific SMEM bit positions) - if 'SMEM' in formats and not is_cdna: - formats['SMEM'] = [(n, 13 if n == 'DLC' else 14 if n == 'GLC' else h, 13 if n == 'DLC' else 14 if n == 'GLC' else l, e, t) - for n, h, l, e, t in formats['SMEM']] - # RDNA4: VFLAT/VGLOBAL/VSCRATCH OP field is [20:14] not [20:13] (PDF documentation error) - for fmt_name in ['VFLAT', 'VGLOBAL', 'VSCRATCH']: - if fmt_name in formats: - formats[fmt_name] = [(n, h, 14 if n == 'OP' else l, e, t) for n, h, l, e, t in formats[fmt_name]] - if doc_name in ('RDNA3', 'RDNA3.5'): - if 'SOPPOp' in enums: - for k, v in {8: 'S_WAITCNT_DEPCTR', 58: 'S_TTRACEDATA', 59: 
'S_TTRACEDATA_IMM'}.items(): - assert k not in enums['SOPPOp']; enums['SOPPOp'][k] = v - if 'SOPKOp' in enums: - for k, v in {22: 'S_SUBVECTOR_LOOP_BEGIN', 23: 'S_SUBVECTOR_LOOP_END'}.items(): - assert k not in enums['SOPKOp']; enums['SOPKOp'][k] = v - if 'SMEMOp' in enums: - for k, v in {34: 'S_ATC_PROBE', 35: 'S_ATC_PROBE_BUFFER'}.items(): - assert k not in enums['SMEMOp']; enums['SMEMOp'][k] = v - if 'DSOp' in enums: - for k, v in {24: 'DS_GWS_SEMA_RELEASE_ALL', 25: 'DS_GWS_INIT', 26: 'DS_GWS_SEMA_V', 27: 'DS_GWS_SEMA_BR', 28: 'DS_GWS_SEMA_P', 29: 'DS_GWS_BARRIER'}.items(): - assert k not in enums['DSOp']; enums['DSOp'][k] = v - if 'FLATOp' in enums: - for k, v in {40: 'GLOBAL_LOAD_ADDTID_B32', 41: 'GLOBAL_STORE_ADDTID_B32', 55: 'FLAT_ATOMIC_CSUB_U32'}.items(): - assert k not in enums['FLATOp']; enums['FLATOp'][k] = v - # CDNA MTBUF: PDF is missing the FORMAT field (bits[25:19]) which is required for tbuffer_* instructions - if is_cdna and 'MTBUF' in formats: - field_names = {f[0] for f in formats['MTBUF']} - if 'FORMAT' not in field_names: - formats['MTBUF'].append(('FORMAT', 25, 19, None, None)) - # CDNA SDWA/DPP: PDF only has modifier fields, need VOP1/VOP2 overlay for correct encoding - if is_cdna: - if 'SDWA' in formats: - formats['SDWA'] = [('ENCODING', 8, 0, 0xf9, None), ('VOP_OP', 16, 9, None, None), ('VDST', 24, 17, None, 'VGPRField'), ('VOP2_OP', 31, 25, None, None)] + \ - [f for f in formats['SDWA'] if f[0] not in ('ENCODING', 'SDST', 'SD', 'ROW_MASK')] - if 'DPP' in formats: - formats['DPP'] = [('ENCODING', 8, 0, 0xfa, None), ('VOP_OP', 16, 9, None, None), ('VDST', 24, 17, None, 'VGPRField'), ('VOP2_OP', 31, 25, None, None), - ('SRC0', 39, 32, None, 'Src'), ('DPP_CTRL', 48, 40, None, None), ('BOUND_CTRL', 51, 51, None, None), ('SRC0_NEG', 52, 52, None, None), ('SRC0_ABS', 53, 53, None, None), - ('SRC1_NEG', 54, 54, None, None), ('SRC1_ABS', 55, 55, None, None), ('BANK_MASK', 59, 56, None, None), ('ROW_MASK', 63, 60, None, None)] - - # Extract pseudocode for instructions - all_text = '\n'.join(pdf.text(i) for i in range(instr_start, instr_end)) - matches = list(INST_PATTERN.finditer(all_text)) - raw_pseudocode: dict[tuple[str, int], str] = {} - for i, match in enumerate(matches): - name, opcode = match.group(1), int(match.group(2)) - start, end = match.end(), matches[i + 1].start() if i + 1 < len(matches) else match.end() + 2000 - snippet = all_text[start:end].strip() - if pseudocode := _extract_pseudocode(snippet): raw_pseudocode[(name, opcode)] = pseudocode - - # Extract unified buffer format table (RDNA only, for MTBUF format field) - buf_fmt = {} - if not is_cdna: - for i in range(total_pages): - for t in pdf.tables(i): - if t and len(t) > 2 and t[0] and '#' in str(t[0][0]) and 'Format' in str(t[0]): - for row in t[1:]: - for j in range(0, len(row) - 1, 3): # table has 3-column groups: #, Format, (empty) - if row[j] and row[j].isdigit() and row[j+1] and re.match(r'^[\d_]+_(UNORM|SNORM|USCALED|SSCALED|UINT|SINT|FLOAT)$', row[j+1]): - buf_fmt[int(row[j])] = row[j+1] - if buf_fmt: break - if buf_fmt: break - - return {"formats": formats, "enums": enums, "src_enum": src_enum, "doc_name": doc_name, "pseudocode": raw_pseudocode, "is_cdna": is_cdna, "buf_fmt": buf_fmt} - -def _extract_pseudocode(text: str) -> str | None: - """Extract pseudocode from an instruction description snippet.""" - lines, result, depth, in_lambda = text.split('\n'), [], 0, 0 - for line in lines: - s = line.strip() - if not s or re.match(r'^\d+ of \d+$', s) or re.match(r'^\d+\.\d+\..*Instructions', s): 
continue - if s.startswith(('Notes', 'Functional examples', '•', '-')): break # Stop at notes/bullets - if s.startswith(('"RDNA', 'AMD ', 'CDNA')): continue - if '•' in s or '–' in s: continue # Skip lines with bullets/dashes - if '= lambda(' in s: in_lambda += 1; continue - if in_lambda > 0: - if s.endswith(');'): in_lambda -= 1 - continue - if s.startswith('if '): depth += 1 - elif s.startswith('endif'): depth = max(0, depth - 1) - if s.endswith('.') and not any(p in s for p in ['D0', 'D1', 'S0', 'S1', 'S2', 'SCC', 'VCC', 'tmp', '=']): continue - if re.match(r'^[a-z].*\.$', s) and '=' not in s: continue - is_code = (any(p in s for p in ['D0.', 'D1.', 'S0.', 'S1.', 'S2.', 'SCC =', 'SCC ?', 'VCC', 'EXEC', 'tmp =', 'tmp[', 'lane =', 'PC =', - 'D0[', 'D1[', 'S0[', 'S1[', 'S2[', 'MEM[', 'RETURN_DATA', - 'VADDR', 'VDATA', 'VDST', 'SADDR', 'OFFSET']) or - s.startswith(('if ', 'else', 'elsif', 'endif', 'declare ', 'for ', 'endfor', '//')) or - re.match(r'^[a-z_]+\s*=', s) or re.match(r'^[a-z_]+\[', s) or (depth > 0 and '=' in s)) - if is_code: result.append(s) - return '\n'.join(result) if result else None - -def _merge_results(results: list[dict]) -> dict: - """Merge multiple PDF parse results into a superset.""" - merged = {"formats": {}, "enums": {}, "src_enum": dict(SRC_EXTRAS), "doc_names": [], "pseudocode": {}, "is_cdna": False, "buf_fmt": {}} - for r in results: - merged["doc_names"].append(r["doc_name"]) - merged["is_cdna"] = merged["is_cdna"] or r["is_cdna"] - for val, name in r["src_enum"].items(): - if val in merged["src_enum"]: assert merged["src_enum"][val] == name - else: merged["src_enum"][val] = name - for enum_name, ops in r["enums"].items(): - if enum_name not in merged["enums"]: merged["enums"][enum_name] = {} - for val, name in ops.items(): - if val in merged["enums"][enum_name]: assert merged["enums"][enum_name][val] == name - else: merged["enums"][enum_name][val] = name - for fmt_name, fields in r["formats"].items(): - if fmt_name not in merged["formats"]: merged["formats"][fmt_name] = list(fields) - else: - existing = {f[0]: (f[1], f[2]) for f in merged["formats"][fmt_name]} - for f in fields: - if f[0] in existing: assert existing[f[0]] == (f[1], f[2]) - else: merged["formats"][fmt_name].append(f) - for key, pc in r["pseudocode"].items(): - if key not in merged["pseudocode"]: merged["pseudocode"][key] = pc - for val, name in r.get("buf_fmt", {}).items(): - if val not in merged["buf_fmt"]: merged["buf_fmt"][val] = name - return merged + # Second pass: extract pseudocode between consecutive instructions + pcode: dict[tuple[str, int], str] = {} + for i, (page_idx, y, name, opcode) in enumerate(all_instructions): + # Get end boundary from next instruction + if i + 1 < len(all_instructions): + next_page, next_y = all_instructions[i + 1][0], all_instructions[i + 1][1] + else: + next_page, next_y = page_idx, 0 + # Collect F6 text from current position to next instruction + lines = [] + for p in range(page_idx, next_page + 1): + start_y = y if p == page_idx else 800 + end_y = next_y if p == next_page else 0 + lines.extend((p, y2, t) for x, y2, t, f in pages[p] if f in ('/F6.0', '/F7.0') and end_y < y2 < start_y) + if lines: + # Sort by page first, then by y descending within each page (higher y = earlier text in PDF) + pcode_lines = [t.replace('Ê', '').strip() for _, _, t in sorted(lines, key=lambda x: (x[0], -x[1]))] + if pcode_lines: pcode[(name, opcode)] = '\n'.join(pcode_lines) + return pcode # ═══════════════════════════════════════════════════════════════════════════════ -# 
CODE GENERATION +# Write autogen files # ═══════════════════════════════════════════════════════════════════════════════ -def _generate_enum_py(enums, src_enum, doc_name, buf_fmt=None) -> str: - """Generate enum.py content (just enums, no dsl.py dependency).""" - def enum_lines(name, items): return [f"class {name}(IntEnum):"] + [f" {n} = {v}" for v, n in sorted(items.items())] + [""] - lines = [f"# autogenerated from AMD {doc_name} ISA PDF by pdf.py - do not edit", "from enum import IntEnum", ""] - lines += enum_lines("SrcEnum", src_enum) + sum([enum_lines(n, ops) for n, ops in sorted(enums.items())], []) - if buf_fmt: lines += enum_lines("BufFmt", {v: f"BUF_FMT_{n}" for v, n in buf_fmt.items() if 1 <= v <= 63}) - return '\n'.join(lines) - -def _generate_ins_py(formats, enums, src_enum, doc_name) -> str: - """Generate ins.py content (instruction formats and helpers, imports dsl.py and enum.py).""" - def field_key(f, order): return order.index(f[0].lower()) if f[0].lower() in order else 1000 - lines = [f"# autogenerated from AMD {doc_name} ISA PDF by pdf.py - do not edit", - "# ruff: noqa: F401,F403", "from typing import Annotated", - "from extra.assembly.amd.dsl import bits, BitField, Inst32, Inst64, Inst96, SGPR, VGPR, TTMP as TTMP, s as s, v as v, ttmp as ttmp, SSrc, Src, SImm, Imm, VDSTYEnc, SGPRField, VGPRField", - "from extra.assembly.amd.autogen.{arch}.enum import *", - "import functools", ""] - format_defaults = {'VOP3P': {'opsel_hi': 3, 'opsel_hi2': 1}} - lines.append("# instruction formats") - # MIMG has optional NSA (Non-Sequential Address) fields that extend beyond 64 bits, but base encoding is 64-bit - inst64_override = {'MIMG'} - for fmt_name, fields in sorted(formats.items()): - max_bit = max(f[1] for f in fields) - if fmt_name in inst64_override: base = "Inst64" - else: base = "Inst96" if max_bit > 63 else "Inst64" if max_bit > 31 or fmt_name == 'VOP3SD' else "Inst32" - order = FIELD_ORDER.get(fmt_name, []) - lines.append(f"class {fmt_name}({base}):") - if enc := next((f for f in fields if f[0] == 'ENCODING'), None): - lines.append(f" encoding = bits[{enc[1]}:{enc[2]}] == 0b{enc[3]:b}" if enc[1] != enc[2] else f" encoding = bits[{enc[1]}] == {enc[3]}") - if defaults := format_defaults.get(fmt_name): lines.append(f" _defaults = {defaults}") - for name, hi, lo, _, ftype in sorted([f for f in fields if f[0] != 'ENCODING'], key=lambda f: field_key(f, order)): - ann = f":Annotated[BitField, {ftype}]" if ftype and ftype.endswith('Op') else f":{ftype}" if ftype else "" - lines.append(f" {name.lower()}{ann} = bits[{hi}]" if hi == lo else f" {name.lower()}{ann} = bits[{hi}:{lo}]") +def write_enums(enums: dict[str, dict[int, str]], arch: str, path: str): + """Write enum.py file from extracted enums.""" + lines = ["# autogenerated from AMD ISA PDF by pdf.py - do not edit", "from enum import IntEnum", ""] + for name, values in sorted(enums.items()): + suffix = "Op" if name not in ('Src', 'BufFmt') else ("Enum" if name == 'Src' else "") + prefix = "BUF_FMT_" if name == 'BufFmt' else "" + lines.append(f"class {name}{suffix}(IntEnum):") + for val, member in sorted(values.items()): + lines.append(f" {prefix}{member} = {val}") lines.append("") + with open(path, "w") as f: + f.write("\n".join(lines)) + +def write_ins(formats: dict[str, list[tuple[str, int, int]]], encodings: dict[str, str], enums: dict[str, dict[int, str]], arch: str, path: str): + """Write ins.py file from extracted formats and enums.""" + # Field types and ordering + def field_type(name, fmt): + if name == 'op' and fmt in 
enums: return f'Annotated[BitField, {fmt}Op]' + if name in ('opx', 'opy'): return 'Annotated[BitField, VOPDOp]' + if name == 'vdsty': return 'VDSTYEnc' + if name in ('vdst', 'vsrc1', 'vaddr', 'vdata', 'data', 'data0', 'data1', 'addr', 'vsrc0', 'vsrc2', 'vsrc3'): return 'VGPRField' + if name in ('sdst', 'sbase', 'sdata', 'srsrc', 'ssamp'): return 'SGPRField' + if name.startswith('ssrc') or name in ('saddr', 'soffset'): return 'SSrc' + if name in ('src0', 'srcx0', 'srcy0') or name.startswith('src') and name[3:].isdigit(): return 'Src' + if name.startswith('simm'): return 'SImm' + if name == 'offset' or name.startswith('imm'): return 'Imm' + return None + field_priority = ['encoding', 'op', 'opx', 'opy', 'vdst', 'vdstx', 'vdsty', 'sdst', 'vdata', 'sdata', 'addr', 'vaddr', 'data', 'data0', 'data1', + 'src0', 'srcx0', 'srcy0', 'vsrc0', 'ssrc0', 'src1', 'vsrc1', 'vsrcx1', 'vsrcy1', 'ssrc1', 'src2', 'vsrc2', 'src3', 'vsrc3', + 'saddr', 'sbase', 'srsrc', 'ssamp', 'soffset', 'offset', 'simm16', 'en', 'target', 'attr', 'attr_chan', + 'omod', 'neg', 'neg_hi', 'abs', 'clmp', 'opsel', 'opsel_hi', 'waitexp', 'wait_va', + 'dmask', 'dim', 'seg', 'format', 'offen', 'idxen', 'glc', 'dlc', 'slc', 'tfe', 'unrm', 'done', 'row'] + def sort_fields(fields): + order = {name: i for i, name in enumerate(field_priority)} + return sorted(fields, key=lambda f: (order.get(f[0], 1000), f[2])) + + # Generate format classes + lines = ["# autogenerated from AMD ISA PDF by pdf.py - do not edit", "# ruff: noqa: F401,F403", + "from typing import Annotated", + "from extra.assembly.amd.dsl import *", + f"from extra.assembly.amd.autogen.{arch}.enum import *", "import functools", ""] + for fmt_name, fields in sorted(formats.items()): + lines.append(f"class {fmt_name}(Inst):") + for name, hi, lo in sort_fields(fields): + bits_str = f"bits[{hi}:{lo}]" if hi != lo else f"bits[{hi}]" + if name == 'encoding' and fmt_name in encodings: lines.append(f" encoding = {bits_str} == 0b{encodings[fmt_name]}") + else: + ftype = field_type(name, fmt_name) + lines.append(f" {name}{f':{ftype}' if ftype else ''} = {bits_str}") + lines.append("") + + # Generate instruction helpers lines.append("# instruction helpers") - for cls_name, ops in sorted(enums.items()): - fmt = cls_name[:-2] - for op_val, name in sorted(ops.items()): - seg = {"GLOBAL": ", seg=2", "SCRATCH": ", seg=1"}.get(fmt, "") - tgt = {"GLOBAL": "FLAT, GLOBALOp", "SCRATCH": "FLAT, SCRATCHOp"}.get(fmt, f"{fmt}, {cls_name}") - if fmt in formats or fmt in ("GLOBAL", "SCRATCH"): - suffix = "_e32" if fmt in ("VOP1", "VOP2", "VOPC") else "_e64" if fmt == "VOP3" and op_val < 512 else "" - if name in ('V_FMAMK_F32', 'V_FMAMK_F16'): - lines.append(f"def {name.lower()}{suffix}(vdst, src0, K, vsrc1): return {fmt}({cls_name}.{name}, vdst, src0, vsrc1, literal=K)") - elif name in ('V_FMAAK_F32', 'V_FMAAK_F16'): - lines.append(f"def {name.lower()}{suffix}(vdst, src0, vsrc1, K): return {fmt}({cls_name}.{name}, vdst, src0, vsrc1, literal=K)") - else: lines.append(f"{name.lower()}{suffix} = functools.partial({tgt}.{name}{seg})") - src_names = {name for _, name in src_enum.items()} - lines += [""] + [f"{name} = SrcEnum.{name}" for _, name in sorted(src_enum.items()) if name not in {'DPP8', 'DPP16'}] - if "NULL" in src_names: lines.append("OFF = NULL\n") - return '\n'.join(lines) + for fmt_name, ops in sorted(enums.items()): + seg = {"GLOBAL": ", seg=2", "SCRATCH": ", seg=1"}.get(fmt_name, "") + tgt = {"GLOBAL": "FLAT, GLOBALOp", "SCRATCH": "FLAT, SCRATCHOp"}.get(fmt_name, f"{fmt_name}, {fmt_name}Op") + 
suffix = "_e32" if fmt_name in ("VOP1", "VOP2", "VOPC") else "_e64" if fmt_name == "VOP3" and len(ops) > 0 else "" + if fmt_name in formats or fmt_name in ("GLOBAL", "SCRATCH"): + for op_val, name in sorted(ops.items()): + fn_suffix = suffix if fmt_name != "VOP3" or op_val < 512 else "" + lines.append(f"{name.lower()}{fn_suffix} = functools.partial({tgt}.{name}{seg})") -def _generate_str_pcode_py(enums, pseudocode, arch) -> str: - """Generate str_pcode.py content (raw pseudocode strings).""" - # Get op enums for this arch (import from .ins which re-exports from .enum) - import importlib - autogen = importlib.import_module(f"extra.assembly.amd.autogen.{arch}.ins") - OP_ENUMS = [getattr(autogen, name) for name in ['SOP1Op', 'SOP2Op', 'SOPCOp', 'SOPKOp', 'SOPPOp', 'SMEMOp', 'VOP1Op', 'VOP2Op', 'VOP3Op', 'VOP3SDOp', 'VOP3POp', 'VOPCOp', 'VOP3AOp', 'VOP3BOp', 'DSOp', 'FLATOp', 'GLOBALOp', 'SCRATCHOp'] if hasattr(autogen, name)] + with open(path, "w") as f: + f.write("\n".join(lines)) - # Build defined ops mapping - defined_ops: dict[tuple, list] = {} - for enum_cls in OP_ENUMS: - for op in enum_cls: - if op.name.startswith(('S_', 'V_', 'DS_', 'FLAT_', 'GLOBAL_', 'SCRATCH_')): defined_ops.setdefault((op.name, op.value), []).append((enum_cls, op)) - - enum_names = [e.__name__ for e in OP_ENUMS] - instructions: dict = {cls: {} for cls in OP_ENUMS} - for key, pc in pseudocode.items(): - if key in defined_ops: - for enum_cls, enum_val in defined_ops[key]: instructions[enum_cls][enum_val] = pc - - # Build string dictionaries for each enum - lines = [f'''# autogenerated by pdf.py - do not edit -# to regenerate: python -m extra.assembly.amd.pdf --arch {arch} -# ruff: noqa: E501 -from extra.assembly.amd.autogen.{arch}.enum import {", ".join(enum_names)} -'''] - all_dict_entries: dict = {} - for enum_cls in OP_ENUMS: - cls_name = enum_cls.__name__ - if not instructions.get(enum_cls): continue - dict_entries = [(op, repr(pc)) for op, pc in instructions[enum_cls].items()] - if dict_entries: - all_dict_entries[enum_cls] = dict_entries - lines.append(f'{cls_name}_PCODE = {{') - for op, escaped in dict_entries: lines.append(f" {cls_name}.{op.name}: {escaped},") - lines.append('}\n') - - lines.append('PSEUDOCODE_STRINGS = {') - for enum_cls in OP_ENUMS: - if all_dict_entries.get(enum_cls): lines.append(f' {enum_cls.__name__}: {enum_cls.__name__}_PCODE,') - lines.append('}') - return '\n'.join(lines) - -# ═══════════════════════════════════════════════════════════════════════════════ -# MAIN GENERATION -# ═══════════════════════════════════════════════════════════════════════════════ - -def generate_arch(arch: str) -> dict: - """Generate enum.py, ins.py and str_pcode.py for a single architecture.""" - urls = PDF_URLS[arch] - if isinstance(urls, str): urls = [urls] - - print(f"\n{'='*60}\nGenerating {arch}...") - print(f"Parsing {len(urls)} PDF(s)...") - results = [_parse_single_pdf(url) for url in urls] - merged = _merge_results(results) if len(results) > 1 else results[0] - doc_name = "+".join(merged["doc_names"]) if len(results) > 1 else merged["doc_name"] - - base_path = Path(f"extra/assembly/amd/autogen/{arch}") - base_path.mkdir(parents=True, exist_ok=True) - (base_path / "__init__.py").touch() - - # Write enum.py (enums only, no dsl.py dependency) - enum_path = base_path / "enum.py" - enum_content = _generate_enum_py(merged["enums"], merged["src_enum"], doc_name, merged.get("buf_fmt")) - enum_path.write_text(enum_content) - buf_fmt_count = len([v for v in merged.get("buf_fmt", {}) if 1 <= v <= 63]) - 
print(f"Generated {enum_path}: SrcEnum ({len(merged['src_enum'])}) + {len(merged['enums'])} enums" + (f" + BufFmt ({buf_fmt_count})" if buf_fmt_count else "")) - - # Write ins.py (instruction formats and helpers, imports dsl.py and enum.py) - ins_path = base_path / "ins.py" - ins_content = _generate_ins_py(merged["formats"], merged["enums"], merged["src_enum"], doc_name).replace("{arch}", arch) - ins_path.write_text(ins_content) - print(f"Generated {ins_path}: {len(merged['formats'])} formats") - - # Write str_pcode.py (needs enum.py to exist first for imports) - pcode_path = base_path / "str_pcode.py" - pcode_content = _generate_str_pcode_py(merged["enums"], merged["pseudocode"], arch) - pcode_path.write_text(pcode_content) - print(f"Generated {pcode_path}: {len(merged['pseudocode'])} instructions") - - return merged - -def _generate_arch_wrapper(arch: str): - """Wrapper for multiprocessing - returns arch name for ordering.""" - generate_arch(arch) - return arch - -def generate_all(): - """Generate all architectures in parallel.""" - with ProcessPoolExecutor() as executor: - list(executor.map(_generate_arch_wrapper, PDF_URLS.keys())) +def write_pcode(pcode: dict[tuple[str, int], str], enums: dict[str, dict[int, str]], arch: str, path: str): + """Write str_pcode.py file from extracted pseudocode.""" + # Group pseudocode by enum class + by_enum: dict[str, list[tuple[str, int, str]]] = {} + for fmt_name, ops in enums.items(): + for opcode, name in ops.items(): + if (name, opcode) in pcode: by_enum.setdefault(f"{fmt_name}Op", []).append((name, opcode, pcode[(name, opcode)])) + # Generate file + enum_names = sorted(by_enum.keys()) + lines = [f"# autogenerated by pdf.py - do not edit", f"# to regenerate: python -m extra.assembly.amd.pdf", + "# ruff: noqa: E501", f"from extra.assembly.amd.autogen.{arch}.enum import {', '.join(enum_names)}", ""] + for enum_name in enum_names: + lines.append(f"{enum_name}_PCODE = {{") + for name, opcode, code in sorted(by_enum[enum_name], key=lambda x: x[1]): + lines.append(f" {enum_name}.{name}: {code!r},") + lines.append("}\n") + lines.append(f"PSEUDOCODE_STRINGS = {{{', '.join(f'{e}: {e}_PCODE' for e in enum_names)}}}") + with open(path, "w") as f: + f.write("\n".join(lines)) if __name__ == "__main__": - import argparse - parser = argparse.ArgumentParser(description="Generate AMD ISA autogen files from PDF documentation") - parser.add_argument("--arch", choices=list(PDF_URLS.keys()) + ["all"], default="rdna3") - args = parser.parse_args() - if args.arch == "all": generate_all() - else: generate_arch(args.arch) + import pathlib + for arch, url in PDF_URLS.items(): + print(f"Processing {arch}...") + pages = extract(url) + tables = extract_tables(pages) + enums = extract_enums(tables) + formats, encodings = extract_ins(tables) + pcode = extract_pcode(pages, enums) + # Fix known PDF errors + if arch == 'rdna3': + fixes = {'SOPP': {8: 'S_WAITCNT_DEPCTR', 58: 'S_TTRACEDATA', 59: 'S_TTRACEDATA_IMM'}, + 'SOPK': {22: 'S_SUBVECTOR_LOOP_BEGIN', 23: 'S_SUBVECTOR_LOOP_END'}, + 'SMEM': {34: 'S_ATC_PROBE', 35: 'S_ATC_PROBE_BUFFER'}, + 'DS': {24: 'DS_GWS_SEMA_RELEASE_ALL', 25: 'DS_GWS_INIT', 26: 'DS_GWS_SEMA_V', 27: 'DS_GWS_SEMA_BR', 28: 'DS_GWS_SEMA_P', 29: 'DS_GWS_BARRIER'}, + 'FLAT': {40: 'GLOBAL_LOAD_ADDTID_B32', 41: 'GLOBAL_STORE_ADDTID_B32', 55: 'FLAT_ATOMIC_CSUB_U32'}} + for fmt, ops in fixes.items(): enums[fmt] = merge_dicts([enums[fmt], ops]) + if arch in ('rdna3', 'rdna4'): + # RDNA SMEM: PDF says DLC=[14], GLC=[16] but hardware uses DLC=[13], GLC=[14] + if 'SMEM' in 
formats: + formats['SMEM'] = [(n, 13 if n == 'dlc' else 14 if n == 'glc' else h, 13 if n == 'dlc' else 14 if n == 'glc' else l) + for n, h, l in formats['SMEM']] + if arch == 'cdna': + # CDNA DS: PDF is missing the GDS field (bit 16) + if 'DS' in formats and not any(n == 'gds' for n, _, _ in formats['DS']): + formats['DS'].append(('gds', 16, 16)) + # CDNA DPP/SDWA: PDF only documents modifier fields (bits[63:32]), need to add VOP overlay fields (bits[31:0]) + vop_overlay = [('encoding', 8, 0), ('vop_op', 16, 9), ('vdst', 24, 17), ('vop2_op', 31, 25)] + if 'DPP' in formats and not any(n == 'encoding' for n, _, _ in formats['DPP']): + formats['DPP'] = vop_overlay + [('bc' if n == 'bound_ctrl' else n, h, l) for n, h, l in formats['DPP']] + encodings['DPP'] = '11111010' + if 'SDWA' in formats and not any(n == 'encoding' for n, _, _ in formats['SDWA']): + formats['SDWA'] = vop_overlay + [(n, h, l) for n, h, l in formats['SDWA']] + encodings['SDWA'] = '11111001' + base = pathlib.Path(__file__).parent / "autogen" / arch + write_enums(enums, arch, base / "enum.py") + write_ins(formats, encodings, enums, arch, base / "ins.py") + write_pcode(pcode, enums, arch, base / "str_pcode.py") + print(f" {len(tables)} tables, {len(pcode)} pcode -> {base}") diff --git a/extra/assembly/amd/test/hw/test_vop3.py b/extra/assembly/amd/test/hw/test_vop3.py index a44d5f60a5..7befb0e3a8 100644 --- a/extra/assembly/amd/test/hw/test_vop3.py +++ b/extra/assembly/amd/test/hw/test_vop3.py @@ -1615,7 +1615,7 @@ class TestCarryBorrow(unittest.TestCase): v_mov_b32_e32(v[2], s[2]), v_mov_b32_e32(v[3], s[3]), v_add_co_u32(v[4], VCC, v[0], v[2]), - v_add_co_ci_u32_e32(v[5], VCC, v[1], v[3]), + v_add_co_ci_u32_e32(v[5], v[1], v[3]), ] st = run_program(instructions, n_lanes=1) self.assertEqual(st.vgpr[0][4], 0x00000000, "lo result") diff --git a/extra/assembly/amd/test/hw/test_vop3p.py b/extra/assembly/amd/test/hw/test_vop3p.py index 5935b5abc2..92a70933bf 100644 --- a/extra/assembly/amd/test/hw/test_vop3p.py +++ b/extra/assembly/amd/test/hw/test_vop3p.py @@ -271,7 +271,7 @@ class TestVOP3P(unittest.TestCase): s_mov_b32(s[1], 0x44004200), # hi=4.0, lo=3.0 v_mov_b32_e32(v[0], s[0]), v_mov_b32_e32(v[1], s[1]), - v_pk_add_f16(v[2], v[0], v[1]), + v_pk_add_f16(v[2], v[0], v[1], opsel_hi=3, opsel_hi2=1), ] st = run_program(instructions, n_lanes=1) result = st.vgpr[0][2] @@ -288,7 +288,7 @@ class TestVOP3P(unittest.TestCase): s_mov_b32(s[1], 0x45004400), # hi=5.0, lo=4.0 v_mov_b32_e32(v[0], s[0]), v_mov_b32_e32(v[1], s[1]), - v_pk_mul_f16(v[2], v[0], v[1]), + v_pk_mul_f16(v[2], v[0], v[1], opsel_hi=3, opsel_hi2=1), ] st = run_program(instructions, n_lanes=1) result = st.vgpr[0][2] @@ -307,7 +307,7 @@ class TestVOP3P(unittest.TestCase): v_mov_b32_e32(v[0], s[0]), v_mov_b32_e32(v[1], s[1]), v_mov_b32_e32(v[2], s[2]), - v_pk_fma_f16(v[3], v[0], v[1], v[2]), + v_pk_fma_f16(v[3], v[0], v[1], v[2], opsel_hi=3, opsel_hi2=1), ] st = run_program(instructions, n_lanes=1) result = st.vgpr[0][3] @@ -325,7 +325,7 @@ class TestVOP3P(unittest.TestCase): instructions = [ s_mov_b32(s[0], 0x3c003c00), # packed f16: hi=1.0, lo=1.0 v_mov_b32_e32(v[0], s[0]), - v_pk_add_f16(v[1], v[0], SrcEnum.POS_ONE), # Add inline constant 1.0 + v_pk_add_f16(v[1], v[0], SrcEnum.POS_ONE, opsel_hi=3, opsel_hi2=1), # Add inline constant 1.0 ] st = run_program(instructions, n_lanes=1) result = st.vgpr[0][1] @@ -345,7 +345,7 @@ class TestVOP3P(unittest.TestCase): instructions = [ s_mov_b32(s[0], 0x44004200), # packed f16: hi=4.0, lo=3.0 v_mov_b32_e32(v[0], s[0]), - 
v_pk_mul_f16(v[1], v[0], SrcEnum.POS_TWO), + v_pk_mul_f16(v[1], v[0], SrcEnum.POS_TWO, opsel_hi=3, opsel_hi2=1), ] st = run_program(instructions, n_lanes=1) result = st.vgpr[0][1] @@ -486,12 +486,12 @@ class TestSpecialOps(unittest.TestCase): """V_DOT2_F32_BF16 computes dot product of bf16 pairs.""" # bf16 1.0 = 0x3f80, bf16 2.0 = 0x4000 instructions = [ - s_mov_b32(s[0], 0x3f803f80), # packed bf16: 1.0, 1.0 - s_mov_b32(s[1], 0x40003f80), # packed bf16: 2.0, 1.0 + s_mov_b32(s[0], 0x3f803f80), # packed bf16: lo=1.0, hi=1.0 + s_mov_b32(s[1], 0x40003f80), # packed bf16: lo=1.0, hi=2.0 v_mov_b32_e32(v[0], s[0]), v_mov_b32_e32(v[1], s[1]), v_mov_b32_e32(v[2], 0), - v_dot2_f32_bf16(v[3], v[0], v[1], v[2]), + v_dot2_f32_bf16(v[3], v[0], v[1], v[2], opsel_hi=3, opsel_hi2=1), ] st = run_program(instructions, n_lanes=1) # 1.0*1.0 + 1.0*2.0 + 0 = 3.0 @@ -510,7 +510,7 @@ class TestPackedMixedSigns(unittest.TestCase): s_mov_b32(s[1], 0x3c003c00), # packed: hi=1.0, lo=1.0 v_mov_b32_e32(v[0], s[0]), v_mov_b32_e32(v[1], s[1]), - v_pk_add_f16(v[2], v[0], v[1]), + v_pk_add_f16(v[2], v[0], v[1], opsel_hi=3, opsel_hi2=1), ] st = run_program(instructions, n_lanes=1) result = st.vgpr[0][2] diff --git a/extra/assembly/amd/test/test_pdf.py b/extra/assembly/amd/test/test_pdf.py new file mode 100644 index 0000000000..4b16981bca --- /dev/null +++ b/extra/assembly/amd/test/test_pdf.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +"""Test pdf.py PDF parser and enum generation.""" +import unittest, tempfile, importlib.util +from extra.assembly.amd.pdf import extract, extract_tables, extract_enums, write_enums, PDF_URLS + +EXPECTED = { + "rdna3": {"pages": 655, "tables": 115, "sop2_ops": 67, "sop2_first": "S_ADD_U32"}, + "rdna4": {"pages": 711, "tables": 125, "sop2_ops": 74, "sop2_first": "S_ADD_CO_U32"}, + "cdna": {"pages": 610, "tables": 104, "sop2_ops": 52, "sop2_first": "S_ADD_U32"}, +} + +class TestPDF2(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.data = {name: extract(url) for name, url in PDF_URLS.items()} + cls.tables = {name: extract_tables(pages) for name, pages in cls.data.items()} + cls.enums = {name: extract_enums(cls.tables[name]) for name in PDF_URLS} + + def test_page_counts(self): + for name, exp in EXPECTED.items(): + self.assertEqual(len(self.data[name]), exp["pages"], f"{name} page count") + + def test_table_counts(self): + for name, exp in EXPECTED.items(): + self.assertEqual(len(self.tables[name]), exp["tables"], f"{name} table count") + + def test_tables_sequential(self): + for name in PDF_URLS: + nums = sorted(self.tables[name].keys()) + missing = set(range(1, max(nums) + 1)) - set(nums) + self.assertEqual(missing, set(), f"{name} missing tables: {missing}") + + def test_generate_enums(self): + for name, exp in EXPECTED.items(): + with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: + write_enums(self.enums[name], name, f.name) + spec = importlib.util.spec_from_file_location("enum", f.name) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + # Check SOP2Op + self.assertTrue(hasattr(mod, 'SOP2Op'), f"{name} missing SOP2Op") + self.assertEqual(len(mod.SOP2Op), exp["sop2_ops"], f"{name} SOP2Op count") + self.assertEqual(mod.SOP2Op(0).name, exp["sop2_first"], f"{name} SOP2Op first") + # Check all enums have at least 2 ops + for attr in dir(mod): + if attr.endswith('Op'): + self.assertGreaterEqual(len(getattr(mod, attr)), 2, f"{name} {attr} has too few ops") + +if __name__ == "__main__": + unittest.main() diff --git 
a/extra/assembly/amd/test/test_pdf_parser.py b/extra/assembly/amd/test/test_pdf_parser.py deleted file mode 100644 index 91e648352d..0000000000 --- a/extra/assembly/amd/test/test_pdf_parser.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env python3 -"""Test that PDF parser correctly extracts format fields.""" -import unittest, os -from extra.assembly.amd.autogen.rdna3.ins import SOP1, SOP2, SOPK, SOPP, VOP1, VOP2, VOP3SD, VOPC, FLAT, VOPD, SOP1Op, SOP2Op, VOP1Op, VOP3Op - -# expected formats with key fields and whether they have ENCODING -EXPECTED_FORMATS = { - 'DPP16': (['SRC0', 'DPP_CTRL', 'BANK_MASK', 'ROW_MASK'], False), - 'DPP8': (['SRC0', 'LANE_SEL0', 'LANE_SEL7'], False), - 'DS': (['OP', 'ADDR', 'DATA0', 'DATA1', 'VDST'], True), - 'EXP': (['EN', 'TARGET', 'VSRC0', 'VSRC1', 'VSRC2', 'VSRC3'], True), - 'FLAT': (['OP', 'ADDR', 'DATA', 'SADDR', 'VDST', 'OFFSET'], True), - 'LDSDIR': (['VDST', 'OP'], True), - 'MIMG': (['OP', 'VADDR', 'VDATA', 'SRSRC', 'DMASK'], True), - 'MTBUF': (['OP', 'VADDR', 'VDATA', 'SRSRC', 'FORMAT', 'SOFFSET'], True), - 'MUBUF': (['OP', 'VADDR', 'VDATA', 'SRSRC', 'SOFFSET'], True), - 'SMEM': (['OP', 'SBASE', 'SDATA', 'OFFSET', 'SOFFSET'], True), - 'SOP1': (['OP', 'SDST', 'SSRC0'], True), - 'SOP2': (['OP', 'SDST', 'SSRC0', 'SSRC1'], True), - 'SOPC': (['OP', 'SSRC0', 'SSRC1'], True), - 'SOPK': (['OP', 'SDST', 'SIMM16'], True), - 'SOPP': (['OP', 'SIMM16'], True), - 'VINTERP': (['OP', 'VDST', 'SRC0', 'SRC1', 'SRC2'], True), - 'VOP1': (['OP', 'VDST', 'SRC0'], True), - 'VOP2': (['OP', 'VDST', 'SRC0', 'VSRC1'], True), - 'VOP3': (['OP', 'VDST', 'SRC0', 'SRC1', 'SRC2'], True), - 'VOP3P': (['OP', 'VDST', 'SRC0', 'SRC1', 'SRC2'], True), - 'VOP3SD': (['OP', 'VDST', 'SDST', 'SRC0', 'SRC1', 'SRC2'], True), - 'VOPC': (['OP', 'SRC0', 'VSRC1'], True), - 'VOPD': (['OPX', 'OPY', 'SRCX0', 'SRCY0', 'VDSTX', 'VDSTY'], True), -} - -# Skip PDF parsing tests by default - only run with TEST_PDF_PARSER=1 -# These are slow (~5s) and only needed when regenerating autogen/ -@unittest.skipUnless(os.environ.get("TEST_PDF_PARSER"), "set TEST_PDF_PARSER=1 to run PDF parser tests") -class TestPDFParserGenerate(unittest.TestCase): - """Test the PDF parser by running generate() and checking results.""" - - def test_pdf_parser(self): - """Single test that validates all PDF parser outputs.""" - from extra.assembly.amd.dsl import generate - result = generate() - - # test_all_formats_present - for fmt_name in EXPECTED_FORMATS: - self.assertIn(fmt_name, result["formats"], f"missing format {fmt_name}") - - # test_format_count - self.assertEqual(len(result["formats"]), 23) - - # test_no_duplicate_fields - for fmt_name, fields in result["formats"].items(): - field_names = [f[0] for f in fields] - self.assertEqual(len(field_names), len(set(field_names)), f"{fmt_name} has duplicate fields: {field_names}") - - # test_expected_fields - for fmt_name, (expected_fields, has_encoding) in EXPECTED_FORMATS.items(): - fields = {f[0] for f in result["formats"].get(fmt_name, [])} - for field in expected_fields: - self.assertIn(field, fields, f"{fmt_name} missing {field}") - if has_encoding: - self.assertIn("ENCODING", fields, f"{fmt_name} should have ENCODING") - else: - self.assertNotIn("ENCODING", fields, f"{fmt_name} should not have ENCODING") - - # test_vopd_no_dpp16_fields - vopd_fields = {f[0] for f in result["formats"].get("VOPD", [])} - for field in ['DPP_CTRL', 'BANK_MASK', 'ROW_MASK']: - self.assertNotIn(field, vopd_fields, f"VOPD should not have {field}") - - # test_dpp16_no_vinterp_fields - dpp16_fields = 
{f[0] for f in result["formats"].get("DPP16", [])} - for field in ['VDST', 'WAITEXP']: - self.assertNotIn(field, dpp16_fields, f"DPP16 should not have {field}") - - # test_sopp_no_smem_fields - sopp_fields = {f[0] for f in result["formats"].get("SOPP", [])} - for field in ['SBASE', 'SDATA']: - self.assertNotIn(field, sopp_fields, f"SOPP should not have {field}") - -class TestPDFParser(unittest.TestCase): - """Verify format classes have correct fields from PDF parsing.""" - - def test_sop2_fields(self): - """SOP2 should have op, sdst, ssrc0, ssrc1.""" - for field in ['op', 'sdst', 'ssrc0', 'ssrc1']: - self.assertIn(field, SOP2._fields) - self.assertEqual(SOP2._fields['op'].hi, 29) - self.assertEqual(SOP2._fields['op'].lo, 23) - - def test_sop1_fields(self): - """SOP1 should have op, sdst, ssrc0 with correct bit positions.""" - for field in ['op', 'sdst', 'ssrc0']: - self.assertIn(field, SOP1._fields) - self.assertNotIn('simm16', SOP1._fields) - self.assertEqual(SOP1._fields['ssrc0'].hi, 7) - self.assertEqual(SOP1._fields['ssrc0'].lo, 0) - assert SOP1._encoding is not None - self.assertEqual(SOP1._encoding[0].hi, 31) - self.assertEqual(SOP1._encoding[1], 0b101111101) - - def test_vop3sd_fields(self): - """VOP3SD should have all fields including src0/src1/src2 from page continuation.""" - for field in ['op', 'vdst', 'sdst', 'src0', 'src1', 'src2']: - self.assertIn(field, VOP3SD._fields) - self.assertEqual(VOP3SD._fields['src0'].hi, 40) - self.assertEqual(VOP3SD._fields['src0'].lo, 32) - self.assertEqual(VOP3SD._size(), 8) - - def test_flat_has_vdst(self): - """FLAT should have vdst field.""" - self.assertIn('vdst', FLAT._fields) - self.assertEqual(FLAT._fields['vdst'].hi, 63) - self.assertEqual(FLAT._fields['vdst'].lo, 56) - - def test_encoding_bits(self): - """Verify encoding bits are correct for major formats.""" - tests = [ - (SOP2, 31, 30, 0b10), - (SOPK, 31, 28, 0b1011), - (SOPP, 31, 23, 0b101111111), - (VOP1, 31, 25, 0b0111111), - (VOP2, 31, 31, 0b0), - (VOPC, 31, 25, 0b0111110), - (FLAT, 31, 26, 0b110111), - ] - for cls, hi, lo, val in tests: - assert cls._encoding is not None - self.assertEqual(cls._encoding[0].hi, hi, f"{cls.__name__} encoding hi") - self.assertEqual(cls._encoding[0].lo, lo, f"{cls.__name__} encoding lo") - self.assertEqual(cls._encoding[1], val, f"{cls.__name__} encoding val") - - def test_opcode_enums_exist(self): - """Verify opcode enums are generated with expected counts.""" - self.assertGreater(len(SOP1Op), 50) - self.assertGreater(len(SOP2Op), 50) - self.assertGreater(len(VOP1Op), 50) - self.assertGreater(len(VOP3Op), 200) - - def test_vopd_no_duplicate_fields(self): - """VOPD should not have duplicate fields and should not include DPP16 fields.""" - field_names = list(VOPD._fields.keys()) - self.assertEqual(len(field_names), len(set(field_names))) - for field in ['srcx0', 'srcy0', 'opx', 'opy']: - self.assertIn(field, VOPD._fields) - for field in ['dpp_ctrl', 'bank_mask', 'row_mask']: - self.assertNotIn(field, VOPD._fields) - -if __name__ == "__main__": - unittest.main()
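For reference, the new pipeline is small enough to drive by hand. A sketch of what the __main__ block above does for a single architecture (the per-arch PDF-error fixes and CDNA overlay patches are omitted here; paths and signatures are the ones defined in pdf.py):

import pathlib
from extra.assembly.amd.pdf import (PDF_URLS, extract, extract_tables, extract_enums,
                                    extract_ins, extract_pcode, write_enums, write_ins, write_pcode)

arch = "rdna3"
pages = extract(PDF_URLS[arch])            # per page: (x, y, text, font) elements, top-to-bottom
tables = extract_tables(pages)             # {table_num: (title, rows)}
enums = extract_enums(tables)              # e.g. enums["SOP2"][0] == 'S_ADD_U32' on RDNA3
formats, encodings = extract_ins(tables)   # field layouts and encoding bits from the "... Fields" tables
pcode = extract_pcode(pages, enums)        # {(name, opcode): raw pseudocode text}

base = pathlib.Path("extra/assembly/amd/autogen") / arch
write_enums(enums, arch, base / "enum.py")
write_ins(formats, encodings, enums, arch, base / "ins.py")
write_pcode(pcode, enums, arch, base / "str_pcode.py")

Running python -m extra.assembly.amd.pdf regenerates all three architectures with the fixes applied; test_pdf.py above pins the expected page, table, and opcode counts for each document.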