# autogenerated by pdf.py - do not edit
# to regenerate: python -m extra.assembly.amd.pdf
# ruff: noqa: E501
from extra.assembly.amd.autogen.cdna.enum import DSOp, FLATOp, GLOBALOp, MTBUFOp, MUBUFOp, SCRATCHOp, SMEMOp, SOP1Op, SOP2Op, SOPCOp, SOPKOp, SOPPOp, VOP1Op, VOP2Op, VOP3AOp, VOP3BOp, VOP3POp, VOPCOp
DSOp_PCODE = {
DSOp.DS_ADD_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
DSOp.DS_SUB_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
DSOp.DS_RSUB_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp',
DSOp.DS_INC_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
DSOp.DS_DEC_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
DSOp.DS_MIN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
DSOp.DS_MAX_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
DSOp.DS_MIN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
DSOp.DS_MAX_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
DSOp.DS_AND_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
DSOp.DS_OR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
DSOp.DS_XOR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
DSOp.DS_MSKOR_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp',
DSOp.DS_WRITE_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]',
DSOp.DS_WRITE2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]',
DSOp.DS_WRITE2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]',
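# ST64 note (illustrative reading of the entries above): WRITE2/READ2 offsets
# are scaled by the element size (4 bytes for B32, 8 for B64), and the ST64
# variants stride by a further 64 elements, which is where the 256U (4*64) and
# 512U (8*64) byte multipliers come from. E.g. DS_WRITE2ST64_B32 with
# OFFSET0=1, OFFSET1=2 stores DATA at addr+256 and DATA2 at addr+512.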
DSOp.DS_CMPST_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp',
DSOp.DS_CMPST_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MIN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MAX_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_ADD_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp',
DSOp.DS_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
DSOp.DS_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
DSOp.DS_WRITE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32",
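# Address-formation note (illustrative reading of the pcode above):
# { OFFSET1, OFFSET0 } concatenates the two 8-bit immediates into a 16-bit
# base, e.g. OFFSET1=0x01, OFFSET0=0x20 gives 0x0120; M0[15:0] is added and
# each lane then indexes by laneID*4.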
DSOp.DS_WRITE_B8: 'MEM[ADDR].b8 = DATA[7 : 0]',
DSOp.DS_WRITE_B16: 'MEM[ADDR].b16 = DATA[15 : 0]',
DSOp.DS_ADD_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
DSOp.DS_SUB_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
DSOp.DS_RSUB_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 = DATA.u32 - MEM[addr].u32;\nRETURN_DATA.u32 = tmp',
DSOp.DS_INC_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
DSOp.DS_DEC_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
DSOp.DS_MIN_RTN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
DSOp.DS_MAX_RTN_I32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
DSOp.DS_MIN_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
DSOp.DS_MAX_RTN_U32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
DSOp.DS_AND_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
DSOp.DS_OR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
DSOp.DS_XOR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
DSOp.DS_MSKOR_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = ((tmp & ~DATA.b32) | DATA2.b32);\nRETURN_DATA.b32 = tmp',
DSOp.DS_WRXCHG_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
DSOp.DS_WRXCHG2_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2',
DSOp.DS_WRXCHG2ST64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2',
DSOp.DS_CMPST_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp',
DSOp.DS_CMPST_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MIN_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MAX_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_WRAP_RTN_B32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = tmp >= DATA.u32 ? tmp - DATA.u32 : tmp + DATA2.u32;\nRETURN_DATA = tmp',
DSOp.DS_ADD_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp',
DSOp.DS_READ_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32',
DSOp.DS_READ2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4U].b32',
DSOp.DS_READ2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256U].b32',
DSOp.DS_READ_I8: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i8))",
DSOp.DS_READ_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })",
DSOp.DS_READ_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))",
DSOp.DS_READ_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })",
DSOp.DS_SWIZZLE_B32: 'Offset[4:0]: Swizzle\n0x00: {1,11,9,19,5,15,d,1d,3,13,b,1b,7,17,f,1f,2,12,a,1a,6,16,e,1e,4,14,c,1c,8,18,10,20}\n0x10: {1,9,5,d,3,b,7,f,2,a,6,e,4,c,8,10,11,19,15,1d,13,1b,17,1f,12,1a,16,1e,14,1c,18,20}\n0x1f: No swizzle\nOffset[9:5]: Swizzle\n0x01, mask=0, rotate left:\n{2,3,4,5,6,7,8,9,a,b,c,d,e,f,10,11,12,13,14,15,16,17,18,19,1a,1b,1c,1d,1e,1f,20,1}\n0x01, mask=0, rotate right:\n{20,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f,10,11,12,13,14,15,16,17,18,19,1a,1b,1c,1d,1e,1f}\n0x01, mask=1, rotate left:\n{1,4,3,6,5,8,7,a,9,c,b,e,d,10,f,12,11,14,13,16,15,18,17,1a,19,1c,1b,1e,1d,20,1f,2}\n0x01, mask=1, rotate right:\n{1f,2,1,4,3,6,5,8,7,a,9,c,b,e,d,10,f,12,11,14,13,16,15,18,17,1a,19,1c,1b,1e,1d,20}\noffset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\n}\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset[15]) {\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n}\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n}',
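# Worked example for the offset[15]==0 "limited data sharing" mode above
# (values illustrative): offset=0x041F encodes and_mask=0x1F, or_mask=0x00,
# xor_mask=0x01, so j = (i & 0x1F) ^ 1 and lanes swap pairwise (0<->1, 2<->3,
# ...); bit 5 of the lane id passes through, so each 32-lane half swizzles
# independently.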
DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\ndst_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\nsrc_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
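# DS_PERMUTE_B32 is a scatter (lane i pushes DATA0 to lane ADDR/4) while
# DS_BPERMUTE_B32 is a gather (lane i pulls DATA0 from lane ADDR/4); both
# divide the byte address by 4 and wrap modulo 64. A runnable sketch of both
# mappings follows this table.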
DSOp.DS_ADD_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
DSOp.DS_SUB_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp',
DSOp.DS_RSUB_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp',
DSOp.DS_INC_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
DSOp.DS_DEC_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
DSOp.DS_MIN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
DSOp.DS_MAX_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
DSOp.DS_MIN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
DSOp.DS_MAX_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
DSOp.DS_AND_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp',
DSOp.DS_OR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
DSOp.DS_XOR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
DSOp.DS_MSKOR_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp',
DSOp.DS_WRITE_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]',
DSOp.DS_WRITE2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]',
DSOp.DS_WRITE2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]',
DSOp.DS_CMPST_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp',
DSOp.DS_CMPST_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MIN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MAX_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_WRITE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]',
DSOp.DS_WRITE_B16_D16_HI: 'MEM[ADDR].b16 = DATA[31 : 16]',
DSOp.DS_READ_U8_D16: "RETURN_DATA[15 : 0].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// RETURN_DATA[31:16] is preserved.",
DSOp.DS_READ_U8_D16_HI: "RETURN_DATA[31 : 16].u16 = 16'U({ 8'0U, MEM[ADDR].u8 });\n// RETURN_DATA[15:0] is preserved.",
DSOp.DS_READ_I8_D16: "RETURN_DATA[15 : 0].i16 = 16'I(signext(MEM[ADDR].i8));\n// RETURN_DATA[31:16] is preserved.",
DSOp.DS_READ_I8_D16_HI: "RETURN_DATA[31 : 16].i16 = 16'I(signext(MEM[ADDR].i8));\n// RETURN_DATA[15:0] is preserved.",
DSOp.DS_READ_U16_D16: 'RETURN_DATA[15 : 0].u16 = MEM[ADDR].u16;\n// RETURN_DATA[31:16] is preserved.',
DSOp.DS_READ_U16_D16_HI: 'RETURN_DATA[31 : 16].u16 = MEM[ADDR].u16;\n// RETURN_DATA[15:0] is preserved.',
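# All *_D16 / *_D16_HI reads merge into one half of the 32-bit destination and
# preserve the other half; a small bit-level sketch of that merge follows
# GLOBALOp_PCODE below.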
DSOp.DS_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp',
DSOp.DS_ADD_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
DSOp.DS_SUB_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp',
DSOp.DS_RSUB_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 = DATA.u64 - MEM[addr].u64;\nRETURN_DATA.u64 = tmp',
DSOp.DS_INC_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
DSOp.DS_DEC_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
DSOp.DS_MIN_RTN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
DSOp.DS_MAX_RTN_I64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
DSOp.DS_MIN_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
DSOp.DS_MAX_RTN_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
DSOp.DS_AND_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp',
DSOp.DS_OR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
DSOp.DS_XOR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
DSOp.DS_MSKOR_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = ((tmp & ~DATA.b64) | DATA2.b64);\nRETURN_DATA.b64 = tmp',
DSOp.DS_WRXCHG_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
DSOp.DS_WRXCHG2_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2',
DSOp.DS_WRXCHG2ST64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2',
DSOp.DS_CMPST_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp',
DSOp.DS_CMPST_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MIN_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MAX_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_READ_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32',
DSOp.DS_READ2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 8U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 8U + 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 8U + 4U].b32',
DSOp.DS_READ2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 512U].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET0.u32 * 512U + 4U].b32;\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET1.u32 * 512U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET1.u32 * 512U + 4U].b32',
DSOp.DS_ADD_RTN_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp',
DSOp.DS_CONDXCHG32_RTN_B64: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\ndeclare RETURN_DATA : 32'U[2];\nADDR = S0.u32;\nDATA = S1.u64;\noffset = { OFFSET1, OFFSET0 };\nADDR0 = ((ADDR + offset.u32) & 0xfff8U);\nADDR1 = ADDR0 + 4U;\nRETURN_DATA[0] = LDS[ADDR0].u32;\nif DATA[31] then\nLDS[ADDR0] = { 1'0, DATA[30 : 0] }\nendif;\nRETURN_DATA[1] = LDS[ADDR1].u32;\nif DATA[63] then\nLDS[ADDR1] = { 1'0, DATA[62 : 32] }\nendif",
DSOp.DS_READ_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32",
DSOp.DS_PK_ADD_RTN_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
DSOp.DS_PK_ADD_RTN_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
DSOp.DS_WRITE_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]',
DSOp.DS_WRITE_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]',
DSOp.DS_READ_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32',
DSOp.DS_READ_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32',
}
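
# A minimal runnable sketch (not part of the generated table) of the
# DS_PERMUTE_B32 / DS_BPERMUTE_B32 pcode above: `data` stands in for the DATA0
# VGPR across a 64-lane wave, `addr` for the per-lane byte address and
# `exec_mask` for EXEC (the 16-bit OFFSET immediate is elided). The helper
# names are hypothetical, not tinygrad APIs.
def _ds_permute(data: list, addr: list, exec_mask: int) -> list:
  tmp = [0] * 64
  for i in range(64):  # scatter: each active lane pushes its value out
    if exec_mask >> i & 1:
      tmp[addr[i] // 4 % 64] = data[i]  # highest-numbered writer wins
  # None marks destinations of inactive lanes, which keep their old value
  return [tmp[i] if exec_mask >> i & 1 else None for i in range(64)]

def _ds_bpermute(data: list, addr: list, exec_mask: int) -> list:
  out = [None] * 64
  for i in range(64):  # gather: each active lane pulls a value in
    if exec_mask >> i & 1:
      src = addr[i] // 4 % 64
      out[i] = data[src] if exec_mask >> src & 1 else 0  # inactive source reads 0
  return out

if __name__ == "__main__":
  full_exec = (1 << 64) - 1
  # rotate the wave left by one lane via a backward permute
  assert _ds_bpermute(list(range(64)), [(i + 1) % 64 * 4 for i in range(64)], full_exec) == [(i + 1) % 64 for i in range(64)]
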
FLATOp_PCODE = {
FLATOp.FLAT_LOAD_UBYTE: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })",
FLATOp.FLAT_LOAD_SBYTE: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))",
FLATOp.FLAT_LOAD_USHORT: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })",
FLATOp.FLAT_LOAD_SSHORT: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))",
FLATOp.FLAT_LOAD_DWORD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32',
FLATOp.FLAT_LOAD_DWORDX2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32',
FLATOp.FLAT_LOAD_DWORDX3: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32',
FLATOp.FLAT_LOAD_DWORDX4: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32',
FLATOp.FLAT_STORE_BYTE: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]',
FLATOp.FLAT_STORE_BYTE_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]',
FLATOp.FLAT_STORE_SHORT: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]',
FLATOp.FLAT_STORE_SHORT_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]',
FLATOp.FLAT_STORE_DWORD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]',
FLATOp.FLAT_STORE_DWORDX2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]',
FLATOp.FLAT_STORE_DWORDX3: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]',
FLATOp.FLAT_STORE_DWORDX4: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]',
FLATOp.FLAT_LOAD_UBYTE_D16: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.",
FLATOp.FLAT_LOAD_UBYTE_D16_HI: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.",
FLATOp.FLAT_LOAD_SBYTE_D16: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.",
FLATOp.FLAT_LOAD_SBYTE_D16_HI: "addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.",
FLATOp.FLAT_LOAD_SHORT_D16: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.',
FLATOp.FLAT_LOAD_SHORT_D16_HI: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.',
FLATOp.FLAT_ATOMIC_SWAP: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
FLATOp.FLAT_ATOMIC_CMPSWAP: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
FLATOp.FLAT_ATOMIC_ADD: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
FLATOp.FLAT_ATOMIC_SUB: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
FLATOp.FLAT_ATOMIC_SMIN: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
FLATOp.FLAT_ATOMIC_UMIN: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
FLATOp.FLAT_ATOMIC_SMAX: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
FLATOp.FLAT_ATOMIC_UMAX: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
FLATOp.FLAT_ATOMIC_AND: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
FLATOp.FLAT_ATOMIC_OR: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
FLATOp.FLAT_ATOMIC_XOR: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
FLATOp.FLAT_ATOMIC_INC: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
FLATOp.FLAT_ATOMIC_DEC: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
FLATOp.FLAT_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp',
FLATOp.FLAT_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
FLATOp.FLAT_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp',
FLATOp.FLAT_ATOMIC_MIN_F64: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
FLATOp.FLAT_ATOMIC_MAX_F64: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
FLATOp.FLAT_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
FLATOp.FLAT_ATOMIC_SWAP_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
FLATOp.FLAT_ATOMIC_CMPSWAP_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
FLATOp.FLAT_ATOMIC_ADD_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
FLATOp.FLAT_ATOMIC_SUB_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp',
FLATOp.FLAT_ATOMIC_SMIN_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
FLATOp.FLAT_ATOMIC_UMIN_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
FLATOp.FLAT_ATOMIC_SMAX_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
FLATOp.FLAT_ATOMIC_UMAX_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
FLATOp.FLAT_ATOMIC_AND_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp',
FLATOp.FLAT_ATOMIC_OR_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
FLATOp.FLAT_ATOMIC_XOR_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
FLATOp.FLAT_ATOMIC_INC_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
FLATOp.FLAT_ATOMIC_DEC_X2: 'addr = CalcFlatAddr(ADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
}
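
# Illustrative sketch of the read-modify-write shape shared by the
# FLAT_ATOMIC_* entries above: each op loads the old value, stores a new one
# and returns the old. `mem` stands in for MEM keyed by address; the function
# names are hypothetical, not tinygrad APIs.
def _atomic_cmpswap(mem: dict, addr: int, src: int, cmp: int) -> int:
  tmp = mem.get(addr, 0)
  mem[addr] = src if tmp == cmp else tmp
  return tmp  # RETURN_DATA is always the pre-op value

def _atomic_inc(mem: dict, addr: int, src: int) -> int:
  tmp = mem.get(addr, 0)
  mem[addr] = 0 if tmp >= src else tmp + 1  # wraps to 0 past src, unlike ADD
  return tmp

if __name__ == "__main__":
  m = {0: 5}
  assert _atomic_cmpswap(m, 0, src=9, cmp=5) == 5 and m[0] == 9
  assert _atomic_inc(m, 0, src=9) == 9 and m[0] == 0
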
GLOBALOp_PCODE = {
GLOBALOp.GLOBAL_LOAD_UBYTE: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })",
GLOBALOp.GLOBAL_LOAD_SBYTE: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))",
GLOBALOp.GLOBAL_LOAD_USHORT: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })",
GLOBALOp.GLOBAL_LOAD_SSHORT: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))",
GLOBALOp.GLOBAL_LOAD_DWORD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32',
GLOBALOp.GLOBAL_LOAD_DWORDX2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32',
GLOBALOp.GLOBAL_LOAD_DWORDX3: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32',
GLOBALOp.GLOBAL_LOAD_DWORDX4: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32',
GLOBALOp.GLOBAL_STORE_BYTE: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]',
GLOBALOp.GLOBAL_STORE_BYTE_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]',
GLOBALOp.GLOBAL_STORE_SHORT: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]',
GLOBALOp.GLOBAL_STORE_SHORT_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]',
GLOBALOp.GLOBAL_STORE_DWORD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]',
GLOBALOp.GLOBAL_STORE_DWORDX2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]',
GLOBALOp.GLOBAL_STORE_DWORDX3: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]',
GLOBALOp.GLOBAL_STORE_DWORDX4: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]',
GLOBALOp.GLOBAL_LOAD_UBYTE_D16: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.",
GLOBALOp.GLOBAL_LOAD_UBYTE_D16_HI: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.",
GLOBALOp.GLOBAL_LOAD_SBYTE_D16: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.",
GLOBALOp.GLOBAL_LOAD_SBYTE_D16_HI: "addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.",
GLOBALOp.GLOBAL_LOAD_SHORT_D16: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.',
GLOBALOp.GLOBAL_LOAD_SHORT_D16_HI: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.',
GLOBALOp.GLOBAL_ATOMIC_SWAP: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_CMPSWAP: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_ADD: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_SUB: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_SMIN: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_UMIN: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_SMAX: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_UMAX: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_AND: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_OR: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_XOR: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_INC: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_DEC: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
GLOBALOp.GLOBAL_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp',
GLOBALOp.GLOBAL_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
GLOBALOp.GLOBAL_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp',
GLOBALOp.GLOBAL_ATOMIC_MIN_F64: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_MAX_F64: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
GLOBALOp.GLOBAL_ATOMIC_SWAP_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_CMPSWAP_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_ADD_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_SUB_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_SMIN_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_UMIN_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_SMAX_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_UMAX_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_AND_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_OR_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_XOR_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_INC_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
GLOBALOp.GLOBAL_ATOMIC_DEC_X2: 'addr = CalcGlobalAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
}
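
# Illustrative sketch of the *_D16 / *_D16_HI load merge used throughout the
# tables above: the 16 loaded bits replace one half of the 32-bit destination
# and the other half is preserved. Helper names are hypothetical.
def _load_d16(vdata: int, mem16: int) -> int:
  return (vdata & 0xffff0000) | (mem16 & 0xffff)  # write low half

def _load_d16_hi(vdata: int, mem16: int) -> int:
  return (vdata & 0x0000ffff) | ((mem16 & 0xffff) << 16)  # write high half

assert _load_d16(0xdeadbeef, 0x1234) == 0xdead1234
assert _load_d16_hi(0xdeadbeef, 0x1234) == 0x1234beef
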
MTBUFOp_PCODE = {
MTBUFOp.TBUFFER_LOAD_FORMAT_X: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format',
MTBUFOp.TBUFFER_LOAD_FORMAT_XY: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])',
MTBUFOp.TBUFFER_LOAD_FORMAT_XYZ: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])',
MTBUFOp.TBUFFER_LOAD_FORMAT_XYZW: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])',
MTBUFOp.TBUFFER_STORE_FORMAT_X: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format',
MTBUFOp.TBUFFER_STORE_FORMAT_XY: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)',
MTBUFOp.TBUFFER_STORE_FORMAT_XYZ: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)',
MTBUFOp.TBUFFER_STORE_FORMAT_XYZW: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)',
MTBUFOp.TBUFFER_LOAD_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[31:16].b16 is preserved.",
MTBUFOp.TBUFFER_LOAD_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))",
MTBUFOp.TBUFFER_LOAD_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\n// VDATA[63:48].b16 is preserved.",
MTBUFOp.TBUFFER_LOAD_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))",
MTBUFOp.TBUFFER_STORE_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format",
MTBUFOp.TBUFFER_STORE_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))",
MTBUFOp.TBUFFER_STORE_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))",
MTBUFOp.TBUFFER_STORE_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))",
}
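
# Consumers can look semantics up directly since these tables are plain dicts
# from opcode enum to pcode text; e.g. (module path assumed):
#   from extra.assembly.amd.autogen.cdna.pcode import DSOp_PCODE
#   print(DSOp_PCODE[DSOp.DS_ADD_U32].splitlines()[1])  # "tmp = MEM[addr].u32;"
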
MUBUFOp_PCODE = {
MUBUFOp.BUFFER_LOAD_FORMAT_X: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format',
MUBUFOp.BUFFER_LOAD_FORMAT_XY: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()])',
MUBUFOp.BUFFER_LOAD_FORMAT_XYZ: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()])',
MUBUFOp.BUFFER_LOAD_FORMAT_XYZW: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetX()]);\n// Mem access size depends on format\nVDATA[63 : 32].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetY()]);\nVDATA[95 : 64].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetZ()]);\nVDATA[127 : 96].b32 = ConvertFromFormat(MEM[addr + ChannelOffsetW()])',
MUBUFOp.BUFFER_STORE_FORMAT_X: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format',
MUBUFOp.BUFFER_STORE_FORMAT_XY: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32)',
MUBUFOp.BUFFER_STORE_FORMAT_XYZ: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32)',
MUBUFOp.BUFFER_STORE_FORMAT_XYZW: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(VDATA[31 : 0].b32);\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(VDATA[63 : 32].b32);\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(VDATA[95 : 64].b32);\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(VDATA[127 : 96].b32)',
MUBUFOp.BUFFER_LOAD_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[31:16].b16 is preserved.",
MUBUFOp.BUFFER_LOAD_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]))",
MUBUFOp.BUFFER_LOAD_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\n// VDATA[63:48].b16 is preserved.",
MUBUFOp.BUFFER_LOAD_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetY()]));\nVDATA[47 : 32].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetZ()]));\nVDATA[63 : 48].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetW()]))",
MUBUFOp.BUFFER_STORE_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format",
MUBUFOp.BUFFER_STORE_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))",
MUBUFOp.BUFFER_STORE_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))",
MUBUFOp.BUFFER_STORE_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))",
MUBUFOp.BUFFER_LOAD_UBYTE: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })",
MUBUFOp.BUFFER_LOAD_SBYTE: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))",
MUBUFOp.BUFFER_LOAD_USHORT: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })",
MUBUFOp.BUFFER_LOAD_SSHORT: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))",
MUBUFOp.BUFFER_LOAD_DWORD: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32',
MUBUFOp.BUFFER_LOAD_DWORDX2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32',
MUBUFOp.BUFFER_LOAD_DWORDX3: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32',
MUBUFOp.BUFFER_LOAD_DWORDX4: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32',
MUBUFOp.BUFFER_STORE_BYTE: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]',
MUBUFOp.BUFFER_STORE_BYTE_D16_HI: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]',
MUBUFOp.BUFFER_STORE_SHORT: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]',
MUBUFOp.BUFFER_STORE_SHORT_D16_HI: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]',
MUBUFOp.BUFFER_STORE_DWORD: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]',
MUBUFOp.BUFFER_STORE_DWORDX2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]',
MUBUFOp.BUFFER_STORE_DWORDX3: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]',
MUBUFOp.BUFFER_STORE_DWORDX4: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]',
MUBUFOp.BUFFER_LOAD_UBYTE_D16: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.",
MUBUFOp.BUFFER_LOAD_UBYTE_D16_HI: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.",
MUBUFOp.BUFFER_LOAD_SBYTE_D16: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.",
MUBUFOp.BUFFER_LOAD_SBYTE_D16_HI: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.",
MUBUFOp.BUFFER_LOAD_SHORT_D16: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.',
MUBUFOp.BUFFER_LOAD_SHORT_D16_HI: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.',
MUBUFOp.BUFFER_LOAD_FORMAT_D16_HI_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = 16'B(ConvertFromFormat(MEM[addr + ChannelOffsetX()]));\n// Mem access size depends on format\n// VDATA[15:0].b16 is preserved.",
MUBUFOp.BUFFER_STORE_FORMAT_D16_HI_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\n// Mem access size depends on format",
MUBUFOp.BUFFER_ATOMIC_SWAP: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
MUBUFOp.BUFFER_ATOMIC_CMPSWAP: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
MUBUFOp.BUFFER_ATOMIC_ADD: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
MUBUFOp.BUFFER_ATOMIC_SUB: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
MUBUFOp.BUFFER_ATOMIC_SMIN: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
MUBUFOp.BUFFER_ATOMIC_UMIN: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
MUBUFOp.BUFFER_ATOMIC_SMAX: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
MUBUFOp.BUFFER_ATOMIC_UMAX: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
MUBUFOp.BUFFER_ATOMIC_AND: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
MUBUFOp.BUFFER_ATOMIC_OR: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
MUBUFOp.BUFFER_ATOMIC_XOR: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
MUBUFOp.BUFFER_ATOMIC_INC: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
MUBUFOp.BUFFER_ATOMIC_DEC: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
MUBUFOp.BUFFER_ATOMIC_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA = tmp',
MUBUFOp.BUFFER_ATOMIC_PK_ADD_F16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].f16 = tmp[31 : 16].f16 + src[31 : 16].f16;\ndst[15 : 0].f16 = tmp[15 : 0].f16 + src[15 : 0].f16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
MUBUFOp.BUFFER_ATOMIC_ADD_F64: 'tmp = MEM[ADDR].f64;\nMEM[ADDR].f64 += DATA.f64;\nRETURN_DATA = tmp',
MUBUFOp.BUFFER_ATOMIC_MIN_F64: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
MUBUFOp.BUFFER_ATOMIC_MAX_F64: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
MUBUFOp.BUFFER_ATOMIC_PK_ADD_BF16: 'tmp = MEM[ADDR];\nsrc = DATA;\ndst[31 : 16].bf16 = tmp[31 : 16].bf16 + src[31 : 16].bf16;\ndst[15 : 0].bf16 = tmp[15 : 0].bf16 + src[15 : 0].bf16;\nMEM[ADDR] = dst.b32;\nRETURN_DATA = tmp',
MUBUFOp.BUFFER_ATOMIC_SWAP_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
MUBUFOp.BUFFER_ATOMIC_CMPSWAP_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
MUBUFOp.BUFFER_ATOMIC_ADD_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
MUBUFOp.BUFFER_ATOMIC_SUB_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp',
MUBUFOp.BUFFER_ATOMIC_SMIN_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
MUBUFOp.BUFFER_ATOMIC_UMIN_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
MUBUFOp.BUFFER_ATOMIC_SMAX_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
MUBUFOp.BUFFER_ATOMIC_UMAX_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
MUBUFOp.BUFFER_ATOMIC_AND_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp',
MUBUFOp.BUFFER_ATOMIC_OR_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
MUBUFOp.BUFFER_ATOMIC_XOR_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
MUBUFOp.BUFFER_ATOMIC_INC_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
MUBUFOp.BUFFER_ATOMIC_DEC_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
# MUBUF field legend (from the ISA manual):
#   OFFSET = Unsigned immediate byte offset. OFFEN = Send offset either as VADDR or as zero.
#   IDXEN = Send index either as VADDR or as zero. LDS = Data is transferred between LDS and Memory, not VGPRs.
#   OP = Instruction Opcode. DFMT = Data format for typed buffer. NFMT = Number format for typed buffer.
#   VADDR = VGPR address source. VDATA = Vector GPR for read/write result.
#   SRSRC = Scalar GPR that specifies resource constant. SOFFSET = Unsigned byte offset from an SGPR.
#   SC = Scope. NT = Non-Temporal. ACC = Return to ACC VGPRs.
}
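
# Illustrative sketch, not emitted by pdf.py: the BUFFER_ATOMIC_INC/DEC pcode above
# implements wrapping counters rather than plain +1/-1. A minimal Python model of the
# 32-bit forms (helper names here are hypothetical, not part of this module):
def _atomic_inc_u32(mem_val: int, src: int) -> int:
  # MEM[addr].u32 = tmp >= src ? 0U : tmp + 1U
  return 0 if mem_val >= src else mem_val + 1

def _atomic_dec_u32(mem_val: int, src: int) -> int:
  # MEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U
  return src if mem_val == 0 or mem_val > src else mem_val - 1

assert _atomic_inc_u32(41, 42) == 42 and _atomic_inc_u32(42, 42) == 0
assert _atomic_dec_u32(0, 42) == 42 and _atomic_dec_u32(7, 42) == 6
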
SCRATCHOp_PCODE = {
SCRATCHOp.SCRATCH_LOAD_UBYTE: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 24'0U, MEM[addr].u8 })",
SCRATCHOp.SCRATCH_LOAD_SBYTE: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i8))",
SCRATCHOp.SCRATCH_LOAD_USHORT: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.u32 = 32'U({ 16'0U, MEM[addr].u16 })",
SCRATCHOp.SCRATCH_LOAD_SSHORT: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA.i32 = 32'I(signext(MEM[addr].i16))",
SCRATCHOp.SCRATCH_LOAD_DWORD: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32',
SCRATCHOp.SCRATCH_LOAD_DWORDX2: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32',
SCRATCHOp.SCRATCH_LOAD_DWORDX3: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32',
SCRATCHOp.SCRATCH_LOAD_DWORDX4: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 0] = MEM[addr].b32;\nVDATA[63 : 32] = MEM[addr + 4U].b32;\nVDATA[95 : 64] = MEM[addr + 8U].b32;\nVDATA[127 : 96] = MEM[addr + 12U].b32',
SCRATCHOp.SCRATCH_STORE_BYTE: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[7 : 0]',
SCRATCHOp.SCRATCH_STORE_BYTE_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b8 = VDATA[23 : 16]',
SCRATCHOp.SCRATCH_STORE_SHORT: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[15 : 0]',
SCRATCHOp.SCRATCH_STORE_SHORT_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b16 = VDATA[31 : 16]',
SCRATCHOp.SCRATCH_STORE_DWORD: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0]',
SCRATCHOp.SCRATCH_STORE_DWORDX2: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32]',
SCRATCHOp.SCRATCH_STORE_DWORDX3: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64]',
SCRATCHOp.SCRATCH_STORE_DWORDX4: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nMEM[addr].b32 = VDATA[31 : 0];\nMEM[addr + 4U].b32 = VDATA[63 : 32];\nMEM[addr + 8U].b32 = VDATA[95 : 64];\nMEM[addr + 12U].b32 = VDATA[127 : 96]',
SCRATCHOp.SCRATCH_LOAD_UBYTE_D16: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[31:16] is preserved.",
SCRATCHOp.SCRATCH_LOAD_UBYTE_D16_HI: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].u16 = 16'U({ 8'0U, MEM[addr].u8 });\n// VDATA[15:0] is preserved.",
SCRATCHOp.SCRATCH_LOAD_SBYTE_D16: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[31:16] is preserved.",
SCRATCHOp.SCRATCH_LOAD_SBYTE_D16_HI: "addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].i16 = 16'I(signext(MEM[addr].i8));\n// VDATA[15:0] is preserved.",
SCRATCHOp.SCRATCH_LOAD_SHORT_D16: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[15 : 0].b16 = MEM[addr].b16;\n// VDATA[31:16] is preserved.',
SCRATCHOp.SCRATCH_LOAD_SHORT_D16_HI: 'addr = CalcScratchAddr(ADDR.b32, SADDR.b32, OFFSET.b32);\nVDATA[31 : 16].b16 = MEM[addr].b16;\n// VDATA[15:0] is preserved.',
}
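
# Illustrative sketch, not emitted by pdf.py: the *_D16 / *_D16_HI loads above replace
# only one 16-bit half of VDATA and preserve the other. A minimal Python model, assuming
# vdata is the previous 32-bit register value and mem16 the 16 bits read from memory:
def _load_d16_lo(vdata: int, mem16: int) -> int:
  # VDATA[15:0] = mem16; VDATA[31:16] is preserved
  return (vdata & 0xffff0000) | (mem16 & 0xffff)

def _load_d16_hi(vdata: int, mem16: int) -> int:
  # VDATA[31:16] = mem16; VDATA[15:0] is preserved
  return (vdata & 0x0000ffff) | ((mem16 & 0xffff) << 16)

assert _load_d16_lo(0xdeadbeef, 0x1234) == 0xdead1234
assert _load_d16_hi(0xdeadbeef, 0x1234) == 0x1234beef
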
SMEMOp_PCODE = {
SMEMOp.S_LOAD_DWORD: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32',
SMEMOp.S_LOAD_DWORDX2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32',
SMEMOp.S_LOAD_DWORDX4: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32',
SMEMOp.S_LOAD_DWORDX8: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32',
SMEMOp.S_LOAD_DWORDX16: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32',
SMEMOp.S_SCRATCH_LOAD_DWORD: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32',
SMEMOp.S_SCRATCH_LOAD_DWORDX2: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32',
SMEMOp.S_SCRATCH_LOAD_DWORDX4: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32',
SMEMOp.S_BUFFER_LOAD_DWORD: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32',
SMEMOp.S_BUFFER_LOAD_DWORDX2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32',
SMEMOp.S_BUFFER_LOAD_DWORDX4: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32',
SMEMOp.S_BUFFER_LOAD_DWORDX8: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32',
SMEMOp.S_BUFFER_LOAD_DWORDX16: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nSDATA[31 : 0] = MEM[addr].b32;\nSDATA[63 : 32] = MEM[addr + 4U].b32;\nSDATA[95 : 64] = MEM[addr + 8U].b32;\nSDATA[127 : 96] = MEM[addr + 12U].b32;\nSDATA[159 : 128] = MEM[addr + 16U].b32;\nSDATA[191 : 160] = MEM[addr + 20U].b32;\nSDATA[223 : 192] = MEM[addr + 24U].b32;\nSDATA[255 : 224] = MEM[addr + 28U].b32;\nSDATA[287 : 256] = MEM[addr + 32U].b32;\nSDATA[319 : 288] = MEM[addr + 36U].b32;\nSDATA[351 : 320] = MEM[addr + 40U].b32;\nSDATA[383 : 352] = MEM[addr + 44U].b32;\nSDATA[415 : 384] = MEM[addr + 48U].b32;\nSDATA[447 : 416] = MEM[addr + 52U].b32;\nSDATA[479 : 448] = MEM[addr + 56U].b32;\nSDATA[511 : 480] = MEM[addr + 60U].b32',
SMEMOp.S_STORE_DWORD: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0]',
SMEMOp.S_STORE_DWORDX2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32]',
SMEMOp.S_STORE_DWORDX4: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32];\nMEM[addr + 8U].b32 = SDATA[95 : 64];\nMEM[addr + 12U].b32 = SDATA[127 : 96]',
SMEMOp.S_SCRATCH_STORE_DWORD: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0]',
SMEMOp.S_SCRATCH_STORE_DWORDX2: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32]',
SMEMOp.S_SCRATCH_STORE_DWORDX4: 'addr = CalcScalarScratchAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32];\nMEM[addr + 8U].b32 = SDATA[95 : 64];\nMEM[addr + 12U].b32 = SDATA[127 : 96]',
SMEMOp.S_BUFFER_STORE_DWORD: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0]',
SMEMOp.S_BUFFER_STORE_DWORDX2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32]',
SMEMOp.S_BUFFER_STORE_DWORDX4: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\nMEM[addr].b32 = SDATA[31 : 0];\nMEM[addr + 4U].b32 = SDATA[63 : 32];\nMEM[addr + 8U].b32 = SDATA[95 : 64];\nMEM[addr + 12U].b32 = SDATA[127 : 96]',
SMEMOp.S_BUFFER_ATOMIC_SWAP: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_CMPSWAP: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_ADD: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_SUB: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_SMIN: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_UMIN: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_SMAX: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_UMAX: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_AND: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_OR: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_XOR: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_INC: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_DEC: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_BUFFER_ATOMIC_SWAP_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_CMPSWAP_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_ADD_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_SUB_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_SMIN_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_UMIN_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_SMAX_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_UMAX_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_AND_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_OR_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_XOR_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_INC_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_BUFFER_ATOMIC_DEC_X2: 'addr = CalcScalarBufferAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_ATOMIC_SWAP: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
SMEMOp.S_ATOMIC_CMPSWAP: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA[31 : 0].u32;\ncmp = DATA[63 : 32].u32;\nMEM[addr].u32 = tmp == cmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_ATOMIC_ADD: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 += DATA.u32;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_ATOMIC_SUB: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nMEM[addr].u32 -= DATA.u32;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_ATOMIC_SMIN: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src < tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
SMEMOp.S_ATOMIC_UMIN: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src < tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_ATOMIC_SMAX: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i32;\nsrc = DATA.i32;\nMEM[addr].i32 = src >= tmp ? src : tmp;\nRETURN_DATA.i32 = tmp',
SMEMOp.S_ATOMIC_UMAX: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = src >= tmp ? src : tmp;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_ATOMIC_AND: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp & DATA.b32);\nRETURN_DATA.b32 = tmp',
SMEMOp.S_ATOMIC_OR: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp | DATA.b32);\nRETURN_DATA.b32 = tmp',
SMEMOp.S_ATOMIC_XOR: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = (tmp ^ DATA.b32);\nRETURN_DATA.b32 = tmp',
SMEMOp.S_ATOMIC_INC: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = tmp >= src ? 0U : tmp + 1U;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_ATOMIC_DEC: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u32;\nsrc = DATA.u32;\nMEM[addr].u32 = ((tmp == 0U) || (tmp > src)) ? src : tmp - 1U;\nRETURN_DATA.u32 = tmp',
SMEMOp.S_ATOMIC_SWAP_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
SMEMOp.S_ATOMIC_CMPSWAP_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA[63 : 0].u64;\ncmp = DATA[127 : 64].u64;\nMEM[addr].u64 = tmp == cmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_ATOMIC_ADD_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_ATOMIC_SUB_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 -= DATA.u64;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_ATOMIC_SMIN_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src < tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
SMEMOp.S_ATOMIC_UMIN_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src < tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_ATOMIC_SMAX_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].i64;\nsrc = DATA.i64;\nMEM[addr].i64 = src >= tmp ? src : tmp;\nRETURN_DATA.i64 = tmp',
SMEMOp.S_ATOMIC_UMAX_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_ATOMIC_AND_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp & DATA.b64);\nRETURN_DATA.b64 = tmp',
SMEMOp.S_ATOMIC_OR_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
SMEMOp.S_ATOMIC_XOR_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
SMEMOp.S_ATOMIC_INC_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
SMEMOp.S_ATOMIC_DEC_X2: 'addr = CalcScalarGlobalAddr(SBASE.b32, SOFFSET.b32, OFFSET.i32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
}
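
# Illustrative sketch, not emitted by pdf.py: every S_*LOAD_DWORDX<N> entry above is the
# same pattern unrolled N times -- consecutive dwords from a base address. A compact
# Python equivalent, assuming mem maps dword-aligned byte addresses to 32-bit values:
def _load_dwordx(mem: dict, addr: int, n: int) -> list:
  # SDATA[32*i + 31 : 32*i] = MEM[addr + 4*i].b32
  return [mem[addr + 4 * i] for i in range(n)]

_mem = {0x100 + 4 * i: i for i in range(16)}
assert _load_dwordx(_mem, 0x100, 4) == [0, 1, 2, 3]
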
SOP1Op_PCODE = {
SOP1Op.S_MOV_B32: 'D0.b32 = S0.b32',
SOP1Op.S_MOV_B64: 'D0.b64 = S0.b64',
SOP1Op.S_CMOV_B32: 'if SCC then\nD0.b32 = S0.b32\nendif',
SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif',
SOP1Op.S_NOT_B32: 'D0.u32 = ~S0.u32;\nSCC = D0.u32 != 0U',
SOP1Op.S_NOT_B64: 'D0.u64 = ~S0.u64;\nSCC = D0.u64 != 0ULL',
SOP1Op.S_WQM_B32: "tmp = 0U;\ndeclare i : 6'U;\nfor i in 6'0U : 6'31U do\ntmp[i] = S0.u32[i & 6'60U +: 6'4U] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U",
SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]',
SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT0_I32_B32(0x00000000) => 32\nS_BCNT0_I32_B32(0xcccccccc) => 16\nS_BCNT0_I32_B32(0xffffffff) => 0",
SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT1_I32_B32(0x00000000) => 0\nS_BCNT1_I32_B32(0xcccccccc) => 16\nS_BCNT1_I32_B32(0xffffffff) => 32",
SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_FF0_I32_B32: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'0U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FF0_I32_B32(0xaaaaaaaa) => 0\nS_FF0_I32_B32(0x55555555) => 1\nS_FF0_I32_B32(0x00000000) => 0\nS_FF0_I32_B32(0xffffffff) => 0xffffffff\nS_FF0_I32_B32(0xfffeffff) => 16",
SOP1Op.S_FF0_I32_B64: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'0U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FF1_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FF1_I32_B32(0xaaaaaaaa) => 1\nS_FF1_I32_B32(0x55555555) => 0\nS_FF1_I32_B32(0x00000000) => 0xffffffff\nS_FF1_I32_B32(0xffffffff) => 0\nS_FF1_I32_B32(0x00010000) => 16",
SOP1Op.S_FF1_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FLBIT_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FLBIT_I32_B32(0x00000000) => 0xffffffff\nS_FLBIT_I32_B32(0x0000cccc) => 16\nS_FLBIT_I32_B32(0xffff3333) => 0\nS_FLBIT_I32_B32(0x7fffffff) => 1\nS_FLBIT_I32_B32(0x80000000) => 0\nS_FLBIT_I32_B32(0xffffffff) => 0",
SOP1Op.S_FLBIT_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FLBIT_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FLBIT_I32(0x00000000) => 0xffffffff\nS_FLBIT_I32(0x0000cccc) => 16\nS_FLBIT_I32(0xffff3333) => 16\nS_FLBIT_I32(0x7fffffff) => 1\nS_FLBIT_I32(0x80000000) => 1\nS_FLBIT_I32(0xffffffff) => 0xffffffff',
SOP1Op.S_FLBIT_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp',
SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))",
SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))",
SOP1Op.S_BITSET0_B32: "D0.u32[S0.u32[4 : 0]] = 1'0U",
SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U",
SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U",
SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U",
SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL',
SOP1Op.S_SETPC_B64: 'PC = S0.i64',
SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64',
SOP1Op.S_RFE_B64: "WAVE_STATUS.PRIV = 1'0U;\nPC = S0.i64",
SOP1Op.S_AND_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_OR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_XOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_ANDN2_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_ORN2_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (S0.u64 | ~EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_NAND_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_NOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_XNOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U',
SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL',
SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32\ns_mov_b32 m0, 10\ns_movrels_b32 s5, s7',
SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b64 = SGPR[addr].b64',
SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32\ns_mov_b32 m0, 10\ns_movreld_b32 s5, s7',
SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b64 = S0.b64',
SOP1Op.S_CBRANCH_JOIN: "saved_csp = S0.u32;\nif WAVE_MODE.CSP.u32 == saved_csp then\nPC += 4LL;\n// Second time to JOIN: continue with program.\nelse\nWAVE_MODE.CSP -= 3'1U;\n// First time to JOIN; jump to other FORK path.\n{ PC, EXEC } = SGPR[WAVE_MODE.CSP.u32 * 4U].b128;\n// Read 128 bits from 4 consecutive SGPRs.\nendif",
SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0\nS_ABS_I32(0x00000001) => 0x00000001\nS_ABS_I32(0x7fffffff) => 0x7fffffff\nS_ABS_I32(0x80000000) => 0x80000000 // Note this is negative!\nS_ABS_I32(0x80000001) => 0x7fffffff\nS_ABS_I32(0x80000002) => 0x7ffffffe\nS_ABS_I32(0xffffffff) => 0x00000001',
SOP1Op.S_SET_GPR_IDX_IDX: 'M0[7 : 0] = S0.u32[7 : 0].b8',
SOP1Op.S_ANDN1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_ORN1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_ANDN1_WREXEC_B64: 'EXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_ANDN2_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL\n// V0 holds the index value per lane\n// save exec mask for restore at the end\ns_mov_b64 s2, exec\n// exec mask of remaining (unprocessed) threads\ns_mov_b64 s4, exec\nloop:\n// get the index value for the first active lane\nv_readfirstlane_b32 s0, v0\n// find all other lanes with same index value\nv_cmpx_eq s0, v0\n<OP> // do the operation using the current EXEC mask. S0 holds the index.\n// mask out thread that was just executed\n// s_andn2_b64 s4, s4, exec\n// s_mov_b64 exec, s4\ns_andn2_wrexec_b64 s4, s4 // replaces above 2 ops\n// repeat until EXEC==0\ns_cbranch_scc1 loop\ns_mov_b64 exec, s2',
SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor\ns_bitreplicate_b64 s2, s0\ns_bitreplicate_b64 s2, s2',
}
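
# Illustrative sketch, not emitted by pdf.py: S_WQM_B32 above ("whole quad mode") sets
# all four bits of every 4-bit quad in which any source bit was set. A minimal Python
# model of the 32-bit form:
def _s_wqm_b32(s0: int) -> int:
  out = 0
  for quad in range(8):  # 8 quads of 4 bits in a 32-bit mask
    if (s0 >> (quad * 4)) & 0xf:
      out |= 0xf << (quad * 4)
  return out

assert _s_wqm_b32(0x00010100) == 0x000f0f00
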
SOP2Op_PCODE = {
SOP2Op.S_ADD_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADDC_U32.\nD0.u32 = tmp.u32",
SOP2Op.S_SUB_U32: "tmp = S0.u32 - S1.u32;\nSCC = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUBB_U32.\nD0.u32 = tmp.u32",
SOP2Op.S_ADD_I32: 'tmp = S0.i32 + S1.i32;\nSCC = ((S0.u32[31] == S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32',
SOP2Op.S_SUB_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32',
SOP2Op.S_ADDC_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADDC_U32.\nD0.u32 = tmp.u32",
SOP2Op.S_SUBB_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUBB_U32.\nD0.u32 = tmp.u32",
SOP2Op.S_MIN_I32: 'SCC = S0.i32 < S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32',
SOP2Op.S_MIN_U32: 'SCC = S0.u32 < S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32',
SOP2Op.S_MAX_I32: 'SCC = S0.i32 >= S1.i32;\nD0.i32 = SCC ? S0.i32 : S1.i32',
SOP2Op.S_MAX_U32: 'SCC = S0.u32 >= S1.u32;\nD0.u32 = SCC ? S0.u32 : S1.u32',
SOP2Op.S_CSELECT_B32: 'D0.u32 = SCC ? S0.u32 : S1.u32',
SOP2Op.S_CSELECT_B64: 'D0.u64 = SCC ? S0.u64 : S1.u64',
SOP2Op.S_AND_B32: 'D0.u32 = (S0.u32 & S1.u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_AND_B64: 'D0.u64 = (S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_OR_B32: 'D0.u32 = (S0.u32 | S1.u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_OR_B64: 'D0.u64 = (S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_XOR_B64: 'D0.u64 = (S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_ANDN2_B32: 'D0.u32 = (S0.u32 & ~S1.u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_ANDN2_B64: 'D0.u64 = (S0.u64 & ~S1.u64);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_ORN2_B32: 'D0.u32 = (S0.u32 | ~S1.u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_ORN2_B64: 'D0.u64 = (S0.u64 | ~S1.u64);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_NAND_B32: 'D0.u32 = ~(S0.u32 & S1.u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_NAND_B64: 'D0.u64 = ~(S0.u64 & S1.u64);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_NOR_B32: 'D0.u32 = ~(S0.u32 | S1.u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_NOR_B64: 'D0.u64 = ~(S0.u64 | S1.u64);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_XNOR_B64: 'D0.u64 = ~(S0.u64 ^ S1.u64);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_LSHR_B64: 'D0.u64 = (S0.u64 >> S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_ASHR_I32: "D0.i32 = 32'I(signext(S0.i32) >> S1[4 : 0].u32);\nSCC = D0.i32 != 0",
SOP2Op.S_ASHR_I64: 'D0.i64 = (signext(S0.i64) >> S1[5 : 0].u32);\nSCC = D0.i64 != 0LL',
SOP2Op.S_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)',
SOP2Op.S_BFM_B64: 'D0.u64 = (((1ULL << S0[5 : 0].u32) - 1ULL) << S1[5 : 0].u32)',
SOP2Op.S_MUL_I32: 'D0.i32 = S0.i32 * S1.i32',
SOP2Op.S_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S1[22 : 16].u32) - 1U));\nSCC = D0.u32 != 0U',
SOP2Op.S_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S1[22 : 16].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S1[22 : 16].u32);\nSCC = D0.i32 != 0',
SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL',
SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC = D0.i64 != 0LL',
SOP2Op.S_CBRANCH_G_FORK: "mask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\nif mask_pass == EXEC.u64 then\nPC = 64'I(S1.u64)\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { S1.u64, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = 64'I(S1.u64)\nendif",
SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0\nS_ABSDIFF_I32(0x00000002, 0x00000005) => 0x00000003\nS_ABSDIFF_I32(0xffffffff, 0x00000000) => 0x00000001\nS_ABSDIFF_I32(0x80000000, 0x00000000) => 0x80000000 // Note: result is negative!\nS_ABSDIFF_I32(0x80000000, 0x00000001) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xffffffff) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xfffffffe) => 0x7ffffffe',
SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)",
SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)",
SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32",
SOP2Op.S_LSHL2_ADD_U32: "tmp = (64'U(S0.u32) << 2U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32",
SOP2Op.S_LSHL3_ADD_U32: "tmp = (64'U(S0.u32) << 3U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32",
SOP2Op.S_LSHL4_ADD_U32: "tmp = (64'U(S0.u32) << 4U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32",
SOP2Op.S_PACK_LL_B32_B16: 'D0 = { S1[15 : 0].u16, S0[15 : 0].u16 }',
SOP2Op.S_PACK_LH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[15 : 0].u16 }',
SOP2Op.S_PACK_HH_B32_B16: 'D0 = { S1[31 : 16].u16, S0[31 : 16].u16 }',
}
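
# Illustrative sketch, not emitted by pdf.py: S_BFE_I32 above extracts a bitfield whose
# offset comes from S1[4:0] and whose width from S1[22:16], then sign-extends from the
# top extracted bit. A minimal Python model for width > 0:
def _s_bfe_i32(s0: int, offset: int, width: int) -> int:
  field = (s0 >> offset) & ((1 << width) - 1)
  if field & (1 << (width - 1)):  # signext_from_bit
    field -= 1 << width
  return field

assert _s_bfe_i32(0x0000ff00, 8, 8) == -1
assert _s_bfe_i32(0x00007f00, 8, 8) == 0x7f
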
SOPCOp_PCODE = {
SOPCOp.S_CMP_EQ_I32: 'SCC = S0.i32 == S1.i32',
SOPCOp.S_CMP_LG_I32: 'SCC = S0.i32 <> S1.i32',
SOPCOp.S_CMP_GT_I32: 'SCC = S0.i32 > S1.i32',
SOPCOp.S_CMP_GE_I32: 'SCC = S0.i32 >= S1.i32',
SOPCOp.S_CMP_LT_I32: 'SCC = S0.i32 < S1.i32',
SOPCOp.S_CMP_LE_I32: 'SCC = S0.i32 <= S1.i32',
SOPCOp.S_CMP_EQ_U32: 'SCC = S0.u32 == S1.u32',
SOPCOp.S_CMP_LG_U32: 'SCC = S0.u32 <> S1.u32',
SOPCOp.S_CMP_GT_U32: 'SCC = S0.u32 > S1.u32',
SOPCOp.S_CMP_GE_U32: 'SCC = S0.u32 >= S1.u32',
SOPCOp.S_CMP_LT_U32: 'SCC = S0.u32 < S1.u32',
SOPCOp.S_CMP_LE_U32: 'SCC = S0.u32 <= S1.u32',
SOPCOp.S_BITCMP0_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'0U",
SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U",
SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U",
SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U",
SOPCOp.S_SETVSKIP: 'VSKIP = S0.u32[S1.u32[4 : 0]]\ns_setvskip 1, 0 // Enable vskip mode.\ns_setvskip 0, 0 // Disable vskip mode.',
SOPCOp.S_SET_GPR_IDX_ON: "WAVE_MODE.GPR_IDX_EN = 1'1U;\nM0[7 : 0] = S0.u32[7 : 0].b8;\nM0[15 : 12] = SRC1.u32[3 : 0].b4;\n// this is the direct content of raw S1 field\n// Remaining bits of M0 are unmodified.",
SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64',
SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64',
}
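
# Illustrative sketch, not emitted by pdf.py: "<>" in the S_CMP_LG_* pcode above is the
# ISA manual's not-equal operator. The S_BITCMP* entries test a single bit of S0
# selected by the low bits of S1; a minimal Python model of the 32-bit form:
def _s_bitcmp1_b32(s0: int, s1: int) -> bool:
  # SCC = S0.u32[S1.u32[4:0]] == 1
  return (s0 >> (s1 & 0x1f)) & 1 == 1

assert _s_bitcmp1_b32(0x00010000, 16) and not _s_bitcmp1_b32(0x00010000, 17)
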
SOPKOp_PCODE = {
SOPKOp.S_MOVK_I32: "D0.i32 = 32'I(signext(S0.i16))",
SOPKOp.S_CMOVK_I32: "if SCC then\nD0.i32 = 32'I(signext(S0.i16))\nendif",
SOPKOp.S_CMPK_EQ_I32: "SCC = S0.i32 == 32'I(signext(S1.i16))",
SOPKOp.S_CMPK_LG_I32: "SCC = S0.i32 != 32'I(signext(S1.i16))",
SOPKOp.S_CMPK_GT_I32: "SCC = S0.i32 > 32'I(signext(S1.i16))",
SOPKOp.S_CMPK_GE_I32: "SCC = S0.i32 >= 32'I(signext(S1.i16))",
SOPKOp.S_CMPK_LT_I32: "SCC = S0.i32 < 32'I(signext(S1.i16))",
SOPKOp.S_CMPK_LE_I32: "SCC = S0.i32 <= 32'I(signext(S1.i16))",
SOPKOp.S_CMPK_EQ_U32: "SCC = S0.u32 == 32'U(S1.u16)",
SOPKOp.S_CMPK_LG_U32: "SCC = S0.u32 != 32'U(S1.u16)",
SOPKOp.S_CMPK_GT_U32: "SCC = S0.u32 > 32'U(S1.u16)",
SOPKOp.S_CMPK_GE_U32: "SCC = S0.u32 >= 32'U(S1.u16)",
SOPKOp.S_CMPK_LT_U32: "SCC = S0.u32 < 32'U(S1.u16)",
SOPKOp.S_CMPK_LE_U32: "SCC = S0.u32 <= 32'U(S1.u16)",
SOPKOp.S_ADDK_I32: "tmp = D0.i32;\n// Save value to check sign bits for overflow later.\nD0.i32 = D0.i32 + 32'I(signext(S0.i16));\nSCC = ((tmp[31] == S0.i16[15]) && (tmp[31] != D0.i32[31]));\n// signed overflow.",
SOPKOp.S_MULK_I32: "D0.i32 = D0.i32 * 32'I(signext(S0.i16))",
SOPKOp.S_CBRANCH_I_FORK: "// Initial setup.\nmask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\ntarget_addr = PC + signext(SIMM16.i32 * 4) + 4LL;\n// Decide where to jump to.\nif mask_pass == EXEC.u64 then\nPC = target_addr\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { target_addr, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = target_addr\nendif",
SOPKOp.S_GETREG_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nvalue = HW_REGISTERS[hwRegId];\nD0.u32 = 32'U(32'I(value >> offset.u32) & ((1 << size) - 1))",
SOPKOp.S_SETREG_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((S0.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\nHW_REGISTERS[hwRegId] = value.b32;\n// Side-effects may trigger here if certain bits are modified",
SOPKOp.S_SETREG_IMM32_B32: "hwRegId = SIMM16.u16[5 : 0];\noffset = SIMM16.u16[10 : 6];\nsize = SIMM16.u16[15 : 11].u32 + 1U;\n// logical size is in range 1:32\nmask = (1 << size) - 1;\nmask = (mask << offset.u32);\nmask = (mask & HwRegWriteMask(hwRegId, WAVE_STATUS.PRIV));\n// Mask of bits that can be modified\nvalue = ((SIMM32.u32 << offset.u32) & mask.u32);\nvalue = (value | 32'U(HW_REGISTERS[hwRegId].i32 & ~mask));\nHW_REGISTERS[hwRegId] = value.b32;\n// Side-effects may trigger here if certain bits are modified",
SOPKOp.S_CALL_B64: "D0.i64 = PC + 4LL;\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL",
}
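
# Illustrative sketch, not emitted by pdf.py: S_GETREG/S_SETREG above pack
# {size-1, offset, hwRegId} into SIMM16 and do a masked read-modify-write. A minimal
# Python model that ignores the per-register write-permission mask (HwRegWriteMask):
def _decode_hwreg_simm16(simm16: int):
  hw_reg_id = simm16 & 0x3f             # SIMM16[5:0]
  offset = (simm16 >> 6) & 0x1f         # SIMM16[10:6]
  size = ((simm16 >> 11) & 0x1f) + 1    # SIMM16[15:11], logical size 1..32
  return hw_reg_id, offset, size

def _s_setreg_b32(old: int, s0: int, simm16: int) -> int:
  _, offset, size = _decode_hwreg_simm16(simm16)
  mask = ((1 << size) - 1) << offset
  return ((s0 << offset) & mask) | (old & ~mask & 0xffffffff)

assert _s_setreg_b32(0xffffffff, 0b101, (2 << 11) | (4 << 6) | 1) == 0xffffffdf
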
SOPPOp_PCODE = {
SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor\ns_nop 0 // Wait 1 cycle.\ns_nop 0xf // Wait 16 cycles.',
SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.\ns_branch label // Set SIMM16 = +4 = 0x0004\ns_nop 0 // 4 bytes\nlabel:\ns_nop 0 // 4 bytes\ns_branch label // Set SIMM16 = -8 = 0xfff8",
SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_VCCZ: "if VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_VCCNZ: "if VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_WAITCNT: 'SIMM16[3:0] = vmcount (vector memory operations) lower bits [3:0],\nSIMM16[6:4] = export/mem-write-data count,\nSIMM16[11:8] = LGKMcnt (scalar-mem/GDS/LDS count),\nSIMM16[15:14] = vmcount (vector memory operations) upper bits [5:4].',
SOPPOp.S_SLEEP: 's_sleep 0 // Wait for 0 clocks.\ns_sleep 1 // Wait for 1-64 clocks.\ns_sleep 2 // Wait for 65-128 clocks.',
SOPPOp.S_TRAP: 'TrapID = SIMM16.u16[7 : 0];\n"Wait for all instructions to complete";\n// PC passed into trap handler points to S_TRAP itself,\n// *not* to the next instruction.\n{ TTMP[1], TTMP[0] } = { 3\'0, PCRewind[3 : 0], HT[0], TrapID[7 : 0], PC[47 : 0] };\nPC = TBA.i64;\n// trap base address\nWAVE_STATUS.PRIV = 1\'1U',
SOPPOp.S_CBRANCH_CDBGSYS: "if WAVE_STATUS.COND_DBG_SYS.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_CDBGUSER: "if WAVE_STATUS.COND_DBG_USER.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_CDBGSYS_OR_USER: "if (WAVE_STATUS.COND_DBG_SYS || WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_CDBGSYS_AND_USER: "if (WAVE_STATUS.COND_DBG_SYS && WAVE_STATUS.COND_DBG_USER) then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_SET_GPR_IDX_OFF: "WAVE_MODE.GPR_IDX_EN = 1'0U",
SOPPOp.S_SET_GPR_IDX_MODE: 'M0[15 : 12] = SIMM16.u16[3 : 0].b4',
}
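
# Illustrative sketch, not emitted by pdf.py: the S_WAITCNT entry above describes how
# the three counters are split across SIMM16 (vmcnt is split into low and high parts).
# A minimal Python decode of that layout:
def _decode_waitcnt(simm16: int):
  vmcnt = (simm16 & 0xf) | (((simm16 >> 14) & 0x3) << 4)  # SIMM16[3:0] + SIMM16[15:14]
  expcnt = (simm16 >> 4) & 0x7                            # SIMM16[6:4]
  lgkmcnt = (simm16 >> 8) & 0xf                           # SIMM16[11:8]
  return vmcnt, expcnt, lgkmcnt

assert _decode_waitcnt(0x0f7f) == (15, 7, 15)
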
VOP1Op_PCODE = {
VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'I;\nif EXEC == 0x0LL then\nlane = 0;\n// Force lane 0 if all lanes are disabled\nelse\nlane = s_ff1_i32_b64(EXEC);\n// Lowest active lane\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
VOP1Op.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)',
VOP1Op.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)',
VOP1Op.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)',
VOP1Op.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)',
VOP1Op.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)',
VOP1Op.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)',
VOP1Op.V_CVT_RPI_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))',
VOP1Op.V_CVT_FLR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))',
VOP1Op.V_CVT_OFF_F32_I4: "declare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]",
VOP1Op.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)',
VOP1Op.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)',
VOP1Op.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)',
VOP1Op.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)',
VOP1Op.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)',
VOP1Op.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)',
VOP1Op.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)',
VOP1Op.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)',
VOP1Op.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)',
VOP1Op.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif',
VOP1Op.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif',
VOP1Op.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif',
VOP1Op.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)',
VOP1Op.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)',
VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
VOP1Op.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_U32(0x00000000) => 0xffffffff\nV_FFBH_U32(0x800000ff) => 0\nV_FFBH_U32(0x100000ff) => 3\nV_FFBH_U32(0x0000ffff) => 16\nV_FFBH_U32(0x00000001) => 31",
VOP1Op.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBL_B32(0x00000000) => 0xffffffff\nV_FFBL_B32(0xff000001) => 0\nV_FFBL_B32(0xff000008) => 3\nV_FFBL_B32(0xffff0000) => 16\nV_FFBL_B32(0x80000000) => 31",
VOP1Op.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_I32(0x00000000) => 0xffffffff\nV_FFBH_I32(0x40000000) => 1\nV_FFBH_I32(0x80000000) => 1\nV_FFBH_I32(0x0fffffff) => 4\nV_FFBH_I32(0xffff0000) => 16\nV_FFBH_I32(0xfffffffe) => 31\nV_FFBH_I32(0xffffffff) => 0xffffffff',
VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
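# The V_FREXP_* entries follow the C frexp() convention: a mantissa in
# [0.5, 1.0) and an exponent such that mant * 2**exp reproduces the input.
# Python's math.frexp matches; a sketch ignoring the INF/NAN cases above:
#   import math
#   mant, exp = math.frexp(4.0)          # mant == 0.5, exp == 3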
VOP1Op.V_MOV_B64: 'D0.b64 = S0.b64',
VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()",
VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()",
VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif",
VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif",
VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)',
VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif",
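# V_RNDNE_* rounds half-to-even: floor(x + 0.5), backed off by one when x sat
# exactly halfway above an even integer. Python 3's round() breaks ties the
# same way, so a sketch is simply:
#   assert round(0.5) == 0 and round(1.5) == 2 and round(2.5) == 2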
VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)',
VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN",
VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN",
VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)',
VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)',
VOP1Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n <= 16'0 then\nreturn 8'0U\nelsif n >= 16'255 then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\ntmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16",
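# V_SAT_PK_U8_I16 clamps two signed 16-bit lanes to [0, 255] and packs the
# results into the low 16 bits. A sketch with Python ints (helper names are
# illustrative, not part of the generated table):
#   def sat_pk_u8_i16(s0: int) -> int:
#       def sat8(n): return 0 if n <= 0 else 255 if n >= 255 else n
#       lo = (s0 & 0xffff) - (s0 & 0x8000) * 2                # sign-extend i16
#       hi = ((s0 >> 16) & 0xffff) - ((s0 >> 16) & 0x8000) * 2
#       return sat8(lo) | (sat8(hi) << 8)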
VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp',
VOP1Op.V_CVT_F32_FP8: 'if SDWA_SRC0_SEL == BYTE1.b3 then\nD0.f32 = fp8_to_f32(S0[15 : 8].fp8)\nelsif SDWA_SRC0_SEL == BYTE2.b3 then\nD0.f32 = fp8_to_f32(S0[23 : 16].fp8)\nelsif SDWA_SRC0_SEL == BYTE3.b3 then\nD0.f32 = fp8_to_f32(S0[31 : 24].fp8)\nelse\n// BYTE0 implied\nD0.f32 = fp8_to_f32(S0[7 : 0].fp8)\nendif',
VOP1Op.V_CVT_F32_BF8: 'if SDWA_SRC0_SEL == BYTE1.b3 then\nD0.f32 = bf8_to_f32(S0[15 : 8].bf8)\nelsif SDWA_SRC0_SEL == BYTE2.b3 then\nD0.f32 = bf8_to_f32(S0[23 : 16].bf8)\nelsif SDWA_SRC0_SEL == BYTE3.b3 then\nD0.f32 = bf8_to_f32(S0[31 : 24].bf8)\nelse\n// BYTE0 implied\nD0.f32 = bf8_to_f32(S0[7 : 0].bf8)\nendif',
VOP1Op.V_CVT_PK_F32_FP8: 'tmp = SDWA_SRC0_SEL[1 : 0] == WORD1.b2 ? S0[31 : 16] : S0[15 : 0];\nD0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8);\nD0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)',
VOP1Op.V_CVT_PK_F32_BF8: 'tmp = SDWA_SRC0_SEL[1 : 0] == WORD1.b2 ? S0[31 : 16] : S0[15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)',
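# The FP8/BF8 conversions above pick a byte or word of S0 via SDWA_SRC0_SEL
# before decoding. A sketch of just the word-select step (the fp8/bf8 decode
# itself is left abstract here):
#   def select_word(s0: int, sel_word1: bool) -> int:
#       return (s0 >> 16) & 0xffff if sel_word1 else s0 & 0xffff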
VOP1Op.V_PRNG_B32: 'in = S0.u32;\nD0.u32 = ((in << 1U) ^ (in[31] ? 197U : 0U))',
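# V_PRNG_B32 is an LFSR-style step: shift left by one and XOR in 197 when the
# old MSB was set. As plain Python, masked to 32 bits:
#   def v_prng_b32(x: int) -> int:
#       return ((x << 1) ^ (197 if x >> 31 else 0)) & 0xffffffff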
VOP1Op.V_PERMLANE16_SWAP_B32: 'for pass in 0 : 1 do\nfor lane in 0 : 15 do\ntmp = VGPR[pass * 32 + lane][SRC0.u32];\nVGPR[pass * 32 + lane][SRC0.u32] = VGPR[pass * 32 + lane + 16][VDST.u32];\nVGPR[pass * 32 + lane + 16][VDST.u32] = tmp\nendfor\nendfor',
VOP1Op.V_PERMLANE32_SWAP_B32: 'for lane in 0 : 31 do\ntmp = VGPR[lane][SRC0.u32];\nVGPR[lane][SRC0.u32] = VGPR[lane + 32][VDST.u32];\nVGPR[lane + 32][VDST.u32] = tmp\nendfor',
    VOP1Op.V_CVT_F32_BF16: "D0.f32 = 32'F({ S0.b16, 16'0U })",
}
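# These tables are plain data keyed by opcode; a typical (illustrative) use:
#   VOP1Op_PCODE[VOP1Op.V_NOT_B32]       # -> 'D0.u32 = ~S0.u32'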
VOP2Op_PCODE = {
VOP2Op.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32',
VOP2Op.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32',
VOP2Op.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32',
VOP2Op.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32',
VOP2Op.V_FMAC_F64: 'D0.f64 = fma(S0.f64, S1.f64, D0.f64)',
VOP2Op.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32',
VOP2Op.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)",
VOP2Op.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)",
VOP2Op.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)",
VOP2Op.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)",
VOP2Op.V_MIN_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S1.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S0.f32\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F32.\nD0.f32 = S0.f32 < S1.f32 ? S0.f32 : S1.f32\nendif",
VOP2Op.V_MAX_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S1.f32\nelsif WAVE_MODE.IEEE then\nD0.f32 = S0.f32 >= S1.f32 ? S0.f32 : S1.f32\nelse\nD0.f32 = S0.f32 > S1.f32 ? S0.f32 : S1.f32\nendif",
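# V_MIN_F32/V_MAX_F32 above are not plain min/max: one NAN input yields the
# other operand, and -0.0 vs +0.0 is resolved explicitly. A sketch of the
# non-IEEE quiet-NAN path (signaling NANs and IEEE mode omitted):
#   import math
#   def v_max_f32(a: float, b: float) -> float:
#       if math.isnan(a): return b
#       if math.isnan(b): return a
#       if a == 0.0 and b == 0.0:        # prefer +0.0 over -0.0 for max
#           return a if math.copysign(1.0, a) > 0 else b
#       return a if a > b else b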
VOP2Op.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32',
VOP2Op.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32',
VOP2Op.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32',
VOP2Op.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? S0.u32 : S1.u32',
VOP2Op.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)',
VOP2Op.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)',
VOP2Op.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)',
VOP2Op.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)',
VOP2Op.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)',
VOP2Op.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)',
VOP2Op.V_DOT2C_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp',
VOP2Op.V_FMAMK_F32: 'D0.f32 = fma(S0.f32, SIMM32.f32, S1.f32)',
VOP2Op.V_FMAAK_F32: 'D0.f32 = fma(S0.f32, S1.f32, SIMM32.f32)',
VOP2Op.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32",
VOP2Op.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
VOP2Op.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
VOP2Op.V_ADDC_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32",
VOP2Op.V_SUBB_CO_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
VOP2Op.V_SUBBREV_CO_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
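# The *_CO_U32 entries model 32-bit add/sub with a per-lane carry bit kept in
# VCC. A one-lane sketch of add-with-carry using Python ints:
#   def v_addc_co_u32(s0: int, s1: int, carry_in: int):
#       tmp = s0 + s1 + carry_in
#       return tmp & 0xffffffff, int(tmp >= 0x100000000)     # (D0, VCC bit)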
VOP2Op.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16',
VOP2Op.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16',
VOP2Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16',
VOP2Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16',
VOP2Op.V_MAC_F16: "tmp = S0.f16 * S1.f16 + D0.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
VOP2Op.V_MADMK_F16: "tmp = S0.f16 * SIMM16.f16 + S1.f16;\nD0 = { 16'0, tmp.f16 }",
VOP2Op.V_MADAK_F16: "tmp = S0.f16 * S1.f16 + SIMM16.f16;\nD0 = { 16'0, tmp.f16 }",
VOP2Op.V_ADD_U16: 'D0.u16 = S0.u16 + S1.u16',
VOP2Op.V_SUB_U16: 'D0.u16 = S0.u16 - S1.u16',
VOP2Op.V_SUBREV_U16: 'D0.u16 = S1.u16 - S0.u16',
VOP2Op.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16',
VOP2Op.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)',
VOP2Op.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)',
VOP2Op.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)',
VOP2Op.V_MAX_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S1.f16\nelsif WAVE_MODE.IEEE then\nD0.f16 = S0.f16 >= S1.f16 ? S0.f16 : S1.f16\nelse\nD0.f16 = S0.f16 > S1.f16 ? S0.f16 : S1.f16\nendif",
VOP2Op.V_MIN_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S1.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S0.f16\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F16.\nD0.f16 = S0.f16 < S1.f16 ? S0.f16 : S1.f16\nendif",
VOP2Op.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16',
VOP2Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16',
VOP2Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16',
VOP2Op.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? S0.i16 : S1.i16',
VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
VOP2Op.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32',
VOP2Op.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32',
VOP2Op.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32',
VOP2Op.V_DOT2C_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp',
VOP2Op.V_DOT2C_I32_I16: 'tmp = D0.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp',
VOP2Op.V_DOT4C_I32_I8: 'tmp = D0.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp',
VOP2Op.V_DOT8C_I32_I4: 'tmp = D0.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp',
VOP2Op.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)',
VOP2Op.V_PK_FMAC_F16: 'D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16);\nD0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16)',
VOP2Op.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)',
}
VOP3AOp_PCODE = {
VOP3AOp.V_CMP_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
VOP3AOp.V_CMP_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
VOP3AOp.V_CMP_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
VOP3AOp.V_CMP_F_F16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_F16: 'D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_F16: 'D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_F16: 'D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_O_F16: "D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_U_F16: "D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NGT_F16: 'D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NEQ_F16: 'D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLT_F16: 'D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_TRU_F16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_F16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_O_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_U_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_TRU_F16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_F_F32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_F32: 'D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_F32: 'D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_F32: 'D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_O_F32: "D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_U_F32: "D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NGT_F32: 'D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NEQ_F32: 'D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLT_F32: 'D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_TRU_F32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_F32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_O_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_U_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_TRU_F32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_F_F64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_F64: 'D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_F64: 'D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_F64: 'D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_O_F64: 'D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_U_F64: 'D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NGT_F64: 'D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NEQ_F64: 'D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NLT_F64: 'D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_TRU_F64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_F64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_O_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_TRU_F64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_F_I16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_I16: 'D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_I16: 'D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_I16: 'D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NE_I16: 'D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_T_I16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_F_U16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_U16: 'D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_U16: 'D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_U16: 'D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NE_U16: 'D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_T_U16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_I16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_T_I16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_U16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_T_U16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_F_I32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_I32: 'D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_I32: 'D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_I32: 'D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NE_I32: 'D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_T_I32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_F_U32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_U32: 'D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_U32: 'D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_U32: 'D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NE_U32: 'D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_T_U32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_I32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_T_I32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_U32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_T_U32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_F_I64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_I64: 'D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_I64: 'D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_I64: 'D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NE_I64: 'D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_T_I64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_F_U64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMP_LT_U64: 'D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_EQ_U64: 'D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GT_U64: 'D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_NE_U64: 'D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMP_T_U64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_I64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_T_I64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_F_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_EQ_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
    VOP3AOp.V_CMPX_T_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32',
VOP3AOp.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32',
VOP3AOp.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32',
VOP3AOp.V_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32',
VOP3AOp.V_FMAC_F64: 'D0.f64 = fma(S0.f64, S1.f64, D0.f64)',
VOP3AOp.V_MUL_F32: 'D0.f32 = S0.f32 * S1.f32',
VOP3AOp.V_MUL_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24)",
VOP3AOp.V_MUL_HI_I32_I24: "D0.i32 = 32'I((64'I(S0.i24) * 64'I(S1.i24)) >> 32U)",
VOP3AOp.V_MUL_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24)",
VOP3AOp.V_MUL_HI_U32_U24: "D0.u32 = 32'U((64'U(S0.u24) * 64'U(S1.u24)) >> 32U)",
VOP3AOp.V_MIN_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S1.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S0.f32\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F32.\nD0.f32 = S0.f32 < S1.f32 ? S0.f32 : S1.f32\nendif",
VOP3AOp.V_MAX_F32: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == +0.0) && (64'F(S1.f32) == -0.0)) then\nD0.f32 = S0.f32\nelsif ((64'F(S0.f32) == -0.0) && (64'F(S1.f32) == +0.0)) then\nD0.f32 = S1.f32\nelsif WAVE_MODE.IEEE then\nD0.f32 = S0.f32 >= S1.f32 ? S0.f32 : S1.f32\nelse\nD0.f32 = S0.f32 > S1.f32 ? S0.f32 : S1.f32\nendif",
VOP3AOp.V_MIN_I32: 'D0.i32 = S0.i32 < S1.i32 ? S0.i32 : S1.i32',
VOP3AOp.V_MAX_I32: 'D0.i32 = S0.i32 >= S1.i32 ? S0.i32 : S1.i32',
VOP3AOp.V_MIN_U32: 'D0.u32 = S0.u32 < S1.u32 ? S0.u32 : S1.u32',
VOP3AOp.V_MAX_U32: 'D0.u32 = S0.u32 >= S1.u32 ? S0.u32 : S1.u32',
VOP3AOp.V_LSHRREV_B32: 'D0.u32 = (S1.u32 >> S0[4 : 0].u32)',
VOP3AOp.V_ASHRREV_I32: 'D0.i32 = (S1.i32 >> S0[4 : 0].u32)',
VOP3AOp.V_LSHLREV_B32: 'D0.u32 = (S1.u32 << S0[4 : 0].u32)',
VOP3AOp.V_AND_B32: 'D0.u32 = (S0.u32 & S1.u32)',
VOP3AOp.V_OR_B32: 'D0.u32 = (S0.u32 | S1.u32)',
VOP3AOp.V_XOR_B32: 'D0.u32 = (S0.u32 ^ S1.u32)',
VOP3AOp.V_DOT2C_F32_BF16: 'tmp = D0.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp',
VOP3AOp.V_ADD_F16: 'D0.f16 = S0.f16 + S1.f16',
VOP3AOp.V_SUB_F16: 'D0.f16 = S0.f16 - S1.f16',
VOP3AOp.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16',
VOP3AOp.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16',
VOP3AOp.V_MAC_F16: "tmp = S0.f16 * S1.f16 + D0.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
VOP3AOp.V_ADD_U16: 'D0.u16 = S0.u16 + S1.u16',
VOP3AOp.V_SUB_U16: 'D0.u16 = S0.u16 - S1.u16',
VOP3AOp.V_SUBREV_U16: 'D0.u16 = S1.u16 - S0.u16',
VOP3AOp.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16',
VOP3AOp.V_LSHLREV_B16: 'D0.u16 = (S1.u16 << S0[3 : 0].u32)',
VOP3AOp.V_LSHRREV_B16: 'D0.u16 = (S1.u16 >> S0[3 : 0].u32)',
VOP3AOp.V_ASHRREV_I16: 'D0.i16 = (S1.i16 >> S0[3 : 0].u32)',
VOP3AOp.V_MAX_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S1.f16\nelsif WAVE_MODE.IEEE then\nD0.f16 = S0.f16 >= S1.f16 ? S0.f16 : S1.f16\nelse\nD0.f16 = S0.f16 > S1.f16 ? S0.f16 : S1.f16\nendif",
VOP3AOp.V_MIN_F16: "if (WAVE_MODE.IEEE && isSignalNAN(64'F(S0.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif (WAVE_MODE.IEEE && isSignalNAN(64'F(S1.f16))) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif ((64'F(S0.f16) == +0.0) && (64'F(S1.f16) == -0.0)) then\nD0.f16 = S1.f16\nelsif ((64'F(S0.f16) == -0.0) && (64'F(S1.f16) == +0.0)) then\nD0.f16 = S0.f16\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F16.\nD0.f16 = S0.f16 < S1.f16 ? S0.f16 : S1.f16\nendif",
VOP3AOp.V_MAX_U16: 'D0.u16 = S0.u16 >= S1.u16 ? S0.u16 : S1.u16',
VOP3AOp.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16',
VOP3AOp.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16',
VOP3AOp.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? S0.i16 : S1.i16',
VOP3AOp.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
VOP3AOp.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32',
VOP3AOp.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32',
VOP3AOp.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32',
VOP3AOp.V_DOT2C_F32_F16: 'tmp = D0.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp',
VOP3AOp.V_DOT2C_I32_I16: 'tmp = D0.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp',
VOP3AOp.V_DOT4C_I32_I8: 'tmp = D0.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp',
VOP3AOp.V_DOT8C_I32_I4: 'tmp = D0.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp',
VOP3AOp.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)',
VOP3AOp.V_PK_FMAC_F16: 'D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16);\nD0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16)',
VOP3AOp.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)',
VOP3AOp.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP3AOp.V_READFIRSTLANE_B32: "declare lane : 32'I;\nif EXEC == 0x0LL then\nlane = 0;\n// Force lane 0 if all lanes are disabled\nelse\nlane = s_ff1_i32_b64(EXEC);\n// Lowest active lane\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP3AOp.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP3AOp.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
VOP3AOp.V_CVT_F32_I32: 'D0.f32 = i32_to_f32(S0.i32)',
VOP3AOp.V_CVT_F32_U32: 'D0.f32 = u32_to_f32(S0.u32)',
VOP3AOp.V_CVT_U32_F32: 'D0.u32 = f32_to_u32(S0.f32)',
VOP3AOp.V_CVT_I32_F32: 'D0.i32 = f32_to_i32(S0.f32)',
VOP3AOp.V_CVT_F16_F32: 'D0.f16 = f32_to_f16(S0.f32)',
VOP3AOp.V_CVT_F32_F16: 'D0.f32 = f16_to_f32(S0.f16)',
VOP3AOp.V_CVT_RPI_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32 + 0.5F))',
VOP3AOp.V_CVT_FLR_I32_F32: 'D0.i32 = f32_to_i32(floor(S0.f32))',
VOP3AOp.V_CVT_OFF_F32_I4: "declare CVT_OFF_TABLE : 32'F[16];\nD0.f32 = CVT_OFF_TABLE[S0.u32[3 : 0]]",
VOP3AOp.V_CVT_F32_F64: 'D0.f32 = f64_to_f32(S0.f64)',
VOP3AOp.V_CVT_F64_F32: 'D0.f64 = f32_to_f64(S0.f32)',
VOP3AOp.V_CVT_F32_UBYTE0: 'D0.f32 = u32_to_f32(S0[7 : 0].u32)',
VOP3AOp.V_CVT_F32_UBYTE1: 'D0.f32 = u32_to_f32(S0[15 : 8].u32)',
VOP3AOp.V_CVT_F32_UBYTE2: 'D0.f32 = u32_to_f32(S0[23 : 16].u32)',
VOP3AOp.V_CVT_F32_UBYTE3: 'D0.f32 = u32_to_f32(S0[31 : 24].u32)',
VOP3AOp.V_CVT_U32_F64: 'D0.u32 = f64_to_u32(S0.f64)',
VOP3AOp.V_CVT_F64_U32: 'D0.f64 = u32_to_f64(S0.u32)',
VOP3AOp.V_TRUNC_F64: 'D0.f64 = trunc(S0.f64)',
VOP3AOp.V_CEIL_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 > 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += 1.0\nendif',
VOP3AOp.V_RNDNE_F64: 'D0.f64 = floor(S0.f64 + 0.5);\nif (isEven(floor(S0.f64)) && (fract(S0.f64) == 0.5)) then\nD0.f64 -= 1.0\nendif',
VOP3AOp.V_FLOOR_F64: 'D0.f64 = trunc(S0.f64);\nif ((S0.f64 < 0.0) && (S0.f64 != D0.f64)) then\nD0.f64 += -1.0\nendif',
VOP3AOp.V_FRACT_F32: 'D0.f32 = S0.f32 + -floor(S0.f32)',
VOP3AOp.V_TRUNC_F32: 'D0.f32 = trunc(S0.f32)',
VOP3AOp.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP3AOp.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP3AOp.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
VOP3AOp.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP3AOp.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP3AOp.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP3AOp.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
VOP3AOp.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP3AOp.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP3AOp.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
VOP3AOp.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP3AOp.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
VOP3AOp.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
VOP3AOp.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP3AOp.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP3AOp.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
VOP3AOp.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_U32(0x00000000) => 0xffffffff\nV_FFBH_U32(0x800000ff) => 0\nV_FFBH_U32(0x100000ff) => 3\nV_FFBH_U32(0x0000ffff) => 16\nV_FFBH_U32(0x00000001) => 31",
VOP3AOp.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBL_B32(0x00000000) => 0xffffffff\nV_FFBL_B32(0xff000001) => 0\nV_FFBL_B32(0xff000008) => 3\nV_FFBL_B32(0xffff0000) => 16\nV_FFBL_B32(0x80000000) => 31",
VOP3AOp.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_I32(0x00000000) => 0xffffffff\nV_FFBH_I32(0x40000000) => 1\nV_FFBH_I32(0x80000000) => 1\nV_FFBH_I32(0x0fffffff) => 4\nV_FFBH_I32(0xffff0000) => 16\nV_FFBH_I32(0xfffffffe) => 31\nV_FFBH_I32(0xffffffff) => 0xffffffff',
VOP3AOp.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
VOP3AOp.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP3AOp.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
VOP3AOp.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
VOP3AOp.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
VOP3AOp.V_MOV_B64: 'D0.b64 = S0.b64',
VOP3AOp.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP3AOp.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP3AOp.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP3AOp.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
VOP3AOp.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP3AOp.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
VOP3AOp.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP3AOp.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
VOP3AOp.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP3AOp.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32",
VOP3AOp.V_MAD_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24) + S2.u32",
VOP3AOp.V_CUBEID_F32: '// Set D0.f = cubemap face ID ({0.0, 1.0, ..., 5.0}).\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = 5.0F\nelse\nD0.f32 = 4.0F\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = 3.0F\nelse\nD0.f32 = 2.0F\nendif\nelse\nif S0.f32 < 0.0F then\nD0.f32 = 1.0F\nelse\nD0.f32 = 0.0F\nendif\nendif',
VOP3AOp.V_CUBESC_F32: '// D0.f = cubemap S coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = -S0.f32\nelse\nD0.f32 = S0.f32\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S0.f32\nelse\nif S0.f32 < 0.0F then\nD0.f32 = S2.f32\nelse\nD0.f32 = -S2.f32\nendif\nendif',
VOP3AOp.V_CUBETC_F32: '// D0.f = cubemap T coordinate.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = -S1.f32\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = -S2.f32\nelse\nD0.f32 = S2.f32\nendif\nelse\nD0.f32 = -S1.f32\nendif',
VOP3AOp.V_CUBEMA_F32: '// D0.f = 2.0 * cubemap major axis.\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nD0.f32 = S2.f32 * 2.0F\nelsif abs(S1.f32) >= abs(S0.f32) then\nD0.f32 = S1.f32 * 2.0F\nelse\nD0.f32 = S0.f32 * 2.0F\nendif',
VOP3AOp.V_BFE_U32: 'D0.u32 = ((S0.u32 >> S1[4 : 0].u32) & ((1U << S2[4 : 0].u32) - 1U))',
VOP3AOp.V_BFE_I32: 'tmp.i32 = ((S0.i32 >> S1[4 : 0].u32) & ((1 << S2[4 : 0].u32) - 1));\nD0.i32 = signext_from_bit(tmp.i32, S2[4 : 0].u32)',
VOP3AOp.V_BFI_B32: 'D0.u32 = ((S0.u32 & S1.u32) | (~S0.u32 & S2.u32))',
VOP3AOp.V_FMA_F32: 'D0.f32 = fma(S0.f32, S1.f32, S2.f32)',
VOP3AOp.V_FMA_F64: 'D0.f64 = fma(S0.f64, S1.f64, S2.f64)',
VOP3AOp.V_LERP_U8: 'tmp = ((S0.u32[31 : 24] + S1.u32[31 : 24] + S2.u32[24].u8) >> 1U << 24U);\ntmp += ((S0.u32[23 : 16] + S1.u32[23 : 16] + S2.u32[16].u8) >> 1U << 16U);\ntmp += ((S0.u32[15 : 8] + S1.u32[15 : 8] + S2.u32[8].u8) >> 1U << 8U);\ntmp += ((S0.u32[7 : 0] + S1.u32[7 : 0] + S2.u32[0].u8) >> 1U);\nD0.u32 = tmp.u32',
VOP3AOp.V_ALIGNBIT_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> S2.u32[4 : 0]) & 0xffffffffLL)",
VOP3AOp.V_ALIGNBYTE_B32: "D0.u32 = 32'U(({ S0.u32, S1.u32 } >> (S2.u32[1 : 0] * 8U)) & 0xffffffffLL)",
VOP3AOp.V_MIN3_F32: 'D0.f32 = v_min_f32(v_min_f32(S0.f32, S1.f32), S2.f32)',
VOP3AOp.V_MIN3_I32: 'D0.i32 = v_min_i32(v_min_i32(S0.i32, S1.i32), S2.i32)',
VOP3AOp.V_MIN3_U32: 'D0.u32 = v_min_u32(v_min_u32(S0.u32, S1.u32), S2.u32)',
VOP3AOp.V_MAX3_F32: 'D0.f32 = v_max_f32(v_max_f32(S0.f32, S1.f32), S2.f32)',
VOP3AOp.V_MAX3_I32: 'D0.i32 = v_max_i32(v_max_i32(S0.i32, S1.i32), S2.i32)',
VOP3AOp.V_MAX3_U32: 'D0.u32 = v_max_u32(v_max_u32(S0.u32, S1.u32), S2.u32)',
VOP3AOp.V_MED3_F32: "if (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)) || isNAN(64'F(S2.f32))) then\nD0.f32 = v_min3_f32(S0.f32, S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S0.f32 then\nD0.f32 = v_max_f32(S1.f32, S2.f32)\nelsif v_max3_f32(S0.f32, S1.f32, S2.f32) == S1.f32 then\nD0.f32 = v_max_f32(S0.f32, S2.f32)\nelse\nD0.f32 = v_max_f32(S0.f32, S1.f32)\nendif",
VOP3AOp.V_MED3_I32: 'if v_max3_i32(S0.i32, S1.i32, S2.i32) == S0.i32 then\nD0.i32 = v_max_i32(S1.i32, S2.i32)\nelsif v_max3_i32(S0.i32, S1.i32, S2.i32) == S1.i32 then\nD0.i32 = v_max_i32(S0.i32, S2.i32)\nelse\nD0.i32 = v_max_i32(S0.i32, S1.i32)\nendif',
VOP3AOp.V_MED3_U32: 'if v_max3_u32(S0.u32, S1.u32, S2.u32) == S0.u32 then\nD0.u32 = v_max_u32(S1.u32, S2.u32)\nelsif v_max3_u32(S0.u32, S1.u32, S2.u32) == S1.u32 then\nD0.u32 = v_max_u32(S0.u32, S2.u32)\nelse\nD0.u32 = v_max_u32(S0.u32, S1.u32)\nendif',
VOP3AOp.V_SAD_U8: "ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp",
VOP3AOp.V_SAD_HI_U8: "D0.u32 = (32'U(v_sad_u8(S0, S1, 0U)) << 16U) + S2.u32",
VOP3AOp.V_SAD_U16: 'ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += ABSDIFF(S0[15 : 0].u16, S1[15 : 0].u16);\ntmp += ABSDIFF(S0[31 : 16].u16, S1[31 : 16].u16);\nD0.u32 = tmp',
VOP3AOp.V_SAD_U32: 'ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\nD0.u32 = ABSDIFF(S0.u32, S1.u32) + S2.u32',
VOP3AOp.V_CVT_PK_U8_F32: "tmp = (S2.u32 & 32'U(~(0xff << (S1.u32[1 : 0].u32 * 8U))));\ntmp = (tmp | ((32'U(f32_to_u8(S0.f32)) & 255U) << (S1.u32[1 : 0].u32 * 8U)));\nD0.u32 = tmp",
VOP3AOp.V_DIV_FIXUP_F32: "sign_out = (sign(S1.f32) ^ sign(S2.f32));\nif isNAN(64'F(S2.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S2.f32)))\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S1.f32)))\nelsif ((64'F(S1.f32) == 0.0) && (64'F(S2.f32) == 0.0)) then\n// 0/0\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(abs(S1.f32)) == +INF) && (64'F(abs(S2.f32)) == +INF)) then\n// inf/inf\nD0.f32 = 32'F(0xffc00000)\nelsif ((64'F(S1.f32) == 0.0) || (64'F(abs(S2.f32)) == +INF)) then\n// x/0, or inf/y\nD0.f32 = sign_out ? -INF.f32 : +INF.f32\nelsif ((64'F(abs(S1.f32)) == +INF) || (64'F(S2.f32) == 0.0)) then\n// x/inf, 0/y\nD0.f32 = sign_out ? -0.0F : 0.0F\nelsif exponent(S2.f32) - exponent(S1.f32) < -150 then\nD0.f32 = sign_out ? -UNDERFLOW_F32 : UNDERFLOW_F32\nelsif exponent(S1.f32) == 255 then\nD0.f32 = sign_out ? -OVERFLOW_F32 : OVERFLOW_F32\nelse\nD0.f32 = sign_out ? -abs(S0.f32) : abs(S0.f32)\nendif",
VOP3AOp.V_DIV_FIXUP_F64: "sign_out = (sign(S1.f64) ^ sign(S2.f64));\nif isNAN(S2.f64) then\nD0.f64 = cvtToQuietNAN(S2.f64)\nelsif isNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif ((S1.f64 == 0.0) && (S2.f64 == 0.0)) then\n// 0/0\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((abs(S1.f64) == +INF) && (abs(S2.f64) == +INF)) then\n// inf/inf\nD0.f64 = 64'F(0xfff8000000000000LL)\nelsif ((S1.f64 == 0.0) || (abs(S2.f64) == +INF)) then\n// x/0, or inf/y\nD0.f64 = sign_out ? -INF : +INF\nelsif ((abs(S1.f64) == +INF) || (S2.f64 == 0.0)) then\n// x/inf, 0/y\nD0.f64 = sign_out ? -0.0 : 0.0\nelsif exponent(S2.f64) - exponent(S1.f64) < -1075 then\nD0.f64 = sign_out ? -UNDERFLOW_F64 : UNDERFLOW_F64\nelsif exponent(S1.f64) == 2047 then\nD0.f64 = sign_out ? -OVERFLOW_F64 : OVERFLOW_F64\nelse\nD0.f64 = sign_out ? -abs(S0.f64) : abs(S0.f64)\nendif",
VOP3AOp.V_DIV_FMAS_F32: 'if VCC.u64[laneId] then\nD0.f32 = 2.0F ** 32 * fma(S0.f32, S1.f32, S2.f32)\nelse\nD0.f32 = fma(S0.f32, S1.f32, S2.f32)\nendif',
VOP3AOp.V_DIV_FMAS_F64: 'if VCC.u64[laneId] then\nD0.f64 = 2.0 ** 64 * fma(S0.f64, S1.f64, S2.f64)\nelse\nD0.f64 = fma(S0.f64, S1.f64, S2.f64)\nendif',
VOP3AOp.V_MSAD_U8: "ABSDIFF = lambda(x, y) (\nx > y ? x - y : y - x);\n// UNSIGNED comparison\ntmp = S2.u32;\ntmp += S1.u32[7 : 0] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[7 : 0], S1.u32[7 : 0]));\ntmp += S1.u32[15 : 8] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[15 : 8], S1.u32[15 : 8]));\ntmp += S1.u32[23 : 16] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[23 : 16], S1.u32[23 : 16]));\ntmp += S1.u32[31 : 24] == 8'0U ? 0U : 32'U(ABSDIFF(S0.u32[31 : 24], S1.u32[31 : 24]));\nD0.u32 = tmp",
VOP3AOp.V_QSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_sad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_sad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_sad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_sad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64",
VOP3AOp.V_MQSAD_PK_U16_U8: "tmp[63 : 48] = 16'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[63 : 48].u32));\ntmp[47 : 32] = 16'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[47 : 32].u32));\ntmp[31 : 16] = 16'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[31 : 16].u32));\ntmp[15 : 0] = 16'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[15 : 0].u32));\nD0.b64 = tmp.b64",
VOP3AOp.V_MQSAD_U32_U8: "tmp[127 : 96] = 32'B(v_msad_u8(S0[55 : 24], S1[31 : 0], S2[127 : 96].u32));\ntmp[95 : 64] = 32'B(v_msad_u8(S0[47 : 16], S1[31 : 0], S2[95 : 64].u32));\ntmp[63 : 32] = 32'B(v_msad_u8(S0[39 : 8], S1[31 : 0], S2[63 : 32].u32));\ntmp[31 : 0] = 32'B(v_msad_u8(S0[31 : 0], S1[31 : 0], S2[31 : 0].u32));\nD0.b128 = tmp.b128",
VOP3AOp.V_MAD_LEGACY_F16: "tmp = S0.f16 * S1.f16 + S2.f16;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
VOP3AOp.V_MAD_LEGACY_U16: "tmp = S0.u16 * S1.u16 + S2.u16;\nif OPSEL.u4[3] then\nD0 = { tmp.u16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.u16 }\nendif",
VOP3AOp.V_MAD_LEGACY_I16: "tmp = S0.i16 * S1.i16 + S2.i16;\nif OPSEL.u4[3] then\nD0 = { tmp.i16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.i16 }\nendif",
VOP3AOp.V_PERM_B32: "BYTE_PERMUTE = lambda(data, sel) (\ndeclare in : 8'B[8];\nfor i in 0 : 7 do\nin[i] = data[i * 8 + 7 : i * 8].b8\nendfor;\nif sel.u32 >= 13U then\nreturn 8'0xff\nelsif sel.u32 == 12U then\nreturn 8'0x0\nelsif sel.u32 == 11U then\nreturn in[7][7].b8 * 8'0xff\nelsif sel.u32 == 10U then\nreturn in[5][7].b8 * 8'0xff\nelsif sel.u32 == 9U then\nreturn in[3][7].b8 * 8'0xff\nelsif sel.u32 == 8U then\nreturn in[1][7].b8 * 8'0xff\nelse\nreturn in[sel]\nendif);\nD0[31 : 24] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[31 : 24]);\nD0[23 : 16] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[23 : 16]);\nD0[15 : 8] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[15 : 8]);\nD0[7 : 0] = BYTE_PERMUTE({ S0.u32, S1.u32 }, S2.u32[7 : 0])",
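# V_PERM_B32: each selector byte in S2 picks one byte of the 64-bit value { S0, S1 }
# (0 = LSB of S1 ... 7 = MSB of S0); selectors 8-11 broadcast a sign bit, 12 gives 0x00,
# and 13-255 give 0xff, per the BYTE_PERMUTE lambda above.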
VOP3AOp.V_FMA_LEGACY_F16: "tmp = fma(S0.f16, S1.f16, S2.f16);\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
VOP3AOp.V_DIV_FIXUP_LEGACY_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\ntmp = cvtToQuietNAN(64'F(S2.f16))\nelsif isNAN(64'F(S1.f16)) then\ntmp = cvtToQuietNAN(64'F(S1.f16))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\ntmp = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\ntmp = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\ntmp = sign_out ? -INF : +INF\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\ntmp = sign_out ? -0.0 : 0.0\nelse\ntmp = sign_out ? -abs(S0.f16) : abs(S0.f16)\nendif;\nif OPSEL.u4[3] then\nD0 = { tmp.f16, D0[15 : 0] }\nelse\nD0 = { 16'0, tmp.f16 }\nendif",
VOP3AOp.V_CVT_PKACCUM_U8_F32: "byte = S1.u32[1 : 0];\nbit = byte.u32 * 8U;\nD0.u32[bit + 7U : bit] = 32'U(f32_to_u8(S0.f32))",
VOP3AOp.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32",
VOP3AOp.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32",
VOP3AOp.V_XAD_U32: 'D0.u32 = (S0.u32 ^ S1.u32) + S2.u32',
VOP3AOp.V_MIN3_F16: 'D0.f16 = v_min_f16(v_min_f16(S0.f16, S1.f16), S2.f16)',
VOP3AOp.V_MIN3_I16: 'D0.i16 = v_min_i16(v_min_i16(S0.i16, S1.i16), S2.i16)',
VOP3AOp.V_MIN3_U16: 'D0.u16 = v_min_u16(v_min_u16(S0.u16, S1.u16), S2.u16)',
VOP3AOp.V_MAX3_F16: 'D0.f16 = v_max_f16(v_max_f16(S0.f16, S1.f16), S2.f16)',
VOP3AOp.V_MAX3_I16: 'D0.i16 = v_max_i16(v_max_i16(S0.i16, S1.i16), S2.i16)',
VOP3AOp.V_MAX3_U16: 'D0.u16 = v_max_u16(v_max_u16(S0.u16, S1.u16), S2.u16)',
VOP3AOp.V_MED3_F16: "if (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)) || isNAN(64'F(S2.f16))) then\nD0.f16 = v_min3_f16(S0.f16, S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S0.f16 then\nD0.f16 = v_max_f16(S1.f16, S2.f16)\nelsif v_max3_f16(S0.f16, S1.f16, S2.f16) == S1.f16 then\nD0.f16 = v_max_f16(S0.f16, S2.f16)\nelse\nD0.f16 = v_max_f16(S0.f16, S1.f16)\nendif",
VOP3AOp.V_MED3_I16: 'if v_max3_i16(S0.i16, S1.i16, S2.i16) == S0.i16 then\nD0.i16 = v_max_i16(S1.i16, S2.i16)\nelsif v_max3_i16(S0.i16, S1.i16, S2.i16) == S1.i16 then\nD0.i16 = v_max_i16(S0.i16, S2.i16)\nelse\nD0.i16 = v_max_i16(S0.i16, S1.i16)\nendif',
VOP3AOp.V_MED3_U16: 'if v_max3_u16(S0.u16, S1.u16, S2.u16) == S0.u16 then\nD0.u16 = v_max_u16(S1.u16, S2.u16)\nelsif v_max3_u16(S0.u16, S1.u16, S2.u16) == S1.u16 then\nD0.u16 = v_max_u16(S0.u16, S2.u16)\nelse\nD0.u16 = v_max_u16(S0.u16, S1.u16)\nendif',
VOP3AOp.V_LSHL_ADD_U32: 'D0.u32 = (S0.u32 << S1.u32[4 : 0].u32) + S2.u32',
VOP3AOp.V_ADD_LSHL_U32: 'D0.u32 = ((S0.u32 + S1.u32) << S2.u32[4 : 0].u32)',
VOP3AOp.V_ADD3_U32: 'D0.u32 = S0.u32 + S1.u32 + S2.u32',
VOP3AOp.V_LSHL_OR_B32: 'D0.u32 = ((S0.u32 << S1.u32[4 : 0].u32) | S2.u32)',
VOP3AOp.V_AND_OR_B32: 'D0.u32 = ((S0.u32 & S1.u32) | S2.u32)',
VOP3AOp.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)',
VOP3AOp.V_MAD_F16: 'D0.f16 = S0.f16 * S1.f16 + S2.f16',
VOP3AOp.V_MAD_U16: 'D0.u16 = S0.u16 * S1.u16 + S2.u16',
VOP3AOp.V_MAD_I16: 'D0.i16 = S0.i16 * S1.i16 + S2.i16',
VOP3AOp.V_FMA_F16: 'D0.f16 = fma(S0.f16, S1.f16, S2.f16)',
VOP3AOp.V_DIV_FIXUP_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S2.f16)))\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\nD0.f16 = sign_out ? -INF.f16 : +INF.f16\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\nD0.f16 = sign_out ? -16'0.0 : 16'0.0\nelse\nD0.f16 = sign_out ? -abs(S0.f16) : abs(S0.f16)\nendif",
VOP3AOp.V_LSHL_ADD_U64: 'D0.u64 = (S0.u64 << S1.u32[2 : 0].u32) + S2.u64',
VOP3AOp.V_BITOP3_B16: "TTBL = { INST.OMOD[1 : 0], INST.ABS[2 : 0], INST.NEG[2 : 0] };\ntmp = 16'0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 16'U(~S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 16'U(~S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 16'U(~S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 16'U(~S0.b16 & S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 16'U(S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 16'U(S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 16'U(S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 16'U(S0.b16 & S1.b16 & S2.b16) : 16'0U));\nD.b16 = tmp.b16\n{ OMOD[1:0], ABS[2:0], NEG[2:0] }\nD0[i] = TTBL[{S0[i], S1[i], S2[i]}]",
VOP3AOp.V_BITOP3_B32: "TTBL = { INST.OMOD[1 : 0], INST.ABS[2 : 0], INST.NEG[2 : 0] };\ntmp = 0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 32'U(~S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 32'U(~S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 32'U(~S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 32'U(~S0.b32 & S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 32'U(S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 32'U(S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 32'U(S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 32'U(S0.b32 & S1.b32 & S2.b32) : 0U));\nD.b32 = tmp.b32\n{ OMOD[1:0], ABS[2:0], NEG[2:0] }\nD0[i] = TTBL[{S0[i], S1[i], S2[i]}]",
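# V_BITOP3_B16/_B32 compute an arbitrary three-input boolean function: the 8-bit truth
# table TTBL is packed into { OMOD[1:0], ABS[2:0], NEG[2:0] }, and bit {S0[i], S1[i],
# S2[i]} of TTBL supplies each result bit D0[i], as the trailing summary above states.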
VOP3AOp.V_CVT_SCALEF32_PK_FP8_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_fp8_scale(S0.f32, scale.u8);\ntmp1 = f32_to_fp8_scale(S1.f32, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_BF8_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_bf8_scale(S0.f32, scale.u8);\ntmp1 = f32_to_bf8_scale(S1.f32, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_FP8_F32: "scale = 32'U(exponent(S2.f32));\ntmp = f32_to_fp8_sr_scale(S0.f32, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_BF8_F32: "scale = 32'U(exponent(S2.f32));\ntmp = f32_to_bf8_sr_scale(S0.f32, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].bf8 = tmp;\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_F32_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = fp8_to_f32_scale(src[7 : 0].fp8, scale.u8);\ntmp1 = fp8_to_f32_scale(src[15 : 8].fp8, scale.u8);\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1",
VOP3AOp.V_CVT_SCALEF32_PK_F32_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = bf8_to_f32_scale(src[7 : 0].bf8, scale.u8);\ntmp1 = bf8_to_f32_scale(src[15 : 8].bf8, scale.u8);\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1",
VOP3AOp.V_CVT_SCALEF32_F32_FP8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8;\ntmp = fp8_to_f32_scale(src, scale.u8);\nD0 = tmp.b32",
VOP3AOp.V_CVT_SCALEF32_F32_BF8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8;\ntmp = bf8_to_f32_scale(src, scale.u8);\nD0 = tmp.b32",
VOP3AOp.V_CVT_SCALEF32_PK_FP4_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_fp4_scale(S0.f32, scale.u8);\ntmp1 = f32_to_fp4_scale(S1.f32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = f32_to_fp4_sr_scale(S0[31 : 0].f32, randomVal, scale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32));\ntmp1 = f32_to_fp4_sr_scale(S0[63 : 32].f32, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_F32_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\ntmp0 = fp4_to_f32_scale(src[3 : 0].fp4, scale.u8);\ntmp1 = fp4_to_f32_scale(src[7 : 4].fp4, scale.u8);\nD0[31 : 0].f32 = tmp0;\nD0[63 : 32].f32 = tmp1",
VOP3AOp.V_CVT_SCALEF32_PK_FP8_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_fp8_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_fp8_scale(S0[31 : 16].f16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_BF8_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_bf8_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_bf8_scale(S0[31 : 16].f16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_FP8_F16: "scale = 32'U(exponent(S2.f32));\ntmp = f16_to_fp8_sr_scale(S0.f16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_BF8_F16: "scale = 32'U(exponent(S2.f32));\ntmp = f16_to_bf8_sr_scale(S0.f16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].bf8 = tmp;\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_FP8_BF16: "scale = 32'U(exponent(S1.f32));\ntmp0 = bf16_to_fp8_scale(S0[15 : 0].bf16, scale.u8);\ntmp1 = bf16_to_fp8_scale(S0[31 : 16].bf16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_BF8_BF16: "scale = 32'U(exponent(S1.f32));\ntmp0 = bf16_to_bf8_scale(S0[15 : 0].bf16, scale.u8);\ntmp1 = bf16_to_bf8_scale(S0[31 : 16].bf16, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_FP8_BF16: "scale = 32'U(exponent(S2.f32));\ntmp = bf16_to_fp8_sr_scale(S0.bf16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_BF8_BF16: "scale = 32'U(exponent(S2.f32));\ntmp = bf16_to_bf8_sr_scale(S0.bf16, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].bf8 = tmp;\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_F16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = fp8_to_f16_scale(src[7 : 0].fp8, scale.u8);\ntmp1 = fp8_to_f16_scale(src[15 : 8].fp8, scale.u8);\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1",
VOP3AOp.V_CVT_SCALEF32_PK_F16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = bf8_to_f16_scale(src[7 : 0].bf8, scale.u8);\ntmp1 = bf8_to_f16_scale(src[15 : 8].bf8, scale.u8);\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1",
VOP3AOp.V_CVT_SCALEF32_F16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].fp8;\ntmp = fp8_to_f16_scale(src, scale.u8);\n// OPSEL[3] controls destination hi/lo\nD0 = tmp.b32",
VOP3AOp.V_CVT_SCALEF32_F16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].bf8;\ntmp = bf8_to_f16_scale(src, scale.u8);\n// OPSEL[3] controls destination hi/lo\nD0 = tmp.b32",
VOP3AOp.V_CVT_SCALEF32_PK_FP4_F16: "scale = 32'U(exponent(S1.f32));\ntmp0 = f16_to_fp4_scale(S0[15 : 0].f16, scale.u8);\ntmp1 = f16_to_fp4_scale(S0[31 : 16].f16, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_FP4_BF16: "scale = 32'U(exponent(S1.f32));\ntmp0 = bf16_to_fp4_scale(S0[15 : 0].bf16, scale.u8);\ntmp1 = bf16_to_fp4_scale(S0[31 : 16].bf16, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = f16_to_fp4_sr_scale(S0[15 : 0].f16, randomVal, scale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32));\ntmp1 = f16_to_fp4_sr_scale(S0[31 : 16].f16, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_PK_FP4_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ntmp0 = bf16_to_fp4_sr_scale(S0[15 : 0].bf16, randomVal, scale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32));\ntmp1 = bf16_to_fp4_sr_scale(S0[31 : 16].bf16, randomVal, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].b8 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_F16_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\ntmp0 = fp4_to_f16_scale(src[3 : 0].fp4, scale.u8);\ntmp1 = fp4_to_f16_scale(src[7 : 4].fp4, scale.u8);\nD0[15 : 0].f16 = tmp0;\nD0[31 : 16].f16 = tmp1",
VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP4: "scale = 32'U(exponent(S1.f32));\nsrcbyte = OPSEL[1 : 0].i32 * 8;\nsrc = VGPR[laneId][SRC0.u32][srcbyte + 7 : srcbyte].b8;\ntmp0 = fp4_to_bf16_scale(src[3 : 0].fp4, scale.u8);\ntmp1 = fp4_to_bf16_scale(src[7 : 4].fp4, scale.u8);\nD0[15 : 0].bf16 = tmp0;\nD0[31 : 16].bf16 = tmp1",
VOP3AOp.V_CVT_SCALEF32_2XPK16_FP6_F32: "scale = 32'U(exponent(S2.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 15 do\ndOffset = pass * 12;\nsOffset = pass * 32;\n// Note that S0 and S1 inputs are interleaved in the packed result.\ntmp[dOffset + 5 : dOffset].fp6 = f32_to_fp6_scale(S0[sOffset + 31 : sOffset].f32, scale.u8);\ntmp[dOffset + 11 : dOffset + 6].fp6 = f32_to_fp6_scale(S1[sOffset + 31 : sOffset].f32, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_2XPK16_BF6_F32: "scale = 32'U(exponent(S2.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 15 do\ndOffset = pass * 12;\nsOffset = pass * 32;\n// Note that S0 and S1 inputs are interleaved in the packed result.\ntmp[dOffset + 5 : dOffset].bf6 = f32_to_bf6_scale(S0[sOffset + 31 : sOffset].f32, scale.u8);\ntmp[dOffset + 11 : dOffset + 6].bf6 = f32_to_bf6_scale(S1[sOffset + 31 : sOffset].f32, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 32;\ntmp[dOffset + 5 : dOffset].fp6 = f32_to_fp6_sr_scale(S0[sOffset + 31 : sOffset].f32, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F32: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 32;\ntmp[dOffset + 5 : dOffset].bf6 = f32_to_bf6_sr_scale(S0[sOffset + 31 : sOffset].f32, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_PK32_F32_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 1024'B;\nfor pass in 0 : 31 do\ndOffset = pass * 32;\nsOffset = pass * 6;\ntmp[dOffset + 31 : dOffset].f32 = fp6_to_f32_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[1023 : 0] = tmp.b1024",
VOP3AOp.V_CVT_SCALEF32_PK32_F32_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 1024'B;\nfor pass in 0 : 31 do\ndOffset = pass * 32;\nsOffset = pass * 6;\ntmp[dOffset + 31 : dOffset].f32 = bf6_to_f32_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[1023 : 0] = tmp.b1024",
VOP3AOp.V_CVT_SCALEF32_PK32_FP6_F16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].fp6 = f16_to_fp6_scale(S0[sOffset + 15 : sOffset].f16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_PK32_FP6_BF16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].fp6 = bf16_to_fp6_scale(S0[sOffset + 15 : sOffset].bf16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_PK32_BF6_F16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].bf6 = f16_to_bf6_scale(S0[sOffset + 15 : sOffset].f16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_PK32_BF6_BF16: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].bf6 = bf16_to_bf6_scale(S0[sOffset + 15 : sOffset].bf16, scale.u8)\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].fp6 = f16_to_fp6_sr_scale(S0[sOffset + 15 : sOffset].f16, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_SR_PK32_FP6_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].fp6 = bf16_to_fp6_sr_scale(S0[sOffset + 15 : sOffset].bf16, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_F16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].bf6 = f16_to_bf6_sr_scale(S0[sOffset + 15 : sOffset].f16, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_SR_PK32_BF6_BF16: "scale = 32'U(exponent(S2.f32));\nrandomVal = S1.u32;\ndeclare tmp : 192'B;\nfor pass in 0 : 31 do\ndOffset = pass * 6;\nsOffset = pass * 16;\ntmp[dOffset + 5 : dOffset].bf6 = bf16_to_bf6_sr_scale(S0[sOffset + 15 : sOffset].bf16, randomVal,\nscale.u8);\nrandomVal = 32'U(v_prng_b32(randomVal.b32))\nendfor;\nD0[191 : 0] = tmp.b192",
VOP3AOp.V_CVT_SCALEF32_PK32_F16_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ndOffset = pass * 16;\nsOffset = pass * 6;\ntmp[dOffset + 15 : dOffset].f16 = fp6_to_f16_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512",
VOP3AOp.V_CVT_SCALEF32_PK32_BF16_FP6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ndOffset = pass * 16;\nsOffset = pass * 6;\ntmp[dOffset + 15 : dOffset].bf16 = fp6_to_bf16_scale(S0[sOffset + 5 : sOffset].fp6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512",
VOP3AOp.V_CVT_SCALEF32_PK32_F16_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ndOffset = pass * 16;\nsOffset = pass * 6;\ntmp[dOffset + 15 : dOffset].f16 = bf6_to_f16_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512",
VOP3AOp.V_CVT_SCALEF32_PK32_BF16_BF6: "scale = 32'U(exponent(S1.f32));\ndeclare tmp : 512'B;\nfor pass in 0 : 31 do\ndOffset = pass * 16;\nsOffset = pass * 6;\ntmp[dOffset + 15 : dOffset].bf16 = bf6_to_bf16_scale(S0[sOffset + 5 : sOffset].bf6, scale.u8)\nendfor;\nD0[511 : 0] = tmp.b512",
VOP3AOp.V_ASHR_PK_I8_I32: "SAT8 = lambda(n) (\nif n <= -128 then\nreturn 8'0x80\nelsif n >= 127 then\nreturn 8'0x7f\nelse\nreturn n[7 : 0].b8\nendif);\ndeclare tmp : 16'B;\ntmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32);\ntmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32);\nD0[15 : 0] = tmp",
VOP3AOp.V_ASHR_PK_U8_I32: "SAT8 = lambda(n) (\nif n <= 0 then\nreturn 8'0x0\nelsif n >= 255 then\nreturn 8'0xff\nelse\nreturn n[7 : 0].b8\nendif);\ndeclare tmp : 16'B;\ntmp[7 : 0] = SAT8(S0.i32 >> S2[4 : 0].u32);\ntmp[15 : 8] = SAT8(S1.i32 >> S2[4 : 0].u32);\nD0[15 : 0] = tmp",
VOP3AOp.V_CVT_PK_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode',
VOP3AOp.V_CVT_PK_BF16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ntmp[15 : 0].bf16 = f32_to_bf16(S0.f32);\ntmp[31 : 16].bf16 = f32_to_bf16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode',
VOP3AOp.V_CVT_SCALEF32_PK_BF16_FP8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = fp8_to_bf16_scale(src[7 : 0].fp8, scale);\ntmp1 = fp8_to_bf16_scale(src[15 : 8].fp8, scale);\nD0[15 : 0].bf16 = tmp0.bf16;\nD0[31 : 16].bf16 = tmp1.bf16",
VOP3AOp.V_CVT_SCALEF32_PK_BF16_BF8: "scale = 32'U(exponent(S1.f32));\nsrcword = OPSEL[0].i32 * 16;\nsrc = VGPR[laneId][SRC0.u32][srcword + 15 : srcword].b16;\ntmp0 = bf8_to_bf16_scale(src[7 : 0].bf8, scale);\ntmp1 = bf8_to_bf16_scale(src[15 : 8].bf8, scale);\nD0[15 : 0].bf16 = tmp0.bf16;\nD0[31 : 16].bf16 = tmp1.bf16",
VOP3AOp.V_ADD_F64: 'D0.f64 = S0.f64 + S1.f64',
VOP3AOp.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64',
VOP3AOp.V_MIN_F64: "if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S1.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S0.f64\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F64.\nD0.f64 = S0.f64 < S1.f64 ? S0.f64 : S1.f64\nendif",
VOP3AOp.V_MAX_F64: 'if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S1.f64\nelsif WAVE_MODE.IEEE then\nD0.f64 = S0.f64 >= S1.f64 ? S0.f64 : S1.f64\nelse\nD0.f64 = S0.f64 > S1.f64 ? S0.f64 : S1.f64\nendif',
VOP3AOp.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32\n// ldexp()',
VOP3AOp.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32',
VOP3AOp.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)",
VOP3AOp.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)",
VOP3AOp.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32\n// ldexp()',
VOP3AOp.V_READLANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nD0.b32 = VGPR[lane][SRC0.u32]',
VOP3AOp.V_WRITELANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nVGPR[lane][VDST.u32] = S0.b32',
VOP3AOp.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp",
VOP3AOp.V_MBCNT_LO_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp",
VOP3AOp.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp\nv_mbcnt_lo_u32_b32 v0, -1, 0\nv_mbcnt_hi_u32_b32 v0, -1, v0\n// v0 now contains laneId\nv_mbcnt_lo_u32_b32 v0, vcc_lo, 0\nv_mbcnt_hi_u32_b32 v0, vcc_hi, v0 // Note vcc_hi is passed in for second instruction\n// v0 now contains position among lanes with VCC=1",
VOP3AOp.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)',
VOP3AOp.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)',
VOP3AOp.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)',
VOP3AOp.V_TRIG_PREOP_F64: "shift = 32'I(S1[4 : 0].u32) * 53;\nif exponent(S0.f64) > 1077 then\nshift += exponent(S0.f64) - 1077\nendif;\n// (2.0/PI) == 0.{b_1200, b_1199, b_1198, ..., b_1, b_0}\n// b_1200 is the MSB of the fractional part of 2.0/PI\n// Left shift operation indicates which bits are brought\n// into the whole part of the number.\n// Only whole part of result is kept.\nresult = 64'F((1201'B(2.0 / PI)[1200 : 0] << shift.u32) & 1201'0x1fffffffffffff);\nscale = -53 - shift;\nif exponent(S0.f64) >= 1968 then\nscale += 128\nendif;\nD0.f64 = ldexp(result, scale)",
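# V_TRIG_PREOP_F64 extracts a 53-bit slice of 2.0/PI selected by S1[4:0] and pre-scaled
# by S0's exponent; this appears intended for Payne-Hanek style trig argument reduction.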
VOP3AOp.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)',
VOP3AOp.V_CVT_PKNORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);\nD0 = tmp.b32",
VOP3AOp.V_CVT_PKNORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);\nD0 = tmp.b32",
VOP3AOp.V_CVT_PKRTZ_F16_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_TOWARD_ZERO;\ntmp[15 : 0].f16 = f32_to_f16(S0.f32);\ntmp[31 : 16].f16 = f32_to_f16(S1.f32);\nD0 = tmp.b32;\nROUND_MODE = prev_mode;\n// Round-toward-zero regardless of current round mode setting in hardware.',
VOP3AOp.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);\nD0 = tmp.b32",
VOP3AOp.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);\nD0 = tmp.b32",
VOP3AOp.V_CVT_PKNORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);\nD0 = tmp.b32",
VOP3AOp.V_CVT_PKNORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);\nD0 = tmp.b32",
VOP3AOp.V_ADD_I32: 'D0.i32 = S0.i32 + S1.i32',
VOP3AOp.V_SUB_I32: 'D0.i32 = S0.i32 - S1.i32',
VOP3AOp.V_ADD_I16: 'D0.i16 = S0.i16 + S1.i16',
VOP3AOp.V_SUB_I16: 'D0.i16 = S0.i16 - S1.i16',
VOP3AOp.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16',
VOP3AOp.V_MUL_LEGACY_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif",
VOP3AOp.V_CVT_PK_FP8_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_fp8(S1.f32), f32_to_fp8(S0.f32) };\n// D0[15:0] are preserved\nendif;\nROUND_MODE = prev_mode',
VOP3AOp.V_CVT_PK_BF8_F32: 'prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u32 == 0U then\nVGPR[laneId][VDST.u32][15 : 0].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[31:16] are preserved\nelse\nVGPR[laneId][VDST.u32][31 : 16].b16 = { f32_to_bf8(S1.f32), f32_to_bf8(S0.f32) };\n// D0[15:0] are preserved\nendif;\nROUND_MODE = prev_mode',
VOP3AOp.V_CVT_SR_FP8_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 12].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].fp8 = f32_to_fp8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].fp8 = f32_to_fp8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].fp8 = f32_to_fp8(tmp.f32)\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode",
VOP3AOp.V_CVT_SR_BF8_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\ns = sign(S0.f32);\ne = exponent(S0.f32);\nm = 23'U(32'U(23'B(mantissa(S0.f32))) + S1[31 : 11].u32);\ntmp = float32(s, e, m);\n// Add stochastic value to mantissa, wrap around on overflow\nif OPSEL[3 : 2].u2 == 2'0U then\nVGPR[laneId][VDST.u32][7 : 0].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'1U then\nVGPR[laneId][VDST.u32][15 : 8].bf8 = f32_to_bf8(tmp.f32)\nelsif OPSEL[3 : 2].u2 == 2'2U then\nVGPR[laneId][VDST.u32][23 : 16].bf8 = f32_to_bf8(tmp.f32)\nelse\nVGPR[laneId][VDST.u32][31 : 24].bf8 = f32_to_bf8(tmp.f32)\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode",
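# For the V_CVT_SR_* ops above, "SR" is stochastic rounding: the upper bits of S1 are
# added into the source mantissa (wrapping on overflow) before the narrowing convert.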
VOP3AOp.V_CVT_SR_F16_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u2 == 2'0U then\nVGPR[laneId][VDST.u32][15 : 0].f16 = 16'F(f32_to_f16_sr(S0.f32, S1.u32))\nelse\nVGPR[laneId][VDST.u32][31 : 16].f16 = 16'F(f32_to_f16_sr(S0.f32, S1.u32))\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode",
VOP3AOp.V_CVT_SR_BF16_F32: "prev_mode = ROUND_MODE;\nROUND_MODE = ROUND_NEAREST_EVEN;\nif OPSEL[3].u2 == 2'0U then\nVGPR[laneId][VDST.u32][15 : 0].bf16 = 16'BF(f32_to_bf16_sr(S0.f32, S1.u32))\nelse\nVGPR[laneId][VDST.u32][31 : 16].bf16 = 16'BF(f32_to_bf16_sr(S0.f32, S1.u32))\nendif;\n// Unwritten bytes of D are preserved.\nROUND_MODE = prev_mode",
VOP3AOp.V_MINIMUM3_F32: "D0.f32 = 32'F(v_minimum_f32(v_minimum_f32(S0.f32, S1.f32), S2.f32))",
VOP3AOp.V_MAXIMUM3_F32: "D0.f32 = 32'F(v_maximum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32))",
}
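# A minimal hand-written sketch (not part of the autogenerated tables above) of how an
# entry can be mirrored in plain Python; `ref_v_bfe_u32` and `ref_v_med3_u32` are
# illustrative names only, not helpers used elsewhere in this module.
def ref_v_bfe_u32(s0:int, s1:int, s2:int) -> int:
  # D0.u32 = ((S0.u32 >> S1[4:0].u32) & ((1U << S2[4:0].u32) - 1U))
  return ((s0 >> (s1 & 0x1f)) & ((1 << (s2 & 0x1f)) - 1)) & 0xffffffff

def ref_v_med3_u32(s0:int, s1:int, s2:int) -> int:
  # median of three, equivalent to the v_max3/v_max branch structure in V_MED3_U32
  return sorted((s0, s1, s2))[1]

assert ref_v_bfe_u32(0xdeadbeef, 8, 8) == 0xbe
assert ref_v_med3_u32(3, 9, 5) == 5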
VOP3BOp_PCODE = {
VOP3BOp.V_ADD_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32);\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32",
VOP3BOp.V_SUB_CO_U32: "tmp = S0.u32 - S1.u32;\nVCC.u64[laneId] = S1.u32 > S0.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
VOP3BOp.V_SUBREV_CO_U32: "tmp = S1.u32 - S0.u32;\nVCC.u64[laneId] = S0.u32 > S1.u32 ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
VOP3BOp.V_ADDC_CO_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + VCC.u64[laneId].u64;\nVCC.u64[laneId] = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_ADDC_CO_U32.\nD0.u32 = tmp.u32",
VOP3BOp.V_SUBB_CO_U32: "tmp = S0.u32 - S1.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S1.u32) + VCC.u64[laneId].u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
VOP3BOp.V_SUBBREV_CO_U32: "tmp = S1.u32 - S0.u32 - VCC.u64[laneId].u32;\nVCC.u64[laneId] = 64'U(S0.u32) + VCC.u64[laneId].u64 > 64'U(S1.u32) ? 1'1U : 1'0U;\n// VCC is an UNSIGNED overflow/carry-out for V_SUBB_CO_U32.\nD0.u32 = tmp.u32",
VOP3BOp.V_DIV_SCALE_F32: "VCC = 0x0LL;\nif ((64'F(S2.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\nD0.f32 = NAN.f32\nelsif exponent(S2.f32) - exponent(S1.f32) >= 96 then\n// N/D near MAX_FLOAT_F32\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif S1.f32 == DENORM.f32 then\nD0.f32 = ldexp(S0.f32, 64)\nelsif ((1.0 / 64'F(S1.f32) == DENORM.f64) && (S2.f32 / S1.f32 == DENORM.f32)) then\nVCC = 0x1LL;\nif S0.f32 == S1.f32 then\n// Only scale the denominator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif 1.0 / 64'F(S1.f32) == DENORM.f64 then\nD0.f32 = ldexp(S0.f32, -64)\nelsif S2.f32 / S1.f32 == DENORM.f32 then\nVCC = 0x1LL;\nif S0.f32 == S2.f32 then\n// Only scale the numerator\nD0.f32 = ldexp(S0.f32, 64)\nendif\nelsif exponent(S2.f32) <= 23 then\n// Numerator is tiny\nD0.f32 = ldexp(S0.f32, 64)\nendif",
VOP3BOp.V_DIV_SCALE_F64: 'VCC = 0x0LL;\nif ((S2.f64 == 0.0) || (S1.f64 == 0.0)) then\nD0.f64 = NAN.f64\nelsif exponent(S2.f64) - exponent(S1.f64) >= 768 then\n// N/D near MAX_FLOAT_F64\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, 128)\nelsif ((1.0 / S1.f64 == DENORM.f64) && (S2.f64 / S1.f64 == DENORM.f64)) then\nVCC = 0x1LL;\nif S0.f64 == S1.f64 then\n// Only scale the denominator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif 1.0 / S1.f64 == DENORM.f64 then\nD0.f64 = ldexp(S0.f64, -128)\nelsif S2.f64 / S1.f64 == DENORM.f64 then\nVCC = 0x1LL;\nif S0.f64 == S2.f64 then\n// Only scale the numerator\nD0.f64 = ldexp(S0.f64, 128)\nendif\nelsif exponent(S2.f64) <= 53 then\n// Numerator is tiny\nD0.f64 = ldexp(S0.f64, 128)\nendif',
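# V_DIV_SCALE_* pre-scales the numerator or denominator and flags the scaling in VCC;
# it is meant to pair with V_DIV_FMAS_* (which reads VCC) and V_DIV_FIXUP_* to build a
# correctly rounded divide sequence.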
VOP3BOp.V_MAD_U64_U32: "{ D1.u1, D0.u64 } = 65'B(65'U(S0.u32) * 65'U(S1.u32) + 65'U(S2.u64))",
VOP3BOp.V_MAD_I64_I32: "{ D1.i1, D0.i64 } = 65'B(65'I(S0.i32) * 65'I(S1.i32) + 65'I(S2.i64))",
}
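# Hand-written illustration (not autogenerated) of the VOP3B carry-out convention: the
# 32-bit sum is written to D0 and the per-lane carry bit to VCC. `ref_v_add_co_u32` is
# an illustrative name only.
def ref_v_add_co_u32(s0:int, s1:int) -> tuple[int, int]:
  tmp = s0 + s1                                     # 64'U(S0.u32) + 64'U(S1.u32)
  return tmp & 0xffffffff, int(tmp >= 0x100000000)  # (D0.u32, VCC.u64[laneId])

assert ref_v_add_co_u32(0xffffffff, 1) == (0, 1)
assert ref_v_add_co_u32(2, 3) == (5, 0)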
VOP3POp_PCODE = {
VOP3POp.V_PK_MAD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 * S1[15 : 0].i16 + S2[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 * S1[31 : 16].i16 + S2[31 : 16].i16;\nD0.b32 = tmp",
VOP3POp.V_PK_MUL_LO_U16: 'tmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16;\nD0.b32 = tmp.b32',
VOP3POp.V_PK_ADD_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 + S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 + S1[31 : 16].i16;\nD0.b32 = tmp",
VOP3POp.V_PK_SUB_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 - S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 - S1[31 : 16].i16;\nD0.b32 = tmp",
VOP3POp.V_PK_LSHLREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 << S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 << S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32',
VOP3POp.V_PK_LSHRREV_B16: 'tmp[31 : 16].u16 = (S1[31 : 16].u16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].u16 = (S1[15 : 0].u16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32',
VOP3POp.V_PK_ASHRREV_I16: 'tmp[31 : 16].i16 = (S1[31 : 16].i16 >> S0.u32[19 : 16].u32);\ntmp[15 : 0].i16 = (S1[15 : 0].i16 >> S0.u32[3 : 0].u32);\nD0.b32 = tmp.b32',
VOP3POp.V_PK_MAX_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 >= S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 >= S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp",
VOP3POp.V_PK_MIN_I16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = S0[15 : 0].i16 < S1[15 : 0].i16 ? S0[15 : 0].i16 : S1[15 : 0].i16;\ntmp[31 : 16].i16 = S0[31 : 16].i16 < S1[31 : 16].i16 ? S0[31 : 16].i16 : S1[31 : 16].i16;\nD0.b32 = tmp",
VOP3POp.V_PK_MAD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 * S1[15 : 0].u16 + S2[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 * S1[31 : 16].u16 + S2[31 : 16].u16;\nD0.b32 = tmp",
VOP3POp.V_PK_ADD_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 + S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 + S1[31 : 16].u16;\nD0.b32 = tmp",
VOP3POp.V_PK_SUB_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 - S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 - S1[31 : 16].u16;\nD0.b32 = tmp",
VOP3POp.V_PK_MAX_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 >= S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 >= S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp",
VOP3POp.V_PK_MIN_U16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = S0[15 : 0].u16 < S1[15 : 0].u16 ? S0[15 : 0].u16 : S1[15 : 0].u16;\ntmp[31 : 16].u16 = S0[31 : 16].u16 < S1[31 : 16].u16 ? S0[31 : 16].u16 : S1[31 : 16].u16;\nD0.b32 = tmp",
VOP3POp.V_PK_FMA_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16);\ntmp[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16);\nD0.b32 = tmp",
VOP3POp.V_PK_ADD_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 + S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 + S1[31 : 16].f16;\nD0.b32 = tmp",
VOP3POp.V_PK_MUL_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = S0[15 : 0].f16 * S1[15 : 0].f16;\ntmp[31 : 16].f16 = S0[31 : 16].f16 * S1[31 : 16].f16;\nD0.b32 = tmp",
VOP3POp.V_PK_MIN_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_min_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_min_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
VOP3POp.V_PK_MAX_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_max_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_max_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
VOP3POp.V_DOT2_F32_BF16: "tmp = 32'F(S0[15 : 0].bf16) * 32'F(S1[15 : 0].bf16);\ntmp += 32'F(S0[31 : 16].bf16) * 32'F(S1[31 : 16].bf16);\ntmp += S2.f32;\nD0.f32 = tmp",
VOP3POp.V_PK_MINIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_minimum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_minimum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32",
VOP3POp.V_PK_MAXIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_maximum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_maximum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32",
VOP3POp.V_MAD_MIX_F32: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = in[0] * in[1] + in[2]",
VOP3POp.V_MAD_MIXLO_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(in[0] * in[1] + in[2])",
VOP3POp.V_MAD_MIXHI_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(in[0] * in[1] + in[2])",
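# For V_MAD_MIX*, the { OPSEL_HI[i], OPSEL[i] } table above decodes per source as:
# 0 or 1 = whole register as f32, 2 = low half as f16, 3 = high half as f16.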
VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp',
VOP3POp.V_DOT2_I32_I16: 'tmp = S2.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp',
VOP3POp.V_DOT2_U32_U16: 'tmp = S2.u32;\ntmp += u16_to_u32(S0[15 : 0].u16) * u16_to_u32(S1[15 : 0].u16);\ntmp += u16_to_u32(S0[31 : 16].u16) * u16_to_u32(S1[31 : 16].u16);\nD0.u32 = tmp',
VOP3POp.V_DOT4_I32_I8: 'tmp = S2.i32;\ntmp += i8_to_i32(S0[7 : 0].i8) * i8_to_i32(S1[7 : 0].i8);\ntmp += i8_to_i32(S0[15 : 8].i8) * i8_to_i32(S1[15 : 8].i8);\ntmp += i8_to_i32(S0[23 : 16].i8) * i8_to_i32(S1[23 : 16].i8);\ntmp += i8_to_i32(S0[31 : 24].i8) * i8_to_i32(S1[31 : 24].i8);\nD0.i32 = tmp',
VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp',
VOP3POp.V_DOT8_I32_I4: 'tmp = S2.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp',
VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp',
VOP3POp.V_MFMA_F32_16X16X128_F8F6F4: 'D = A (16x128) * B (128x16) + C (16x16)\nA, B: (16*128) elements * 8 bits/element = 16384 bits, divide by 64 lanes * 32 bits/register-lane = 8 registers\nC, D: (16*16) elements * 32 bits/element = 8192 bits, divide by 64 lanes * 32 bits/register-lane = 4 registers',
VOP3POp.V_MFMA_F32_32X32X64_F8F6F4: 'D = A (32x64) * B (64x32) + C (32x32)\nA, B: (32*64) elements * 8 bits/element = 16384 bits, divide by 64 lanes * 32 bits/register-lane = 8 registers\nC, D: (32*32) elements * 32 bits/element = 32768 bits, divide by 64 lanes * 32 bits/register-lane = 16 registers',
VOP3POp.V_PK_FMA_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = fma(S0[31 : 0].f32, S1[31 : 0].f32, S2[31 : 0].f32);\ntmp[63 : 32].f32 = fma(S0[63 : 32].f32, S1[63 : 32].f32, S2[63 : 32].f32);\nD0.b64 = tmp",
VOP3POp.V_PK_MUL_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 * S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 * S1[63 : 32].f32;\nD0.b64 = tmp",
VOP3POp.V_PK_ADD_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 + S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 + S1[63 : 32].f32;\nD0.b64 = tmp",
VOP3POp.V_PK_MOV_B32: 'tmp0.u32 = S0.u32[OPSEL[0].i32 * 32 + 31 : OPSEL[0].i32 * 32];\ntmp1.u32 = S1.u32[OPSEL[1].i32 * 32 + 31 : OPSEL[1].i32 * 32];\nD0.u32[31 : 0] = tmp0.u32;\nD0.u32[63 : 32] = tmp1.u32\nv_pk_mov_b32 v0, v2, v4 op_sel:[0,1] // evaluates v0 <- v2 and v1 <- v5.\nv_pk_mov_b32 v0, s6, s6 op_sel:[0,1] // 64-bit move from scalar s[6:7].',
|
|
VOP3POp.V_MFMA_F32_16X16X32_BF16: 'D = A (16x32) * B (32x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_I32_16X16X64_I8: 'D = A (16x64) * B (64x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_32X32X16_BF16: 'D = A (32x16) * B (16x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_I32_32X32X32_I8: 'D = A (32x32) * B (32x32) + C (32x32)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X64_BF16: 'D = A (sparse 16x64) * B (64x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_I32_16X16X128_I8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X128_BF8_BF8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X128_BF8_FP8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X128_FP8_BF8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)',
|
|
VOP3POp.V_MFMA_F32_32X32X1_2B_F32: 'D = A (32x1) * B (1x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_16X16X1_4B_F32: 'D = A (16x1) * B (1x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_4X4X1_16B_F32: 'D = A (4x1) * B (1x4) + C (4x4)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X128_FP8_FP8: 'D = A (sparse 16x128) * B (128x16) + D (16x16)',
|
|
VOP3POp.V_MFMA_F32_32X32X2_F32: 'D = A (32x2) * B (2x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_16X16X4_F32: 'D = A (16x4) * B (4x16) + C (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X32_BF16: 'D = A (sparse 32x32) * B (32x32) + D (32x32)',
|
|
VOP3POp.V_SMFMAC_I32_32X32X64_I8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)',
|
|
VOP3POp.V_MFMA_F32_32X32X4_2B_F16: 'D = A (32x4) * B (4x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_16X16X4_4B_F16: 'D = A (16x4) * B (4x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_4X4X4_16B_F16: 'D = A (4x4) * B (4x4) + C (4x4)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X64_BF8_BF8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)',
|
|
VOP3POp.V_MFMA_F32_32X32X8_F16: 'D = A (32x8) * B (8x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X64_BF8_FP8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X64_FP8_BF8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)',
|
|
VOP3POp.V_MFMA_I32_32X32X4_2B_I8: 'D = A (32x4) * B (4x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_I32_16X16X4_4B_I8: 'D = A (16x4) * B (4x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_I32_4X4X4_16B_I8: 'D = A (4x4) * B (4x4) + C (4x4)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X64_FP8_FP8: 'D = A (sparse 32x64) * B (64x32) + D (32x32)',
|
|
VOP3POp.V_MFMA_F32_16X16X32_F16: 'D = A (16x32) * B (32x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_32X32X16_F16: 'D = A (32x16) * B (16x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_I32_32X32X16_I8: 'D = A (32x16) * B (16x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_I32_16X16X32_I8: 'D = A (16x32) * B (32x16) + C (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X64_F16: 'D = A (sparse 16x64) * B (64x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X32_F16: 'D = A (sparse 32x32) * B (32x32) + D (32x32)',
|
|
VOP3POp.V_MFMA_F32_32X32X4_2B_BF16: 'D = A (32x4) * B (4x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_16X16X4_4B_BF16: 'D = A (16x4) * B (4x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_4X4X4_16B_BF16: 'D = A (4x4) * B (4x4) + C (4x4)',
|
|
VOP3POp.V_MFMA_F32_32X32X8_BF16: 'D = A (32x8) * B (8x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X32_F16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X16_F16: 'D = A (sparse 32x16) * B (16x32) + D (32x32)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X32_BF16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X16_BF16: 'D = A (sparse 32x16) * B (16x32) + D (32x32)',
|
|
VOP3POp.V_SMFMAC_I32_16X16X64_I8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_I32_32X32X32_I8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)',
|
|
VOP3POp.V_MFMA_F64_16X16X4_F64: 'D = A (16x4) * B (4x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F64_4X4X4_4B_F64: 'D = A (4x4) * B (4x4) + C (4x4)',
|
|
VOP3POp.V_MFMA_F32_16X16X32_BF8_BF8: 'D = A (16x32) * B (32x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_16X16X32_BF8_FP8: 'D = A (16x32) * B (32x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_16X16X32_FP8_BF8: 'D = A (16x32) * B (32x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_16X16X32_FP8_FP8: 'D = A (16x32) * B (32x16) + C (16x16)',
|
|
VOP3POp.V_MFMA_F32_32X32X16_BF8_BF8: 'D = A (32x16) * B (16x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_32X32X16_BF8_FP8: 'D = A (32x16) * B (16x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_32X32X16_FP8_BF8: 'D = A (32x16) * B (16x32) + C (32x32)',
|
|
VOP3POp.V_MFMA_F32_32X32X16_FP8_FP8: 'D = A (32x16) * B (16x32) + C (32x32)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X64_BF8_BF8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X64_BF8_FP8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X64_FP8_BF8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_16X16X64_FP8_FP8: 'D = A (sparse 16x64) * B (64x16) + D (16x16)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X32_BF8_BF8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X32_BF8_FP8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X32_FP8_BF8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)',
|
|
VOP3POp.V_SMFMAC_F32_32X32X32_FP8_FP8: 'D = A (sparse 32x32) * B (32x32) + D (32x32)',
}
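# The MFMA/SMFMAC summaries above follow the MxNxK naming convention:
# V_MFMA_<dst>_MXNXK_<src> computes D[MxN] = A[MxK] * B[KxN] + C[MxN], and the
# SMFMAC ("sparse") variants accumulate into D with a structured-sparse A.
# A minimal sketch of recovering the tile shape from an op name; this helper is
# illustrative only and is not emitted by pdf.py.
def _mfma_tile_shape(op_name:str) -> tuple:
  import re  # local import so this illustrative helper stays self-contained
  m = re.search(r'_(\d+)X(\d+)X(\d+)', op_name)
  if m is None: raise ValueError(f'no MxNxK tile in {op_name}')
  return tuple(int(g) for g in m.groups())  # e.g. 'V_MFMA_F32_32X32X8_F16' -> (32, 32, 8)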
VOPCOp_PCODE = {
VOPCOp.V_CMP_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_CLASS_F32: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f32)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f32)) then\nresult = S1.u32[1]\nelsif exponent(S0.f32) == 255 then\n// +-INF\nresult = S1.u32[sign(S0.f32) ? 2 : 9]\nelsif exponent(S0.f32) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f32) ? 3 : 8]\nelsif 64'F(abs(S0.f32)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f32) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f32) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
VOPCOp.V_CMP_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_CLASS_F64: "declare result : 1'U;\nif isSignalNAN(S0.f64) then\nresult = S1.u32[0]\nelsif isQuietNAN(S0.f64) then\nresult = S1.u32[1]\nelsif exponent(S0.f64) == 2047 then\n// +-INF\nresult = S1.u32[sign(S0.f64) ? 2 : 9]\nelsif exponent(S0.f64) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f64) ? 3 : 8]\nelsif abs(S0.f64) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f64) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f64) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
VOPCOp.V_CMP_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nD0.u64[laneId] = result;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_CLASS_F16: "declare result : 1'U;\nif isSignalNAN(64'F(S0.f16)) then\nresult = S1.u32[0]\nelsif isQuietNAN(64'F(S0.f16)) then\nresult = S1.u32[1]\nelsif exponent(S0.f16) == 31 then\n// +-INF\nresult = S1.u32[sign(S0.f16) ? 2 : 9]\nelsif exponent(S0.f16) > 0 then\n// +-normal value\nresult = S1.u32[sign(S0.f16) ? 3 : 8]\nelsif 64'F(abs(S0.f16)) > 0.0 then\n// +-denormal value\nresult = S1.u32[sign(S0.f16) ? 4 : 7]\nelse\n// +-0.0\nresult = S1.u32[sign(S0.f16) ? 5 : 6]\nendif;\nEXEC.u64[laneId] = D0.u64[laneId] = result",
VOPCOp.V_CMP_F_F16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_F16: 'D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_F16: 'D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_F16: 'D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_F16: 'D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LG_F16: 'D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_F16: 'D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_O_F16: "D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_U_F16: "D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_NGE_F16: 'D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLG_F16: 'D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NGT_F16: 'D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLE_F16: 'D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NEQ_F16: 'D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLT_F16: 'D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_TRU_F16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_F16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 < S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 == S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <= S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 > S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 <> S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f16 >= S1.f16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_O_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f16)) && !isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_U_F16: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f16)) || isNAN(64'F(S1.f16)));\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_NGE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 >= S1.f16);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLG_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <> S1.f16);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NGT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 > S1.f16);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLE_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 <= S1.f16);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NEQ_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 == S1.f16);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLT_F16: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f16 < S1.f16);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_TRU_F16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_F_F32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_F32: 'D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_F32: 'D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_F32: 'D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_F32: 'D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LG_F32: 'D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_F32: 'D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_O_F32: "D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_U_F32: "D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_NGE_F32: 'D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLG_F32: 'D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NGT_F32: 'D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLE_F32: 'D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NEQ_F32: 'D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLT_F32: 'D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_TRU_F32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_F32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 == S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <= S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 > S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 <> S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f32 >= S1.f32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_O_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(64'F(S0.f32)) && !isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_U_F32: "EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(64'F(S0.f32)) || isNAN(64'F(S1.f32)));\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_NGE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 >= S1.f32);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLG_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <> S1.f32);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NGT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 > S1.f32);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLE_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 <= S1.f32);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NEQ_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 == S1.f32);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLT_F32: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f32 < S1.f32);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_TRU_F32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_F_F64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_F64: 'D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_F64: 'D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_F64: 'D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_F64: 'D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LG_F64: 'D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_F64: 'D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_O_F64: 'D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_U_F64: 'D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NGE_F64: 'D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLG_F64: 'D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NGT_F64: 'D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLE_F64: 'D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NEQ_F64: 'D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NLT_F64: 'D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_TRU_F64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_F64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 < S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 == S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <= S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 > S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 <> S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.f64 >= S1.f64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_O_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (!isNAN(S0.f64) && !isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_U_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = (isNAN(S0.f64) || isNAN(S1.f64));\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NGE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 >= S1.f64);\n// With NAN inputs this is not the same operation as <\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLG_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <> S1.f64);\n// With NAN inputs this is not the same operation as ==\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NGT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 > S1.f64);\n// With NAN inputs this is not the same operation as <=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLE_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 <= S1.f64);\n// With NAN inputs this is not the same operation as >\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NEQ_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 == S1.f64);\n// With NAN inputs this is not the same operation as !=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NLT_F64: 'EXEC.u64[laneId] = D0.u64[laneId] = !(S0.f64 < S1.f64);\n// With NAN inputs this is not the same operation as >=\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_TRU_F64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_F_I16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_I16: 'D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_I16: 'D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_I16: 'D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_I16: 'D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NE_I16: 'D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_I16: 'D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_T_I16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_F_U16: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_U16: 'D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_U16: 'D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_U16: 'D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_U16: 'D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NE_U16: 'D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_U16: 'D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_T_U16: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_I16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 < S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 == S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <= S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 > S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 <> S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_I16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i16 >= S1.i16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_T_I16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_U16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 < S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 == S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <= S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 > S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 <> S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_U16: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u16 >= S1.u16;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_T_U16: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_F_I32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_I32: 'D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_I32: 'D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_I32: 'D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_I32: 'D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NE_I32: 'D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_I32: 'D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_T_I32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_F_U32: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_U32: 'D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_U32: 'D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_U32: 'D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_U32: 'D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NE_U32: 'D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_U32: 'D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_T_U32: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_I32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 < S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 == S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <= S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 > S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 <> S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_I32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i32 >= S1.i32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_T_I32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_U32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 < S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 == S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <= S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 > S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 <> S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_U32: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u32 >= S1.u32;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_T_U32: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_F_I64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_I64: 'D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_I64: 'D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_I64: 'D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_I64: 'D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NE_I64: 'D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_I64: 'D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_T_I64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_F_U64: "D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMP_LT_U64: 'D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_EQ_U64: 'D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_LE_U64: 'D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GT_U64: 'D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_NE_U64: 'D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_GE_U64: 'D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMP_T_U64: "D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_I64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 < S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 == S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <= S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 > S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 <> S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_I64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.i64 >= S1.i64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_T_I64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_F_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'0U;\n// D0 = VCC in VOPC encoding.",
VOPCOp.V_CMPX_LT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 < S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_EQ_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 == S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_LE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_T_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
}
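# Every VOPC entry above computes one result bit per lane and packs it into a
# 64-bit mask (D0, i.e. VCC in the VOPC encoding); the V_CMPX_* forms also
# write the same mask to EXEC. A minimal wave-level sketch of that pattern,
# assuming a 64-lane wavefront; illustrative only, not emitted by pdf.py.
def _vopc_lane_mask(s0, s1, pred):
  # s0, s1: 64 per-lane operand values; pred: e.g. (lambda a, b: a < b) for V_CMP_LT_*
  mask = 0
  for lane, (a, b) in enumerate(zip(s0, s1)):
    if pred(a, b): mask |= 1 << lane
  return mask  # written to VCC; V_CMPX_* additionally copies it into EXEC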
PSEUDOCODE_STRINGS = {DSOp: DSOp_PCODE, FLATOp: FLATOp_PCODE, GLOBALOp: GLOBALOp_PCODE, MTBUFOp: MTBUFOp_PCODE, MUBUFOp: MUBUFOp_PCODE, SCRATCHOp: SCRATCHOp_PCODE, SMEMOp: SMEMOp_PCODE, SOP1Op: SOP1Op_PCODE, SOP2Op: SOP2Op_PCODE, SOPCOp: SOPCOp_PCODE, SOPKOp: SOPKOp_PCODE, SOPPOp: SOPPOp_PCODE, VOP1Op: VOP1Op_PCODE, VOP2Op: VOP2Op_PCODE, VOP3AOp: VOP3AOp_PCODE, VOP3BOp: VOP3BOp_PCODE, VOP3POp: VOP3POp_PCODE, VOPCOp: VOPCOp_PCODE}
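# PSEUDOCODE_STRINGS maps each op enum class to its op -> pseudocode table, so
# the reference semantics of a decoded instruction can be looked up directly:
#   PSEUDOCODE_STRINGS[VOPCOp][VOPCOp.V_CMP_LT_F32]
#   # -> "D0.u64[laneId] = S0.f32 < S1.f32;\n// D0 = VCC in VOPC encoding."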