assembly/amd: pcode bug fixes (#14032)

* bring over pcode parser

* fixes

* pdf test

* delay alu
This commit is contained in:
George Hotz
2026-01-06 00:15:48 -08:00
committed by GitHub
parent 21d0f6bb76
commit 45f7fd073d
6 changed files with 341 additions and 311 deletions

View File

@@ -20,8 +20,8 @@ DSOp_PCODE = {
DSOp.DS_WRITE_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]',
DSOp.DS_WRITE2_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]',
DSOp.DS_WRITE2ST64_B32: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]',
DSOp.DS_CMPST_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp',
DSOp.DS_CMPST_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp\nsrc\ncmp',
DSOp.DS_CMPST_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp',
DSOp.DS_CMPST_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MIN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MAX_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_ADD_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp',
@@ -46,8 +46,8 @@ DSOp_PCODE = {
DSOp.DS_WRXCHG_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
DSOp.DS_WRXCHG2_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2',
DSOp.DS_WRXCHG2ST64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2',
DSOp.DS_CMPST_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp',
DSOp.DS_CMPST_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp\nsrc\ncmp',
DSOp.DS_CMPST_RTN_B32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b32;\nsrc = DATA2.b32;\ncmp = DATA.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp',
DSOp.DS_CMPST_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA2.f32;\ncmp = DATA.f32;\nMEM[addr].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MIN_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MAX_RTN_F32: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f32;\nsrc = DATA.f32;\nMEM[addr].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_WRAP_RTN_B32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = tmp >= DATA.u32 ? tmp - DATA.u32 : tmp + DATA2.u32;\nRETURN_DATA = tmp',
@@ -59,7 +59,7 @@ DSOp_PCODE = {
DSOp.DS_READ_U8: "RETURN_DATA.u32 = 32'U({ 24'0U, MEM[ADDR].u8 })",
DSOp.DS_READ_I16: "RETURN_DATA.i32 = 32'I(signext(MEM[ADDR].i16))",
DSOp.DS_READ_U16: "RETURN_DATA.u32 = 32'U({ 16'0U, MEM[ADDR].u16 })",
DSOp.DS_SWIZZLE_B32: 'Offset[4:0]: Swizzle\n0x00: {1,11,9,19,5,15,d,1d,3,13,b,1b,7,17,f,1f,2,12,a,1a,6,16,e,1e,4,14,c,1c,8,18,10,20}\n0x10: {1,9,5,d,3,b,7,f,2,a,6,e,4,c,8,10,11,19,15,1d,13,1b,17,1f,12,1a,16,1e,14,1c,18,20}\n0x1f: No swizzle\nOffset[9:5]: Swizzle\n0x01, mask=0, rotate left:\n{2,3,4,5,6,7,8,9,a,b,c,d,e,f,10,11,12,13,14,15,16,17,18,19,1a,1b,1c,1d,1e,1f,20,1}\n0x01, mask=0, rotate right:\n{20,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f,10,11,12,13,14,15,16,17,18,19,1a,1b,1c,1d,1e,1f}\n0x01, mask=1, rotate left:\n{1,4,3,6,5,8,7,a,9,c,b,e,d,10,f,12,11,14,13,16,15,18,17,1a,19,1c,1b,1e,1d,20,1f,2}\n0x01, mask=1, rotate right:\n{1f,2,1,4,3,6,5,8,7,a,9,c,b,e,d,10,f,12,11,14,13,16,15,18,17,1a,19,1c,1b,1e,1d,20}\noffset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\n}\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset[15]) {\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n}\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n}',
DSOp.DS_SWIZZLE_B32: 'Offset[4:0]: Swizzle\n0x00: {1,11,9,19,5,15,d,1d,3,13,b,1b,7,17,f,1f,2,12,a,1a,6,16,e,1e,4,14,c,1c,8,18,10,20}\n0x10: {1,9,5,d,3,b,7,f,2,a,6,e,4,c,8,10,11,19,15,1d,13,1b,17,1f,12,1a,16,1e,14,1c,18,20}\n0x1f: No swizzle',
DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\ndst_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : 63 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : 63 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\nsrc_lane = (VGPR[i][ADDR].u32 + OFFSET.u32) / 4U % 64U;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : 63 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
DSOp.DS_ADD_U64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
@@ -78,8 +78,8 @@ DSOp_PCODE = {
DSOp.DS_WRITE_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]',
DSOp.DS_WRITE2_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]',
DSOp.DS_WRITE2ST64_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]',
DSOp.DS_CMPST_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp',
DSOp.DS_CMPST_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp\nsrc\ncmp',
DSOp.DS_CMPST_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp',
DSOp.DS_CMPST_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MIN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MAX_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_WRITE_B8_D16_HI: 'MEM[ADDR].b8 = DATA[23 : 16]',
@@ -107,8 +107,8 @@ DSOp_PCODE = {
DSOp.DS_WRXCHG_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
DSOp.DS_WRXCHG2_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2',
DSOp.DS_WRXCHG2ST64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2',
DSOp.DS_CMPST_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp',
DSOp.DS_CMPST_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp\nsrc\ncmp',
DSOp.DS_CMPST_RTN_B64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].b64;\nsrc = DATA2.b64;\ncmp = DATA.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp',
DSOp.DS_CMPST_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA2.f64;\ncmp = DATA.f64;\nMEM[addr].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MIN_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MAX_RTN_F64: 'addr = CalcDsAddr(ADDR.b32, OFFSET0.b32, OFFSET1.b32);\ntmp = MEM[addr].f64;\nsrc = DATA.f64;\nMEM[addr].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_READ_B64: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32',
@@ -122,7 +122,7 @@ DSOp_PCODE = {
DSOp.DS_WRITE_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]',
DSOp.DS_WRITE_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]',
DSOp.DS_READ_B96: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32',
DSOp.DS_READ_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32\nwhere:\nOFFSET = Unsigned immediate byte offset.\nOFFEN = Send offset either as VADDR or as zero..\nIDXEN = Send index either as VADDR or as zero.\nLDS = Data read from/written to LDS or VGPR.\nOP = Instruction Opcode.\nVADDR = VGPR address source.\nVDATA = Destination vector GPR.\nSRSRC = Scalar GPR that specifies resource constant.\nACC = Return to ACC VGPRs\nSC = Scope\nNT = Non-Temporal\nSOFFSET = Byte offset added to the memory address of an SGPR.',
DSOp.DS_READ_B128: 'addr = CalcDsAddr(ADDR.b32, 0x0, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[addr + OFFSET.u32 + 8U].b32;\nRETURN_DATA[127 : 96] = MEM[addr + OFFSET.u32 + 12U].b32',
}
FLATOp_PCODE = {
@@ -255,7 +255,7 @@ MTBUFOp_PCODE = {
MTBUFOp.TBUFFER_STORE_FORMAT_D16_X: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format",
MTBUFOp.TBUFFER_STORE_FORMAT_D16_XY: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16))",
MTBUFOp.TBUFFER_STORE_FORMAT_D16_XYZ: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16))",
MTBUFOp.TBUFFER_STORE_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))\nwhere:\nOP = Instruction Opcode.\nADDR = Source of flat address VGPR.\nDATA = Source data.\nVDST = Destination VGPR.\nNV = Access to non-volatile memory.\nSADDR = SGPR holding address or offset\nSEG = Instruction type: Flat, Scratch, or Global\nLDS = Data is transferred between LDS and Memory, not VGPRs.\nOFFSET = Immediate address byte-offset.\nSC = Scope\nNT = Non-Temporal",
MTBUFOp.TBUFFER_STORE_FORMAT_D16_XYZW: "addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\nMEM[addr + ChannelOffsetX()] = ConvertToFormat(32'B(VDATA[15 : 0].b16));\n// Mem access size depends on format\nMEM[addr + ChannelOffsetY()] = ConvertToFormat(32'B(VDATA[31 : 16].b16));\nMEM[addr + ChannelOffsetZ()] = ConvertToFormat(32'B(VDATA[47 : 32].b16));\nMEM[addr + ChannelOffsetW()] = ConvertToFormat(32'B(VDATA[63 : 48].b16))",
}
MUBUFOp_PCODE = {
@@ -330,7 +330,7 @@ MUBUFOp_PCODE = {
MUBUFOp.BUFFER_ATOMIC_OR_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp | DATA.b64);\nRETURN_DATA.b64 = tmp',
MUBUFOp.BUFFER_ATOMIC_XOR_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = (tmp ^ DATA.b64);\nRETURN_DATA.b64 = tmp',
MUBUFOp.BUFFER_ATOMIC_INC_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = tmp >= src ? 0ULL : tmp + 1ULL;\nRETURN_DATA.u64 = tmp',
MUBUFOp.BUFFER_ATOMIC_DEC_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp\nwhere:\nOFFSET = Unsigned immediate byte offset.\nOFFEN = Send offset either as VADDR or as zero.\nIDXEN = Send index either as VADDR or as zero.\nLDS = Data is transferred between LDS and Memory, not VGPRs.\nOP = Instruction Opcode.\nDFMT = Data format for typed buffer.\nNFMT = Number format for typed buffer.\nVADDR = VGPR address source.\nVDATA = Vector GPR for read/write result.\nSRSRC = Scalar GPR that specifies resource constant.\nSOFFSET = Unsigned byte offset from an SGPR.\nSC = Scope\nNT = Non-Temporal\nACC = Return to ACC VGPRs',
MUBUFOp.BUFFER_ATOMIC_DEC_X2: 'addr = CalcBufferAddr(VADDR.b32, SRSRC.b32, SOFFSET.b32, OFFSET.b32);\ntmp = MEM[addr].u64;\nsrc = DATA.u64;\nMEM[addr].u64 = ((tmp == 0ULL) || (tmp > src)) ? src : tmp - 1ULL;\nRETURN_DATA.u64 = tmp',
}
SCRATCHOp_PCODE = {
@@ -446,17 +446,17 @@ SOP1Op_PCODE = {
SOP1Op.S_WQM_B64: "tmp = 0ULL;\ndeclare i : 6'U;\nfor i in 6'0U : 6'63U do\ntmp[i] = S0.u64[i & 6'60U +: 6'4U] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]',
SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT0_I32_B32(0x00000000) => 32\nS_BCNT0_I32_B32(0xcccccccc) => 16\nS_BCNT0_I32_B32(0xffffffff) => 0",
SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U",
SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT1_I32_B32(0x00000000) => 0\nS_BCNT1_I32_B32(0xcccccccc) => 16\nS_BCNT1_I32_B32(0xffffffff) => 32",
SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U",
SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_FF0_I32_B32: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'0U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FF0_I32_B32(0xaaaaaaaa) => 0\nS_FF0_I32_B32(0x55555555) => 1\nS_FF0_I32_B32(0x00000000) => 0\nS_FF0_I32_B32(0xffffffff) => 0xffffffff\nS_FF0_I32_B32(0xfffeffff) => 16",
SOP1Op.S_FF0_I32_B32: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'0U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FF0_I32_B64: "tmp = -1;\n// Set if no zeros are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'0U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FF1_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FF1_I32_B32(0xaaaaaaaa) => 1\nS_FF1_I32_B32(0x55555555) => 0\nS_FF1_I32_B32(0x00000000) => 0xffffffff\nS_FF1_I32_B32(0xffffffff) => 0\nS_FF1_I32_B32(0x00010000) => 16",
SOP1Op.S_FF1_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FF1_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FLBIT_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FLBIT_I32_B32(0x00000000) => 0xffffffff\nS_FLBIT_I32_B32(0x0000cccc) => 16\nS_FLBIT_I32_B32(0xffff3333) => 0\nS_FLBIT_I32_B32(0x7fffffff) => 1\nS_FLBIT_I32_B32(0x80000000) => 0\nS_FLBIT_I32_B32(0xffffffff) => 0",
SOP1Op.S_FLBIT_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FLBIT_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_FLBIT_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_FLBIT_I32(0x00000000) => 0xffffffff\nS_FLBIT_I32(0x0000cccc) => 16\nS_FLBIT_I32(0xffff3333) => 16\nS_FLBIT_I32(0x7fffffff) => 1\nS_FLBIT_I32(0x80000000) => 1\nS_FLBIT_I32(0xffffffff) => 0xffffffff',
SOP1Op.S_FLBIT_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp',
SOP1Op.S_FLBIT_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp',
SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))",
SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))",
@@ -478,18 +478,18 @@ SOP1Op_PCODE = {
SOP1Op.S_XNOR_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = ~(S0.u64 ^ EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U',
SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL',
SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32\ns_mov_b32 m0, 10\ns_movrels_b32 s5, s7',
SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32',
SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b64 = SGPR[addr].b64',
SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32\ns_mov_b32 m0, 10\ns_movreld_b32 s5, s7',
SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32',
SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b64 = S0.b64',
SOP1Op.S_CBRANCH_JOIN: "saved_csp = S0.u32;\nif WAVE_MODE.CSP.u32 == saved_csp then\nPC += 4LL;\n// Second time to JOIN: continue with program.\nelse\nWAVE_MODE.CSP -= 3'1U;\n// First time to JOIN; jump to other FORK path.\n{ PC, EXEC } = SGPR[WAVE_MODE.CSP.u32 * 4U].b128;\n// Read 128 bits from 4 consecutive SGPRs.\nendif",
SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0\nS_ABS_I32(0x00000001) => 0x00000001\nS_ABS_I32(0x7fffffff) => 0x7fffffff\nS_ABS_I32(0x80000000) => 0x80000000 // Note this is negative!\nS_ABS_I32(0x80000001) => 0x7fffffff\nS_ABS_I32(0x80000002) => 0x7ffffffe\nS_ABS_I32(0xffffffff) => 0x00000001',
SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0',
SOP1Op.S_SET_GPR_IDX_IDX: 'M0[7 : 0] = S0.u32[7 : 0].b8',
SOP1Op.S_ANDN1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_ORN1_SAVEEXEC_B64: 'saveexec = EXEC.u64;\nEXEC.u64 = (~S0.u64 | EXEC.u64);\nD0.u64 = saveexec.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_ANDN1_WREXEC_B64: 'EXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_ANDN2_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL\n// V0 holds the index value per lane\n// save exec mask for restore at the end\ns_mov_b64 s2, exec\n// exec mask of remaining (unprocessed) threads\ns_mov_b64 s4, exec\nloop:\n// get the index value for the first active lane\nv_readfirstlane_b32 s0, v0\n// find all other lanes with same index value\nv_cmpx_eq s0, v0\n<OP> // do the operation using the current EXEC mask. S0 holds the index.\n// mask out thread that was just executed\n// s_andn2_b64 s4, s4, exec\n// s_mov_b64 exec, s4\ns_andn2_wrexec_b64 s4, s4 // replaces above 2 ops\n// repeat until EXEC==0\ns_cbranch_scc1 loop\ns_mov_b64 exec, s2',
SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor\ns_bitreplicate_b64 s2, s0\ns_bitreplicate_b64 s2, s2',
SOP1Op.S_ANDN2_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor',
}
SOP2Op_PCODE = {
@@ -535,7 +535,7 @@ SOP2Op_PCODE = {
SOP2Op.S_BFE_U64: 'D0.u64 = ((S0.u64 >> S1[5 : 0].u32) & ((1ULL << S1[22 : 16].u32) - 1ULL));\nSCC = D0.u64 != 0ULL',
SOP2Op.S_BFE_I64: 'tmp.i64 = ((S0.i64 >> S1[5 : 0].u32) & ((1LL << S1[22 : 16].u32) - 1LL));\nD0.i64 = signext_from_bit(tmp.i64, S1[22 : 16].u32);\nSCC = D0.i64 != 0LL',
SOP2Op.S_CBRANCH_G_FORK: "mask_pass = (S0.u64 & EXEC.u64);\nmask_fail = (~S0.u64 & EXEC.u64);\nif mask_pass == EXEC.u64 then\nPC = 64'I(S1.u64)\nelsif mask_fail == EXEC.u64 then\nPC += 4LL\nelsif bitCount(mask_fail.b64) < bitCount(mask_pass.b64) then\nEXEC = mask_fail.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { S1.u64, mask_pass };\nWAVE_MODE.CSP += 3'1U;\nPC += 4LL\nelse\nEXEC = mask_pass.b64;\nSGPR[WAVE_MODE.CSP.u32 * 4U].b128 = { (PC + 4LL), mask_fail };\nWAVE_MODE.CSP += 3'1U;\nPC = 64'I(S1.u64)\nendif",
SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0\nS_ABSDIFF_I32(0x00000002, 0x00000005) => 0x00000003\nS_ABSDIFF_I32(0xffffffff, 0x00000000) => 0x00000001\nS_ABSDIFF_I32(0x80000000, 0x00000000) => 0x80000000 // Note: result is negative!\nS_ABSDIFF_I32(0x80000000, 0x00000001) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xffffffff) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xfffffffe) => 0x7ffffffe',
SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0',
SOP2Op.S_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)",
SOP2Op.S_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)",
SOP2Op.S_LSHL1_ADD_U32: "tmp = (64'U(S0.u32) << 1U) + 64'U(S1.u32);\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow.\nD0.u32 = tmp.u32",
@@ -564,7 +564,7 @@ SOPCOp_PCODE = {
SOPCOp.S_BITCMP1_B32: "SCC = S0.u32[S1.u32[4 : 0]] == 1'1U",
SOPCOp.S_BITCMP0_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'0U",
SOPCOp.S_BITCMP1_B64: "SCC = S0.u64[S1.u32[5 : 0]] == 1'1U",
SOPCOp.S_SETVSKIP: 'VSKIP = S0.u32[S1.u32[4 : 0]]\ns_setvskip 1, 0 // Enable vskip mode.\ns_setvskip 0, 0 // Disable vskip mode.',
SOPCOp.S_SETVSKIP: 'VSKIP = S0.u32[S1.u32[4 : 0]]',
SOPCOp.S_SET_GPR_IDX_ON: "WAVE_MODE.GPR_IDX_EN = 1'1U;\nM0[7 : 0] = S0.u32[7 : 0].b8;\nM0[15 : 12] = SRC1.u32[3 : 0].b4;\n// this is the direct content of raw S1 field\n// Remaining bits of M0 are unmodified.",
SOPCOp.S_CMP_EQ_U64: 'SCC = S0.u64 == S1.u64',
SOPCOp.S_CMP_LG_U64: 'SCC = S0.u64 <> S1.u64',
@@ -595,15 +595,15 @@ SOPKOp_PCODE = {
}
SOPPOp_PCODE = {
SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor\ns_nop 0 // Wait 1 cycle.\ns_nop 0xf // Wait 16 cycles.',
SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.\ns_branch label // Set SIMM16 = +4 = 0x0004\ns_nop 0 // 4 bytes\nlabel:\ns_nop 0 // 4 bytes\ns_branch label // Set SIMM16 = -8 = 0xfff8",
SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor',
SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.",
SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_VCCZ: "if VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_VCCNZ: "if VCCZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_EXECZ: "if EXECZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_EXECNZ: "if EXECZ.u1 == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_WAITCNT: 'SIMM16[3:0] = vmcount (vector memory operations) lower bits [3:0],\nSIMM16[6:4] = export/mem-write-data count,\nSIMM16[11:8] = LGKMcnt (scalar-mem/GDS/LDS count),\nSIMM16[15:14] = vmcount (vector memory operations) upper bits [5:4].',
SOPPOp.S_WAITCNT: 'SIMM16[3:0] = vmcount (vector memory operations) lower bits [3:0],',
SOPPOp.S_SLEEP: 's_sleep 0 // Wait for 0 clocks.\ns_sleep 1 // Wait for 1-64 clocks.\ns_sleep 2 // Wait for 65-128 clocks.',
SOPPOp.S_TRAP: 'TrapID = SIMM16.u16[7 : 0];\n"Wait for all instructions to complete";\n// PC passed into trap handler points to S_TRAP itself,\n// *not* to the next instruction.\n{ TTMP[1], TTMP[0] } = { 3\'0, PCRewind[3 : 0], HT[0], TrapID[7 : 0], PC[47 : 0] };\nPC = TBA.i64;\n// trap base address\nWAVE_STATUS.PRIV = 1\'1U',
SOPPOp.S_CBRANCH_CDBGSYS: "if WAVE_STATUS.COND_DBG_SYS.u32 != 0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
@@ -615,7 +615,7 @@ SOPPOp_PCODE = {
}
VOP1Op_PCODE = {
VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32',
VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'I;\nif EXEC == 0x0LL then\nlane = 0;\n// Force lane 0 if all lanes are disabled\nelse\nlane = s_ff1_i32_b64(EXEC);\n// Lowest active lane\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
@@ -645,46 +645,46 @@ VOP1Op_PCODE = {
VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)',
VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)',
VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32',
VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)',
VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)',
VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))",
VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))",
VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
VOP1Op.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_U32(0x00000000) => 0xffffffff\nV_FFBH_U32(0x800000ff) => 0\nV_FFBH_U32(0x100000ff) => 3\nV_FFBH_U32(0x0000ffff) => 16\nV_FFBH_U32(0x00000001) => 31",
VOP1Op.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBL_B32(0x00000000) => 0xffffffff\nV_FFBL_B32(0xff000001) => 0\nV_FFBL_B32(0xff000008) => 3\nV_FFBL_B32(0xffff0000) => 16\nV_FFBL_B32(0x80000000) => 31",
VOP1Op.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_I32(0x00000000) => 0xffffffff\nV_FFBH_I32(0x40000000) => 1\nV_FFBH_I32(0x80000000) => 1\nV_FFBH_I32(0x0fffffff) => 4\nV_FFBH_I32(0xffff0000) => 16\nV_FFBH_I32(0xfffffffe) => 31\nV_FFBH_I32(0xffffffff) => 0xffffffff',
VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP1Op.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP1Op.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP1Op.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor',
VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif',
VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif',
VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif",
VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif",
VOP1Op.V_MOV_B64: 'D0.b64 = S0.b64',
VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()",
VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()",
VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16",
VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)',
VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)",
VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)',
VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)",
VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif",
VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif",
VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif",
VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif",
VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)',
VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif",
VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)',
VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN",
VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN",
VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))",
VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))",
VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)',
VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)',
VOP1Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n <= 16'0 then\nreturn 8'0U\nelsif n >= 16'255 then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\ntmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16",
@@ -696,7 +696,7 @@ VOP1Op_PCODE = {
VOP1Op.V_PRNG_B32: 'in = S0.u32;\nD0.u32 = ((in << 1U) ^ (in[31] ? 197U : 0U))',
VOP1Op.V_PERMLANE16_SWAP_B32: 'for pass in 0 : 1 do\nfor lane in 0 : 15 do\ntmp = VGPR[pass * 32 + lane][SRC0.u32];\nVGPR[pass * 32 + lane][SRC0.u32] = VGPR[pass * 32 + lane + 16][VDST.u32];\nVGPR[pass * 32 + lane + 16][VDST.u32] = tmp\nendfor\nendfor',
VOP1Op.V_PERMLANE32_SWAP_B32: 'for lane in 0 : 31 do\ntmp = VGPR[lane][SRC0.u32];\nVGPR[lane][SRC0.u32] = VGPR[lane + 32][VDST.u32];\nVGPR[lane + 32][VDST.u32] = tmp\nendfor',
VOP1Op.V_CVT_F32_BF16: "D0.f32 = 32'F({ S0.b16, 16'0U })\nwhere:\nSRC0 = First operand for instruction.\nVSRC1 = Second operand for instruction.\nOP = Instructions.\nAll VOPC instructions can alternatively be encoded in the VOP3A format.",
VOP1Op.V_CVT_F32_BF16: "D0.f32 = 32'F({ S0.b16, 16'0U })",
}
VOP2Op_PCODE = {
@@ -751,7 +751,7 @@ VOP2Op_PCODE = {
VOP2Op.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16',
VOP2Op.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16',
VOP2Op.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? S0.i16 : S1.i16',
VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))",
VOP2Op.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32',
VOP2Op.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32',
VOP2Op.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32',
@@ -962,7 +962,7 @@ VOP3AOp_PCODE = {
VOP3AOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOP3AOp.V_CMPX_T_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.\nwhere:\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nOP = DS instructions.\nADDR = Source LDS address VGPR 0 - 255.\nDATA0 = Source data0 VGPR 0 - 255.\nDATA1 = Source data1 VGPR 0 - 255.\nVDST = Destination VGPR 0- 255.",
VOP3AOp.V_CMPX_T_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
VOP3AOp.V_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32',
VOP3AOp.V_ADD_F32: 'D0.f32 = S0.f32 + S1.f32',
VOP3AOp.V_SUB_F32: 'D0.f32 = S0.f32 - S1.f32',
@@ -1004,7 +1004,7 @@ VOP3AOp_PCODE = {
VOP3AOp.V_MAX_I16: 'D0.i16 = S0.i16 >= S1.i16 ? S0.i16 : S1.i16',
VOP3AOp.V_MIN_U16: 'D0.u16 = S0.u16 < S1.u16 ? S0.u16 : S1.u16',
VOP3AOp.V_MIN_I16: 'D0.i16 = S0.i16 < S1.i16 ? S0.i16 : S1.i16',
VOP3AOp.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
VOP3AOp.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))",
VOP3AOp.V_ADD_U32: 'D0.u32 = S0.u32 + S1.u32',
VOP3AOp.V_SUB_U32: 'D0.u32 = S0.u32 - S1.u32',
VOP3AOp.V_SUBREV_U32: 'D0.u32 = S1.u32 - S0.u32',
@@ -1015,7 +1015,7 @@ VOP3AOp_PCODE = {
VOP3AOp.V_FMAC_F32: 'D0.f32 = fma(S0.f32, S1.f32, D0.f32)',
VOP3AOp.V_PK_FMAC_F16: 'D0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16);\nD0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16)',
VOP3AOp.V_XNOR_B32: 'D0.u32 = ~(S0.u32 ^ S1.u32)',
VOP3AOp.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP3AOp.V_MOV_B32: 'D0.b32 = S0.b32',
VOP3AOp.V_READFIRSTLANE_B32: "declare lane : 32'I;\nif EXEC == 0x0LL then\nlane = 0;\n// Force lane 0 if all lanes are disabled\nelse\nlane = s_ff1_i32_b64(EXEC);\n// Lowest active lane\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP3AOp.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP3AOp.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
@@ -1045,37 +1045,37 @@ VOP3AOp_PCODE = {
VOP3AOp.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP3AOp.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP3AOp.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
VOP3AOp.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP3AOp.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP3AOp.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP3AOp.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)',
VOP3AOp.V_LOG_F32: 'D0.f32 = log2(S0.f32)',
VOP3AOp.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32',
VOP3AOp.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
VOP3AOp.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP3AOp.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)',
VOP3AOp.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP3AOp.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
VOP3AOp.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP3AOp.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)',
VOP3AOp.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
VOP3AOp.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
VOP3AOp.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP3AOp.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))",
VOP3AOp.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))",
VOP3AOp.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP3AOp.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
VOP3AOp.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_U32(0x00000000) => 0xffffffff\nV_FFBH_U32(0x800000ff) => 0\nV_FFBH_U32(0x100000ff) => 3\nV_FFBH_U32(0x0000ffff) => 16\nV_FFBH_U32(0x00000001) => 31",
VOP3AOp.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBL_B32(0x00000000) => 0xffffffff\nV_FFBL_B32(0xff000001) => 0\nV_FFBL_B32(0xff000008) => 3\nV_FFBL_B32(0xffff0000) => 16\nV_FFBL_B32(0x80000000) => 31",
VOP3AOp.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_FFBH_I32(0x00000000) => 0xffffffff\nV_FFBH_I32(0x40000000) => 1\nV_FFBH_I32(0x80000000) => 1\nV_FFBH_I32(0x0fffffff) => 4\nV_FFBH_I32(0xffff0000) => 16\nV_FFBH_I32(0xfffffffe) => 31\nV_FFBH_I32(0xffffffff) => 0xffffffff',
VOP3AOp.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
VOP3AOp.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP3AOp.V_FFBH_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP3AOp.V_FFBL_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP3AOp.V_FFBH_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor',
VOP3AOp.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif',
VOP3AOp.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif',
VOP3AOp.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
VOP3AOp.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
VOP3AOp.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
VOP3AOp.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif",
VOP3AOp.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif",
VOP3AOp.V_MOV_B64: 'D0.b64 = S0.b64',
VOP3AOp.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP3AOp.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP3AOp.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP3AOp.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
VOP3AOp.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP3AOp.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
VOP3AOp.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP3AOp.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
VOP3AOp.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP3AOp.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16",
VOP3AOp.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)',
VOP3AOp.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)",
VOP3AOp.V_LOG_F16: 'D0.f16 = log2(S0.f16)',
VOP3AOp.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)",
VOP3AOp.V_MAD_I32_I24: "D0.i32 = 32'I(S0.i24) * 32'I(S1.i24) + S2.i32",
VOP3AOp.V_MAD_U32_U24: "D0.u32 = 32'U(S0.u24) * 32'U(S1.u24) + S2.u32",
VOP3AOp.V_CUBEID_F32: '// Set D0.f = cubemap face ID ({0.0, 1.0, ..., 5.0}).\n// XYZ coordinate is given in (S0.f, S1.f, S2.f).\n// S0.f = x\n// S1.f = y\n// S2.f = z\nif ((abs(S2.f32) >= abs(S0.f32)) && (abs(S2.f32) >= abs(S1.f32))) then\nif S2.f32 < 0.0F then\nD0.f32 = 5.0F\nelse\nD0.f32 = 4.0F\nendif\nelsif abs(S1.f32) >= abs(S0.f32) then\nif S1.f32 < 0.0F then\nD0.f32 = 3.0F\nelse\nD0.f32 = 2.0F\nendif\nelse\nif S0.f32 < 0.0F then\nD0.f32 = 1.0F\nelse\nD0.f32 = 0.0F\nendif\nendif',
@@ -1143,8 +1143,8 @@ VOP3AOp_PCODE = {
VOP3AOp.V_FMA_F16: 'D0.f16 = fma(S0.f16, S1.f16, S2.f16)',
VOP3AOp.V_DIV_FIXUP_F16: "sign_out = (sign(S1.f16) ^ sign(S2.f16));\nif isNAN(64'F(S2.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S2.f16)))\nelsif isNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif ((64'F(S1.f16) == 0.0) && (64'F(S2.f16) == 0.0)) then\n// 0/0\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(abs(S1.f16)) == +INF) && (64'F(abs(S2.f16)) == +INF)) then\n// inf/inf\nD0.f16 = 16'F(0xfe00)\nelsif ((64'F(S1.f16) == 0.0) || (64'F(abs(S2.f16)) == +INF)) then\n// x/0, or inf/y\nD0.f16 = sign_out ? -INF.f16 : +INF.f16\nelsif ((64'F(abs(S1.f16)) == +INF) || (64'F(S2.f16) == 0.0)) then\n// x/inf, 0/y\nD0.f16 = sign_out ? -16'0.0 : 16'0.0\nelse\nD0.f16 = sign_out ? -abs(S0.f16) : abs(S0.f16)\nendif",
VOP3AOp.V_LSHL_ADD_U64: 'D0.u64 = (S0.u64 << S1.u32[2 : 0].u32) + S2.u64',
VOP3AOp.V_BITOP3_B16: "TTBL = { INST.OMOD[1 : 0], INST.ABS[2 : 0], INST.NEG[2 : 0] };\ntmp = 16'0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 16'U(~S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 16'U(~S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 16'U(~S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 16'U(~S0.b16 & S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 16'U(S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 16'U(S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 16'U(S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 16'U(S0.b16 & S1.b16 & S2.b16) : 16'0U));\nD.b16 = tmp.b16\n{ OMOD[1:0], ABS[2:0], NEG[2:0] }\nD0[i] = TTBL[{S0[i], S1[i], S2[i]}]",
VOP3AOp.V_BITOP3_B32: "TTBL = { INST.OMOD[1 : 0], INST.ABS[2 : 0], INST.NEG[2 : 0] };\ntmp = 0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 32'U(~S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 32'U(~S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 32'U(~S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 32'U(~S0.b32 & S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 32'U(S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 32'U(S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 32'U(S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 32'U(S0.b32 & S1.b32 & S2.b32) : 0U));\nD.b32 = tmp.b32\n{ OMOD[1:0], ABS[2:0], NEG[2:0] }\nD0[i] = TTBL[{S0[i], S1[i], S2[i]}]",
VOP3AOp.V_BITOP3_B16: "TTBL = { INST.OMOD[1 : 0], INST.ABS[2 : 0], INST.NEG[2 : 0] };\ntmp = 16'0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 16'U(~S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 16'U(~S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 16'U(~S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 16'U(~S0.b16 & S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 16'U(S0.b16 & ~S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 16'U(S0.b16 & ~S1.b16 & S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 16'U(S0.b16 & S1.b16 & ~S2.b16) : 16'0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 16'U(S0.b16 & S1.b16 & S2.b16) : 16'0U));\nD.b16 = tmp.b16",
VOP3AOp.V_BITOP3_B32: "TTBL = { INST.OMOD[1 : 0], INST.ABS[2 : 0], INST.NEG[2 : 0] };\ntmp = 0U;\ntmp = (tmp | (32'I(TTBL.b32 & 0x1) != 0 ? 32'U(~S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x2) != 0 ? 32'U(~S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x4) != 0 ? 32'U(~S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x8) != 0 ? 32'U(~S0.b32 & S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x10) != 0 ? 32'U(S0.b32 & ~S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x20) != 0 ? 32'U(S0.b32 & ~S1.b32 & S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x40) != 0 ? 32'U(S0.b32 & S1.b32 & ~S2.b32) : 0U));\ntmp = (tmp | (32'I(TTBL.b32 & 0x80) != 0 ? 32'U(S0.b32 & S1.b32 & S2.b32) : 0U));\nD.b32 = tmp.b32",
VOP3AOp.V_CVT_SCALEF32_PK_FP8_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_fp8_scale(S0.f32, scale.u8);\ntmp1 = f32_to_fp8_scale(S1.f32, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_PK_BF8_F32: "scale = 32'U(exponent(S2.f32));\ntmp0 = f32_to_bf8_scale(S0.f32, scale.u8);\ntmp1 = f32_to_bf8_scale(S1.f32, scale.u8);\ndstword = OPSEL[3].i32 * 16;\nVGPR[laneId][VDST.u32][dstword + 15 : dstword].b16 = { tmp1, tmp0 };\n// Other destination bits are preserved",
VOP3AOp.V_CVT_SCALEF32_SR_FP8_F32: "scale = 32'U(exponent(S2.f32));\ntmp = f32_to_fp8_sr_scale(S0.f32, S1.u32, scale.u8);\ndstbyte = OPSEL[3 : 2].i32 * 8;\nVGPR[laneId][VDST.u32][dstbyte + 7 : dstbyte].fp8 = tmp;\n// Other destination bits are preserved",
@@ -1202,16 +1202,16 @@ VOP3AOp_PCODE = {
VOP3AOp.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64',
VOP3AOp.V_MIN_F64: "if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S1.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S0.f64\nelse\n// Note: there's no IEEE case here like there is for V_MAX_F64.\nD0.f64 = S0.f64 < S1.f64 ? S0.f64 : S1.f64\nendif",
VOP3AOp.V_MAX_F64: 'if (WAVE_MODE.IEEE && isSignalNAN(S0.f64)) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif (WAVE_MODE.IEEE && isSignalNAN(S1.f64)) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == +0.0) && (S1.f64 == -0.0)) then\nD0.f64 = S0.f64\nelsif ((S0.f64 == -0.0) && (S1.f64 == +0.0)) then\nD0.f64 = S1.f64\nelsif WAVE_MODE.IEEE then\nD0.f64 = S0.f64 >= S1.f64 ? S0.f64 : S1.f64\nelse\nD0.f64 = S0.f64 > S1.f64 ? S0.f64 : S1.f64\nendif',
VOP3AOp.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32\nldexp()',
VOP3AOp.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32',
VOP3AOp.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32',
VOP3AOp.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)",
VOP3AOp.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)",
VOP3AOp.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32\nldexp()',
VOP3AOp.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32',
VOP3AOp.V_READLANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nD0.b32 = VGPR[lane][SRC0.u32]',
VOP3AOp.V_WRITELANE_B32: 'lane = S1.u32[5 : 0];\n// Lane select\nVGPR[lane][VDST.u32] = S0.b32',
VOP3AOp.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp",
VOP3AOp.V_MBCNT_LO_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp",
VOP3AOp.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp\nv_mbcnt_lo_u32_b32 v0, -1, 0\nv_mbcnt_hi_u32_b32 v0, -1, v0\n// v0 now contains laneId\nv_mbcnt_lo_u32_b32 v0, vcc_lo, 0\nv_mbcnt_hi_u32_b32 v0, vcc_hi, v0 // Note vcc_hi is passed in for second instruction\n// v0 now contains position among lanes with VCC=1",
VOP3AOp.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp",
VOP3AOp.V_LSHLREV_B64: 'D0.u64 = (S1.u64 << S0[5 : 0].u32)',
VOP3AOp.V_LSHRREV_B64: 'D0.u64 = (S1.u64 >> S0[5 : 0].u32)',
VOP3AOp.V_ASHRREV_I64: 'D0.i64 = (S1.i64 >> S0[5 : 0].u32)',
@@ -1276,9 +1276,9 @@ VOP3POp_PCODE = {
VOP3POp.V_DOT2_F32_BF16: "tmp = 32'F(S0[15 : 0].bf16) * 32'F(S1[15 : 0].bf16);\ntmp += 32'F(S0[31 : 16].bf16) * 32'F(S1[31 : 16].bf16);\ntmp += S2.f32;\nD0.f32 = tmp",
VOP3POp.V_PK_MINIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_minimum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_minimum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32",
VOP3POp.V_PK_MAXIMUM3_F16: "tmp[31 : 16].f16 = 16'F(v_maximum3_f16(S0[31 : 16].f16, S1[31 : 16].f16, S2[31 : 16].f16));\ntmp[15 : 0].f16 = 16'F(v_maximum3_f16(S0[15 : 0].f16, S1[15 : 0].f16, S2[15 : 0].f16));\nD0.b32 = tmp.b32",
VOP3POp.V_MAD_MIX_F32: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = in[0] * in[1] + in[2]",
VOP3POp.V_MAD_MIXLO_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(in[0] * in[1] + in[2])",
VOP3POp.V_MAD_MIXHI_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(in[0] * in[1] + in[2])",
VOP3POp.V_MAD_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = in[0] * in[1] + in[2]",
VOP3POp.V_MAD_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(in[0] * in[1] + in[2])",
VOP3POp.V_MAD_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(in[0] * in[1] + in[2])",
VOP3POp.V_DOT2_F32_F16: 'tmp = S2.f32;\ntmp += f16_to_f32(S0[15 : 0].f16) * f16_to_f32(S1[15 : 0].f16);\ntmp += f16_to_f32(S0[31 : 16].f16) * f16_to_f32(S1[31 : 16].f16);\nD0.f32 = tmp',
VOP3POp.V_DOT2_I32_I16: 'tmp = S2.i32;\ntmp += i16_to_i32(S0[15 : 0].i16) * i16_to_i32(S1[15 : 0].i16);\ntmp += i16_to_i32(S0[31 : 16].i16) * i16_to_i32(S1[31 : 16].i16);\nD0.i32 = tmp',
VOP3POp.V_DOT2_U32_U16: 'tmp = S2.u32;\ntmp += u16_to_u32(S0[15 : 0].u16) * u16_to_u32(S1[15 : 0].u16);\ntmp += u16_to_u32(S0[31 : 16].u16) * u16_to_u32(S1[31 : 16].u16);\nD0.u32 = tmp',
@@ -1286,12 +1286,12 @@ VOP3POp_PCODE = {
VOP3POp.V_DOT4_U32_U8: 'tmp = S2.u32;\ntmp += u8_to_u32(S0[7 : 0].u8) * u8_to_u32(S1[7 : 0].u8);\ntmp += u8_to_u32(S0[15 : 8].u8) * u8_to_u32(S1[15 : 8].u8);\ntmp += u8_to_u32(S0[23 : 16].u8) * u8_to_u32(S1[23 : 16].u8);\ntmp += u8_to_u32(S0[31 : 24].u8) * u8_to_u32(S1[31 : 24].u8);\nD0.u32 = tmp',
VOP3POp.V_DOT8_I32_I4: 'tmp = S2.i32;\ntmp += i4_to_i32(S0[3 : 0].i4) * i4_to_i32(S1[3 : 0].i4);\ntmp += i4_to_i32(S0[7 : 4].i4) * i4_to_i32(S1[7 : 4].i4);\ntmp += i4_to_i32(S0[11 : 8].i4) * i4_to_i32(S1[11 : 8].i4);\ntmp += i4_to_i32(S0[15 : 12].i4) * i4_to_i32(S1[15 : 12].i4);\ntmp += i4_to_i32(S0[19 : 16].i4) * i4_to_i32(S1[19 : 16].i4);\ntmp += i4_to_i32(S0[23 : 20].i4) * i4_to_i32(S1[23 : 20].i4);\ntmp += i4_to_i32(S0[27 : 24].i4) * i4_to_i32(S1[27 : 24].i4);\ntmp += i4_to_i32(S0[31 : 28].i4) * i4_to_i32(S1[31 : 28].i4);\nD0.i32 = tmp',
VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp',
VOP3POp.V_MFMA_F32_16X16X128_F8F6F4: 'D = A (16x128) * B (128x16) + C (16x16)\nA, B: (16*128) elements * 8 bits/element = 16384 bits, divide by 64 lanes * 32 bits/register-lane = 8\nregisters C, D: (16*16) elements * 32 bits/element = 8192 bits, divide by 64 lanes * 32 bits/register-\nlane = 4 registers',
VOP3POp.V_MFMA_F32_32X32X64_F8F6F4: 'D = A (32x64) * B (64x32) + C (32x32)\nA, B: (32*64) elements * 8 bits/element = 16384 bits, divide by 64 lanes * 32 bits/register-lane = 8\nregisters C, D: (32*32) elements * 32 bits/element = 32768 bits, divide by 64 lanes * 32 bits/register-\nlane = 16 registers',
VOP3POp.V_MFMA_F32_16X16X128_F8F6F4: 'D = A (16x128) * B (128x16) + C (16x16)',
VOP3POp.V_MFMA_F32_32X32X64_F8F6F4: 'D = A (32x64) * B (64x32) + C (32x32)',
VOP3POp.V_PK_FMA_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = fma(S0[31 : 0].f32, S1[31 : 0].f32, S2[31 : 0].f32);\ntmp[63 : 32].f32 = fma(S0[63 : 32].f32, S1[63 : 32].f32, S2[63 : 32].f32);\nD0.b64 = tmp",
VOP3POp.V_PK_MUL_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 * S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 * S1[63 : 32].f32;\nD0.b64 = tmp",
VOP3POp.V_PK_ADD_F32: "declare tmp : 64'B;\ntmp[31 : 0].f32 = S0[31 : 0].f32 + S1[31 : 0].f32;\ntmp[63 : 32].f32 = S0[63 : 32].f32 + S1[63 : 32].f32;\nD0.b64 = tmp",
VOP3POp.V_PK_MOV_B32: 'tmp0.u32 = S0.u32[OPSEL[0].i32 * 32 + 31 : OPSEL[0].i32 * 32];\ntmp1.u32 = S1.u32[OPSEL[1].i32 * 32 + 31 : OPSEL[1].i32 * 32];\nD0.u32[31 : 0] = tmp0.u32;\nD0.u32[63 : 32] = tmp1.u32\nv_pk_mov_b32 v0, v2, v4 op_sel:[0,1] // evaluates v0 <- v2 and v1 <- v5.\nv_pk_mov_b32 v0, s6, s6 op_sel:[0,1] // 64-bit move from scalar s[6:7].',
VOP3POp.V_PK_MOV_B32: 'tmp0.u32 = S0.u32[OPSEL[0].i32 * 32 + 31 : OPSEL[0].i32 * 32];\ntmp1.u32 = S1.u32[OPSEL[1].i32 * 32 + 31 : OPSEL[1].i32 * 32];\nD0.u32[31 : 0] = tmp0.u32;\nD0.u32[63 : 32] = tmp1.u32',
VOP3POp.V_MFMA_F32_16X16X32_BF16: 'D = A (16x32) * B (32x16) + C (16x16)',
VOP3POp.V_MFMA_I32_16X16X64_I8: 'D = A (16x64) * B (64x16) + C (16x16)',
VOP3POp.V_MFMA_F32_32X32X16_BF16: 'D = A (32x16) * B (16x32) + C (32x32)',
@@ -1556,7 +1556,7 @@ VOPCOp_PCODE = {
VOPCOp.V_CMPX_GT_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 > S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_NE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 <> S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_GE_U64: 'EXEC.u64[laneId] = D0.u64[laneId] = S0.u64 >= S1.u64;\n// D0 = VCC in VOPC encoding.',
VOPCOp.V_CMPX_T_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.\nwhere:\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nOP = DS instructions.\nADDR = Source LDS address VGPR 0 - 255.\nDATA0 = Source data0 VGPR 0 - 255.\nDATA1 = Source data1 VGPR 0 - 255.\nVDST = Destination VGPR 0- 255.",
VOPCOp.V_CMPX_T_U64: "EXEC.u64[laneId] = D0.u64[laneId] = 1'1U;\n// D0 = VCC in VOPC encoding.",
}
PSEUDOCODE_STRINGS = {DSOp: DSOp_PCODE, FLATOp: FLATOp_PCODE, GLOBALOp: GLOBALOp_PCODE, MTBUFOp: MTBUFOp_PCODE, MUBUFOp: MUBUFOp_PCODE, SCRATCHOp: SCRATCHOp_PCODE, SMEMOp: SMEMOp_PCODE, SOP1Op: SOP1Op_PCODE, SOP2Op: SOP2Op_PCODE, SOPCOp: SOPCOp_PCODE, SOPKOp: SOPKOp_PCODE, SOPPOp: SOPPOp_PCODE, VOP1Op: VOP1Op_PCODE, VOP2Op: VOP2Op_PCODE, VOP3AOp: VOP3AOp_PCODE, VOP3BOp: VOP3BOp_PCODE, VOP3POp: VOP3POp_PCODE, VOPCOp: VOPCOp_PCODE}

View File

@@ -20,8 +20,8 @@ DSOp_PCODE = {
DSOp.DS_STORE_B32: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0]',
DSOp.DS_STORE_2ADDR_B32: 'MEM[ADDR + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]',
DSOp.DS_STORE_2ADDR_STRIDE64_B32: 'MEM[ADDR + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]',
DSOp.DS_CMPSTORE_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp',
DSOp.DS_CMPSTORE_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MIN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MAX_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_ADD_F32: 'tmp = MEM[ADDR].f32;\nMEM[ADDR].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp',
@@ -43,12 +43,12 @@ DSOp_PCODE = {
DSOp.DS_STOREXCHG_RTN_B32: 'tmp = MEM[ADDR].b32;\nMEM[ADDR].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
DSOp.DS_STOREXCHG_2ADDR_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2',
DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2',
DSOp.DS_CMPSTORE_RTN_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_RTN_B32: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[ADDR].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp',
DSOp.DS_CMPSTORE_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\ncmp = DATA2.f32;\nMEM[ADDR].f32 = tmp == cmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MIN_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src < tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_MAX_RTN_F32: 'tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nMEM[ADDR].f32 = src > tmp ? src : tmp;\nRETURN_DATA.f32 = tmp',
DSOp.DS_WRAP_RTN_B32: 'tmp = MEM[ADDR].u32;\nMEM[ADDR].u32 = tmp >= DATA.u32 ? tmp - DATA.u32 : tmp + DATA2.u32;\nRETURN_DATA = tmp',
DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\n}\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset[15]) {\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n}\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n}',
DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}',
DSOp.DS_LOAD_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32',
DSOp.DS_LOAD_2ADDR_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 4U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET1.u32 * 4U].b32',
DSOp.DS_LOAD_2ADDR_STRIDE64_B32: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET0.u32 * 256U].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET1.u32 * 256U].b32',
@@ -74,8 +74,8 @@ DSOp_PCODE = {
DSOp.DS_STORE_B64: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32]',
DSOp.DS_STORE_2ADDR_B64: 'MEM[ADDR + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]',
DSOp.DS_STORE_2ADDR_STRIDE64_B64: 'MEM[ADDR + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[ADDR + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]',
DSOp.DS_CMPSTORE_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp',
DSOp.DS_CMPSTORE_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MIN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MAX_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_ADD_RTN_U64: 'tmp = MEM[ADDR].u64;\nMEM[ADDR].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
@@ -94,8 +94,8 @@ DSOp_PCODE = {
DSOp.DS_STOREXCHG_RTN_B64: 'tmp = MEM[ADDR].b64;\nMEM[ADDR].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
DSOp.DS_STOREXCHG_2ADDR_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2',
DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2',
DSOp.DS_CMPSTORE_RTN_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_RTN_B64: 'tmp = MEM[ADDR].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[ADDR].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp',
DSOp.DS_CMPSTORE_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\ncmp = DATA2.f64;\nMEM[ADDR].f64 = tmp == cmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MIN_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src < tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MAX_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nMEM[ADDR].f64 = src > tmp ? src : tmp;\nRETURN_DATA.f64 = tmp',
DSOp.DS_LOAD_B64: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4U].b32',
@@ -116,8 +116,8 @@ DSOp_PCODE = {
DSOp.DS_BVH_STACK_RTN_B32: '(stack_base, stack_index) = DECODE_ADDR(ADDR, OFFSET1);\nlast_node_ptr = DATA0;\n// First 3 passes: push data onto stack\nfor i = 0..2 do\nif DATA_VALID(DATA1[i])\nMEM[stack_base + stack_index] = DATA1[i];\nIncrement stack_index\nelsif DATA1[i] == last_node_ptr\n// Treat all further data as invalid as well.\nbreak\nendif\nendfor\n// Fourth pass: return data or pop\nif DATA_VALID(DATA1[3])\nVGPR_RTN = DATA1[3]\nelse\nVGPR_RTN = MEM[stack_base + stack_index];\nMEM[stack_base + stack_index] = INVALID_NODE;\nDecrement stack_index\nendif\nADDR = ENCODE_ADDR(stack_base, stack_index).\nfunction DATA_VALID(data):\nif data == INVALID_NODE\nreturn false\nelsif last_node_ptr != INVALID_NODE && data == last_node_ptr\n// Match last_node_ptr\nreturn false\nelse\nreturn true\nendif\nendfunction.',
DSOp.DS_STORE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32",
DSOp.DS_LOAD_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32",
DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\n// NOTE: destination lane is MOD 32 regardless of wave size.\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xF, OFFSET = 0\nVGPR[VDST] = { B, D, 0, C }\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xA, OFFSET = 0\nVGPR[VDST] = { -, D, -, 0 }",
DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\n// NOTE: destination lane is MOD 32 regardless of wave size.\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xF, OFFSET = 0\nVGPR[VDST] = { A, A, D, B }\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xA, OFFSET = 0\nVGPR[VDST] = { -, 0, -, B }",
DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\n// NOTE: destination lane is MOD 32 regardless of wave size.\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nfor i in 0 : WAVE64 ? 63 : 31 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : WAVE64 ? 63 : 31 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\n// NOTE: destination lane is MOD 32 regardless of wave size.\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % 32;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : WAVE64 ? 63 : 31 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
DSOp.DS_STORE_B96: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET.u32 + 8U].b32 = DATA[95 : 64]',
DSOp.DS_STORE_B128: 'MEM[ADDR + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[ADDR + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[ADDR + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[ADDR + OFFSET.u32 + 12U].b32 = DATA[127 : 96]',
DSOp.DS_LOAD_B96: 'RETURN_DATA[31 : 0] = MEM[ADDR + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[ADDR + OFFSET.u32 + 4U].b32;\nRETURN_DATA[95 : 64] = MEM[ADDR + OFFSET.u32 + 8U].b32',
@@ -394,11 +394,11 @@ SOP1Op_PCODE = {
SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif',
SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]',
SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CTZ_I32_B32(0xaaaaaaaa) => 1\nS_CTZ_I32_B32(0x55555555) => 0\nS_CTZ_I32_B32(0x00000000) => 0xffffffff\nS_CTZ_I32_B32(0xffffffff) => 0\nS_CTZ_I32_B32(0x00010000) => 16",
SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_CTZ_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CLZ_I32_U32(0x00000000) => 0xffffffff\nS_CLZ_I32_U32(0x0000cccc) => 16\nS_CLZ_I32_U32(0xffff3333) => 0\nS_CLZ_I32_U32(0x7fffffff) => 1\nS_CLZ_I32_U32(0x80000000) => 0\nS_CLZ_I32_U32(0xffffffff) => 0",
SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_CLZ_I32_U64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CLS_I32(0x00000000) => 0xffffffff\nS_CLS_I32(0x0000cccc) => 16\nS_CLS_I32(0xffff3333) => 16\nS_CLS_I32(0x7fffffff) => 1\nS_CLS_I32(0x80000000) => 1\nS_CLS_I32(0xffffffff) => 0xffffffff',
SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp',
SOP1Op.S_CLS_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp',
SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))",
SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))",
@@ -406,11 +406,11 @@ SOP1Op_PCODE = {
SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U",
SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U",
SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U",
SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor\ns_bitreplicate_b64 s2, s0\ns_bitreplicate_b64 s2, s2',
SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0\nS_ABS_I32(0x00000001) => 0x00000001\nS_ABS_I32(0x7fffffff) => 0x7fffffff\nS_ABS_I32(0x80000000) => 0x80000000 // Note this is negative!\nS_ABS_I32(0x80000001) => 0x7fffffff\nS_ABS_I32(0x80000002) => 0x7ffffffe\nS_ABS_I32(0xffffffff) => 0x00000001',
SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT0_I32_B32(0x00000000) => 32\nS_BCNT0_I32_B32(0xcccccccc) => 16\nS_BCNT0_I32_B32(0xffffffff) => 0",
SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor',
SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0',
SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U",
SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT1_I32_B32(0x00000000) => 0\nS_BCNT1_I32_B32(0xcccccccc) => 16\nS_BCNT1_I32_B32(0xffffffff) => 32",
SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U",
SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U',
SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL',
@@ -441,12 +441,12 @@ SOP1Op_PCODE = {
SOP1Op.S_AND_NOT0_WREXEC_B32: 'EXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U',
SOP1Op.S_AND_NOT0_WREXEC_B64: 'EXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_AND_NOT1_WREXEC_B32: 'EXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U',
SOP1Op.S_AND_NOT1_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL\n// V0 holds the index value per lane\n// save exec mask for restore at the end\ns_mov_b64 s2, exec\n// exec mask of remaining (unprocessed) threads\ns_mov_b64 s4, exec\nloop:\n// get the index value for the first active lane\nv_readfirstlane_b32 s0, v0\n// find all other lanes with same index value\nv_cmpx_eq s0, v0\n<OP> // do the operation using the current EXEC mask. S0 holds the index.\n// mask out thread that was just executed\n// s_andn2_b64 s4, s4, exec\n// s_mov_b64 exec, s4\ns_andn2_wrexec_b64 s4, s4 // replaces above 2 ops\n// repeat until EXEC==0\ns_cbranch_scc1 loop\ns_mov_b64 exec, s2',
SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32\ns_mov_b32 m0, 10\ns_movrels_b32 s5, s7',
SOP1Op.S_AND_NOT1_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32',
SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b64 = SGPR[addr].b64',
SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32\ns_mov_b32 m0, 10\ns_movreld_b32 s5, s7',
SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32',
SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b64 = S0.b64',
SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nSGPR[addrd].b32 = SGPR[addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\ns_movrelsd_2_b32 s5, s7',
SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nSGPR[addrd].b32 = SGPR[addrs].b32',
SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL',
SOP1Op.S_SETPC_B64: 'PC = S0.i64',
SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64',
@@ -475,7 +475,7 @@ SOP2Op_PCODE = {
SOP2Op.S_SUB_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32',
SOP2Op.S_ADDC_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADDC_U32.\nD0.u32 = tmp.u32",
SOP2Op.S_SUBB_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUBB_U32.\nD0.u32 = tmp.u32",
SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0\nS_ABSDIFF_I32(0x00000002, 0x00000005) => 0x00000003\nS_ABSDIFF_I32(0xffffffff, 0x00000000) => 0x00000001\nS_ABSDIFF_I32(0x80000000, 0x00000000) => 0x80000000 // Note: result is negative!\nS_ABSDIFF_I32(0x80000000, 0x00000001) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xffffffff) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xfffffffe) => 0x7ffffffe',
SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0',
SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U',
@@ -616,13 +616,13 @@ SOPKOp_PCODE = {
}
SOPPOp_PCODE = {
SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor\ns_nop 0 // Wait 1 cycle.\ns_nop 0xf // Wait 16 cycles.',
SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor',
SOPPOp.S_SLEEP: 's_sleep 0 // Wait for 0 clocks.\ns_sleep 1 // Wait for 1-64 clocks.\ns_sleep 2 // Wait for 65-128 clocks.',
SOPPOp.S_DELAY_ALU: 'v_mov_b32 v3, v0\nv_lshlrev_b32 v30, 1, v31\nv_lshlrev_b32 v24, 1, v25\ns_delay_alu instid0(INSTID_VALU_DEP_3) | instskip(INSTSKIP_SKIP_1) | instid1(INSTID_VALU_DEP_1)\n// 1 cycle delay here\nv_add_f32 v0, v1, v3\nv_sub_f32 v11, v9, v9\n// 2 cycles delay here\nv_mul_f32 v10, v13, v11',
SOPPOp.S_WAITCNT: 'expcnt <= WaitEXPCNT\nlgkmcnt <= WaitLGKMCNT\nvmcnt <= WaitVMCNT',
SOPPOp.S_TRAP: 'TrapID = SIMM16.u16[7 : 0];\n"Wait for all instructions to complete";\n// PC passed into trap handler points to S_TRAP itself,\n// *not* to the next instruction.\n{ TTMP[1], TTMP[0] } = { 7\'0, HT[0], TrapID[7 : 0], PC[47 : 0] };\nPC = TBA.i64;\n// trap base address\nWAVE_STATUS.PRIV = 1\'1U',
SOPPOp.S_CODE_END: '...\ns_endpgm // last real instruction in shader buffer\ns_code_end // 1\ns_code_end // 2\ns_code_end // 3\ns_code_end // 4\ns_code_end // done!',
SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.\ns_branch label // Set SIMM16 = +4 = 0x0004\ns_nop 0 // 4 bytes\nlabel:\ns_nop 0 // 4 bytes\ns_branch label // Set SIMM16 = -8 = 0xfff8",
SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.",
SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_VCCZ: "if VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
@@ -637,16 +637,16 @@ SOPPOp_PCODE = {
}
VINTERPOp_PCODE = {
VINTERPOp.V_INTERP_P10_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f32, S1.f32, VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f32)\ns_mov_b32 m0, s0 // assume s0 contains newprim mask\nlds_param_load v0, attr0 // v0 is a temporary register\nv_interp_p10_f32 v3, v0, v1, v0 // v1 contains i coordinate\nv_interp_p2_f32 v3, v0, v2, v3 // v2 contains j coordinate',
VINTERPOp.V_INTERP_P10_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f32, S1.f32, VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f32)',
VINTERPOp.V_INTERP_P2_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f32, S1.f32, S2.f32)',
VINTERPOp.V_INTERP_P10_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f16), S1.f32, 32'F(VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f16))",
VINTERPOp.V_INTERP_P2_F16_F32: "D0.f16 = 16'F(fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f16), S1.f32, S2.f32))",
VINTERPOp.V_INTERP_P10_RTZ_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f16), S1.f32, 32'F(VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f16))",
VINTERPOp.V_INTERP_P2_RTZ_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f16), S1.f32, S2.f32)\nOFFSET0 = Unsigned byte offset added to the address from the ADDR VGPR.\nOFFSET1 = Unsigned byte offset added to the address from the ADDR VGPR.\nGDS = Set if GDS, cleared if LDS.\nOP = DS instruction opcode\nADDR = Source LDS address VGPR 0 - 255.\nDATA0 = Source data0 VGPR 0 - 255.\nDATA1 = Source data1 VGPR 0 - 255.\nVDST = Destination VGPR 0- 255.",
VINTERPOp.V_INTERP_P2_RTZ_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f16), S1.f32, S2.f32)",
}
VOP1Op_PCODE = {
VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32',
VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
@@ -677,59 +677,59 @@ VOP1Op_PCODE = {
VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)',
VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)',
VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32',
VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)',
VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)',
VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))",
VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))",
VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CLZ_I32_U32(0x00000000) => 0xffffffff\nV_CLZ_I32_U32(0x800000ff) => 0\nV_CLZ_I32_U32(0x100000ff) => 3\nV_CLZ_I32_U32(0x0000ffff) => 16\nV_CLZ_I32_U32(0x00000001) => 31",
VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CTZ_I32_B32(0x00000000) => 0xffffffff\nV_CTZ_I32_B32(0xff000001) => 0\nV_CTZ_I32_B32(0xff000008) => 3\nV_CTZ_I32_B32(0xffff0000) => 16\nV_CTZ_I32_B32(0x80000000) => 31",
VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CLS_I32(0x00000000) => 0xffffffff\nV_CLS_I32(0x40000000) => 1\nV_CLS_I32(0x80000000) => 1\nV_CLS_I32(0x0fffffff) => 4\nV_CLS_I32(0xffff0000) => 16\nV_CLS_I32(0xfffffffe) => 31\nV_CLS_I32(0xffffffff) => 0xffffffff',
VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor',
VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif',
VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif',
VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32\ns_mov_b32 m0, 10\nv_movreld_b32 v5, v7',
VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32\ns_mov_b32 m0, 10\nv_movrels_b32 v5, v7',
VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, 10\nv_movrelsd_b32 v5, v7',
VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\nv_movrelsd_2_b32 v5, v7',
VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif",
VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif",
VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32',
VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32',
VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32',
VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32',
VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()",
VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()",
VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16",
VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)',
VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)",
VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)',
VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)",
VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif",
VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif",
VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif",
VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif",
VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)',
VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif",
VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)',
VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN",
VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN",
VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))",
VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))",
VOP1Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n.i32 <= 0 then\nreturn 8'0U\nelsif n >= 16'I(0xff) then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\nD0.b16 = { SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16) }",
VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)',
VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)',
VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp',
VOP1Op.V_SWAP_B16: 'tmp = D0.b16;\nD0.b16 = S0.b16;\nS0.b16 = tmp',
VOP1Op.V_PERMLANE64_B32: "declare tmp : 32'B[64];\ndeclare lane : 32'U;\nif WAVE32 then\n// Supported in wave64 ONLY; treated as scalar NOP in wave32\ns_nop(16'0U)\nelse\nfor lane in 0U : 63U do\n// Copy original S0 in case D==S0\ntmp[lane] = VGPR[lane][SRC0.u32]\nendfor;\nfor lane in 0U : 63U do\naltlane = { ~lane[5], lane[4 : 0] };\n// 0<->32, ..., 31<->63\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altlane]\nendif\nendfor\nendif",
VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\ntmp = VGPR[laneId][addrd].b32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32;\nVGPR[laneId][addrs].b32 = tmp\ns_mov_b32 m0, ((20 << 16) | 10)\nv_swaprel_b32 v5, v7',
VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\ntmp = VGPR[laneId][addrd].b32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32;\nVGPR[laneId][addrs].b32 = tmp',
VOP1Op.V_NOT_B16: 'D0.u16 = ~S0.u16',
VOP1Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))",
VOP1Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }\nSRC0 = First operand for instruction.\nVSRC1 = Second operand for instruction.\nOP = Instruction opcode.\nAll VOPC instructions can alternatively be encoded in the VOP3 format.",
VOP1Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }",
}
VOP2Op_PCODE = {
@@ -777,7 +777,7 @@ VOP2Op_PCODE = {
VOP2Op.V_FMAAK_F16: 'D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16)',
VOP2Op.V_MAX_F16: "GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.",
VOP2Op.V_MIN_F16: "LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.",
VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))",
VOP2Op.V_PK_FMAC_F16: 'D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16);\nD0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16)',
}
@@ -1008,8 +1008,8 @@ VOP3Op_PCODE = {
VOP3Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)',
VOP3Op.V_MAX_F16: "GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif GT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.",
VOP3Op.V_MIN_F16: "LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((64'F(abs(a)) == 0.0) && (64'F(abs(b)) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(64'F(S0.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S0.f16)))\nelsif isSignalNAN(64'F(S1.f16)) then\nD0.f16 = 16'F(cvtToQuietNAN(64'F(S1.f16)))\nelsif isQuietNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isQuietNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nelse\nif isNAN(64'F(S1.f16)) then\nD0.f16 = S0.f16\nelsif isNAN(64'F(S0.f16)) then\nD0.f16 = S1.f16\nelsif LT_NEG_ZERO(S0.f16, S1.f16) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f16 = S0.f16\nelse\nD0.f16 = S1.f16\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.",
VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))",
VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32',
VOP3Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP3Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP3Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
@@ -1040,49 +1040,49 @@ VOP3Op_PCODE = {
VOP3Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP3Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP3Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)',
VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)',
VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32',
VOP3Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)',
VOP3Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP3Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)',
VOP3Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))",
VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))",
VOP3Op.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP3Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CLZ_I32_U32(0x00000000) => 0xffffffff\nV_CLZ_I32_U32(0x800000ff) => 0\nV_CLZ_I32_U32(0x100000ff) => 3\nV_CLZ_I32_U32(0x0000ffff) => 16\nV_CLZ_I32_U32(0x00000001) => 31",
VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CTZ_I32_B32(0x00000000) => 0xffffffff\nV_CTZ_I32_B32(0xff000001) => 0\nV_CTZ_I32_B32(0xff000008) => 3\nV_CTZ_I32_B32(0xffff0000) => 16\nV_CTZ_I32_B32(0x80000000) => 31",
VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor\nV_CLS_I32(0x00000000) => 0xffffffff\nV_CLS_I32(0x40000000) => 1\nV_CLS_I32(0x80000000) => 1\nV_CLS_I32(0x0fffffff) => 4\nV_CLS_I32(0xffff0000) => 16\nV_CLS_I32(0xfffffffe) => 31\nV_CLS_I32(0xffffffff) => 0xffffffff',
VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor',
VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif',
VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif',
VOP3Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32\ns_mov_b32 m0, 10\nv_movreld_b32 v5, v7',
VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32\ns_mov_b32 m0, 10\nv_movrels_b32 v5, v7',
VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, 10\nv_movrelsd_b32 v5, v7',
VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\nv_movrelsd_2_b32 v5, v7',
VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif",
VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif",
VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32',
VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32',
VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32',
VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32',
VOP3Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP3Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP3Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP3Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()",
VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()",
VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16",
VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)',
VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)",
VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)',
VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)",
VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif",
VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif",
VOP3Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif",
VOP3Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif",
VOP3Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)',
VOP3Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif",
VOP3Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)',
VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN",
VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN",
VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))",
VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))",
VOP3Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n.i32 <= 0 then\nreturn 8'0U\nelsif n >= 16'I(0xff) then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\nD0.b16 = { SAT8(S0[31 : 16].i16), SAT8(S0[15 : 0].i16) }",
VOP3Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)',
VOP3Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)',
@@ -1151,8 +1151,8 @@ VOP3Op_PCODE = {
VOP3Op.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)',
VOP3Op.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32",
VOP3Op.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32",
VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor\nv_mov_b32 s0, 0x87654321;\nv_mov_b32 s1, 0x0fedcba9;\nv_permlane16_b32 v1, v0, s0, s1;\n// ROW 0:\n// v1.lane[0] <- v0.lane[1]\n// v1.lane[1] <- v0.lane[2]\n// ...\n// v1.lane[14] <- v0.lane[15]\n// v1.lane[15] <- v0.lane[0]\n//\n// ROW 1:\n// v1.lane[16] <- v0.lane[17]\n// v1.lane[17] <- v0.lane[18]\n// ...\n// v1.lane[30] <- v0.lane[31]\n// v1.lane[31] <- v0.lane[16]",
VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor\n// Note for this to work, source and destination VGPRs must be different.\n// For this rotation, lane 15 gets data from lane 16, lane 31 gets data from lane 0.\n// These are the only two lanes that need to use v_permlanex16_b32.\n// Enable only the threads that get data from their own row.\nv_mov_b32 exec_lo, 0x7fff7fff; // Lanes getting data from their own row\nv_mov_b32 s0, 0x87654321;\nv_mov_b32 s1, 0x0fedcba9;\nv_permlane16_b32 v1, v0, s0, s1 fi; // FI bit needed for lanes 14 and 30\n// ROW 0:\n// v1.lane[0] <- v0.lane[1]\n// v1.lane[1] <- v0.lane[2]\n// ...\n// v1.lane[14] <- v0.lane[15] (needs FI to read)\n// v1.lane[15] unset\n//\n// ROW 1:\n// v1.lane[16] <- v0.lane[17]\n// v1.lane[17] <- v0.lane[18]\n// ...\n// v1.lane[30] <- v0.lane[31] (needs FI to read)\n// v1.lane[31] unset\n// Enable only the threads that get data from the other row.\nv_mov_b32 exec_lo, 0x80008000; // Lanes getting data from the other row\nv_permlanex16_b32 v1, v0, s0, s1 fi; // FI bit needed for lanes 15 and 31\n// v1.lane[15] <- v0.lane[16]\n// v1.lane[31] <- v0.lane[0]",
VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor",
VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor",
VOP3Op.V_CNDMASK_B16: 'D0.u16 = VCC.u64[laneId] ? S1.u16 : S0.u16',
VOP3Op.V_MAXMIN_F32: 'D0.f32 = v_min_f32(v_max_f32(S0.f32, S1.f32), S2.f32)',
VOP3Op.V_MINMAX_F32: 'D0.f32 = v_max_f32(v_min_f32(S0.f32, S1.f32), S2.f32)',
@@ -1178,11 +1178,11 @@ VOP3Op_PCODE = {
VOP3Op.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16',
VOP3Op.V_CVT_PK_NORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);\nD0 = tmp.b32",
VOP3Op.V_CVT_PK_NORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);\nD0 = tmp.b32",
VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32\nldexp()',
VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32',
VOP3Op.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)',
VOP3Op.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp",
VOP3Op.V_MBCNT_LO_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp",
VOP3Op.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp\nv_mbcnt_lo_u32_b32 v0, -1, 0\nv_mbcnt_hi_u32_b32 v0, -1, v0\n// v0 now contains laneId\nv_mbcnt_lo_u32_b32 v0, vcc_lo, 0\nv_mbcnt_hi_u32_b32 v0, vcc_hi, v0 // Note vcc_hi is passed in for second instruction\n// v0 now contains position among lanes with VCC=1",
VOP3Op.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp",
VOP3Op.V_CVT_PK_NORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);\nD0 = tmp.b32",
VOP3Op.V_CVT_PK_NORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);\nD0 = tmp.b32",
VOP3Op.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);\nD0 = tmp.b32",
@@ -1193,7 +1193,7 @@ VOP3Op_PCODE = {
VOP3Op.V_MUL_F64: 'D0.f64 = S0.f64 * S1.f64',
VOP3Op.V_MIN_F64: 'LT_NEG_ZERO = lambda(a, b) (\n((a < b) || ((abs(a) == 0.0) && (abs(b) == 0.0) && sign(a) && !sign(b))));\n// Version of comparison where -0.0 < +0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif LT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nelse\nif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif LT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.',
VOP3Op.V_MAX_F64: 'GT_NEG_ZERO = lambda(a, b) (\n((a > b) || ((abs(a) == 0.0) && (abs(b) == 0.0) && !sign(a) && sign(b))));\n// Version of comparison where +0.0 > -0.0, differs from IEEE\nif WAVE_MODE.IEEE then\nif isSignalNAN(S0.f64) then\nD0.f64 = cvtToQuietNAN(S0.f64)\nelsif isSignalNAN(S1.f64) then\nD0.f64 = cvtToQuietNAN(S1.f64)\nelsif isQuietNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isQuietNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif GT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nelse\nif isNAN(S1.f64) then\nD0.f64 = S0.f64\nelsif isNAN(S0.f64) then\nD0.f64 = S1.f64\nelsif GT_NEG_ZERO(S0.f64, S1.f64) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f64 = S0.f64\nelse\nD0.f64 = S1.f64\nendif\nendif;\n// Inequalities in the above pseudocode behave differently from IEEE\n// when both inputs are +-0.',
VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32\nldexp()',
VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32',
VOP3Op.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32',
VOP3Op.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)",
VOP3Op.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)",
@@ -1237,15 +1237,15 @@ VOP3POp_PCODE = {
VOP3POp.V_DOT8_I32_IU4: "declare A : 32'I[8];\ndeclare B : 32'I[8];\n// Figure out whether inputs are signed/unsigned.\nfor i in 0 : 7 do\nA4 = S0[i * 4 + 3 : i * 4];\nB4 = S1[i * 4 + 3 : i * 4];\nA[i] = NEG[0].u1 ? 32'I(signext(A4.i4)) : 32'I(32'U(A4.u4));\nB[i] = NEG[1].u1 ? 32'I(signext(B4.i4)) : 32'I(32'U(B4.u4))\nendfor;\nC = S2.i32;\n// Signed multiplier/adder. Extend unsigned inputs with leading 0.\ntmp = C.i32;\ntmp += A[0] * B[0];\ntmp += A[1] * B[1];\ntmp += A[2] * B[2];\ntmp += A[3] * B[3];\ntmp += A[4] * B[4];\ntmp += A[5] * B[5];\ntmp += A[6] * B[6];\ntmp += A[7] * B[7];\nD0.i32 = tmp",
VOP3POp.V_DOT8_U32_U4: 'tmp = S2.u32;\ntmp += u4_to_u32(S0[3 : 0].u4) * u4_to_u32(S1[3 : 0].u4);\ntmp += u4_to_u32(S0[7 : 4].u4) * u4_to_u32(S1[7 : 4].u4);\ntmp += u4_to_u32(S0[11 : 8].u4) * u4_to_u32(S1[11 : 8].u4);\ntmp += u4_to_u32(S0[15 : 12].u4) * u4_to_u32(S1[15 : 12].u4);\ntmp += u4_to_u32(S0[19 : 16].u4) * u4_to_u32(S1[19 : 16].u4);\ntmp += u4_to_u32(S0[23 : 20].u4) * u4_to_u32(S1[23 : 20].u4);\ntmp += u4_to_u32(S0[27 : 24].u4) * u4_to_u32(S1[27 : 24].u4);\ntmp += u4_to_u32(S0[31 : 28].u4) * u4_to_u32(S1[31 : 28].u4);\nD0.u32 = tmp',
VOP3POp.V_DOT2_F32_BF16: 'tmp = S2.f32;\ntmp += bf16_to_f32(S0[15 : 0].bf16) * bf16_to_f32(S1[15 : 0].bf16);\ntmp += bf16_to_f32(S0[31 : 16].bf16) * bf16_to_f32(S1[31 : 16].bf16);\nD0.f32 = tmp',
VOP3POp.V_FMA_MIX_F32: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])",
VOP3POp.V_FMA_MIXLO_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
VOP3POp.V_FMA_MIXHI_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
VOP3POp.V_WMMA_F32_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F32_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F16_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f16(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.bf16(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_I32_16X16X16_IU8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(16x16) + S2.i32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_I32_16X16X16_IU4: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(16x16) + S2.i32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_FMA_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])",
VOP3POp.V_FMA_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
VOP3POp.V_FMA_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
VOP3POp.V_WMMA_F32_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_F32_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_F16_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_I32_16X16X16_IU8: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_I32_16X16X16_IU4: 'D = A (16x16) * B (16x16) + C (16x16)',
}
VOP3SDOp_PCODE = {

View File

@@ -20,7 +20,7 @@ DSOp_PCODE = {
DSOp.DS_STORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0]',
DSOp.DS_STORE_2ADDR_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 4U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 4U].b32 = DATA2[31 : 0]',
DSOp.DS_STORE_2ADDR_STRIDE64_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 256U].b32 = DATA[31 : 0];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 256U].b32 = DATA2[31 : 0]',
DSOp.DS_CMPSTORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp',
DSOp.DS_MIN_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n!sign(tmp.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp",
DSOp.DS_MAX_NUM_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\nsign(tmp.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp",
DSOp.DS_ADD_F32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].f32;\nMEM[addr].f32 += DATA.f32;\nRETURN_DATA.f32 = tmp',
@@ -42,10 +42,10 @@ DSOp_PCODE = {
DSOp.DS_STOREXCHG_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nMEM[addr].b32 = DATA.b32;\nRETURN_DATA.b32 = tmp',
DSOp.DS_STOREXCHG_2ADDR_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 4U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 4U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2',
DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B32: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 256U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 256U;\ntmp1 = MEM[addr1].b32;\ntmp2 = MEM[addr2].b32;\nMEM[addr1].b32 = DATA.b32;\nMEM[addr2].b32 = DATA2.b32;\n// Note DATA2 can be any other register\nRETURN_DATA[31 : 0] = tmp1;\nRETURN_DATA[63 : 32] = tmp2',
DSOp.DS_CMPSTORE_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_RTN_B32: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b32;\nsrc = DATA.b32;\ncmp = DATA2.b32;\nMEM[addr].b32 = tmp == cmp ? src : tmp;\nRETURN_DATA.b32 = tmp',
DSOp.DS_MIN_NUM_RTN_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 < tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && sign(src.f32) &&\n!sign(tmp.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp",
DSOp.DS_MAX_NUM_RTN_F32: "tmp = MEM[ADDR].f32;\nsrc = DATA.f32;\nif (isNAN(64'F(src.f32)) && isNAN(64'F(tmp.f32))) then\nMEM[ADDR].f32 = 32'F(cvtToQuietNAN(64'F(src.f32)))\nelsif isNAN(64'F(src.f32)) then\nMEM[ADDR].f32 = tmp.f32\nelsif isNAN(64'F(tmp.f32)) then\nMEM[ADDR].f32 = src.f32\nelsif ((src.f32 > tmp.f32) || ((abs(src.f32) == 0.0F) && (abs(tmp.f32) == 0.0F) && !sign(src.f32) &&\nsign(tmp.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f32 = src.f32\nelse\nMEM[ADDR].f32 = tmp.f32\nendif;\nRETURN_DATA.f32 = tmp",
DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset >= 0xc000) {\n// rotate\nrotate = offset[9:5];\nmask = offset[4:0];\nif (offset[10]) {\nrotate = -rotate;\n}\nfor (i = 0; i < 64; i++) {\nj = (i & mask) | ((i + rotate) & ~mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n} elsif (offset[15]) {\n// full data sharing within 4 consecutive threads\nfor (i = 0; i < 64; i+=4) {\nthread_out[i+0] = thread_valid[i+offset[1:0]]?thread_in[i+offset[1:0]]:0;\nthread_out[i+1] = thread_valid[i+offset[3:2]]?thread_in[i+offset[3:2]]:0;\nthread_out[i+2] = thread_valid[i+offset[5:4]]?thread_in[i+offset[5:4]]:0;\nthread_out[i+3] = thread_valid[i+offset[7:6]]?thread_in[i+offset[7:6]]:0;\n}\n} else { // offset[15] == 0\n// limited data sharing within 32 consecutive threads\nxor_mask = offset[14:10];\nor_mask = offset[9:5];\nand_mask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = (((i & 0x1f) & and_mask) | or_mask) ^ xor_mask;\nj |= (i & 0x20); // which group of 32\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}\n}',
DSOp.DS_SWIZZLE_B32: 'offset = offset1:offset0;\nif (offset >= 0xe000) {\n// FFT decomposition\nmask = offset[4:0];\nfor (i = 0; i < 64; i++) {\nj = reverse_bits(i & 0x1f);\nj = (j >> count_ones(mask));\nj |= (i & mask);\nj |= i & 0x20;\nthread_out[i] = thread_valid[j] ? thread_in[j] : 0;\n}',
DSOp.DS_LOAD_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32',
DSOp.DS_LOAD_2ADDR_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 4U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 4U].b32',
DSOp.DS_LOAD_2ADDR_STRIDE64_B32: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET0.u32 * 256U].b32;\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET1.u32 * 256U].b32',
@@ -71,7 +71,7 @@ DSOp_PCODE = {
DSOp.DS_STORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32]',
DSOp.DS_STORE_2ADDR_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 8U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 8U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 8U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 8U + 4U].b32 = DATA2[63 : 32]',
DSOp.DS_STORE_2ADDR_STRIDE64_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET0.u32 * 512U].b32 = DATA[31 : 0];\nMEM[addr + OFFSET0.u32 * 512U + 4U].b32 = DATA[63 : 32];\naddr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET1.u32 * 512U].b32 = DATA2[31 : 0];\nMEM[addr + OFFSET1.u32 * 512U + 4U].b32 = DATA2[63 : 32]',
DSOp.DS_CMPSTORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp',
DSOp.DS_MIN_NUM_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 < tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && sign(src.f64) &&\n!sign(tmp.f64))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MAX_NUM_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 > tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && !sign(src.f64) &&\nsign(tmp.f64))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp',
DSOp.DS_ADD_RTN_U64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].u64;\nMEM[addr].u64 += DATA.u64;\nRETURN_DATA.u64 = tmp',
@@ -90,7 +90,7 @@ DSOp_PCODE = {
DSOp.DS_STOREXCHG_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nMEM[addr].b64 = DATA.b64;\nRETURN_DATA.b64 = tmp',
DSOp.DS_STOREXCHG_2ADDR_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 8U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 8U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2',
DSOp.DS_STOREXCHG_2ADDR_STRIDE64_RTN_B64: 'addr1 = ADDR_BASE.u32 + OFFSET0.u32 * 512U;\naddr2 = ADDR_BASE.u32 + OFFSET1.u32 * 512U;\ntmp1 = MEM[addr1].b64;\ntmp2 = MEM[addr2].b64;\nMEM[addr1].b64 = DATA.b64;\nMEM[addr2].b64 = DATA2.b64;\n// Note DATA2 can be any other register\nRETURN_DATA[63 : 0] = tmp1;\nRETURN_DATA[127 : 64] = tmp2',
DSOp.DS_CMPSTORE_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp\nsrc\ncmp',
DSOp.DS_CMPSTORE_RTN_B64: 'addr = CalcDsAddr(vgpr_a.b32, offset.b32);\ntmp = MEM[addr].b64;\nsrc = DATA.b64;\ncmp = DATA2.b64;\nMEM[addr].b64 = tmp == cmp ? src : tmp;\nRETURN_DATA.b64 = tmp',
DSOp.DS_MIN_NUM_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 < tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && sign(src.f64) &&\n!sign(tmp.f64))) then\n// NOTE: -0<+0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp',
DSOp.DS_MAX_NUM_RTN_F64: 'tmp = MEM[ADDR].f64;\nsrc = DATA.f64;\nif (isNAN(src.f64) && isNAN(tmp.f64)) then\nMEM[ADDR].f64 = cvtToQuietNAN(src.f64)\nelsif isNAN(src.f64) then\nMEM[ADDR].f64 = tmp.f64\nelsif isNAN(tmp.f64) then\nMEM[ADDR].f64 = src.f64\nelsif ((src.f64 > tmp.f64) || ((abs(src.f64) == 0.0) && (abs(tmp.f64) == 0.0) && !sign(src.f64) &&\nsign(tmp.f64))) then\n// NOTE: +0>-0 is TRUE in this comparison\nMEM[ADDR].f64 = src.f64\nelse\nMEM[ADDR].f64 = tmp.f64\nendif;\nRETURN_DATA.f64 = tmp',
DSOp.DS_LOAD_B64: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nRETURN_DATA[31 : 0] = MEM[addr + OFFSET.u32].b32;\nRETURN_DATA[63 : 32] = MEM[addr + OFFSET.u32 + 4U].b32',
@@ -116,8 +116,8 @@ DSOp_PCODE = {
DSOp.DS_PK_ADD_RTN_BF16: 'tmp = MEM[ADDR].b32;\nsrc = DATA.b32;\ndst[15 : 0].bf16 = src[15 : 0].bf16 + tmp[15 : 0].bf16;\ndst[31 : 16].bf16 = src[31 : 16].bf16 + tmp[31 : 16].bf16;\nMEM[ADDR].b32 = dst.b32;\nRETURN_DATA.b32 = tmp.b32',
DSOp.DS_STORE_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nMEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32 = DATA0.u32",
DSOp.DS_LOAD_ADDTID_B32: "declare OFFSET0 : 8'U;\ndeclare OFFSET1 : 8'U;\nRETURN_DATA.u32 = MEM[32'I({ OFFSET1, OFFSET0 } + M0[15 : 0]) + laneID.i32 * 4].u32",
DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xF, OFFSET = 0\nVGPR[VDST] = { B, D, 0, C }\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xA, OFFSET = 0\nVGPR[VDST] = { -, D, -, 0 }",
DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xF, OFFSET = 0\nVGPR[VDST] = { A, A, D, B }\nVGPR[SRC0] = { A, B, C, D }\nVGPR[ADDR] = { 0, 0, 12, 4 }\nEXEC = 0xA, OFFSET = 0\nVGPR[VDST] = { -, 0, -, B }",
DSOp.DS_PERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\n// If a source thread is disabled, it does not propagate data.\nif EXEC[i].u1 then\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\ndst_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\ntmp[dst_lane] = VGPR[i][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. If multiple sources\n// select the same destination thread, the highest-numbered\n// source thread wins.\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
DSOp.DS_BPERMUTE_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\n// EXEC is applied to the source VGPR reads.\nif EXEC[src_lane].u1 then\ntmp[i] = VGPR[src_lane][DATA0]\nendif\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
DSOp.DS_BPERMUTE_FI_B32: "// VGPR[laneId][index] is the VGPR RAM\n// VDST, ADDR and DATA0 are from the microcode DS encoding\ndeclare tmp : 32'B[64];\ndeclare OFFSET : 16'U;\ndeclare DATA0 : 32'U;\ndeclare VDST : 32'U;\nnum_lanes = WAVE64 ? 64 : 32;\nfor i in 0 : num_lanes - 1 do\ntmp[i] = 0x0\nendfor;\nfor i in 0 : num_lanes - 1 do\n// ADDR needs to be divided by 4.\n// High-order bits are ignored.\nsrc_lane = 32'I(VGPR[i][ADDR] + OFFSET.b32) / 4 % num_lanes;\n// Source VGPR is read even if src_lane is invalid in EXEC mask.\ntmp[i] = VGPR[src_lane][DATA0]\nendfor;\n// Copy data into destination VGPRs. Some source\n// data may be broadcast to multiple lanes.\nfor i in 0 : num_lanes - 1 do\nif EXEC[i].u1 then\nVGPR[i][VDST] = tmp[i]\nendif\nendfor",
DSOp.DS_STORE_B96: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64]',
DSOp.DS_STORE_B128: 'addr = CalcDsAddr(vgpr_a.b32, 0x0);\nMEM[addr + OFFSET.u32].b32 = DATA[31 : 0];\nMEM[addr + OFFSET.u32 + 4U].b32 = DATA[63 : 32];\nMEM[addr + OFFSET.u32 + 8U].b32 = DATA[95 : 64];\nMEM[addr + OFFSET.u32 + 12U].b32 = DATA[127 : 96]',
@@ -163,11 +163,11 @@ SOP1Op_PCODE = {
SOP1Op.S_CMOV_B64: 'if SCC then\nD0.b64 = S0.b64\nendif',
SOP1Op.S_BREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
SOP1Op.S_BREV_B64: 'D0.u64[63 : 0] = S0.u64[0 : 63]',
SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CTZ_I32_B32(0xaaaaaaaa) => 1\nS_CTZ_I32_B32(0x55555555) => 0\nS_CTZ_I32_B32(0x00000000) => 0xffffffff\nS_CTZ_I32_B32(0xffffffff) => 0\nS_CTZ_I32_B32(0x00010000) => 16",
SOP1Op.S_CTZ_I32_B32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_CTZ_I32_B64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from LSB\nif S0.u64[i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CLZ_I32_U32(0x00000000) => 0xffffffff\nS_CLZ_I32_U32(0x0000cccc) => 16\nS_CLZ_I32_U32(0xffff3333) => 0\nS_CLZ_I32_U32(0x7fffffff) => 1\nS_CLZ_I32_U32(0x80000000) => 0\nS_CLZ_I32_U32(0xffffffff) => 0",
SOP1Op.S_CLZ_I32_U32: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_CLZ_I32_U64: "tmp = -1;\n// Set if no ones are found\nfor i in 0 : 63 do\n// Search from MSB\nif S0.u64[63 - i] == 1'1U then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp",
SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp\nS_CLS_I32(0x00000000) => 0xffffffff\nS_CLS_I32(0x0000cccc) => 16\nS_CLS_I32(0xffff3333) => 16\nS_CLS_I32(0x7fffffff) => 1\nS_CLS_I32(0x80000000) => 1\nS_CLS_I32(0xffffffff) => 0xffffffff',
SOP1Op.S_CLS_I32: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.u32[31 - i] != S0.u32[31] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp',
SOP1Op.S_CLS_I32_I64: 'tmp = -1;\n// Set if all bits are the same\nfor i in 1 : 63 do\n// Search from MSB\nif S0.u64[63 - i] != S0.u64[63] then\ntmp = i;\nbreak\nendif\nendfor;\nD0.i32 = tmp',
SOP1Op.S_SEXT_I32_I8: "D0.i32 = 32'I(signext(S0.i8))",
SOP1Op.S_SEXT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))",
@@ -175,11 +175,11 @@ SOP1Op_PCODE = {
SOP1Op.S_BITSET0_B64: "D0.u64[S0.u32[5 : 0]] = 1'0U",
SOP1Op.S_BITSET1_B32: "D0.u32[S0.u32[4 : 0]] = 1'1U",
SOP1Op.S_BITSET1_B64: "D0.u64[S0.u32[5 : 0]] = 1'1U",
SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor\ns_bitreplicate_b64 s2, s0\ns_bitreplicate_b64 s2, s2',
SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0\nS_ABS_I32(0x00000001) => 0x00000001\nS_ABS_I32(0x7fffffff) => 0x7fffffff\nS_ABS_I32(0x80000000) => 0x80000000 // Note this is negative!\nS_ABS_I32(0x80000001) => 0x7fffffff\nS_ABS_I32(0x80000002) => 0x7ffffffe\nS_ABS_I32(0xffffffff) => 0x00000001',
SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT0_I32_B32(0x00000000) => 32\nS_BCNT0_I32_B32(0xcccccccc) => 16\nS_BCNT0_I32_B32(0xffffffff) => 0",
SOP1Op.S_BITREPLICATE_B64_B32: 'tmp = S0.u32;\nfor i in 0 : 31 do\nD0.u64[i * 2] = tmp[i];\nD0.u64[i * 2 + 1] = tmp[i]\nendfor',
SOP1Op.S_ABS_I32: 'D0.i32 = S0.i32 < 0 ? -S0.i32 : S0.i32;\nSCC = D0.i32 != 0',
SOP1Op.S_BCNT0_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U",
SOP1Op.S_BCNT0_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'0U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U\nS_BCNT1_I32_B32(0x00000000) => 0\nS_BCNT1_I32_B32(0xcccccccc) => 16\nS_BCNT1_I32_B32(0xffffffff) => 32",
SOP1Op.S_BCNT1_I32_B32: "tmp = 0;\nfor i in 0 : 31 do\ntmp += S0.u32[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u32 != 0U",
SOP1Op.S_BCNT1_I32_B64: "tmp = 0;\nfor i in 0 : 63 do\ntmp += S0.u64[i] == 1'1U ? 1 : 0\nendfor;\nD0.i32 = tmp;\nSCC = D0.u64 != 0ULL",
SOP1Op.S_QUADMASK_B32: 'tmp = 0U;\nfor i in 0 : 7 do\ntmp[i] = S0.u32[i * 4 +: 4] != 0U\nendfor;\nD0.u32 = tmp;\nSCC = D0.u32 != 0U',
SOP1Op.S_QUADMASK_B64: 'tmp = 0ULL;\nfor i in 0 : 15 do\ntmp[i] = S0.u64[i * 4 +: 4] != 0ULL\nendfor;\nD0.u64 = tmp;\nSCC = D0.u64 != 0ULL',
@@ -210,12 +210,12 @@ SOP1Op_PCODE = {
SOP1Op.S_AND_NOT0_WREXEC_B32: 'EXEC.u32 = (~S0.u32 & EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U',
SOP1Op.S_AND_NOT0_WREXEC_B64: 'EXEC.u64 = (~S0.u64 & EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_AND_NOT1_WREXEC_B32: 'EXEC.u32 = (S0.u32 & ~EXEC.u32);\nD0.u32 = EXEC.u32;\nSCC = EXEC.u32 != 0U',
SOP1Op.S_AND_NOT1_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL\n// V0 holds the index value per lane\n// save exec mask for restore at the end\ns_mov_b64 s2, exec\n// exec mask of remaining (unprocessed) threads\ns_mov_b64 s4, exec\nloop:\n// get the index value for the first active lane\nv_readfirstlane_b32 s0, v0\n// find all other lanes with same index value\nv_cmpx_eq s0, v0\n<OP> // do the operation using the current EXEC mask. S0 holds the index.\n// mask out thread that was just executed\n// s_andn2_b64 s4, s4, exec\n// s_mov_b64 exec, s4\ns_andn2_wrexec_b64 s4, s4 // replaces above 2 ops\n// repeat until EXEC==0\ns_cbranch_scc1 loop\ns_mov_b64 exec, s2',
SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32\ns_mov_b32 m0, 10\ns_movrels_b32 s5, s7',
SOP1Op.S_AND_NOT1_WREXEC_B64: 'EXEC.u64 = (S0.u64 & ~EXEC.u64);\nD0.u64 = EXEC.u64;\nSCC = EXEC.u64 != 0ULL',
SOP1Op.S_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = SGPR[addr].b32',
SOP1Op.S_MOVRELS_B64: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b64 = SGPR[addr].b64',
SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32\ns_mov_b32 m0, 10\ns_movreld_b32 s5, s7',
SOP1Op.S_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b32 = S0.b32',
SOP1Op.S_MOVRELD_B64: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nSGPR[addr].b64 = S0.b64',
SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nSGPR[addrd].b32 = SGPR[addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\ns_movrelsd_2_b32 s5, s7',
SOP1Op.S_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nSGPR[addrd].b32 = SGPR[addrs].b32',
SOP1Op.S_GETPC_B64: 'D0.i64 = PC + 4LL',
SOP1Op.S_SETPC_B64: 'PC = S0.i64',
SOP1Op.S_SWAPPC_B64: 'jump_addr = S0.i64;\nD0.i64 = PC + 4LL;\nPC = jump_addr.i64',
@@ -248,7 +248,7 @@ SOP2Op_PCODE = {
SOP2Op.S_SUB_CO_I32: 'tmp = S0.i32 - S1.i32;\nSCC = ((S0.u32[31] != S1.u32[31]) && (S0.u32[31] != tmp.u32[31]));\n// signed overflow.\nD0.i32 = tmp.i32',
SOP2Op.S_ADD_CO_CI_U32: "tmp = 64'U(S0.u32) + 64'U(S1.u32) + SCC.u64;\nSCC = tmp >= 0x100000000ULL ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_ADD_CO_CI_U32.\nD0.u32 = tmp.u32",
SOP2Op.S_SUB_CO_CI_U32: "tmp = S0.u32 - S1.u32 - SCC.u32;\nSCC = 64'U(S1.u32) + SCC.u64 > 64'U(S0.u32) ? 1'1U : 1'0U;\n// unsigned overflow or carry-out for S_SUB_CO_CI_U32.\nD0.u32 = tmp.u32",
SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0\nS_ABSDIFF_I32(0x00000002, 0x00000005) => 0x00000003\nS_ABSDIFF_I32(0xffffffff, 0x00000000) => 0x00000001\nS_ABSDIFF_I32(0x80000000, 0x00000000) => 0x80000000 // Note: result is negative!\nS_ABSDIFF_I32(0x80000000, 0x00000001) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xffffffff) => 0x7fffffff\nS_ABSDIFF_I32(0x80000000, 0xfffffffe) => 0x7ffffffe',
SOP2Op.S_ABSDIFF_I32: 'D0.i32 = S0.i32 - S1.i32;\nif D0.i32 < 0 then\nD0.i32 = -D0.i32\nendif;\nSCC = D0.i32 != 0',
SOP2Op.S_LSHL_B32: 'D0.u32 = (S0.u32 << S1[4 : 0].u32);\nSCC = D0.u32 != 0U',
SOP2Op.S_LSHL_B64: 'D0.u64 = (S0.u64 << S1[5 : 0].u32);\nSCC = D0.u64 != 0ULL',
SOP2Op.S_LSHR_B32: 'D0.u32 = (S0.u32 >> S1[4 : 0].u32);\nSCC = D0.u32 != 0U',
@@ -380,13 +380,13 @@ SOPKOp_PCODE = {
}
SOPPOp_PCODE = {
SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor\ns_nop 0 // Wait 1 cycle.\ns_nop 0xf // Wait 16 cycles.',
SOPPOp.S_NOP: 'for i in 0U : SIMM16.u16[3 : 0].u32 do\nnop()\nendfor',
SOPPOp.S_SLEEP: 's_sleep { duration: 0 } // Wait for 0 clocks.\ns_sleep { duration: 1 } // Wait for 1-64 clocks.\ns_sleep { duration: 2 } // Wait for 65-128 clocks.\ns_sleep { sleep_forever: 1 } // Wait until an event occurs.',
SOPPOp.S_DELAY_ALU: 'v_mov_b32 v3, v0\nv_lshlrev_b32 v30, 1, v31\nv_lshlrev_b32 v24, 1, v25\ns_delay_alu { instid0: INSTID_VALU_DEP_3, instskip: INSTSKIP_SKIP_1, instid1: INSTID_VALU_DEP_1 }\n// 1 cycle delay here\nv_add_f32 v0, v1, v3\nv_sub_f32 v11, v9, v9\n// 2 cycles delay here\nv_mul_f32 v10, v13, v11',
SOPPOp.S_TRAP: 'TrapID = SIMM16.u16[3 : 0].u8;\n"Wait for all instructions to complete";\n// PC passed into trap handler points to S_TRAP itself,\n// *not* to the next instruction.\n{ TTMP[1], TTMP[0] } = { TrapID[3 : 0], 12\'0, PC[47 : 0] };\nPC = TBA.i64;\n// trap base address\nWAVE_STATUS.PRIV = 1\'1U',
SOPPOp.S_BARRIER_WAIT: ";\n// barrierBit 0: reserved\n// barrierBit 1: workgroup\n// barrierBit 2: trap\nbarrierBit = SIMM16.i32 >= 0 ? 0 : SIMM16.i32 == -1 ? 1 : 2;\nwhile !WAVE_BARRIER_COMPLETE[barrierBit] do\n// Implemented as a power-saving idle\ns_nop(16'0U)\nendwhile;\nWAVE_BARRIER_COMPLETE[barrierBit] = 1'0U",
SOPPOp.S_CODE_END: '...\ns_endpgm // last real instruction in shader buffer\ns_code_end // 1\ns_code_end // 2\ns_code_end // 3\ns_code_end // 4\ns_code_end // done!',
SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.\ns_branch label // Set SIMM16 = +4 = 0x0004\ns_nop 0 // 4 bytes\nlabel:\ns_nop 0 // 4 bytes\ns_branch label // Set SIMM16 = -8 = 0xfff8",
SOPPOp.S_BRANCH: "PC = PC + signext(SIMM16.i16 * 16'4) + 4LL;\n// short jump.",
SOPPOp.S_CBRANCH_SCC0: "if SCC == 1'0U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_SCC1: "if SCC == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
SOPPOp.S_CBRANCH_VCCZ: "if VCCZ.u1 == 1'1U then\nPC = PC + signext(SIMM16.i16 * 16'4) + 4LL\nelse\nPC = PC + 4LL\nendif",
@@ -631,7 +631,7 @@ VIMAGEOp_PCODE = {
}
VINTERPOp_PCODE = {
VINTERPOp.V_INTERP_P10_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f32, S1.f32, VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f32)\ns_mov_b32 m0, s0 // assume s0 contains newprim mask\nlds_param_load v0, attr0 // v0 is a temporary register\nv_interp_p10_f32 v3, v0, v1, v0 // v1 contains i coordinate\nv_interp_p2_f32 v3, v0, v2, v3 // v2 contains j coordinate',
VINTERPOp.V_INTERP_P10_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f32, S1.f32, VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f32)',
VINTERPOp.V_INTERP_P2_F32: 'D0.f32 = fma(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f32, S1.f32, S2.f32)',
VINTERPOp.V_INTERP_P10_F16_F32: "D0.f32 = fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 1U][SRC0.u32].f16), S1.f32, 32'F(VGPR[laneId.u32 &\n0xfffffffcU][SRC2.u32].f16))",
VINTERPOp.V_INTERP_P2_F16_F32: "D0.f16 = 16'F(fma(32'F(VGPR[(laneId.u32 & 0xfffffffcU) + 2U][SRC0.u32].f16), S1.f32, S2.f32))",
@@ -640,7 +640,7 @@ VINTERPOp_PCODE = {
}
VOP1Op_PCODE = {
VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP1Op.V_MOV_B32: 'D0.b32 = S0.b32',
VOP1Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP1Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP1Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
@@ -671,63 +671,63 @@ VOP1Op_PCODE = {
VOP1Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP1Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP1Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP1Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)',
VOP1Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)',
VOP1Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32',
VOP1Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP1Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)',
VOP1Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP1Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP1Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)',
VOP1Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP1Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))",
VOP1Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))",
VOP1Op.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP1Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
VOP1Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP1Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP1Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor',
VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP1Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif',
VOP1Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif',
VOP1Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32\ns_mov_b32 m0, 10\nv_movreld_b32 v5, v7',
VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32\ns_mov_b32 m0, 10\nv_movrels_b32 v5, v7',
VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, 10\nv_movrelsd_b32 v5, v7',
VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\nv_movrelsd_2_b32 v5, v7',
VOP1Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif",
VOP1Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif",
VOP1Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32',
VOP1Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32',
VOP1Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32',
VOP1Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32',
VOP1Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP1Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP1Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP1Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()",
VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()",
VOP1Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16",
VOP1Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)',
VOP1Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)",
VOP1Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)',
VOP1Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)",
VOP1Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif",
VOP1Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif",
VOP1Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif",
VOP1Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif",
VOP1Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)',
VOP1Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif",
VOP1Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)',
VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN",
VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN",
VOP1Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))",
VOP1Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))",
VOP1Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n <= 16'0 then\nreturn 8'0U\nelsif n >= 16'255 then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\ntmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16",
VOP1Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)',
VOP1Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)',
VOP1Op.V_SWAP_B32: 'tmp = D0.b32;\nD0.b32 = S0.b32;\nS0.b32 = tmp',
VOP1Op.V_SWAP_B16: 'tmp = D0.b16;\nD0.b16 = S0.b16;\nS0.b16 = tmp',
VOP1Op.V_PERMLANE64_B32: "declare tmp : 32'B[64];\ndeclare lane : 32'U;\nif WAVE32 then\n// Supported in wave64 ONLY; treated as scalar NOP in wave32\ns_nop(16'0U)\nelse\nfor lane in 0U : 63U do\n// Copy original S0 in case D==S0\ntmp[lane] = VGPR[lane][SRC0.u32]\nendfor;\nfor lane in 0U : 63U do\naltlane = { ~lane[5], lane[4 : 0] };\n// 0<->32, ..., 31<->63\nif EXEC[lane].u1 then\nVGPR[lane][VDST.u32] = tmp[altlane]\nendif\nendfor\nendif",
VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\ntmp = VGPR[laneId][addrd].b32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32;\nVGPR[laneId][addrs].b32 = tmp\ns_mov_b32 m0, ((20 << 16) | 10)\nv_swaprel_b32 v5, v7',
VOP1Op.V_SWAPREL_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\ntmp = VGPR[laneId][addrd].b32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32;\nVGPR[laneId][addrs].b32 = tmp',
VOP1Op.V_NOT_B16: 'D0.u16 = ~S0.u16',
VOP1Op.V_CVT_I32_I16: "D0.i32 = 32'I(signext(S0.i16))",
VOP1Op.V_CVT_U32_U16: "D0 = { 16'0, S0.u16 }",
VOP1Op.V_CVT_F32_FP8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].fp8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].fp8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].fp8)\nelse\nD0.f32 = fp8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].fp8)\nendif",
VOP1Op.V_CVT_F32_BF8: "if OPSEL[1 : 0].u2 == 2'0U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][7 : 0].bf8)\nelsif OPSEL[1 : 0].u2 == 2'2U then\n// Byte select bits are reversed\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][15 : 8].bf8)\nelsif OPSEL[1 : 0].u2 == 2'1U then\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][23 : 16].bf8)\nelse\nD0.f32 = bf8_to_f32(VGPR[laneId][SRC0.u32][31 : 24].bf8)\nendif",
VOP1Op.V_CVT_PK_F32_FP8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = fp8_to_f32(tmp[7 : 0].fp8);\nD0[63 : 32].f32 = fp8_to_f32(tmp[15 : 8].fp8)',
VOP1Op.V_CVT_PK_F32_BF8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)\nSRC0 = First operand for instruction.\nVSRC1 = Second operand for instruction.\nOP = Instruction opcode.\nAll VOPC instructions can alternatively be encoded in the VOP3 format.',
VOP1Op.V_CVT_PK_F32_BF8: 'tmp = OPSEL[0].u1 ? VGPR[laneId][SRC0.u32][31 : 16] : VGPR[laneId][SRC0.u32][15 : 0];\nD0[31 : 0].f32 = bf8_to_f32(tmp[7 : 0].bf8);\nD0[63 : 32].f32 = bf8_to_f32(tmp[15 : 8].bf8)',
}
VOP2Op_PCODE = {
@@ -778,7 +778,7 @@ VOP2Op_PCODE = {
VOP2Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)',
VOP2Op.V_FMAMK_F16: 'D0.f16 = fma(S0.f16, SIMM32.f16, S1.f16)',
VOP2Op.V_FMAAK_F16: 'D0.f16 = fma(S0.f16, S1.f16, SIMM32.f16)',
VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
VOP2Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))",
VOP2Op.V_PK_FMAC_F16: 'D0[31 : 16].f16 = fma(S0[31 : 16].f16, S1[31 : 16].f16, D0[31 : 16].f16);\nD0[15 : 0].f16 = fma(S0[15 : 0].f16, S1[15 : 0].f16, D0[15 : 0].f16)',
}
@@ -985,8 +985,8 @@ VOP3Op_PCODE = {
VOP3Op.V_SUBREV_F16: 'D0.f16 = S1.f16 - S0.f16',
VOP3Op.V_MUL_F16: 'D0.f16 = S0.f16 * S1.f16',
VOP3Op.V_FMAC_F16: 'D0.f16 = fma(S0.f16, S1.f16, D0.f16)',
VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))\nldexp()",
VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOP3Op.V_LDEXP_F16: "D0.f16 = S0.f16 * 16'F(2.0F ** 32'I(S1.i16))",
VOP3Op.V_MOV_B32: 'D0.b32 = S0.b32',
VOP3Op.V_READFIRSTLANE_B32: "declare lane : 32'U;\nif WAVE64 then\n// 64 lanes\nif EXEC == 0x0LL then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b64(EXEC));\n// Lowest active lane\nendif\nelse\n// 32 lanes\nif EXEC_LO.i32 == 0 then\nlane = 0U;\n// Force lane 0 if all lanes are disabled\nelse\nlane = 32'U(s_ff1_i32_b32(EXEC_LO));\n// Lowest active lane\nendif\nendif;\nD0.b32 = VGPR[lane][SRC0.u32]",
VOP3Op.V_CVT_I32_F64: 'D0.i32 = f64_to_i32(S0.f64)',
VOP3Op.V_CVT_F64_I32: 'D0.f64 = i32_to_f64(S0.i32)',
@@ -1017,49 +1017,49 @@ VOP3Op_PCODE = {
VOP3Op.V_CEIL_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 > 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += 1.0F\nendif',
VOP3Op.V_RNDNE_F32: "D0.f32 = floor(S0.f32 + 0.5F);\nif (isEven(64'F(floor(S0.f32))) && (fract(S0.f32) == 0.5F)) then\nD0.f32 -= 1.0F\nendif",
VOP3Op.V_FLOOR_F32: 'D0.f32 = trunc(S0.f32);\nif ((S0.f32 < 0.0F) && (S0.f32 != D0.f32)) then\nD0.f32 += -1.0F\nendif',
VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP3Op.V_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)',
VOP3Op.V_LOG_F32: 'D0.f32 = log2(S0.f32)',
VOP3Op.V_RCP_F32: 'D0.f32 = 1.0F / S0.f32',
VOP3Op.V_RCP_IFLAG_F32: 'D0.f32 = 1.0F / S0.f32;\n// Can only raise integer DIV_BY_ZERO exception',
VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP3Op.V_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)',
VOP3Op.V_RCP_F64: 'D0.f64 = 1.0 / S0.f64',
VOP3Op.V_RSQ_F64: 'D0.f64 = 1.0 / sqrt(S0.f64)',
VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP3Op.V_SQRT_F32: 'D0.f32 = sqrt(S0.f32)',
VOP3Op.V_SQRT_F64: 'D0.f64 = sqrt(S0.f64)',
VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))\nV_SIN_F32(0xff800000) => 0xffc00000 // sin(-INF) = NAN\nV_SIN_F32(0xff7fffff) => 0x00000000 // -MaxFloat, finite\nV_SIN_F32(0x80000000) => 0x80000000 // sin(-0.0) = -0\nV_SIN_F32(0x3e800000) => 0x3f800000 // sin(0.25) = 1\nV_SIN_F32(0x7f800000) => 0xffc00000 // sin(+INF) = NAN",
VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))\nV_COS_F32(0xff800000) => 0xffc00000 // cos(-INF) = NAN\nV_COS_F32(0xff7fffff) => 0x3f800000 // -MaxFloat, finite\nV_COS_F32(0x80000000) => 0x3f800000 // cos(-0.0) = 1\nV_COS_F32(0x3e800000) => 0x00000000 // cos(0.25) = 0\nV_COS_F32(0x7f800000) => 0xffc00000 // cos(+INF) = NAN",
VOP3Op.V_SIN_F32: "D0.f32 = sin(S0.f32 * 32'F(PI * 2.0))",
VOP3Op.V_COS_F32: "D0.f32 = cos(S0.f32 * 32'F(PI * 2.0))",
VOP3Op.V_NOT_B32: 'D0.u32 = ~S0.u32',
VOP3Op.V_BFREV_B32: 'D0.u32[31 : 0] = S0.u32[0 : 31]',
VOP3Op.V_CLZ_I32_U32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from MSB\nif S0.u32[31 - i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP3Op.V_CTZ_I32_B32: "D0.i32 = -1;\n// Set if no ones are found\nfor i in 0 : 31 do\n// Search from LSB\nif S0.u32[i] == 1'1U then\nD0.i32 = i;\nbreak\nendif\nendfor",
VOP3Op.V_CLS_I32: 'D0.i32 = -1;\n// Set if all bits are the same\nfor i in 1 : 31 do\n// Search from MSB\nif S0.i32[31 - i] != S0.i32[31] then\nD0.i32 = i;\nbreak\nendif\nendfor',
VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif\nfrexp()',
VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif\nfrexp()',
VOP3Op.V_FREXP_EXP_I32_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f64) - 1023 + 1\nendif',
VOP3Op.V_FREXP_MANT_F64: 'if ((S0.f64 == +INF) || (S0.f64 == -INF) || isNAN(S0.f64)) then\nD0.f64 = S0.f64\nelse\nD0.f64 = mantissa(S0.f64)\nendif',
VOP3Op.V_FRACT_F64: 'D0.f64 = S0.f64 + -floor(S0.f64)',
VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif\nfrexp()",
VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif\nfrexp()",
VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32\ns_mov_b32 m0, 10\nv_movreld_b32 v5, v7',
VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32\ns_mov_b32 m0, 10\nv_movrels_b32 v5, v7',
VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, 10\nv_movrelsd_b32 v5, v7',
VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32\ns_mov_b32 m0, ((20 << 16) | 10)\nv_movrelsd_2_b32 v5, v7',
VOP3Op.V_FREXP_EXP_I32_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.i32 = 0\nelse\nD0.i32 = exponent(S0.f32) - 127 + 1\nendif",
VOP3Op.V_FREXP_MANT_F32: "if ((64'F(S0.f32) == +INF) || (64'F(S0.f32) == -INF) || isNAN(64'F(S0.f32))) then\nD0.f32 = S0.f32\nelse\nD0.f32 = mantissa(S0.f32)\nendif",
VOP3Op.V_MOVRELD_B32: 'addr = DST.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nVGPR[laneId][addr].b32 = S0.b32',
VOP3Op.V_MOVRELS_B32: 'addr = SRC0.u32;\n// Raw value from instruction\naddr += M0.u32[31 : 0];\nD0.b32 = VGPR[laneId][addr].b32',
VOP3Op.V_MOVRELSD_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[31 : 0];\naddrd += M0.u32[31 : 0];\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32',
VOP3Op.V_MOVRELSD_2_B32: 'addrs = SRC0.u32;\n// Raw value from instruction\naddrd = DST.u32;\n// Raw value from instruction\naddrs += M0.u32[9 : 0].u32;\naddrd += M0.u32[25 : 16].u32;\nVGPR[laneId][addrd].b32 = VGPR[laneId][addrs].b32',
VOP3Op.V_CVT_F16_U16: 'D0.f16 = u16_to_f16(S0.u16)',
VOP3Op.V_CVT_F16_I16: 'D0.f16 = i16_to_f16(S0.i16)',
VOP3Op.V_CVT_U16_F16: 'D0.u16 = f16_to_u16(S0.f16)',
VOP3Op.V_CVT_I16_F16: 'D0.i16 = f16_to_i16(S0.f16)',
VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF',
VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF',
VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif\nfrexp()",
VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif\nfrexp()",
VOP3Op.V_RCP_F16: "D0.f16 = 16'1.0 / S0.f16",
VOP3Op.V_SQRT_F16: 'D0.f16 = sqrt(S0.f16)',
VOP3Op.V_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16)",
VOP3Op.V_LOG_F16: 'D0.f16 = log2(S0.f16)',
VOP3Op.V_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16)",
VOP3Op.V_FREXP_MANT_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.f16 = S0.f16\nelse\nD0.f16 = mantissa(S0.f16)\nendif",
VOP3Op.V_FREXP_EXP_I16_F16: "if ((64'F(S0.f16) == +INF) || (64'F(S0.f16) == -INF) || isNAN(64'F(S0.f16))) then\nD0.i16 = 16'0\nelse\nD0.i16 = 16'I(exponent(S0.f16) - 15 + 1)\nendif",
VOP3Op.V_FLOOR_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 < 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += -16'1.0\nendif",
VOP3Op.V_CEIL_F16: "D0.f16 = trunc(S0.f16);\nif ((S0.f16 > 16'0.0) && (S0.f16 != D0.f16)) then\nD0.f16 += 16'1.0\nendif",
VOP3Op.V_TRUNC_F16: 'D0.f16 = trunc(S0.f16)',
VOP3Op.V_RNDNE_F16: "D0.f16 = floor(S0.f16 + 16'0.5);\nif (isEven(64'F(floor(S0.f16))) && (fract(S0.f16) == 16'0.5)) then\nD0.f16 -= 16'1.0\nendif",
VOP3Op.V_FRACT_F16: 'D0.f16 = S0.f16 + -floor(S0.f16)',
VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))\nV_SIN_F16(0xfc00) => 0xfe00 // sin(-INF) = NAN\nV_SIN_F16(0xfbff) => 0x0000 // Most negative finite FP16\nV_SIN_F16(0x8000) => 0x8000 // sin(-0.0) = -0\nV_SIN_F16(0x3400) => 0x3c00 // sin(0.25) = 1\nV_SIN_F16(0x7bff) => 0x0000 // Most positive finite FP16\nV_SIN_F16(0x7c00) => 0xfe00 // sin(+INF) = NAN",
VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))\nV_COS_F16(0xfc00) => 0xfe00 // cos(-INF) = NAN\nV_COS_F16(0xfbff) => 0x3c00 // Most negative finite FP16\nV_COS_F16(0x8000) => 0x3c00 // cos(-0.0) = 1\nV_COS_F16(0x3400) => 0x0000 // cos(0.25) = 0\nV_COS_F16(0x7bff) => 0x3c00 // Most positive finite FP16\nV_COS_F16(0x7c00) => 0xfe00 // cos(+INF) = NAN",
VOP3Op.V_SIN_F16: "D0.f16 = sin(S0.f16 * 16'F(PI * 2.0))",
VOP3Op.V_COS_F16: "D0.f16 = cos(S0.f16 * 16'F(PI * 2.0))",
VOP3Op.V_SAT_PK_U8_I16: "SAT8 = lambda(n) (\nif n <= 16'0 then\nreturn 8'0U\nelsif n >= 16'255 then\nreturn 8'255U\nelse\nreturn n[7 : 0].u8\nendif);\ntmp = 16'0;\ntmp[7 : 0].u8 = SAT8(S0[15 : 0].i16);\ntmp[15 : 8].u8 = SAT8(S0[31 : 16].i16);\nD0.b16 = tmp.b16",
VOP3Op.V_CVT_NORM_I16_F16: 'D0.i16 = f16_to_snorm(S0.f16)',
VOP3Op.V_CVT_NORM_U16_F16: 'D0.u16 = f16_to_unorm(S0.f16)',
@@ -1136,8 +1136,8 @@ VOP3Op_PCODE = {
VOP3Op.V_OR3_B32: 'D0.u32 = (S0.u32 | S1.u32 | S2.u32)',
VOP3Op.V_MAD_U32_U16: "D0.u32 = 32'U(S0.u16) * 32'U(S1.u16) + S2.u32",
VOP3Op.V_MAD_I32_I16: "D0.i32 = 32'I(S0.i16) * 32'I(S1.i16) + S2.i32",
VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor\nv_mov_b32 s0, 0x87654321;\nv_mov_b32 s1, 0x0fedcba9;\nv_permlane16_b32 v1, v0, s0, s1;\n// ROW 0:\n// v1.lane[0] <- v0.lane[1]\n// v1.lane[1] <- v0.lane[2]\n// ...\n// v1.lane[14] <- v0.lane[15]\n// v1.lane[15] <- v0.lane[0]\n//\n// ROW 1:\n// v1.lane[16] <- v0.lane[17]\n// v1.lane[17] <- v0.lane[18]\n// ...\n// v1.lane[30] <- v0.lane[31]\n// v1.lane[31] <- v0.lane[16]",
VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor\n// Note for this to work, source and destination VGPRs must be different.\n// For this rotation, lane 15 gets data from lane 16, lane 31 gets data from lane 0.\n// These are the only two lanes that need to use v_permlanex16_b32.\n// Enable only the threads that get data from their own row.\nv_mov_b32 exec_lo, 0x7fff7fff; // Lanes getting data from their own row\nv_mov_b32 s0, 0x87654321;\nv_mov_b32 s1, 0x0fedcba9;\nv_permlane16_b32 v1, v0, s0, s1 fi; // FI bit needed for lanes 14 and 30\n// ROW 0:\n// v1.lane[0] <- v0.lane[1]\n// v1.lane[1] <- v0.lane[2]\n// ...\n// v1.lane[14] <- v0.lane[15] (needs FI to read)\n// v1.lane[15] unset\n//\n// ROW 1:\n// v1.lane[16] <- v0.lane[17]\n// v1.lane[17] <- v0.lane[18]\n// ...\n// v1.lane[30] <- v0.lane[31] (needs FI to read)\n// v1.lane[31] unset\n// Enable only the threads that get data from the other row.\nv_mov_b32 exec_lo, 0x80008000; // Lanes getting data from the other row\nv_permlanex16_b32 v1, v0, s0, s1 fi; // FI bit needed for lanes 15 and 31\n// v1.lane[15] <- v0.lane[16]\n// v1.lane[31] <- v0.lane[0]",
VOP3Op.V_PERMLANE16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle within each row\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(row * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor",
VOP3Op.V_PERMLANEX16_B32: "declare tmp : 32'B[64];\nlanesel = { S2.u32, S1.u32 };\n// Concatenate lane select bits\nfor i in 0 : WAVE32 ? 31 : 63 do\n// Copy original S0 in case D==S0\ntmp[i] = VGPR[i][SRC0.u32]\nendfor;\nfor row in 0 : WAVE32 ? 1 : 3 do\n// Implement arbitrary swizzle across two rows\naltrow = { row[1], ~row[0] };\n// 1<->0, 3<->2\nfor i in 0 : 15 do\nif EXEC[row * 16 + i].u1 then\nVGPR[row * 16 + i][VDST.u32] = tmp[64'B(altrow.i32 * 16) + lanesel[i * 4 + 3 : i * 4]]\nendif\nendfor\nendfor",
VOP3Op.V_CNDMASK_B16: 'D0.u16 = VCC.u64[laneId] ? S1.u16 : S0.u16',
VOP3Op.V_MAXMIN_U32: 'D0.u32 = v_min_u32(v_max_u32(S0.u32, S1.u32), S2.u32)',
VOP3Op.V_MINMAX_U32: 'D0.u32 = v_max_u32(v_min_u32(S0.u32, S1.u32), S2.u32)',
@@ -1153,16 +1153,16 @@ VOP3Op_PCODE = {
VOP3Op.V_MAXIMUMMINIMUM_F32: 'D0.f32 = v_minimum_f32(v_maximum_f32(S0.f32, S1.f32), S2.f32)',
VOP3Op.V_MINIMUMMAXIMUM_F16: 'D0.f16 = v_maximum_f16(v_minimum_f16(S0.f16, S1.f16), S2.f16)',
VOP3Op.V_MAXIMUMMINIMUM_F16: 'D0.f16 = v_minimum_f16(v_maximum_f16(S0.f16, S1.f16), S2.f16)',
VOP3Op.V_S_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)\nV_EXP_F32(0xff800000) => 0x00000000 // exp(-INF) = 0\nV_EXP_F32(0x80000000) => 0x3f800000 // exp(-0.0) = 1\nV_EXP_F32(0x7f800000) => 0x7f800000 // exp(+INF) = +INF',
VOP3Op.V_S_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16);\nD0[31 : 16] = 16'0x0\nV_EXP_F16(0xfc00) => 0x0000 // exp(-INF) = 0\nV_EXP_F16(0x8000) => 0x3c00 // exp(-0.0) = 1\nV_EXP_F16(0x7c00) => 0x7c00 // exp(+INF) = +INF",
VOP3Op.V_S_LOG_F32: 'D0.f32 = log2(S0.f32)\nV_LOG_F32(0xff800000) => 0xffc00000 // log(-INF) = NAN\nV_LOG_F32(0xbf800000) => 0xffc00000 // log(-1.0) = NAN\nV_LOG_F32(0x80000000) => 0xff800000 // log(-0.0) = -INF\nV_LOG_F32(0x00000000) => 0xff800000 // log(+0.0) = -INF\nV_LOG_F32(0x3f800000) => 0x00000000 // log(+1.0) = 0\nV_LOG_F32(0x7f800000) => 0x7f800000 // log(+INF) = +INF',
VOP3Op.V_S_LOG_F16: "D0.f16 = log2(S0.f16);\nD0[31 : 16] = 16'0x0\nV_LOG_F16(0xfc00) => 0xfe00 // log(-INF) = NAN\nV_LOG_F16(0xbc00) => 0xfe00 // log(-1.0) = NAN\nV_LOG_F16(0x8000) => 0xfc00 // log(-0.0) = -INF\nV_LOG_F16(0x0000) => 0xfc00 // log(+0.0) = -INF\nV_LOG_F16(0x3c00) => 0x0000 // log(+1.0) = 0\nV_LOG_F16(0x7c00) => 0x7c00 // log(+INF) = +INF",
VOP3Op.V_S_RCP_F32: 'D0.f32 = 1.0F / S0.f32\nV_RCP_F32(0xff800000) => 0x80000000 // rcp(-INF) = -0\nV_RCP_F32(0xc0000000) => 0xbf000000 // rcp(-2.0) = -0.5\nV_RCP_F32(0x80000000) => 0xff800000 // rcp(-0.0) = -INF\nV_RCP_F32(0x00000000) => 0x7f800000 // rcp(+0.0) = +INF\nV_RCP_F32(0x7f800000) => 0x00000000 // rcp(+INF) = +0',
VOP3Op.V_S_RCP_F16: "D0.f16 = 16'1.0 / S0.f16;\nD0[31 : 16] = 16'0x0\nV_RCP_F16(0xfc00) => 0x8000 // rcp(-INF) = -0\nV_RCP_F16(0xc000) => 0xb800 // rcp(-2.0) = -0.5\nV_RCP_F16(0x8000) => 0xfc00 // rcp(-0.0) = -INF\nV_RCP_F16(0x0000) => 0x7c00 // rcp(+0.0) = +INF\nV_RCP_F16(0x7c00) => 0x0000 // rcp(+INF) = +0",
VOP3Op.V_S_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)\nV_RSQ_F32(0xff800000) => 0xffc00000 // rsq(-INF) = NAN\nV_RSQ_F32(0x80000000) => 0xff800000 // rsq(-0.0) = -INF\nV_RSQ_F32(0x00000000) => 0x7f800000 // rsq(+0.0) = +INF\nV_RSQ_F32(0x40800000) => 0x3f000000 // rsq(+4.0) = +0.5\nV_RSQ_F32(0x7f800000) => 0x00000000 // rsq(+INF) = +0',
VOP3Op.V_S_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16);\nD0[31 : 16] = 16'0x0\nV_RSQ_F16(0xfc00) => 0xfe00 // rsq(-INF) = NAN\nV_RSQ_F16(0x8000) => 0xfc00 // rsq(-0.0) = -INF\nV_RSQ_F16(0x0000) => 0x7c00 // rsq(+0.0) = +INF\nV_RSQ_F16(0x4400) => 0x3800 // rsq(+4.0) = +0.5\nV_RSQ_F16(0x7c00) => 0x0000 // rsq(+INF) = +0",
VOP3Op.V_S_SQRT_F32: 'D0.f32 = sqrt(S0.f32)\nV_SQRT_F32(0xff800000) => 0xffc00000 // sqrt(-INF) = NAN\nV_SQRT_F32(0x80000000) => 0x80000000 // sqrt(-0.0) = -0\nV_SQRT_F32(0x00000000) => 0x00000000 // sqrt(+0.0) = +0\nV_SQRT_F32(0x40800000) => 0x40000000 // sqrt(+4.0) = +2.0\nV_SQRT_F32(0x7f800000) => 0x7f800000 // sqrt(+INF) = +INF',
VOP3Op.V_S_SQRT_F16: "D0.f16 = sqrt(S0.f16);\nD0[31 : 16] = 16'0x0\nV_SQRT_F16(0xfc00) => 0xfe00 // sqrt(-INF) = NAN\nV_SQRT_F16(0x8000) => 0x8000 // sqrt(-0.0) = -0\nV_SQRT_F16(0x0000) => 0x0000 // sqrt(+0.0) = +0\nV_SQRT_F16(0x4400) => 0x4000 // sqrt(+4.0) = +2.0\nV_SQRT_F16(0x7c00) => 0x7c00 // sqrt(+INF) = +INF",
VOP3Op.V_S_EXP_F32: 'D0.f32 = pow(2.0F, S0.f32)',
VOP3Op.V_S_EXP_F16: "D0.f16 = pow(16'2.0, S0.f16);\nD0[31 : 16] = 16'0x0",
VOP3Op.V_S_LOG_F32: 'D0.f32 = log2(S0.f32)',
VOP3Op.V_S_LOG_F16: "D0.f16 = log2(S0.f16);\nD0[31 : 16] = 16'0x0",
VOP3Op.V_S_RCP_F32: 'D0.f32 = 1.0F / S0.f32',
VOP3Op.V_S_RCP_F16: "D0.f16 = 16'1.0 / S0.f16;\nD0[31 : 16] = 16'0x0",
VOP3Op.V_S_RSQ_F32: 'D0.f32 = 1.0F / sqrt(S0.f32)',
VOP3Op.V_S_RSQ_F16: "D0.f16 = 16'1.0 / sqrt(S0.f16);\nD0[31 : 16] = 16'0x0",
VOP3Op.V_S_SQRT_F32: 'D0.f32 = sqrt(S0.f32)',
VOP3Op.V_S_SQRT_F16: "D0.f16 = sqrt(S0.f16);\nD0[31 : 16] = 16'0x0",
VOP3Op.V_ADD_NC_U16: 'D0.u16 = S0.u16 + S1.u16',
VOP3Op.V_SUB_NC_U16: 'D0.u16 = S0.u16 - S1.u16',
VOP3Op.V_MUL_LO_U16: 'D0.u16 = S0.u16 * S1.u16',
@@ -1179,18 +1179,18 @@ VOP3Op_PCODE = {
VOP3Op.V_PACK_B32_F16: 'D0[31 : 16].f16 = S1.f16;\nD0[15 : 0].f16 = S0.f16',
VOP3Op.V_CVT_PK_NORM_I16_F16: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f16_to_snorm(S0.f16);\ntmp[31 : 16].i16 = f16_to_snorm(S1.f16);\nD0 = tmp.b32",
VOP3Op.V_CVT_PK_NORM_U16_F16: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f16_to_unorm(S0.f16);\ntmp[31 : 16].u16 = f16_to_unorm(S1.f16);\nD0 = tmp.b32",
VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32\nldexp()',
VOP3Op.V_LDEXP_F32: 'D0.f32 = S0.f32 * 2.0F ** S1.i32',
VOP3Op.V_BFM_B32: 'D0.u32 = (((1U << S0[4 : 0].u32) - 1U) << S1[4 : 0].u32)',
VOP3Op.V_BCNT_U32_B32: "tmp = S1.u32;\nfor i in 0 : 31 do\ntmp += S0[i].u32;\n// count i'th bit\nendfor;\nD0.u32 = tmp",
VOP3Op.V_MBCNT_LO_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[31 : 0].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp",
VOP3Op.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp\nv_mbcnt_lo_u32_b32 v0, -1, 0\nv_mbcnt_hi_u32_b32 v0, -1, v0\n// v0 now contains laneId\nv_mbcnt_lo_u32_b32 v0, vcc_lo, 0\nv_mbcnt_hi_u32_b32 v0, vcc_hi, v0 // Note vcc_hi is passed in for second instruction\n// v0 now contains position among lanes with VCC=1",
VOP3Op.V_MBCNT_HI_U32_B32: "ThreadMask = (1LL << laneId.u32) - 1LL;\nMaskedValue = (S0.u32 & ThreadMask[63 : 32].u32);\ntmp = S1.u32;\nfor i in 0 : 31 do\ntmp += MaskedValue[i] == 1'1U ? 1U : 0U\nendfor;\nD0.u32 = tmp",
VOP3Op.V_CVT_PK_NORM_I16_F32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = f32_to_snorm(S0.f32);\ntmp[31 : 16].i16 = f32_to_snorm(S1.f32);\nD0 = tmp.b32",
VOP3Op.V_CVT_PK_NORM_U16_F32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = f32_to_unorm(S0.f32);\ntmp[31 : 16].u16 = f32_to_unorm(S1.f32);\nD0 = tmp.b32",
VOP3Op.V_CVT_PK_U16_U32: "declare tmp : 32'B;\ntmp[15 : 0].u16 = u32_to_u16(S0.u32);\ntmp[31 : 16].u16 = u32_to_u16(S1.u32);\nD0 = tmp.b32",
VOP3Op.V_CVT_PK_I16_I32: "declare tmp : 32'B;\ntmp[15 : 0].i16 = i32_to_i16(S0.i32);\ntmp[31 : 16].i16 = i32_to_i16(S1.i32);\nD0 = tmp.b32",
VOP3Op.V_SUB_NC_I32: 'D0.i32 = S0.i32 - S1.i32',
VOP3Op.V_ADD_NC_I32: 'D0.i32 = S0.i32 + S1.i32',
VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32\nldexp()',
VOP3Op.V_LDEXP_F64: 'D0.f64 = S0.f64 * 2.0 ** S1.i32',
VOP3Op.V_MUL_LO_U32: 'D0.u32 = S0.u32 * S1.u32',
VOP3Op.V_MUL_HI_U32: "D0.u32 = 32'U((64'U(S0.u32) * 64'U(S1.u32)) >> 32U)",
VOP3Op.V_MUL_HI_I32: "D0.i32 = 32'I((64'I(S0.i32) * 64'I(S1.i32)) >> 32U)",
@@ -1245,35 +1245,35 @@ VOP3POp_PCODE = {
VOP3POp.V_PK_MAX_NUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_max_num_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_max_num_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
VOP3POp.V_PK_MINIMUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_minimum_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_minimum_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
VOP3POp.V_PK_MAXIMUM_F16: "declare tmp : 32'B;\ntmp[15 : 0].f16 = v_maximum_f16(S0[15 : 0].f16, S1[15 : 0].f16);\ntmp[31 : 16].f16 = v_maximum_f16(S0[31 : 16].f16, S1[31 : 16].f16);\nD0.b32 = tmp",
VOP3POp.V_FMA_MIX_F32: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])",
VOP3POp.V_FMA_MIXLO_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
VOP3POp.V_FMA_MIXHI_F16: "{ OPSEL_HI[i], OPSEL[i] }\n0=src[31:0]\n1=src[31:0]\n2=src[15:0]\n3=src[31:16]\ndeclare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
VOP3POp.V_FMA_MIX_F32: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 0].f32 = fma(in[0], in[1], in[2])",
VOP3POp.V_FMA_MIXLO_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[15 : 0].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
VOP3POp.V_FMA_MIXHI_F16: "declare in : 32'F[3];\ndeclare S : 32'B[3];\nfor i in 0 : 2 do\nif !OPSEL_HI.u3[i] then\nin[i] = S[i].f32\nelsif OPSEL.u3[i] then\nin[i] = f16_to_f32(S[i][31 : 16].f16)\nelse\nin[i] = f16_to_f32(S[i][15 : 0].f16)\nendif\nendfor;\nD0[31 : 16].f16 = f32_to_f16(fma(in[0], in[1], in[2]))",
VOP3POp.V_DOT4_F32_FP8_BF8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].fp8) * 32'F(S1[7 : 0].bf8);\ntmp += 32'F(S0[15 : 8].fp8) * 32'F(S1[15 : 8].bf8);\ntmp += 32'F(S0[23 : 16].fp8) * 32'F(S1[23 : 16].bf8);\ntmp += 32'F(S0[31 : 24].fp8) * 32'F(S1[31 : 24].bf8);\nD0.f32 = tmp",
VOP3POp.V_DOT4_F32_BF8_FP8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].bf8) * 32'F(S1[7 : 0].fp8);\ntmp += 32'F(S0[15 : 8].bf8) * 32'F(S1[15 : 8].fp8);\ntmp += 32'F(S0[23 : 16].bf8) * 32'F(S1[23 : 16].fp8);\ntmp += 32'F(S0[31 : 24].bf8) * 32'F(S1[31 : 24].fp8);\nD0.f32 = tmp",
VOP3POp.V_DOT4_F32_FP8_FP8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].fp8) * 32'F(S1[7 : 0].fp8);\ntmp += 32'F(S0[15 : 8].fp8) * 32'F(S1[15 : 8].fp8);\ntmp += 32'F(S0[23 : 16].fp8) * 32'F(S1[23 : 16].fp8);\ntmp += 32'F(S0[31 : 24].fp8) * 32'F(S1[31 : 24].fp8);\nD0.f32 = tmp",
VOP3POp.V_DOT4_F32_BF8_BF8: "tmp = S2.f32;\ntmp += 32'F(S0[7 : 0].bf8) * 32'F(S1[7 : 0].bf8);\ntmp += 32'F(S0[15 : 8].bf8) * 32'F(S1[15 : 8].bf8);\ntmp += 32'F(S0[23 : 16].bf8) * 32'F(S1[23 : 16].bf8);\ntmp += 32'F(S0[31 : 24].bf8) * 32'F(S1[31 : 24].bf8);\nD0.f32 = tmp",
VOP3POp.V_WMMA_F32_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F32_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F16_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(16x16) + S2.f16(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(16x16) + S2.bf16(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_I32_16X16X16_IU8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(16x16) + S2.i32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_I32_16X16X16_IU4: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(16x16) + S2.i32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F32_16X16X16_FP8_FP8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.fp8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F32_16X16X16_FP8_BF8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.bf8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F32_16X16X16_BF8_FP8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.fp8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F32_16X16X16_BF8_BF8: 'D = A (16x16) * B (16x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.bf8(16x16) + S2.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_I32_16X16X32_IU4: 'D = A (16x32) * B (32x16) + C (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x32) * S1.iu4(32x16) + S2.i32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_F32_16X16X32_F16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.f16(16x16) * S1.f16(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_F32_16X16X32_BF16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf16(16x16) * S1.bf16(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_F16_16X16X32_F16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f16(16x16) = S0.f16(16x16) * S1.f16(32x16, index set from S2) + D0.f16(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_BF16_16X16X32_BF16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.bf16(16x16) = S0.bf16(16x16) * S1.bf16(32x16, index set from S2) + D0.bf16(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_I32_16X16X32_IU8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu8(16x16) * S1.iu8(32x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_I32_16X16X32_IU4: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x16) * S1.iu4(32x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_I32_16X16X64_IU4: 'D = A (sparse 16x64) * B (64x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.i32(16x16) = S0.iu4(16x32) * S1.iu4(64x16, index set from S2) + D0.i32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_F32_16X16X32_FP8_FP8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.fp8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_F32_16X16X32_FP8_BF8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.fp8(16x16) * S1.bf8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_F32_16X16X32_BF8_FP8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.fp8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_SWMMAC_F32_16X16X32_BF8_BF8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)\nsaved_exec = EXEC;\nEXEC = 64\'B(-1);\neval "D0.f32(16x16) = S0.bf8(16x16) * S1.bf8(32x16, index set from S2) + D0.f32(16x16)";\nEXEC = saved_exec',
VOP3POp.V_WMMA_F32_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_F32_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_F16_16X16X16_F16: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_BF16_16X16X16_BF16: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_I32_16X16X16_IU8: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_I32_16X16X16_IU4: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_F32_16X16X16_FP8_FP8: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_F32_16X16X16_FP8_BF8: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_F32_16X16X16_BF8_FP8: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_F32_16X16X16_BF8_BF8: 'D = A (16x16) * B (16x16) + C (16x16)',
VOP3POp.V_WMMA_I32_16X16X32_IU4: 'D = A (16x32) * B (32x16) + C (16x16)',
VOP3POp.V_SWMMAC_F32_16X16X32_F16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_F32_16X16X32_BF16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_F16_16X16X32_F16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_BF16_16X16X32_BF16: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_I32_16X16X32_IU8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_I32_16X16X32_IU4: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_I32_16X16X64_IU4: 'D = A (sparse 16x64) * B (64x16) + D (16x16)',
VOP3POp.V_SWMMAC_F32_16X16X32_FP8_FP8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_F32_16X16X32_FP8_BF8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_F32_16X16X32_BF8_FP8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
VOP3POp.V_SWMMAC_F32_16X16X32_BF8_BF8: 'D = A (sparse 16x32) * B (32x16) + D (16x16)',
}
VOP3SDOp_PCODE = {
@@ -1463,7 +1463,7 @@ VOPDOp_PCODE = {
VOPDOp.V_DUAL_SUB_F32: 'D0.f32 = S0.f32 - S1.f32',
VOPDOp.V_DUAL_SUBREV_F32: 'D0.f32 = S1.f32 - S0.f32',
VOPDOp.V_DUAL_MUL_DX9_ZERO_F32: "if ((64'F(S0.f32) == 0.0) || (64'F(S1.f32) == 0.0)) then\n// DX9 rules, 0.0 * x = 0.0\nD0.f32 = 0.0F\nelse\nD0.f32 = S0.f32 * S1.f32\nendif",
VOPDOp.V_DUAL_MOV_B32: 'D0.b32 = S0.b32\nv_mov_b32 v0, v1 // Move into v0 from v1\nv_mov_b32 v0, -v1 // Set v0 to the negation of v1\nv_mov_b32 v0, abs(v1) // Set v0 to the absolute value of v1',
VOPDOp.V_DUAL_MOV_B32: 'D0.b32 = S0.b32',
VOPDOp.V_DUAL_CNDMASK_B32: 'D0.u32 = VCC.u64[laneId] ? S1.u32 : S0.u32',
VOPDOp.V_DUAL_MAX_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 > S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && !sign(S0.f32) &&\nsign(S1.f32))) then\n// NOTE: +0>-0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif",
VOPDOp.V_DUAL_MIN_NUM_F32: "if (isSignalNAN(64'F(S0.f32)) || isSignalNAN(64'F(S1.f32))) then\nTRAPSTS.INVALID = 1\nendif;\nif (isNAN(64'F(S0.f32)) && isNAN(64'F(S1.f32))) then\nD0.f32 = 32'F(cvtToQuietNAN(64'F(S0.f32)))\nelsif isNAN(64'F(S0.f32)) then\nD0.f32 = S1.f32\nelsif isNAN(64'F(S1.f32)) then\nD0.f32 = S0.f32\nelsif ((S0.f32 < S1.f32) || ((abs(S0.f32) == 0.0F) && (abs(S1.f32) == 0.0F) && sign(S0.f32) &&\n!sign(S1.f32))) then\n// NOTE: -0<+0 is TRUE in this comparison\nD0.f32 = S0.f32\nelse\nD0.f32 = S1.f32\nendif",

View File

@@ -369,7 +369,7 @@ def decode_program(data: bytes) -> dict[int, Inst]:
if isinstance(inst, SOPP) and inst.op == SOPPOp.S_CODE_END: break
elif isinstance(inst, SOPP) and inst.op == SOPPOp.S_ENDPGM: inst._dispatch = dispatch_endpgm
elif isinstance(inst, SOPP) and inst.op == SOPPOp.S_BARRIER: inst._dispatch = dispatch_barrier
elif isinstance(inst, SOPP) and inst.op in (SOPPOp.S_CLAUSE, SOPPOp.S_WAITCNT, SOPPOp.S_WAITCNT_DEPCTR, SOPPOp.S_SENDMSG, SOPPOp.S_SET_INST_PREFETCH_DISTANCE): inst._dispatch = dispatch_nop
elif isinstance(inst, SOPP) and inst.op in (SOPPOp.S_CLAUSE, SOPPOp.S_WAITCNT, SOPPOp.S_WAITCNT_DEPCTR, SOPPOp.S_SENDMSG, SOPPOp.S_SET_INST_PREFETCH_DISTANCE, SOPPOp.S_DELAY_ALU): inst._dispatch = dispatch_nop
elif isinstance(inst, (SOP1, SOP2, SOPC, SOPK, SOPP, SMEM)): inst._dispatch = exec_scalar
elif isinstance(inst, VOP1) and inst.op == VOP1Op.V_NOP: inst._dispatch = dispatch_nop
elif isinstance(inst, VOP3P) and 'WMMA' in inst.op_name: inst._dispatch = dispatch_wmma

View File

@@ -163,15 +163,24 @@ def extract_pcode(pages: list[list[tuple[float, float, str, str]]], enums: dict[
next_page, next_y = all_instructions[i + 1][0], all_instructions[i + 1][1]
else:
next_page, next_y = page_idx, 0
# Collect F6 text from current position to next instruction
# Collect F6 text from current position to next instruction (pseudocode is at x ≈ 69)
lines = []
for p in range(page_idx, next_page + 1):
start_y = y if p == page_idx else 800
end_y = next_y if p == next_page else 0
lines.extend((p, y2, t) for x, y2, t, f in pages[p] if f in ('/F6.0', '/F7.0') and end_y < y2 < start_y)
lines.extend((p, y2, t) for x, y2, t, f in pages[p] if f in ('/F6.0', '/F7.0') and end_y < y2 < start_y and 60 < x < 80)
if lines:
# Sort by page first, then by y descending within each page (higher y = earlier text in PDF)
pcode_lines = [t.replace('Ê', '').strip() for _, _, t in sorted(lines, key=lambda x: (x[0], -x[1]))]
sorted_lines = sorted(lines, key=lambda x: (x[0], -x[1]))
# Stop at large Y gaps (>30) - indicates section break (Notes, examples, etc)
filtered = [sorted_lines[0]]
for j in range(1, len(sorted_lines)):
prev_page, prev_y, _ = sorted_lines[j-1]
curr_page, curr_y, _ = sorted_lines[j]
if curr_page == prev_page and prev_y - curr_y > 30: break
if curr_page != prev_page and prev_y > 60 and curr_y < 730: break # examples spilled to next page (not at very top)
filtered.append(sorted_lines[j])
pcode_lines = [t.replace('Ê', '').strip() for _, _, t in filtered]
if pcode_lines: pcode[(name, opcode)] = '\n'.join(pcode_lines)
return pcode

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3
"""Test pdf.py PDF parser and enum generation."""
import unittest, tempfile, importlib.util
from extra.assembly.amd.pdf import extract, extract_tables, extract_enums, write_enums, PDF_URLS
from extra.assembly.amd.pdf import extract, extract_tables, extract_enums, extract_pcode, write_enums, PDF_URLS
EXPECTED = {
"rdna3": {"pages": 655, "tables": 115, "sop2_ops": 67, "sop2_first": "S_ADD_U32"},
@@ -15,6 +15,7 @@ class TestPDF2(unittest.TestCase):
cls.data = {name: extract(url) for name, url in PDF_URLS.items()}
cls.tables = {name: extract_tables(pages) for name, pages in cls.data.items()}
cls.enums = {name: extract_enums(cls.tables[name]) for name in PDF_URLS}
cls.pcode = {name: extract_pcode(cls.data[name], cls.enums[name]) for name in PDF_URLS}
def test_page_counts(self):
for name, exp in EXPECTED.items():
@@ -46,5 +47,25 @@ class TestPDF2(unittest.TestCase):
if attr.endswith('Op'):
self.assertGreaterEqual(len(getattr(mod, attr)), 2, f"{name} {attr} has too few ops")
def test_pcode_rdna3_tricky(self):
    """Test specific pseudocode patterns that are tricky to extract correctly."""
    pcode = self.pcode['rdna3']
    # Table of (op_name, opcode) -> exact expected pseudocode text.
    expected = {
        # BUFFER_ATOMIC_MAX_U64: should have 4 statements (not truncated)
        ('BUFFER_ATOMIC_MAX_U64', 72):
            'tmp = MEM[ADDR].u64;\nsrc = DATA.u64;\nMEM[ADDR].u64 = src >= tmp ? src : tmp;\nRETURN_DATA.u64 = tmp',
        # GLOBAL_STORE_B128: should have 4 MEM stores (not truncated)
        ('GLOBAL_STORE_B128', 29):
            'MEM[ADDR].b32 = VDATA[31 : 0];\nMEM[ADDR + 4U].b32 = VDATA[63 : 32];\nMEM[ADDR + 8U].b32 = VDATA[95 : 64];\nMEM[ADDR + 12U].b32 = VDATA[127 : 96]',
        # S_CMOVK_I32: should have full if/endif block
        ('S_CMOVK_I32', 2):
            "if SCC then\nD0.i32 = 32'I(signext(SIMM16.i16))\nendif",
    }
    for key, want in expected.items():
        self.assertEqual(pcode[key], want)
def test_pcode_no_examples(self):
    """Pseudocode should not contain example lines with '=>'.

    The PDF lists test-vector examples (e.g. "S_CTZ_I32_B32(0xaaaaaaaa) => 1")
    after some instructions; extract_pcode must drop them, so no extracted
    pseudocode string may contain the '=>' marker.
    """
    for name in PDF_URLS:
        # `_opcode` is unused here: the failure message only needs the op name.
        for (op_name, _opcode), code in self.pcode[name].items():
            self.assertNotIn('=>', code, f"{name} {op_name} contains example line with '=>'")
# Run the unittest CLI entry point when executed as a script.
if __name__ == "__main__":
    unittest.main()