# SLA specification file for Intel x86

@ifdef IA64
@define SIZE "8"
@define STACKPTR "RSP"
@else
@define SIZE "4"
@define STACKPTR "ESP"
@endif

define endian=little;

define space ram type=ram_space size=$(SIZE) default;
define space register type=register_space size=4;

# General purpose registers

@ifdef IA64
define register offset=0 size=8 [ RAX RCX RDX RBX RSP RBP RSI RDI ];
define register offset=0 size=4 [ EAX _ ECX _ EDX _ EBX _ ESP _ EBP _ ESI _ EDI ];
define register offset=0 size=2 [ AX _ _ _ CX _ _ _ DX _ _ _ BX _ _ _ SP _ _ _ BP _ _ _ SI _ _ _ DI ];
define register offset=0 size=1 [ AL AH _ _ _ _ _ _ CL CH _ _ _ _ _ _ DL DH _ _ _ _ _ _ BL BH _ _ _ _ _ _ SPL _ _ _ _ _ _ _ BPL _ _ _ _ _ _ _ SIL _ _ _ _ _ _ _ DIL ];

define register offset=0x80 size=8 [ R8 R9 R10 R11 R12 R13 R14 R15 ];
define register offset=0x80 size=4 [ R8D _ R9D _ R10D _ R11D _ R12D _ R13D _ R14D _ R15D _ ];
define register offset=0x80 size=2 [ R8W _ _ _ R9W _ _ _ R10W _ _ _ R11W _ _ _ R12W _ _ _ R13W _ _ _ R14W _ _ _ R15W _ _ _ ];
define register offset=0x80 size=1 [ R8B _ _ _ _ _ _ _ R9B _ _ _ _ _ _ _ R10B _ _ _ _ _ _ _ R11B _ _ _ _ _ _ _ R12B _ _ _ _ _ _ _ R13B _ _ _ _ _ _ _ R14B _ _ _ _ _ _ _ R15B _ _ _ _ _ _ _ ];
@else
define register offset=0 size=4 [ EAX ECX EDX EBX ESP EBP ESI EDI ];
define register offset=0 size=2 [ AX _ CX _ DX _ BX _ SP _ BP _ SI _ DI ];
define register offset=0 size=1 [ AL AH _ _ CL CH _ _ DL DH _ _ BL BH ];
@endif
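
# Illustrative example: because this space overlays smaller registers on the
# low bytes of larger ones (little-endian), at offset 0 the defines above give
# EAX = bytes 0-3, AX = bytes 0-1, AL = byte 0, and AH = byte 1, so writing AH
# changes bits 8..15 of EAX and leaves AL untouched.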

# Segment registers
define register offset=0x100 size=2 [ ES CS SS DS FS GS ];
define register offset=0x110 size=$(SIZE) [ FS_OFFSET GS_OFFSET ];

# Flags
define register offset=0x200 size=1 [ CF F1 PF F3 AF F5 ZF SF
                                      TF IF DF OF IOPL NT F15
                                      RF VM AC VIF VIP ID ];
@ifdef IA64
define register offset=0x280 size=8 [ rflags RIP ];
define register offset=0x280 size=4 [ eflags _ EIP _ ];
define register offset=0x280 size=2 [ flags _ _ _ IP _ _ _ ];
@else
define register offset=0x280 size=4 [ eflags EIP ];
define register offset=0x280 size=2 [ flags _ IP ];
@endif

# Debug and control registers

@ifdef IA64
define register offset=0x300 size=8 [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7
                                      DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15
                                      CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7
                                      CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 ];
@else
define register offset=0x300 size=4 [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7
                                      CR0 _ CR2 CR3 CR4 ];
define register offset=0x400 size=4 [ TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 ];
@endif

# Processor State Register - currently only XFEATURE_ENABLED_MASK=XCR0 is defined
#
define register offset=0x600 size=8 [ XCR0 ];

# Memory Protection Extensions (MPX)
define register offset=0x700 size=8 [ BNDCFGS BNDCFGU BNDSTATUS ];

define register offset=0x740 size=16 [ BND0 BND1 BND2 BND3 _ _ _ _ ];
define register offset=0x740 size=8 [ BND0_LB BND0_UB BND1_LB BND1_UB BND2_LB BND2_UB BND3_LB BND3_UB _ _ _ _ _ _ _ _ ];

# Control Flow Extensions
define register offset=0x7c0 size=8 [ SSP IA32_PL2_SSP IA32_PL1_SSP IA32_PL0_SSP ];

# NOTE: ST registers moved with Ghidra 10.0.3 (v2.12) and previously occupied the offset range 0x1000-104f.
# Automated address re-mapping was not provided and requires use of FixOldSTVariableStorageScript
# to fixup uses within a program. The range 0x1000-104f should remain reserved and unused.
# define register offset=0x1000 size=80 [ OLD_ST_REGION ];

define register offset=0x1090 size=1 [ C0 C1 C2 C3 ];
define register offset=0x1094 size=4 [ MXCSR ];
define register offset=0x10a0 size=2 [ FPUControlWord FPUStatusWord FPUTagWord
                                       FPULastInstructionOpcode ];
define register offset=0x10a8 size=$(SIZE) [ FPUDataPointer FPUInstructionPointer ];
define register offset=0x10c8 size=2 [ FPUPointerSelector FPUDataSelector ]; # FCS FDS
# FCS is not modeled, deprecated as 0.
# FDS is not modeled, deprecated as 0.

# Floating point registers - as they are in 32-bit protected mode
# See overlapping MM registers below
define register offset=0x1100 size=10 [ ST0 ];
define register offset=0x1110 size=10 [ ST1 ];
define register offset=0x1120 size=10 [ ST2 ];
define register offset=0x1130 size=10 [ ST3 ];
define register offset=0x1140 size=10 [ ST4 ];
define register offset=0x1150 size=10 [ ST5 ];
define register offset=0x1160 size=10 [ ST6 ];
define register offset=0x1170 size=10 [ ST7 ];

# NOTE: The upper 16 bits of the x87 ST registers go unused in MMX.
# These upper 16 bits should be set to all ones by any MMX instruction, which corresponds to the
# floating-point representation of NaNs or infinities.
# Although not currently modeled, the 2-byte ST0h..ST7h registers are provided for that purpose.

define register offset=0x1100 size=8 [ MM0 _ MM1 _ MM2 _ MM3 _ MM4 _ MM5 _ MM6 _ MM7 _ ];
define register offset=0x1100 size=4 [
    MM0_Da MM0_Db _ _
    MM1_Da MM1_Db _ _
    MM2_Da MM2_Db _ _
    MM3_Da MM3_Db _ _
    MM4_Da MM4_Db _ _
    MM5_Da MM5_Db _ _
    MM6_Da MM6_Db _ _
    MM7_Da MM7_Db _ _
];
define register offset=0x1100 size=2 [
    MM0_Wa MM0_Wb MM0_Wc MM0_Wd ST0h _ _ _
    MM1_Wa MM1_Wb MM1_Wc MM1_Wd ST1h _ _ _
    MM2_Wa MM2_Wb MM2_Wc MM2_Wd ST2h _ _ _
    MM3_Wa MM3_Wb MM3_Wc MM3_Wd ST3h _ _ _
    MM4_Wa MM4_Wb MM4_Wc MM4_Wd ST4h _ _ _
    MM5_Wa MM5_Wb MM5_Wc MM5_Wd ST5h _ _ _
    MM6_Wa MM6_Wb MM6_Wc MM6_Wd ST6h _ _ _
    MM7_Wa MM7_Wb MM7_Wc MM7_Wd ST7h _ _ _
];
define register offset=0x1100 size=1 [
    MM0_Ba MM0_Bb MM0_Bc MM0_Bd MM0_Be MM0_Bf MM0_Bg MM0_Bh _ _ _ _ _ _ _ _
    MM1_Ba MM1_Bb MM1_Bc MM1_Bd MM1_Be MM1_Bf MM1_Bg MM1_Bh _ _ _ _ _ _ _ _
    MM2_Ba MM2_Bb MM2_Bc MM2_Bd MM2_Be MM2_Bf MM2_Bg MM2_Bh _ _ _ _ _ _ _ _
    MM3_Ba MM3_Bb MM3_Bc MM3_Bd MM3_Be MM3_Bf MM3_Bg MM3_Bh _ _ _ _ _ _ _ _
    MM4_Ba MM4_Bb MM4_Bc MM4_Bd MM4_Be MM4_Bf MM4_Bg MM4_Bh _ _ _ _ _ _ _ _
    MM5_Ba MM5_Bb MM5_Bc MM5_Bd MM5_Be MM5_Bf MM5_Bg MM5_Bh _ _ _ _ _ _ _ _
    MM6_Ba MM6_Bb MM6_Bc MM6_Bd MM6_Be MM6_Bf MM6_Bg MM6_Bh _ _ _ _ _ _ _ _
    MM7_Ba MM7_Bb MM7_Bc MM7_Bd MM7_Be MM7_Bf MM7_Bg MM7_Bh _ _ _ _ _ _ _ _
];

define register offset=0x1180 size=16 [ xmmTmp1 xmmTmp2 ];
define register offset=0x1180 size=8 [
    xmmTmp1_Qa xmmTmp1_Qb
    xmmTmp2_Qa xmmTmp2_Qb
];
define register offset=0x1180 size=4 [
    xmmTmp1_Da xmmTmp1_Db xmmTmp1_Dc xmmTmp1_Dd
    xmmTmp2_Da xmmTmp2_Db xmmTmp2_Dc xmmTmp2_Dd
];

#
# YMM0 - YMM7  - available in 32-bit mode
# YMM0 - YMM15 - available in 64-bit mode
#

# YMMx_H is the formal name for the high double quadword of the YMMx register; XMMx is the overlay in the XMM register set
define register offset=0x1200 size=16 [
    XMM0 YMM0_H _ _
    XMM1 YMM1_H _ _
    XMM2 YMM2_H _ _
    XMM3 YMM3_H _ _
    XMM4 YMM4_H _ _
    XMM5 YMM5_H _ _
    XMM6 YMM6_H _ _
    XMM7 YMM7_H _ _
    XMM8 YMM8_H _ _
    XMM9 YMM9_H _ _
    XMM10 YMM10_H _ _
    XMM11 YMM11_H _ _
    XMM12 YMM12_H _ _
    XMM13 YMM13_H _ _
    XMM14 YMM14_H _ _
    XMM15 YMM15_H _ _
    XMM16 YMM16_H _ _
    XMM17 YMM17_H _ _
    XMM18 YMM18_H _ _
    XMM19 YMM19_H _ _
    XMM20 YMM20_H _ _
    XMM21 YMM21_H _ _
    XMM22 YMM22_H _ _
    XMM23 YMM23_H _ _
    XMM24 YMM24_H _ _
    XMM25 YMM25_H _ _
    XMM26 YMM26_H _ _
    XMM27 YMM27_H _ _
    XMM28 YMM28_H _ _
    XMM29 YMM29_H _ _
    XMM30 YMM30_H _ _
    XMM31 YMM31_H _ _
];

define register offset=0x1200 size=8 [
    XMM0_Qa XMM0_Qb _ _ _ _ _ _
    XMM1_Qa XMM1_Qb _ _ _ _ _ _
    XMM2_Qa XMM2_Qb _ _ _ _ _ _
    XMM3_Qa XMM3_Qb _ _ _ _ _ _
    XMM4_Qa XMM4_Qb _ _ _ _ _ _
    XMM5_Qa XMM5_Qb _ _ _ _ _ _
    XMM6_Qa XMM6_Qb _ _ _ _ _ _
    XMM7_Qa XMM7_Qb _ _ _ _ _ _
    XMM8_Qa XMM8_Qb _ _ _ _ _ _
    XMM9_Qa XMM9_Qb _ _ _ _ _ _
    XMM10_Qa XMM10_Qb _ _ _ _ _ _
    XMM11_Qa XMM11_Qb _ _ _ _ _ _
    XMM12_Qa XMM12_Qb _ _ _ _ _ _
    XMM13_Qa XMM13_Qb _ _ _ _ _ _
    XMM14_Qa XMM14_Qb _ _ _ _ _ _
    XMM15_Qa XMM15_Qb _ _ _ _ _ _
    XMM16_Qa XMM16_Qb _ _ _ _ _ _
    XMM17_Qa XMM17_Qb _ _ _ _ _ _
    XMM18_Qa XMM18_Qb _ _ _ _ _ _
    XMM19_Qa XMM19_Qb _ _ _ _ _ _
    XMM20_Qa XMM20_Qb _ _ _ _ _ _
    XMM21_Qa XMM21_Qb _ _ _ _ _ _
    XMM22_Qa XMM22_Qb _ _ _ _ _ _
    XMM23_Qa XMM23_Qb _ _ _ _ _ _
    XMM24_Qa XMM24_Qb _ _ _ _ _ _
    XMM25_Qa XMM25_Qb _ _ _ _ _ _
    XMM26_Qa XMM26_Qb _ _ _ _ _ _
    XMM27_Qa XMM27_Qb _ _ _ _ _ _
    XMM28_Qa XMM28_Qb _ _ _ _ _ _
    XMM29_Qa XMM29_Qb _ _ _ _ _ _
    XMM30_Qa XMM30_Qb _ _ _ _ _ _
    XMM31_Qa XMM31_Qb _ _ _ _ _ _
];
define register offset=0x1200 size=4 [
    XMM0_Da XMM0_Db XMM0_Dc XMM0_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM1_Da XMM1_Db XMM1_Dc XMM1_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM2_Da XMM2_Db XMM2_Dc XMM2_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM3_Da XMM3_Db XMM3_Dc XMM3_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM4_Da XMM4_Db XMM4_Dc XMM4_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM5_Da XMM5_Db XMM5_Dc XMM5_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM6_Da XMM6_Db XMM6_Dc XMM6_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM7_Da XMM7_Db XMM7_Dc XMM7_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM8_Da XMM8_Db XMM8_Dc XMM8_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM9_Da XMM9_Db XMM9_Dc XMM9_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM10_Da XMM10_Db XMM10_Dc XMM10_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM11_Da XMM11_Db XMM11_Dc XMM11_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM12_Da XMM12_Db XMM12_Dc XMM12_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM13_Da XMM13_Db XMM13_Dc XMM13_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM14_Da XMM14_Db XMM14_Dc XMM14_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM15_Da XMM15_Db XMM15_Dc XMM15_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM16_Da XMM16_Db XMM16_Dc XMM16_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM17_Da XMM17_Db XMM17_Dc XMM17_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM18_Da XMM18_Db XMM18_Dc XMM18_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM19_Da XMM19_Db XMM19_Dc XMM19_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM20_Da XMM20_Db XMM20_Dc XMM20_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM21_Da XMM21_Db XMM21_Dc XMM21_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM22_Da XMM22_Db XMM22_Dc XMM22_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM23_Da XMM23_Db XMM23_Dc XMM23_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM24_Da XMM24_Db XMM24_Dc XMM24_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM25_Da XMM25_Db XMM25_Dc XMM25_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM26_Da XMM26_Db XMM26_Dc XMM26_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM27_Da XMM27_Db XMM27_Dc XMM27_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM28_Da XMM28_Db XMM28_Dc XMM28_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM29_Da XMM29_Db XMM29_Dc XMM29_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM30_Da XMM30_Db XMM30_Dc XMM30_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM31_Da XMM31_Db XMM31_Dc XMM31_Dd _ _ _ _ _ _ _ _ _ _ _ _
];
define register offset=0x1200 size=2 [
    XMM0_Wa XMM0_Wb XMM0_Wc XMM0_Wd XMM0_We XMM0_Wf XMM0_Wg XMM0_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM1_Wa XMM1_Wb XMM1_Wc XMM1_Wd XMM1_We XMM1_Wf XMM1_Wg XMM1_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM2_Wa XMM2_Wb XMM2_Wc XMM2_Wd XMM2_We XMM2_Wf XMM2_Wg XMM2_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM3_Wa XMM3_Wb XMM3_Wc XMM3_Wd XMM3_We XMM3_Wf XMM3_Wg XMM3_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM4_Wa XMM4_Wb XMM4_Wc XMM4_Wd XMM4_We XMM4_Wf XMM4_Wg XMM4_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM5_Wa XMM5_Wb XMM5_Wc XMM5_Wd XMM5_We XMM5_Wf XMM5_Wg XMM5_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM6_Wa XMM6_Wb XMM6_Wc XMM6_Wd XMM6_We XMM6_Wf XMM6_Wg XMM6_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM7_Wa XMM7_Wb XMM7_Wc XMM7_Wd XMM7_We XMM7_Wf XMM7_Wg XMM7_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM8_Wa XMM8_Wb XMM8_Wc XMM8_Wd XMM8_We XMM8_Wf XMM8_Wg XMM8_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM9_Wa XMM9_Wb XMM9_Wc XMM9_Wd XMM9_We XMM9_Wf XMM9_Wg XMM9_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM10_Wa XMM10_Wb XMM10_Wc XMM10_Wd XMM10_We XMM10_Wf XMM10_Wg XMM10_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM11_Wa XMM11_Wb XMM11_Wc XMM11_Wd XMM11_We XMM11_Wf XMM11_Wg XMM11_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM12_Wa XMM12_Wb XMM12_Wc XMM12_Wd XMM12_We XMM12_Wf XMM12_Wg XMM12_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM13_Wa XMM13_Wb XMM13_Wc XMM13_Wd XMM13_We XMM13_Wf XMM13_Wg XMM13_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM14_Wa XMM14_Wb XMM14_Wc XMM14_Wd XMM14_We XMM14_Wf XMM14_Wg XMM14_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM15_Wa XMM15_Wb XMM15_Wc XMM15_Wd XMM15_We XMM15_Wf XMM15_Wg XMM15_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM16_Wa XMM16_Wb XMM16_Wc XMM16_Wd XMM16_We XMM16_Wf XMM16_Wg XMM16_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM17_Wa XMM17_Wb XMM17_Wc XMM17_Wd XMM17_We XMM17_Wf XMM17_Wg XMM17_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM18_Wa XMM18_Wb XMM18_Wc XMM18_Wd XMM18_We XMM18_Wf XMM18_Wg XMM18_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM19_Wa XMM19_Wb XMM19_Wc XMM19_Wd XMM19_We XMM19_Wf XMM19_Wg XMM19_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM20_Wa XMM20_Wb XMM20_Wc XMM20_Wd XMM20_We XMM20_Wf XMM20_Wg XMM20_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM21_Wa XMM21_Wb XMM21_Wc XMM21_Wd XMM21_We XMM21_Wf XMM21_Wg XMM21_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM22_Wa XMM22_Wb XMM22_Wc XMM22_Wd XMM22_We XMM22_Wf XMM22_Wg XMM22_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM23_Wa XMM23_Wb XMM23_Wc XMM23_Wd XMM23_We XMM23_Wf XMM23_Wg XMM23_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM24_Wa XMM24_Wb XMM24_Wc XMM24_Wd XMM24_We XMM24_Wf XMM24_Wg XMM24_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM25_Wa XMM25_Wb XMM25_Wc XMM25_Wd XMM25_We XMM25_Wf XMM25_Wg XMM25_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM26_Wa XMM26_Wb XMM26_Wc XMM26_Wd XMM26_We XMM26_Wf XMM26_Wg XMM26_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM27_Wa XMM27_Wb XMM27_Wc XMM27_Wd XMM27_We XMM27_Wf XMM27_Wg XMM27_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM28_Wa XMM28_Wb XMM28_Wc XMM28_Wd XMM28_We XMM28_Wf XMM28_Wg XMM28_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM29_Wa XMM29_Wb XMM29_Wc XMM29_Wd XMM29_We XMM29_Wf XMM29_Wg XMM29_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM30_Wa XMM30_Wb XMM30_Wc XMM30_Wd XMM30_We XMM30_Wf XMM30_Wg XMM30_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM31_Wa XMM31_Wb XMM31_Wc XMM31_Wd XMM31_We XMM31_Wf XMM31_Wg XMM31_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
];
define register offset=0x1200 size=1 [
    XMM0_Ba XMM0_Bb XMM0_Bc XMM0_Bd XMM0_Be XMM0_Bf XMM0_Bg XMM0_Bh XMM0_Bi XMM0_Bj XMM0_Bk XMM0_Bl XMM0_Bm XMM0_Bn XMM0_Bo XMM0_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM1_Ba XMM1_Bb XMM1_Bc XMM1_Bd XMM1_Be XMM1_Bf XMM1_Bg XMM1_Bh XMM1_Bi XMM1_Bj XMM1_Bk XMM1_Bl XMM1_Bm XMM1_Bn XMM1_Bo XMM1_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM2_Ba XMM2_Bb XMM2_Bc XMM2_Bd XMM2_Be XMM2_Bf XMM2_Bg XMM2_Bh XMM2_Bi XMM2_Bj XMM2_Bk XMM2_Bl XMM2_Bm XMM2_Bn XMM2_Bo XMM2_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM3_Ba XMM3_Bb XMM3_Bc XMM3_Bd XMM3_Be XMM3_Bf XMM3_Bg XMM3_Bh XMM3_Bi XMM3_Bj XMM3_Bk XMM3_Bl XMM3_Bm XMM3_Bn XMM3_Bo XMM3_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM4_Ba XMM4_Bb XMM4_Bc XMM4_Bd XMM4_Be XMM4_Bf XMM4_Bg XMM4_Bh XMM4_Bi XMM4_Bj XMM4_Bk XMM4_Bl XMM4_Bm XMM4_Bn XMM4_Bo XMM4_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM5_Ba XMM5_Bb XMM5_Bc XMM5_Bd XMM5_Be XMM5_Bf XMM5_Bg XMM5_Bh XMM5_Bi XMM5_Bj XMM5_Bk XMM5_Bl XMM5_Bm XMM5_Bn XMM5_Bo XMM5_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM6_Ba XMM6_Bb XMM6_Bc XMM6_Bd XMM6_Be XMM6_Bf XMM6_Bg XMM6_Bh XMM6_Bi XMM6_Bj XMM6_Bk XMM6_Bl XMM6_Bm XMM6_Bn XMM6_Bo XMM6_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM7_Ba XMM7_Bb XMM7_Bc XMM7_Bd XMM7_Be XMM7_Bf XMM7_Bg XMM7_Bh XMM7_Bi XMM7_Bj XMM7_Bk XMM7_Bl XMM7_Bm XMM7_Bn XMM7_Bo XMM7_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM8_Ba XMM8_Bb XMM8_Bc XMM8_Bd XMM8_Be XMM8_Bf XMM8_Bg XMM8_Bh XMM8_Bi XMM8_Bj XMM8_Bk XMM8_Bl XMM8_Bm XMM8_Bn XMM8_Bo XMM8_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM9_Ba XMM9_Bb XMM9_Bc XMM9_Bd XMM9_Be XMM9_Bf XMM9_Bg XMM9_Bh XMM9_Bi XMM9_Bj XMM9_Bk XMM9_Bl XMM9_Bm XMM9_Bn XMM9_Bo XMM9_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM10_Ba XMM10_Bb XMM10_Bc XMM10_Bd XMM10_Be XMM10_Bf XMM10_Bg XMM10_Bh XMM10_Bi XMM10_Bj XMM10_Bk XMM10_Bl XMM10_Bm XMM10_Bn XMM10_Bo XMM10_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM11_Ba XMM11_Bb XMM11_Bc XMM11_Bd XMM11_Be XMM11_Bf XMM11_Bg XMM11_Bh XMM11_Bi XMM11_Bj XMM11_Bk XMM11_Bl XMM11_Bm XMM11_Bn XMM11_Bo XMM11_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM12_Ba XMM12_Bb XMM12_Bc XMM12_Bd XMM12_Be XMM12_Bf XMM12_Bg XMM12_Bh XMM12_Bi XMM12_Bj XMM12_Bk XMM12_Bl XMM12_Bm XMM12_Bn XMM12_Bo XMM12_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM13_Ba XMM13_Bb XMM13_Bc XMM13_Bd XMM13_Be XMM13_Bf XMM13_Bg XMM13_Bh XMM13_Bi XMM13_Bj XMM13_Bk XMM13_Bl XMM13_Bm XMM13_Bn XMM13_Bo XMM13_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM14_Ba XMM14_Bb XMM14_Bc XMM14_Bd XMM14_Be XMM14_Bf XMM14_Bg XMM14_Bh XMM14_Bi XMM14_Bj XMM14_Bk XMM14_Bl XMM14_Bm XMM14_Bn XMM14_Bo XMM14_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM15_Ba XMM15_Bb XMM15_Bc XMM15_Bd XMM15_Be XMM15_Bf XMM15_Bg XMM15_Bh XMM15_Bi XMM15_Bj XMM15_Bk XMM15_Bl XMM15_Bm XMM15_Bn XMM15_Bo XMM15_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM16_Ba XMM16_Bb XMM16_Bc XMM16_Bd XMM16_Be XMM16_Bf XMM16_Bg XMM16_Bh XMM16_Bi XMM16_Bj XMM16_Bk XMM16_Bl XMM16_Bm XMM16_Bn XMM16_Bo XMM16_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM17_Ba XMM17_Bb XMM17_Bc XMM17_Bd XMM17_Be XMM17_Bf XMM17_Bg XMM17_Bh XMM17_Bi XMM17_Bj XMM17_Bk XMM17_Bl XMM17_Bm XMM17_Bn XMM17_Bo XMM17_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM18_Ba XMM18_Bb XMM18_Bc XMM18_Bd XMM18_Be XMM18_Bf XMM18_Bg XMM18_Bh XMM18_Bi XMM18_Bj XMM18_Bk XMM18_Bl XMM18_Bm XMM18_Bn XMM18_Bo XMM18_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM19_Ba XMM19_Bb XMM19_Bc XMM19_Bd XMM19_Be XMM19_Bf XMM19_Bg XMM19_Bh XMM19_Bi XMM19_Bj XMM19_Bk XMM19_Bl XMM19_Bm XMM19_Bn XMM19_Bo XMM19_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM20_Ba XMM20_Bb XMM20_Bc XMM20_Bd XMM20_Be XMM20_Bf XMM20_Bg XMM20_Bh XMM20_Bi XMM20_Bj XMM20_Bk XMM20_Bl XMM20_Bm XMM20_Bn XMM20_Bo XMM20_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM21_Ba XMM21_Bb XMM21_Bc XMM21_Bd XMM21_Be XMM21_Bf XMM21_Bg XMM21_Bh XMM21_Bi XMM21_Bj XMM21_Bk XMM21_Bl XMM21_Bm XMM21_Bn XMM21_Bo XMM21_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM22_Ba XMM22_Bb XMM22_Bc XMM22_Bd XMM22_Be XMM22_Bf XMM22_Bg XMM22_Bh XMM22_Bi XMM22_Bj XMM22_Bk XMM22_Bl XMM22_Bm XMM22_Bn XMM22_Bo XMM22_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM23_Ba XMM23_Bb XMM23_Bc XMM23_Bd XMM23_Be XMM23_Bf XMM23_Bg XMM23_Bh XMM23_Bi XMM23_Bj XMM23_Bk XMM23_Bl XMM23_Bm XMM23_Bn XMM23_Bo XMM23_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM24_Ba XMM24_Bb XMM24_Bc XMM24_Bd XMM24_Be XMM24_Bf XMM24_Bg XMM24_Bh XMM24_Bi XMM24_Bj XMM24_Bk XMM24_Bl XMM24_Bm XMM24_Bn XMM24_Bo XMM24_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM25_Ba XMM25_Bb XMM25_Bc XMM25_Bd XMM25_Be XMM25_Bf XMM25_Bg XMM25_Bh XMM25_Bi XMM25_Bj XMM25_Bk XMM25_Bl XMM25_Bm XMM25_Bn XMM25_Bo XMM25_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM26_Ba XMM26_Bb XMM26_Bc XMM26_Bd XMM26_Be XMM26_Bf XMM26_Bg XMM26_Bh XMM26_Bi XMM26_Bj XMM26_Bk XMM26_Bl XMM26_Bm XMM26_Bn XMM26_Bo XMM26_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM27_Ba XMM27_Bb XMM27_Bc XMM27_Bd XMM27_Be XMM27_Bf XMM27_Bg XMM27_Bh XMM27_Bi XMM27_Bj XMM27_Bk XMM27_Bl XMM27_Bm XMM27_Bn XMM27_Bo XMM27_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM28_Ba XMM28_Bb XMM28_Bc XMM28_Bd XMM28_Be XMM28_Bf XMM28_Bg XMM28_Bh XMM28_Bi XMM28_Bj XMM28_Bk XMM28_Bl XMM28_Bm XMM28_Bn XMM28_Bo XMM28_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM29_Ba XMM29_Bb XMM29_Bc XMM29_Bd XMM29_Be XMM29_Bf XMM29_Bg XMM29_Bh XMM29_Bi XMM29_Bj XMM29_Bk XMM29_Bl XMM29_Bm XMM29_Bn XMM29_Bo XMM29_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM30_Ba XMM30_Bb XMM30_Bc XMM30_Bd XMM30_Be XMM30_Bf XMM30_Bg XMM30_Bh XMM30_Bi XMM30_Bj XMM30_Bk XMM30_Bl XMM30_Bm XMM30_Bn XMM30_Bo XMM30_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM31_Ba XMM31_Bb XMM31_Bc XMM31_Bd XMM31_Be XMM31_Bf XMM31_Bg XMM31_Bh XMM31_Bi XMM31_Bj XMM31_Bk XMM31_Bl XMM31_Bm XMM31_Bn XMM31_Bo XMM31_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
];

define register offset=0x1200 size=32 [
    YMM0 _ YMM1 _
    YMM2 _ YMM3 _
    YMM4 _ YMM5 _
    YMM6 _ YMM7 _
    YMM8 _ YMM9 _
    YMM10 _ YMM11 _
    YMM12 _ YMM13 _
    YMM14 _ YMM15 _
    YMM16 _ YMM17 _
    YMM18 _ YMM19 _
    YMM20 _ YMM21 _
    YMM22 _ YMM23 _
    YMM24 _ YMM25 _
    YMM26 _ YMM27 _
    YMM28 _ YMM29 _
    YMM30 _ YMM31 _
];

define register offset=0x1200 size=64 [
    ZMM0 ZMM1
    ZMM2 ZMM3
    ZMM4 ZMM5
    ZMM6 ZMM7
    ZMM8 ZMM9
    ZMM10 ZMM11
    ZMM12 ZMM13
    ZMM14 ZMM15
    ZMM16 ZMM17
    ZMM18 ZMM19
    ZMM20 ZMM21
    ZMM22 ZMM23
    ZMM24 ZMM25
    ZMM26 ZMM27
    ZMM28 ZMM29
    ZMM30 ZMM31
];

# Define context bits
define register offset=0x2000 size=8 contextreg;

# AVX-512 opmask registers
define register offset=2100 size=8 [
    K0 K1 K2 K3 K4 K5 K6 K7
];

# dummy registers for managing broadcast data for AVX512
define register offset=2200 size=4 [ BCST4 ];
define register offset=2200 size=8 [ BCST8 ];
define register offset=2200 size=16 [ BCST16 ];
define register offset=2200 size=32 [ BCST32 ];
define register offset=2200 size=64 [ BCST64 ];

define register offset=2300 size=16 [ XmmResult _ _ _ XmmMask ];
define register offset=2300 size=32 [ YmmResult _ YmmMask ];
define register offset=2300 size=64 [ ZmmResult ZmmMask ];

#
#
# This context layout is important: the 32-bit version sees addrsize as just the
# low-order bit, whereas the 64-bit version sees both bits. This ensures that the
# 32- and 64-bit languages are technically binary compatible, but since the 32-bit
# language can't see that addrsize is 2 bits, its constructors won't be pulled up
# into constructors where bit 0 is always 0 (which it is), and so you don't get
# the decision conflicts that choose context over table order.
#
#

define context contextreg
@ifdef IA64
  # Stored context
  longMode=(0,0) # 0 for 32-bit emulation, 1 for 64-bit mode
  reserved=(1,3)
  addrsize=(4,5) # =0 16-bit addressing, =1 32-bit addressing, =2 64-bit addressing
@else
  # Stored context
  reserved=(0,3)
  addrsize=(5,5) # =0 16-bit addressing, =1 32-bit addressing
@endif
  bit64=(4,4) # =0 16/32-bit, =1 64-bit
  opsize=(6,7) # =0 16-bit operands, =1 32-bit operands, =2 64-bit operands
  segover=(8,10) # 0=default 1=cs 2=ss 3=ds 4=es 5=fs 6=gs
  highseg=(8,8) # high bit of segover will be set for ES, FS, GS
  protectedMode=(11,11) # 0 for real mode, 1 for protected mode
  # End stored context

  mandover=(12,14) # 0x66, 0xf2, or 0xf3 overrides (for mandatory prefixes)
  repneprefx=(12,12) # 0xf2 REPNE prefix
  repprefx=(13,13) # 0xf3 REP prefix
  xacquireprefx=(12,12) # 0xf2 XACQUIRE prefix
  xreleaseprefx=(13,13) # 0xf3 XRELEASE prefix
  prefix_f2=(12,12) # This is not really a REPNE override; it means there is a real(read)/implied(vex) f2 byte
  prefix_f3=(13,13) # This is not really a REP override; it means there is a real(read)/implied(vex) f3 byte
  prefix_66=(14,14) # This is not really an OPSIZE override; it means there is a real(read)/implied(vex) 66 byte

  rexWRXBprefix=(15,18) # REX.WRXB bits
  rexWprefix=(15,15) # REX.W bit prefix (opsize=2 when REX.W is set)
  rexRprefix=(16,16) # REX.R bit prefix extends reg_opcode
  rexXprefix=(17,17) # REX.X bit prefix extends SIB index field to 4 bits
  rexBprefix=(18,18) # REX.B bit prefix extends r/m, SIB base, Reg operand
  rexprefix=(19,19) # True if the REX prefix is present - note, if present, vex_mode is not supported
  # rexWRXB bits can be re-used since they are incompatible.

  vexMode=(20,21) # 2 for an EVEX instruction, 1 for VEX mode, 0 for normal

  evexL=(22,23) # 0 for 128, 1 for 256, 2 for 512 (also used for rounding control)
  evexLp=(22,22) # EVEX.L'
  vexL=(23,23) # 0 for 128, 1 for 256

  evexV5_XmmReg=(24,28) # evex byte for matching XmmReg
  evexV5_YmmReg=(24,28) # evex byte for matching YmmReg
  evexV5_ZmmReg=(24,28) # evex byte for matching ZmmReg
  evexV5=(24,28) # EVEX.V' combined with EVEX.vvvv
  evexVp=(24,24) # EVEX.V' bit prefix extends EVEX.vvvv (stored inverted)
  vexVVVV=(25,28) # value of vex byte for matching
  vexVVVV_r32=(25,28) # value of vex byte for matching a normal 32-bit register
  vexVVVV_r64=(25,28) # value of vex byte for matching a normal 64-bit register
  vexVVVV_XmmReg=(25,28) # value of vex byte for matching XmmReg
  vexVVVV_YmmReg=(25,28) # value of vex byte for matching YmmReg
  vexVVVV_ZmmReg=(25,28) # value of vex byte for matching ZmmReg

  vexHighV=(25,25)
  evexVopmask=(26,28) # VEX.vvvv opmask

  suffix3D=(22,29) # 3DNow suffix byte (overlaps un-modified vex context region)

  instrPhase=(30,30) # 0: initial/prefix phase, 1: primary instruction phase

  lockprefx=(31,31) # 0xf0 LOCK prefix

  vexMMMMM=(32,36) # need to match for preceding bytes: 1=0x0F, 2=0x0F 0x38, 3=0x0F 0x3A

  evexRp=(37,37) # EVEX.R' bit prefix extends r
  evexB=(38,38) # EVEX.b broadcast
  evexZ=(39,39) # Opmask behavior: 1 for zeroing-masking, 0 for merging-masking
  evexAAA=(40,42) # Opmask selector
  evexOpmask=(40,42) # Used for attaching Opmask registers
  evexD8Type=(43,43) # Used for compressed Disp8*N, can range from 1 to 64
  evexBType=(47,47) # Used for Disp8*N (see table 2-34 in 325462-sdm-vol-1-2abcd-3abcd-4.pdf)
  evexTType=(44,47) # Used for Disp8*N (see table 2-35 in 325462-sdm-vol-1-2abcd-3abcd-4.pdf)
  evexDisp8=(44,46)
  reservedHigh=(48,63) # reserved for future use

;
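
# Illustrative example: a REX prefix byte has the form 0100WRXB, so its low
# four bits populate rexWRXBprefix above. Byte 0x4c = 0100 1100b yields
# rexprefix=1, rexWprefix=1 (64-bit operand size), rexRprefix=1 (reg_opcode
# extended), rexXprefix=0, rexBprefix=0.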

# These are only to be used with pre-REX (original 8086, 80386) and REX encoding. Do not use with VEX encoding.
# These are used to designate that the opcode sequence begins with one of these "mandatory" prefix values.
# This allows the other prefixes to come before the mandatory value.
# For example: CRC32 r32, r16 -- 66 F2 0F 38 F1 C8

@define PRE_NO "mandover=0"
@define PRE_66 "prefix_66=1"
@define PRE_F3 "prefix_f3=1"
@define PRE_F2 "prefix_f2=1"
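
# Illustrative (hypothetical) usage: these @defines expand textually inside a
# constructor pattern, so a pattern written as
#   :FOO ... is vexMode=0 & $(PRE_F2) & byte=0x0f; ...
# is equivalent to constraining prefix_f2=1 ahead of the 0x0f opcode byte.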

# Define special registers for debugger
@ifdef IA64
define register offset=0x2200 size=4 [ IDTR_Limit ];
define register offset=0x2200 size=12 [ IDTR ];
define register offset=0x2204 size=8 [ IDTR_Address ];

define register offset=0x2220 size=4 [ GDTR_Limit ];
define register offset=0x2220 size=12 [ GDTR ];
define register offset=0x2224 size=8 [ GDTR_Address ];

define register offset=0x2240 size=4 [ LDTR_Limit ];
define register offset=0x2240 size=14 [ LDTR ];
define register offset=0x2244 size=8 [ LDTR_Address ];
define register offset=0x2248 size=2 [ LDTR_Attributes ];

define register offset=0x2260 size=4 [ TR_Limit ];
define register offset=0x2260 size=14 [ TR ];
define register offset=0x2264 size=8 [ TR_Address ];
define register offset=0x2268 size=2 [ TR_Attributes ];
@else
define register offset=0x2200 size=6 [ IDTR ];
define register offset=0x2200 size=2 [ IDTR_Limit ];
define register offset=0x2202 size=4 [ IDTR_Address ];

define register offset=0x2210 size=6 [ GDTR ];
define register offset=0x2210 size=2 [ GDTR_Limit ];
define register offset=0x2212 size=4 [ GDTR_Address ];

define register offset=0x2220 size=6 [ LDTR ];
define register offset=0x2220 size=2 [ LDTR_Limit ];
define register offset=0x2222 size=4 [ LDTR_Address ];

define register offset=0x2230 size=6 [ TR ];
define register offset=0x2230 size=2 [ TR_Limit ];
define register offset=0x2232 size=4 [ TR_Address ];
@endif

define token opbyte (8)
  byte=(0,7)
  high4=(4,7)
  high5=(3,7)
  low5=(0,4)
  byte_4=(4,4)
  byte_0=(0,0)
;

define token modrm (8)
  mod = (6,7)
  reg_opcode = (3,5)
  reg_opcode_hb = (5,5)
  r_m = (0,2)
  row = (4,7)
  col = (0,2)
  page = (3,3)
  cond = (0,3)
  reg8 = (3,5)
  reg16 = (3,5)
  reg32 = (3,5)
  reg64 = (3,5)
  reg8_x0 = (3,5)
  reg8_x1 = (3,5)
  reg16_x = (3,5)
  reg32_x = (3,5)
  reg64_x = (3,5)
  Sreg = (3,5)
  creg = (3,5)
  creg_x = (3,5)
  debugreg = (3,5)
  debugreg_x = (3,5)
  testreg = (3,5)
  r8 = (0,2)
  r16 = (0,2)
  r32 = (0,2)
  r64 = (0,2)
  r8_x0 = (0,2)
  r8_x1 = (0,2)
  r16_x = (0,2)
  r32_x = (0,2)
  r64_x = (0,2)
  frow = (4,7)
  fpage = (3,3)
  freg = (0,2)
  rexw = (3,3)
  rexr = (2,2)
  rexx = (1,1)
  rexb = (0,0)
  mmxmod = (6,7)
  mmxreg = (3,5)
  mmxreg1 = (3,5)
  mmxreg2 = (0,2)
  xmmmod = (6,7)
  xmmreg = (3,5)
  ymmreg = (3,5)
  zmmreg = (3,5)

  xmmreg1 = (3,5)
  ymmreg1 = (3,5)
  zmmreg1 = (3,5)
  xmmreg2 = (0,2)
  ymmreg2 = (0,2)
  zmmreg2 = (0,2)

  xmmreg_x = (3,5)
  ymmreg_x = (3,5)
  zmmreg_x = (3,5)
  xmmreg1_x = (3,5)
  ymmreg1_x = (3,5)
  zmmreg1_x = (3,5)
  xmmreg1_r = (3,5)
  ymmreg1_r = (3,5)
  zmmreg1_r = (3,5)
  xmmreg1_rx = (3,5)
  ymmreg1_rx = (3,5)
  zmmreg1_rx = (3,5)
  xmmreg2_b = (0,2)
  ymmreg2_b = (0,2)
  zmmreg2_b = (0,2)
  xmmreg2_x = (0,2)
  ymmreg2_x = (0,2)
  zmmreg2_x = (0,2)
  xmmreg2_bx = (0,2)
  ymmreg2_bx = (0,2)
  zmmreg2_bx = (0,2)

  vex_pp = (0,1)
  vex_l = (2,2)
  vex_vvvv = (3,6)
  vex_r = (7,7)
  vex_x = (6,6)
  vex_b = (5,5)
  vex_w = (7,7)
  vex_mmmmm = (0,4)

  evex_rp = (4,4)
  evex_res = (3,3)
  evex_res2 = (2,2)
  evex_mmm = (0,2)

  evex_z = (7,7)
  evex_lp = (6,6)
  evex_l = (5,5)
  evex_b = (4,4)
  evex_vp = (3,3)
  evex_aaa = (0,2)
  opmaskreg = (3,5)
  opmaskrm = (0,2)

  bnd1 = (3,5)
  bnd1_lb = (3,5)
  bnd1_ub = (3,5)
  bnd2 = (0,2)
  bnd2_lb = (0,2)
  bnd2_ub = (0,2)
;

define token sib (8)
  ss = (6,7)
  index = (3,5)
  index_x = (3,5)
  index64 = (3,5)
  index64_x = (3,5)
  xmm_vsib = (3,5)
  xmm_vsib_x = (3,5)
  ymm_vsib = (3,5)
  ymm_vsib_x = (3,5)
  zmm_vsib = (3,5)
  zmm_vsib_x = (3,5)
  base = (0,2)
  base_x = (0,2)
  base64 = (0,2)
  base64_x = (0,2)
;

define token I8 (8)
  Xmm_imm8_7_4=(4,7)
  Ymm_imm8_7_4=(4,7)
  imm8_7=(7,7)
  imm8_6=(6,6)
  imm8_6_7=(6,7)
  imm8_5=(5,5)
  imm8_5_7=(5,7)
  imm8_4=(4,4)
  imm8_4_7=(4,7)
  imm8_3=(3,3)
  imm8_3_7=(3,7)
  imm8_2=(2,2)
  imm8_2_7=(2,7)
  imm8_1=(1,1)
  imm8_1_7=(1,7)
  imm8_0=(0,0)
  imm8_3_0=(0,3)
  imm8=(0,7)
  imm8_val=(0,7)
  simm8=(0,7) signed
;

define token I16 (16) imm16_15=(15,15) imm16=(0,15) simm16=(0,15) signed j16=(0,15);
define token I32 (32) imm32=(0,31) simm32=(0,31) signed;
define token I64 (64) imm64=(0,63) simm64=(0,63) signed;
define token override (8) over=(0,7);

attach variables [ r32 reg32 base index ] [ EAX ECX EDX EBX ESP EBP ESI EDI ];
attach variables [ r16 reg16 ] [ AX CX DX BX SP BP SI DI ];
attach variables [ r8 reg8 ] [ AL CL DL BL AH CH DH BH ];
attach variables Sreg [ ES CS SS DS FS GS _ _ ];
attach variables freg [ ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7 ];
attach variables [ debugreg ] [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 ];
@ifdef IA64
attach variables [ r64 reg64 base64 index64 ] [ RAX RCX RDX RBX RSP RBP RSI RDI ];
attach variables [ r64_x reg64_x base64_x index64_x ] [ R8 R9 R10 R11 R12 R13 R14 R15 ];
attach variables [ r32_x reg32_x base_x index_x ] [ R8D R9D R10D R11D R12D R13D R14D R15D ];
attach variables [ r16_x reg16_x ] [ R8W R9W R10W R11W R12W R13W R14W R15W ];
attach variables [ r8_x0 reg8_x0 ] [ AL CL DL BL SPL BPL SIL DIL ];
attach variables [ r8_x1 reg8_x1 ] [ R8B R9B R10B R11B R12B R13B R14B R15B ];
attach variables [ debugreg_x ] [ DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15 ];
attach variables creg [ CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7 ];
attach variables creg_x [ CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 ];
@else
attach variables [ testreg ] [ TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 ];
attach variables creg [ CR0 _ CR2 CR3 CR4 _ _ _ ];
@endif

attach values ss [ 1 2 4 8 ];

attach variables [ mmxreg mmxreg1 mmxreg2 ] [ MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 ];

attach variables [ xmmreg xmmreg1 xmmreg2 xmm_vsib ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 ];

attach variables [ xmmreg_x xmmreg1_x xmmreg2_b xmm_vsib_x ] [ XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];

attach variables [ xmmreg1_r xmmreg2_x ] [ XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 ];

attach variables [ xmmreg1_rx xmmreg2_bx ] [ XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 ];

attach variables [ vexVVVV_XmmReg Xmm_imm8_7_4 ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];

attach variables [ vexVVVV_YmmReg Ymm_imm8_7_4 ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];

attach variables [ vexVVVV_ZmmReg ] [ ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ];

attach variables [ evexV5_XmmReg ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15
                                     XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 ];

attach variables [ evexV5_YmmReg ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15
                                     YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ];

attach variables [ evexV5_ZmmReg ] [ ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15
                                     ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 ];

@ifdef IA64
attach variables [ vexVVVV_r32 ] [ EAX ECX EDX EBX ESP EBP ESI EDI R8D R9D R10D R11D R12D R13D R14D R15D ];
attach variables [ vexVVVV_r64 ] [ RAX RCX RDX RBX RSP RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15 ];
@else
attach variables [ vexVVVV_r32 ] [ EAX ECX EDX EBX ESP EBP ESI EDI _ _ _ _ _ _ _ _ ];
@endif

attach variables [ evexOpmask opmaskreg opmaskrm evexVopmask ] [ K0 K1 K2 K3 K4 K5 K6 K7 ];

attach variables [ ymmreg ymmreg1 ymmreg2 ymm_vsib ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 ];
attach variables [ ymmreg_x ymmreg1_x ymmreg2_b ymm_vsib_x ] [ YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];
attach variables [ ymmreg1_r ymmreg2_x ] [ YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 ];
attach variables [ ymmreg1_rx ymmreg2_bx ] [ YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ];

attach variables [ zmmreg zmmreg1 zmmreg2 zmm_vsib ] [ ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ];
attach variables [ zmmreg_x zmmreg1_x zmmreg2_b zmm_vsib_x ] [ ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ];
attach variables [ zmmreg1_r zmmreg2_x ] [ ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ];
attach variables [ zmmreg1_rx zmmreg2_bx ] [ ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 ];

attach variables [ bnd1 bnd2 ] [ BND0 BND1 BND2 BND3 _ _ _ _ ];
attach variables [ bnd1_lb bnd2_lb ] [ BND0_LB BND1_LB BND2_LB BND3_LB _ _ _ _ ];
attach variables [ bnd1_ub bnd2_ub ] [ BND0_UB BND1_UB BND2_UB BND3_UB _ _ _ _ ];

define pcodeop segment; # Define special pcodeop that calculates the RAM address
                        # given the segment selector and offset as input

define pcodeop in; # force in/out to show up in decompiler
define pcodeop out;
define pcodeop sysenter;
define pcodeop sysexit;
define pcodeop syscall;
define pcodeop sysret;
define pcodeop swapgs;
define pcodeop invlpg;
define pcodeop invlpga;
define pcodeop invpcid;
define pcodeop rdtscp;
define pcodeop mwait;
define pcodeop mwaitx;
define pcodeop monitor;
define pcodeop monitorx;
define pcodeop swi; # for INT instruction

define pcodeop LOCK; # for LOCK prefix
define pcodeop UNLOCK; # for LOCK prefix
define pcodeop XACQUIRE; # for XACQUIRE prefix
define pcodeop XRELEASE; # for XRELEASE prefix

# MFL: definitions for AMD hardware-assisted virtualization instructions
define pcodeop clgi; # clear global interrupt flag (GIF)
define pcodeop stgi; # set global interrupt flag (GIF)
define pcodeop vmload; # Load state from VMCB, opcode 0f 01 da
define pcodeop vmmcall; # Call VMM, opcode 0f 01 d9
define pcodeop vmrun; # Run virtual machine, opcode 0f 01 d8
define pcodeop vmsave; # Save state to VMCB, opcode 0f 01 db

# MFL: definitions for Intel IA hardware-assisted virtualization instructions
define pcodeop invept; # Invalidate translations derived from extended page tables (EPT); opcode 66 0f 38 80
define pcodeop invvpid; # Invalidate translations based on virtual-processor identifier (VPID); opcode 66 0f 38 81
define pcodeop vmcall; # Call to VM monitor by causing VM exit; opcode 0f 01 c1
define pcodeop vmclear; # Clear virtual-machine control structure; opcode 66 0f c7 /6
define pcodeop vmfunc; # Call virtual-machine function referenced by EAX
define pcodeop vmlaunch; # Launch virtual machine managed by current VMCS; opcode 0f 01 c2
define pcodeop vmresume; # Resume virtual machine managed by current VMCS; opcode 0f 01 c3
define pcodeop vmptrld; # Load pointer to virtual-machine control structure; opcode 0f c7 /6
define pcodeop vmptrst; # Store pointer to virtual-machine control structure; opcode 0f c7 /7
define pcodeop vmread; # Read field from virtual-machine control structure; opcode 0f 78
define pcodeop vmwrite; # Write field to virtual-machine control structure; opcode 0f 79
define pcodeop vmxoff; # Leave VMX operation; opcode 0f 01 c4
define pcodeop vmxon; # Enter VMX operation; opcode f3 0f c7 /6

@ifdef IA64
@define LONGMODE_ON "longMode=1"
@define LONGMODE_OFF "longMode=0"
@else
@define LONGMODE_OFF "opsize=opsize" # NOP
@endif

# When not in 64-bit mode, opcode 0x82 results in the same instruction as opcode 0x80;
# in 64-bit mode, opcode 0x82 results in #UD.
# See 22.15 "Undefined Opcodes" of the Intel manual.
@ifdef IA64
@define BYTE_80_82 "(byte=0x80 | (longMode=0 & byte=0x82))"
@else
@define BYTE_80_82 "(byte=0x80 | byte=0x82)"
@endif

@include "macros.sinc"

@ifdef IA64
Reg8: reg8 is rexprefix=0 & reg8 { export reg8; }
Reg8: reg8_x0 is rexprefix=1 & rexRprefix=0 & reg8_x0 { export reg8_x0; }
Reg8: reg8_x1 is rexprefix=1 & rexRprefix=1 & reg8_x1 { export reg8_x1; }
Reg16: reg16 is rexRprefix=0 & reg16 { export reg16; }
Reg16: reg16_x is rexRprefix=1 & reg16_x { export reg16_x; }
Reg32: reg32 is rexRprefix=0 & reg32 { export reg32; }
Reg32: reg32_x is rexRprefix=1 & reg32_x { export reg32_x; }
Reg64: reg64 is rexRprefix=0 & reg64 { export reg64; }
Reg64: reg64_x is rexRprefix=1 & reg64_x { export reg64_x; }
Rmr8: r8 is rexprefix=0 & r8 { export r8; }
Rmr8: r8_x0 is rexprefix=1 & rexBprefix=0 & r8_x0 { export r8_x0; }
Rmr8: r8_x1 is rexprefix=1 & rexBprefix=1 & r8_x1 { export r8_x1; }
CRmr8: r8 is rexBprefix=0 & r8 { export r8; }
CRmr8: r8 is addrsize=2 & rexBprefix=0 & r8 { export r8; }
CRmr8: r8_x0 is addrsize=2 & rexprefix=1 & rexBprefix=0 & r8_x0 { export r8_x0; }
CRmr8: r8_x1 is addrsize=2 & rexprefix=1 & rexBprefix=1 & r8_x1 { export r8_x1; }
Rmr16: r16 is rexBprefix=0 & r16 { export r16; }
Rmr16: r16_x is rexBprefix=1 & r16_x { export r16_x; }
CRmr16: r16 is rexBprefix=0 & r16 { export r16; }
CRmr16: r16_x is rexBprefix=1 & r16_x { export r16_x; }
Rmr32: r32 is rexBprefix=0 & r32 { export r32; }
Rmr32: r32_x is rexBprefix=1 & r32_x { export r32_x; }
CRmr32: r32 is rexBprefix=0 & r32 & r64 { export r64; }
CRmr32: r32_x is rexBprefix=1 & r32_x & r64_x { export r64_x; }
Rmr64: r64 is rexBprefix=0 & r64 { export r64; }
Rmr64: r64_x is rexBprefix=1 & r64_x { export r64_x; }
Base: base is rexBprefix=0 & base { export base; }
Base: base_x is rexBprefix=1 & base_x { export base_x; }
Index: index is rexXprefix=0 & index { export index; }
Index: index_x is rexXprefix=1 & index_x { export index_x; }
Base64: base64 is rexBprefix=0 & base64 { export base64; }
Base64: base64_x is rexBprefix=1 & base64_x { export base64_x; }
Index64: index64 is rexXprefix=0 & index64 { export index64; }
Index64: index64_x is rexXprefix=1 & index64_x { export index64_x; }
XmmReg: xmmreg is rexRprefix=0 & xmmreg { export xmmreg; }
XmmReg: xmmreg_x is rexRprefix=1 & xmmreg_x { export xmmreg_x; }
XmmReg1: xmmreg1 is rexRprefix=0 & xmmreg1 { export xmmreg1; }
XmmReg1: xmmreg1_x is rexRprefix=1 & xmmreg1_x { export xmmreg1_x; }
XmmReg1: xmmreg1_r is rexRprefix=0 & evexRp=1 & xmmreg1_r { export xmmreg1_r; }
XmmReg1: xmmreg1_rx is rexRprefix=1 & evexRp=1 & xmmreg1_rx { export xmmreg1_rx; }
XmmReg2: xmmreg2 is rexBprefix=0 & xmmreg2 { export xmmreg2; }
XmmReg2: xmmreg2_b is rexBprefix=1 & xmmreg2_b { export xmmreg2_b; }
XmmReg2: xmmreg2_x is rexBprefix=0 & rexXprefix=1 & xmmreg2_x { export xmmreg2_x; }
XmmReg2: xmmreg2_bx is rexBprefix=1 & rexXprefix=1 & xmmreg2_bx { export xmmreg2_bx; }

YmmReg1: ymmreg1 is rexRprefix=0 & ymmreg1 { export ymmreg1; }
YmmReg1: ymmreg1_x is rexRprefix=1 & ymmreg1_x { export ymmreg1_x; }
YmmReg1: ymmreg1_r is rexRprefix=0 & evexRp=1 & ymmreg1_r { export ymmreg1_r; }
YmmReg1: ymmreg1_rx is rexRprefix=1 & evexRp=1 & ymmreg1_rx { export ymmreg1_rx; }
YmmReg2: ymmreg2 is rexBprefix=0 & ymmreg2 { export ymmreg2; }
YmmReg2: ymmreg2_b is rexBprefix=1 & ymmreg2_b { export ymmreg2_b; }
YmmReg2: ymmreg2_x is rexBprefix=0 & rexXprefix=1 & ymmreg2_x { export ymmreg2_x; }
YmmReg2: ymmreg2_bx is rexBprefix=1 & rexXprefix=1 & ymmreg2_bx { export ymmreg2_bx; }

ZmmReg1: zmmreg1 is rexRprefix=0 & zmmreg1 { export zmmreg1; }
ZmmReg1: zmmreg1_x is rexRprefix=1 & zmmreg1_x { export zmmreg1_x; }
ZmmReg1: zmmreg1_r is rexRprefix=0 & evexRp=1 & zmmreg1_r { export zmmreg1_r; }
ZmmReg1: zmmreg1_rx is rexRprefix=1 & evexRp=1 & zmmreg1_rx { export zmmreg1_rx; }
ZmmReg2: zmmreg2 is rexBprefix=0 & zmmreg2 { export zmmreg2; }
ZmmReg2: zmmreg2_b is rexBprefix=1 & zmmreg2_b { export zmmreg2_b; }
ZmmReg2: zmmreg2_x is rexBprefix=0 & rexXprefix=1 & zmmreg2_x { export zmmreg2_x; }
ZmmReg2: zmmreg2_bx is rexBprefix=1 & rexXprefix=1 & zmmreg2_bx { export zmmreg2_bx; }

Xmm_vsib: xmm_vsib is rexXprefix=0 & xmm_vsib { export xmm_vsib; }
Xmm_vsib: xmm_vsib_x is rexXprefix=1 & xmm_vsib_x { export xmm_vsib_x; }
Ymm_vsib: ymm_vsib is rexXprefix=0 & ymm_vsib { export ymm_vsib; }
Ymm_vsib: ymm_vsib_x is rexXprefix=1 & ymm_vsib_x { export ymm_vsib_x; }
Zmm_vsib: zmm_vsib is rexXprefix=0 & zmm_vsib { export zmm_vsib; }
Zmm_vsib: zmm_vsib_x is rexXprefix=1 & zmm_vsib_x { export zmm_vsib_x; }
@else
Reg8: reg8 is reg8 { export reg8; }
Reg16: reg16 is reg16 { export reg16; }
Reg32: reg32 is reg32 { export reg32; }
Rmr8: r8 is r8 { export r8; }
CRmr8: r8 is r8 { export r8; }
Rmr16: r16 is r16 { export r16; }
CRmr16: r16 is r16 { export r16; }
Rmr32: r32 is r32 { export r32; }
CRmr32: r32 is r32 { export r32; }
Base: base is base { export base; }
Index: index is index { export index; }
XmmReg: xmmreg is xmmreg { export xmmreg; }
XmmReg1: xmmreg1 is xmmreg1 { export xmmreg1; }
XmmReg2: xmmreg2 is xmmreg2 { export xmmreg2; }
YmmReg1: ymmreg1 is ymmreg1 { export ymmreg1; }
YmmReg2: ymmreg2 is ymmreg2 { export ymmreg2; }
ZmmReg1: zmmreg1 is zmmreg1 { export zmmreg1; }
ZmmReg2: zmmreg2 is zmmreg2 { export zmmreg2; }
Xmm_vsib: xmm_vsib is xmm_vsib { export xmm_vsib; }
Ymm_vsib: ymm_vsib is ymm_vsib { export ymm_vsib; }
Zmm_vsib: zmm_vsib is zmm_vsib { export zmm_vsib; }
@endif
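
# Illustrative example: given the attach tables above, an r/m field of 0
# selects RAX through Rmr64 when REX.B is clear (r64) and R8 when REX.B is
# set (r64_x); the 8/16/32-bit and XMM/YMM/ZMM variants follow the same
# REX/EVEX bit-extension pattern.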

# signed immediate value subconstructors

simm8_16: simm8 is simm8 { export *[const]:2 simm8; }
simm8_32: simm8 is simm8 { export *[const]:4 simm8; }
@ifdef IA64
simm8_64: simm8 is simm8 { export *[const]:8 simm8; }
@endif
simm16_16: simm16 is simm16 { export *[const]:2 simm16; }
simm32_32: simm32 is simm32 { export *[const]:4 simm32; }
@ifdef IA64
simm32_64: simm32 is simm32 { export *[const]:8 simm32; }
imm32_64: imm32 is imm32 { export *[const]:8 imm32; }
@endif

# EVEX uses a compressed Disp8*N format

# Table 2-34:
# TupleType | EVEX.B | InputSize | EVEX.W | Broadcast | N (VL=128) | N (VL=256) | N (VL=512) | evexBType
# Full Mem  |   0    |   32bit   |   0    |   none    |     16     |     32     |     64     |     0
# Full Mem  |   1    |   32bit   |   0    |  {1tox}   |      4     |      4     |      4     |     0
# Full Mem  |   0    |   64bit   |   1    |   none    |     16     |     32     |     64     |     0
# Full Mem  |   1    |   64bit   |   1    |  {1tox}   |      8     |      8     |      8     |     0
# Half Mem  |   0    |   32bit   |   0    |   none    |      8     |     16     |     32     |     1
# Half Mem  |   1    |   32bit   |   0    |  {1tox}   |      4     |      4     |      4     |     1

evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=0 & evexL=0 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=0 & evexL=1 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=0 & evexL=2 [ offs = 6; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=1 & rexWprefix=0 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=1 & rexWprefix=1 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=0 & evexBType=1 & evexB=1 & rexWprefix=0 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=0 & evexBType=1 & evexB=0 & evexL=0 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=0 & evexBType=1 & evexB=0 & evexL=1 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=0 & evexBType=1 & evexB=0 & evexL=2 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }

# Table 2-35:
# TupleType      | InputSize | EVEX.W | N (VL=128) | N (VL=256) | N (VL=512) | evexTType
# Full Mem       |    N/A    |  N/A   |     16     |     32     |     64     |     0
# Tuple 1 Scalar |   8bit    |  N/A   |      1     |      1     |      1     |     1
# Tuple 1 Scalar |  16bit    |  N/A   |      2     |      2     |      2     |     2
# Tuple 1 Scalar |  32bit    |   0    |      4     |      4     |      4     |     3
# Tuple 1 Scalar |  64bit    |   1    |      8     |      8     |      8     |     3
# Tuple 1 Fixed  |  32bit    |  N/A   |      4     |      4     |      4     |     4
# Tuple 1 Fixed  |  64bit    |  N/A   |      8     |      8     |      8     |     5
# Tuple 2        |  32bit    |   0    |      8     |      8     |      8     |     6
# Tuple 2        |  64bit    |   1    |     N/A    |     16     |     16     |     6
# Tuple 4        |  32bit    |   0    |     N/A    |     16     |     16     |     7
# Tuple 4        |  64bit    |   1    |     N/A    |     N/A    |     32     |     7
# Tuple 8        |  32bit    |   0    |     N/A    |     N/A    |     32     |     8
# Half Mem       |    N/A    |  N/A   |      8     |     16     |     32     |     9
# Quarter Mem    |    N/A    |  N/A   |      4     |      8     |     16     |     A
# Eighth Mem     |    N/A    |  N/A   |      2     |      4     |      8     |     B
# Mem128         |    N/A    |   1    |     16     |     16     |     16     |     C
# MOVDDUP        |    N/A    |  N/A   |      8     |     32     |     64     |     D

evexDisp8N: offs is evexD8Type=1 & evexTType=0x0 & evexL=0 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x0 & evexL=1 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x0 & evexL=2 [ offs = 6; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x9 & evexL=0 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x9 & evexL=1 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x9 & evexL=2 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xa & evexL=0 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xa & evexL=1 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xa & evexL=2 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xb & evexL=0 [ offs = 1; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xb & evexL=1 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xb & evexL=2 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }

evexDisp8N: offs is evexD8Type=1 & evexTType=0x1 [ offs = 0; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x2 [ offs = 1; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x3 & rexWprefix=0 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x3 & rexWprefix=1 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x4 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x5 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x6 & rexWprefix=0 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x6 & rexWprefix=1 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x7 & rexWprefix=0 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x7 & rexWprefix=1 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x8 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }

evexDisp8N: offs is evexD8Type=1 & evexTType=0xc [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xd & evexL=0 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xd & evexL=1 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xd & evexL=2 [ offs = 6; evexDisp8=offs; ] { export *[const]:1 offs; }

simm8_16: disp8N is vexMode=2 & evexDisp8N & simm8 [ disp8N = simm8 << evexDisp8; ] { export *[const]:2 disp8N; }
simm8_32: disp8N is vexMode=2 & evexDisp8N & simm8 [ disp8N = simm8 << evexDisp8; ] { export *[const]:4 disp8N; }
@ifdef IA64
simm8_64: disp8N is vexMode=2 & evexDisp8N & simm8 [ disp8N = simm8 << evexDisp8; ] { export *[const]:8 disp8N; }
@endif
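
# Illustrative example: evexDisp8 holds log2(N), so the stored disp8 byte is
# scaled by a left shift. For a full-mem ZMM access (evexD8Type=0, evexB=0,
# evexL=2) evexDisp8=6 (N=64), and a stored byte of 0x02 decodes as
# displacement 2 << 6 = 128, while 0xff (-1) decodes as -64.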

usimm8_16: imm8 is imm8 & imm8_7=0 { export *[const]:2 imm8; }
usimm8_16: val is imm8 & imm8_7=1 [ val = 0xff00 | imm8; ] { export *[const]:2 val; }
usimm8_32: imm8 is imm8 & imm8_7=0 { export *[const]:4 imm8; }
usimm8_32: val is imm8 & imm8_7=1 [ val = 0xffffff00 | imm8; ] { export *[const]:4 val; }
@ifdef IA64
usimm8_64: imm8 is imm8 & imm8_7=0 { export *[const]:8 imm8; }
usimm8_64: val is imm8 & imm8_7=1 [ val = 0xffffffffffffff00 | imm8; ] { export *[const]:8 val; }
@endif
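
# Illustrative example: these subconstructors sign-extend imm8 by hand, keyed
# on bit 7. imm8=0x9c (bit 7 set) exports 0xff9c as usimm8_16 and 0xffffff9c
# as usimm8_32, while imm8=0x5a exports 0x005a / 0x0000005a unchanged.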

# unused
#usimm16_32: imm16 is imm16 & imm16_15=0 { export *[const]:4 imm16; }
#usimm16_32: val is imm16 & imm16_15=1 [ val = 0xffff0000 | imm16; ] { export *[const]:4 val; }

# RIP/EIP relative address - NOTE: export of size 0 is intentional so it may be adjusted
pcRelSimm32: addr is simm32 [ addr=inst_next+simm32; ] { export addr; }
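
# Illustrative example: the displacement is taken relative to inst_next, the
# address of the following instruction. If the instruction carrying this
# operand ends at 0x401005 and simm32 = 0x10, pcRelSimm32 resolves to
# 0x401015.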

# 16-bit addressing modes (the offset portion)
addr16: [BX + SI] is mod=0 & r_m=0 & BX & SI { local tmp=BX+SI; export tmp; }
addr16: [BX + DI] is mod=0 & r_m=1 & BX & DI { local tmp=BX+DI; export tmp; }
addr16: [BP + SI] is mod=0 & r_m=2 & BP & SI { local tmp=BP+SI; export tmp; }
addr16: [BP + DI] is mod=0 & r_m=3 & BP & DI { local tmp=BP+DI; export tmp; }
addr16: [SI] is mod=0 & r_m=4 & SI { export SI; }
addr16: [DI] is mod=0 & r_m=5 & DI { export DI; }
addr16: [imm16] is mod=0 & r_m=6; imm16 { export *[const]:2 imm16; }
addr16: [BX] is mod=0 & r_m=7 & BX { export BX; }
addr16: [BX + SI + simm8_16] is mod=1 & r_m=0 & BX & SI; simm8_16 { local tmp=BX+SI+simm8_16; export tmp; }
addr16: [BX + DI + simm8_16] is mod=1 & r_m=1 & BX & DI; simm8_16 { local tmp=BX+DI+simm8_16; export tmp; }
addr16: [BP + SI + simm8_16] is mod=1 & r_m=2 & BP & SI; simm8_16 { local tmp=BP+SI+simm8_16; export tmp; }
addr16: [BP + DI + simm8_16] is mod=1 & r_m=3 & BP & DI; simm8_16 { local tmp=BP+DI+simm8_16; export tmp; }
addr16: [SI + simm8_16] is mod=1 & r_m=4 & SI; simm8_16 { local tmp=SI+simm8_16; export tmp; }
addr16: [DI + simm8_16] is mod=1 & r_m=5 & DI; simm8_16 { local tmp=DI+simm8_16; export tmp; }
addr16: [BP + simm8_16] is mod=1 & r_m=6 & BP; simm8_16 { local tmp=BP+simm8_16; export tmp; }
addr16: [BX + simm8_16] is mod=1 & r_m=7 & BX; simm8_16 { local tmp=BX+simm8_16; export tmp; }
addr16: [BX + SI + imm16] is mod=2 & r_m=0 & BX & SI; imm16 { local tmp=BX+SI+imm16; export tmp; }
addr16: [BX + DI + imm16] is mod=2 & r_m=1 & BX & DI; imm16 { local tmp=BX+DI+imm16; export tmp; }
addr16: [BP + SI + imm16] is mod=2 & r_m=2 & BP & SI; imm16 { local tmp=BP+SI+imm16; export tmp; }
addr16: [BP + DI + imm16] is mod=2 & r_m=3 & BP & DI; imm16 { local tmp=BP+DI+imm16; export tmp; }
addr16: [SI + imm16] is mod=2 & r_m=4 & SI; imm16 { local tmp=SI+imm16; export tmp; }
addr16: [DI + imm16] is mod=2 & r_m=5 & DI; imm16 { local tmp=DI+imm16; export tmp; }
addr16: [BP + imm16] is mod=2 & r_m=6 & BP; imm16 { local tmp=BP+imm16; export tmp; }
addr16: [BX + imm16] is mod=2 & r_m=7 & BX; imm16 { local tmp=BX+imm16; export tmp; }
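
# Illustrative example: a 16-bit modrm byte of 0x46 decodes as mod=1, r_m=6,
# matching [BP + simm8_16] above; the same operand with mod=2 (byte 0x86)
# takes a 16-bit displacement instead, matching [BP + imm16].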
|
|
|
|
# 32-bit addressing modes (the offset portion)
|
|
addr32: [Rmr32] is mod=0 & Rmr32 { export Rmr32; }
|
|
addr32: [Rmr32 + simm8_32] is mod=1 & Rmr32; simm8_32 { local tmp=Rmr32+simm8_32; export tmp; }
|
|
addr32: [Rmr32] is mod=1 & r_m!=4 & Rmr32; simm8=0 { export Rmr32; }
|
|
addr32: [Rmr32 + imm32] is mod=2 & Rmr32; imm32 { local tmp=Rmr32+imm32; export tmp; }
|
|
addr32: [Rmr32] is mod=2 & r_m!=4 & Rmr32; imm32=0 { export Rmr32; }
|
|
addr32: [imm32] is mod=0 & r_m=5; imm32 { export *[const]:4 imm32; }
|
|
addr32: [Base + Index*ss] is mod=0 & r_m=4; Index & Base & ss { local tmp=Base+Index*ss; export tmp; }
|
|
addr32: [Base] is mod=0 & r_m=4; index=4 & Base { export Base; }
|
|
addr32: [Index*ss + imm32] is mod=0 & r_m=4; Index & base=5 & ss; imm32 { local tmp=imm32+Index*ss; export tmp; }
|
|
addr32: [imm32] is mod=0 & r_m=4; index=4 & base=5; imm32 { export *[const]:4 imm32; }
|
|
addr32: [Base + Index*ss + simm8_32] is mod=1 & r_m=4; Index & Base & ss; simm8_32 { local tmp=simm8_32+Base+Index*ss; export tmp; }
|
|
addr32: [Base + simm8_32] is mod=1 & r_m=4; index=4 & Base; simm8_32 { local tmp=simm8_32+Base; export tmp; }
|
|
addr32: [Base + Index*ss] is mod=1 & r_m=4; Index & Base & ss; simm8=0 { local tmp=Base+Index*ss; export tmp; }
|
|
addr32: [Base] is mod=1 & r_m=4; index=4 & Base; simm8=0 { export Base; }
|
|
addr32: [Base + Index*ss + imm32] is mod=2 & r_m=4; Index & Base & ss; imm32 { local tmp=imm32+Base+Index*ss; export tmp; }
|
|
addr32: [Base + imm32] is mod=2 & r_m=4; index=4 & Base; imm32 { local tmp=imm32+Base; export tmp; }
|
|
addr32: [Base + Index*ss] is mod=2 & r_m=4; Index & Base & ss; imm32=0 { local tmp=Base+Index*ss; export tmp; }
|
|
addr32: [Base] is mod=2 & r_m=4; index=4 & Base; imm32=0 { export Base; }
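# Worked example (illustrative): ModRM 0x04 (mod=0, r_m=4) pulls in a SIB byte;
# SIB 0x88 gives ss=2 (scale 4), index=1 (ECX), base=0 (EAX), so addr32
# exports EAX + ECX*4.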
|
|
@ifdef IA64
|
|
addr32: [pcRelSimm32] is bit64=1 & mod=0 & r_m=5; pcRelSimm32 { export *[const]:4 pcRelSimm32; }
|
|
|
|
Addr32_64: [pcRelSimm32] is mod=0 & r_m=5; pcRelSimm32 { export *[const]:8 pcRelSimm32; }
|
|
Addr32_64: [imm32] is mod=0 & r_m=4; index=4 & base=5; imm32 { export *[const]:8 imm32; }
|
|
Addr32_64: addr32 is addr32 { tmp:8 = zext(addr32); export tmp; }
|
|
|
|
|
|
@endif
|
|
|
|
# 64-bit addressing modes (the offset portion)
|
|
|
|
@ifdef IA64
|
|
addr64: [Rmr64] is mod=0 & Rmr64 { export Rmr64; }
|
|
addr64: [Rmr64 + simm8_64] is mod=1 & Rmr64; simm8_64 { local tmp=Rmr64+simm8_64; export tmp; }
|
|
addr64: [Rmr64 + simm32_64] is mod=2 & Rmr64; simm32_64 { local tmp=Rmr64+simm32_64; export tmp; }
|
|
addr64: [Rmr64] is mod=1 & r_m!=4 & Rmr64; simm8=0 { export Rmr64; }
|
|
addr64: [Rmr64] is mod=2 & r_m!=4 & Rmr64; simm32=0 { export Rmr64; }
|
|
addr64: [pcRelSimm32] is mod=0 & r_m=5; pcRelSimm32 { export *[const]:8 pcRelSimm32; }
|
|
addr64: [Base64 + Index64*ss] is mod=0 & r_m=4; Index64 & Base64 & ss { local tmp=Base64+Index64*ss; export tmp; }
|
|
addr64: [Base64] is mod=0 & r_m=4; rexXprefix=0 & index64=4 & Base64 { export Base64; }
|
|
addr64: [simm32_64 + Index64*ss] is mod=0 & r_m=4; Index64 & base64=5 & ss; simm32_64 { local tmp=simm32_64+Index64*ss; export tmp; }
|
|
addr64: [Index64*ss] is mod=0 & r_m=4; Index64 & base64=5 & ss; imm32=0 { local tmp=Index64*ss; export tmp; }
|
|
addr64: [simm32_64] is mod=0 & r_m=4; rexXprefix=0 & index64=4 & base64=5; simm32_64 { export *[const]:8 simm32_64; }
|
|
addr64: [Base64 + simm8_64] is mod=1 & r_m=4; rexXprefix=0 & index64=4 & Base64; simm8_64 { local tmp=simm8_64+Base64; export tmp; }
|
|
addr64: [Base64 + Index64*ss + simm8_64] is mod=1 & r_m=4; Index64 & Base64 & ss; simm8_64 { local tmp=simm8_64+Base64+Index64*ss; export tmp; }
|
|
addr64: [Base64 + Index64*ss] is mod=1 & r_m=4; Index64 & Base64 & ss; simm8=0 { local tmp=Base64+Index64*ss; export tmp; }
|
|
addr64: [Base64 + simm32_64] is mod=2 & r_m=4; rexXprefix=0 & index64=4 & Base64; simm32_64 { local tmp=simm32_64+Base64; export tmp; }
|
|
addr64: [Base64] is mod=2 & r_m=4; rexXprefix=0 & index64=4 & Base64; imm32=0 { export Base64; }
|
|
addr64: [Base64 + Index64*ss + simm32_64] is mod=2 & r_m=4; Index64 & Base64 & ss; simm32_64 { local tmp=simm32_64+Base64+Index64*ss; export tmp; }
|
|
addr64: [Base64 + Index64*ss] is mod=2 & r_m=4; Index64 & Base64 & ss; imm32=0 { local tmp=Base64+Index64*ss; export tmp; }
|
|
@endif
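# Worked example (illustrative): in 64-bit mode ModRM 0x05 (mod=0, r_m=5) is
# RIP-relative, so addr64 exports pcRelSimm32 = inst_next + disp32 rather than
# the absolute [disp32] used by 32-bit addressing.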
|
|
|
|
currentCS: CS is protectedMode=0 & CS { tmp:4 = (inst_next >> 4) & 0xf000; CS = tmp:2; export CS; }
|
|
currentCS: CS is protectedMode=1 & CS { tmp:4 = (inst_next >> 16) & 0xffff; CS = tmp:2; export CS; }
|
|
|
|
segWide: is segover=0 { export 0:$(SIZE); }
|
|
segWide: CS: is segover=1 & CS { export 0:$(SIZE); }
|
|
segWide: SS: is segover=2 & SS { export 0:$(SIZE); }
|
|
segWide: DS: is segover=3 & DS { export 0:$(SIZE); }
|
|
segWide: ES: is segover=4 & ES { export 0:$(SIZE); }
|
|
segWide: FS: is segover=5 & FS { export FS_OFFSET; }
|
|
segWide: GS: is segover=6 & GS { export GS_OFFSET; }
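# Example (illustrative): with an FS override (segover=5) segWide exports
# FS_OFFSET, so under the highseg=1 Mem constructors below an access such as
# MOV EAX, FS:[0x18] resolves to FS_OFFSET + 0x18; the other overrides export
# 0 under the flat memory model used here.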
|
|
|
|
seg16: is segover=0 { export DS; }
|
|
seg16: currentCS: is segover=1 & currentCS { export currentCS; }
|
|
seg16: SS: is segover=2 & SS { export SS; }
|
|
seg16: DS: is segover=3 & DS { export DS; }
|
|
seg16: ES: is segover=4 & ES { export ES; }
|
|
seg16: FS: is segover=5 & FS { export FS; }
|
|
seg16: GS: is segover=6 & GS { export GS; }
|
|
|
|
Mem16: addr16 is (segover=0 & mod=0 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
|
|
Mem16: addr16 is (segover=0 & mod=0 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
|
|
Mem16: addr16 is (segover=0 & mod=1 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
|
|
Mem16: addr16 is (segover=0 & mod=1 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
|
|
Mem16: addr16 is (segover=0 & mod=1 & r_m=6) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
|
|
Mem16: addr16 is (segover=0 & mod=2 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
|
|
Mem16: addr16 is (segover=0 & mod=2 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
|
|
Mem16: addr16 is (segover=0 & mod=2 & r_m=6) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
|
|
Mem16: seg16^addr16 is seg16; addr16 { tmp:$(SIZE) = segment(seg16,addr16); export tmp; }
|
|
|
|
Mem: Mem16 is addrsize=0 & Mem16 { export Mem16; }
|
|
|
|
@ifdef IA64
|
|
Mem: segWide^Addr32_64 is $(LONGMODE_ON) & addrsize=1 & segWide; Addr32_64 { export Addr32_64; }
|
|
Mem: segWide^Addr32_64 is $(LONGMODE_ON) & addrsize=1 & segWide & highseg=1; Addr32_64 { tmp:8 = segWide + Addr32_64; export tmp; }
|
|
Mem: segWide^addr64 is $(LONGMODE_ON) & addrsize=2 & segWide; addr64 { export addr64; }
|
|
Mem: segWide^addr64 is $(LONGMODE_ON) & addrsize=2 & segWide & highseg=1; addr64 { tmp:$(SIZE) = segWide + addr64; export tmp; }
|
|
Mem: segWide^addr32 is $(LONGMODE_OFF) & addrsize=1 & segWide; addr32 { tmp:$(SIZE) = zext(addr32); export tmp; }
|
|
@else
|
|
Mem: segWide^addr32 is $(LONGMODE_OFF) & addrsize=1 & segWide; addr32 { export addr32; }
|
|
@endif
|
|
Mem: segWide^addr32 is $(LONGMODE_OFF) & addrsize=1 & segWide & highseg=1; addr32 { tmp:$(SIZE) = segWide + zext(addr32); export tmp; }
|
|
|
|
rel8: reloc is simm8 [ reloc=inst_next+simm8; ] { export *[ram]:$(SIZE) reloc; }
|
|
rel16: reloc is simm16 [ reloc=((inst_next >> 16) << 16) | ((inst_next + simm16) & 0xFFFF); ] { export *[ram]:$(SIZE) reloc; }
|
|
rel32: reloc is simm32 [ reloc=inst_next+simm32; ] { export *[ram]:$(SIZE) reloc; }
|
|
|
|
|
|
m8: "byte ptr" Mem is Mem { export *:1 Mem; }
|
|
m16: "word ptr" Mem is Mem { export *:2 Mem; }
|
|
m32: "dword ptr" Mem is Mem { export *:4 Mem; }
|
|
m64: "qword ptr" Mem is Mem { export *:8 Mem; }
|
|
m80: "tword ptr" Mem is Mem { export *:10 Mem; }
|
|
m128: "xmmword ptr" Mem is Mem { export *:16 Mem; }
|
|
m256: "ymmword ptr" Mem is Mem { export *:32 Mem; }
|
|
m512: "zmmword ptr" Mem is Mem { export *:64 Mem; }
|
|
|
|
m32fp: "float ptr" Mem is Mem { export *:4 Mem; }
|
|
m64fp: "double ptr" Mem is Mem { export *:8 Mem; }
|
|
m80fp: "extended double ptr" Mem is Mem { export *:10 Mem; }
|
|
|
|
##
|
|
## VSIB
|
|
##
|
|
|
|
vaddr32x: [Base + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base & ss { local tmp=zext(Base)+Xmm_vsib*ss; export tmp; }
|
|
vaddr32x: [Xmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Xmm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Xmm_vsib*ss; export tmp; }
|
|
vaddr32x: [Base + Xmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Xmm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Xmm_vsib*ss; export tmp; }
|
|
vaddr32x: [Base + Xmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Xmm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Xmm_vsib*ss; export tmp; }
|
|
|
|
vaddr32y: [Base + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base & ss { local tmp=zext(Base)+Ymm_vsib*ss; export tmp; }
|
|
vaddr32y: [Ymm_vsib*ss + simm32_32] is mod=0 & r_m=4; Ymm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Ymm_vsib*ss; export tmp; }
|
|
vaddr32y: [Base + Ymm_vsib*ss + simm8_32] is mod=1 & r_m=4; Ymm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Ymm_vsib*ss; export tmp; }
|
|
vaddr32y: [Base + Ymm_vsib*ss + simm32_32] is mod=2 & r_m=4; Ymm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Ymm_vsib*ss; export tmp; }
|
|
|
|
vaddr32z: [Base + Zmm_vsib*ss] is mod=0 & r_m=4; Zmm_vsib & Base & ss { local tmp=zext(Base)+Zmm_vsib*ss; export tmp; }
|
|
vaddr32z: [Zmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Zmm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Zmm_vsib*ss; export tmp; }
|
|
vaddr32z: [Base + Zmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Zmm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Zmm_vsib*ss; export tmp; }
|
|
vaddr32z: [Base + Zmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Zmm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Zmm_vsib*ss; export tmp; }
|
|
|
|
@ifdef IA64
|
|
vaddr64x: [Base64 + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base64 & ss { local tmp=zext(Base64)+Xmm_vsib*ss; export tmp; }
|
|
vaddr64x: [Xmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Xmm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Xmm_vsib*ss; export tmp; }
|
|
vaddr64x: [Base64 + Xmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Xmm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Xmm_vsib*ss; export tmp; }
|
|
vaddr64x: [Base64 + Xmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Xmm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Xmm_vsib*ss; export tmp; }
|
|
|
|
vaddr64y: [Base64 + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base64 & ss { local tmp=zext(Base64)+Ymm_vsib*ss; export tmp; }
|
|
vaddr64y: [Ymm_vsib*ss + simm32_64] is mod=0 & r_m=4; Ymm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Ymm_vsib*ss; export tmp; }
|
|
vaddr64y: [Base64 + Ymm_vsib*ss + simm8_64] is mod=1 & r_m=4; Ymm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Ymm_vsib*ss; export tmp; }
|
|
vaddr64y: [Base64 + Ymm_vsib*ss + simm32_64] is mod=2 & r_m=4; Ymm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Ymm_vsib*ss; export tmp; }
|
|
|
|
vaddr64z: [Base64 + Zmm_vsib*ss] is mod=0 & r_m=4; Zmm_vsib & Base64 & ss { local tmp=zext(Base64)+Zmm_vsib*ss; export tmp; }
|
|
vaddr64z: [Zmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Zmm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Zmm_vsib*ss; export tmp; }
|
|
vaddr64z: [Base64 + Zmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Zmm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Zmm_vsib*ss; export tmp; }
|
|
vaddr64z: [Base64 + Zmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Zmm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Zmm_vsib*ss; export tmp; }
|
|
@endif
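# Example (illustrative): in VSIB addressing the SIB index field names a vector
# register, so a gather with an operand like [EAX + XMM2*4] forms one address
# per lane: EAX + XMM2[i]*4 for each 32-bit element i.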
|
|
|
|
|
|
vMem32x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { export vaddr32x; }
|
|
vMem32x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { export vaddr32x; }
|
|
|
|
vMem32y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { export vaddr32y; }
|
|
vMem32y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { export vaddr32y; }
|
|
|
|
vMem32z: segWide^vaddr32z is addrsize=1 & segWide; vaddr32z { export vaddr32z; }
|
|
vMem32z: segWide^vaddr32z is addrsize=1 & segWide & highseg=1; vaddr32z { export vaddr32z; }
|
|
|
|
@ifdef IA64
|
|
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
|
|
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
|
|
vMem32x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { export vaddr64x; }
|
|
vMem32x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { export vaddr64x; }
|
|
|
|
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
|
|
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
|
|
vMem32y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { export vaddr64y; }
|
|
vMem32y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { export vaddr64y; }
|
|
|
|
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
|
|
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
|
|
vMem32z: segWide^vaddr64z is addrsize=2 & segWide; vaddr64z { export vaddr64z; }
|
|
vMem32z: segWide^vaddr64z is addrsize=2 & segWide & highseg=1; vaddr64z { export vaddr64z; }
|
|
|
|
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
|
|
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
|
|
vMem64x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { export vaddr32x; }
|
|
vMem64x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { export vaddr32x; }
|
|
|
|
vMem64x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { export vaddr64x; }
|
|
vMem64x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { export vaddr64x; }
|
|
|
|
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
|
|
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
|
|
vMem64y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { export vaddr32y; }
|
|
vMem64y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { export vaddr32y; }
|
|
|
|
vMem64y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { export vaddr64y; }
|
|
vMem64y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { export vaddr64y; }
|
|
|
|
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
|
|
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
|
|
vMem64z: segWide^vaddr32z is addrsize=1 & segWide; vaddr32z { export vaddr32z; }
|
|
vMem64z: segWide^vaddr32z is addrsize=1 & segWide & highseg=1; vaddr32z { export vaddr32z; }
|
|
|
|
vMem64z: segWide^vaddr64z is addrsize=2 & segWide; vaddr64z { export vaddr64z; }
|
|
vMem64z: segWide^vaddr64z is addrsize=2 & segWide & highseg=1; vaddr64z { export vaddr64z; }
|
|
@endif
|
|
|
|
|
|
d_vm32x: "dword ptr "^vMem32x is vMem32x { }
|
|
d_vm32y: "dword ptr "^vMem32y is vMem32y { }
|
|
# not used d_vm32z: "dword ptr "^vMem32z is vMem32z { }
|
|
|
|
@ifdef IA64
|
|
d_vm64x: "dword ptr "^vMem64x is vMem64x { }
|
|
d_vm64y: "dword ptr "^vMem64y is vMem64y { }
|
|
# not used d_vm64z: "dword ptr "^vMem64z is vMem64z { }
|
|
@endif
|
|
|
|
|
|
q_vm32x: "qword ptr "^vMem32x is vMem32x { export vMem32x; }
|
|
# not used q_vm32y: "qword ptr "^vMem32y is vMem32y { }
|
|
# not used q_vm32z: "qword ptr "^vMem32z is vMem32z { }
|
|
|
|
@ifdef IA64
|
|
q_vm64x: "qword ptr "^vMem64x is vMem64x { export vMem64x; }
|
|
q_vm64y: "qword ptr "^vMem64y is vMem64y { export vMem64y; }
|
|
q_vm64z: "qword ptr "^vMem64z is vMem64z { export vMem64z; }
|
|
@endif
|
|
|
|
x_vm32x: "xmmword ptr "^vMem32x is vMem32x { export vMem32x; }
|
|
y_vm32y: "ymmword ptr "^vMem32y is vMem32y { export vMem32y; }
|
|
z_vm32z: "zmmword ptr "^vMem32z is vMem32z { export vMem32z; }
|
|
|
|
@ifdef IA64
|
|
x_vm64x: "xmmword ptr "^vMem64x is vMem64x { export vMem64x; }
|
|
y_vm64y: "ymmword ptr "^vMem64y is vMem64y { export vMem64y; }
|
|
z_vm64z: "zmmword ptr "^vMem64z is vMem64z { export vMem64z; }
|
|
@endif
|
|
|
|
Reg32_m8: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
|
|
Reg32_m8: m8 is m8 { local tmp:4 = zext(m8); export tmp; }
|
|
Reg32_m16: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
|
|
Reg32_m16: m16 is m16 { local tmp:4 = zext(m16); export tmp; }
|
|
|
|
mmxreg2_m64: mmxreg2 is mod=3 & mmxreg2 { export mmxreg2; }
|
|
mmxreg2_m64: m64 is m64 { export m64; }
|
|
|
|
XmmReg2_m8: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m8: m8 is m8 { local tmp:16 = zext(m8); export tmp; }
|
|
XmmReg2_m16: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m16: m16 is m16 { local tmp:16 = zext(m16); export tmp; }
|
|
XmmReg2_m32: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m32: m32 is m32 { local tmp:16 = zext(m32); export tmp; }
|
|
XmmReg2_m64: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m64: m64 is m64 { local tmp:16 = zext(m64); export tmp; }
|
|
XmmReg2_m128: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m128: m128 is m128 { export m128; }
|
|
|
|
YmmReg2_m256: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
|
|
YmmReg2_m256: m256 is m256 { export m256; }
|
|
|
|
ZmmReg2_m512: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
|
|
ZmmReg2_m512: m512 is m512 { export m512; }
|
|
|
|
# used to extend ZmmReg2 if not assigning to m128
|
|
XmmReg2_m128_extend: XmmReg2 is mod=3 & XmmReg2 & ZmmReg2 { ZmmReg2 = zext(XmmReg2); }
|
|
XmmReg2_m128_extend: XmmReg2 is mod & XmmReg2 { }
|
|
|
|
m16bcst32: m16 is m16 { local tmp:2 = m16; BCST4[0,16] = tmp; BCST4[16,16] = tmp; export BCST4; }
|
|
|
|
m16bcst64: m16 is m16 { local tmp:2 = m16; BCST8[0,16] = tmp; BCST8[16,16] = tmp; BCST8[32,16] = tmp; BCST8[48,16] = tmp; export BCST8; }
|
|
m16bcst128: m16 is m16 {
|
|
local tmp:2 = m16;
|
|
BCST16[0,16] = tmp; BCST16[16,16] = tmp; BCST16[32,16] = tmp; BCST16[48,16] = tmp;
|
|
BCST16[64,16] = tmp; BCST16[80,16] = tmp; BCST16[96,16] = tmp; BCST16[112,16] = tmp;
|
|
export BCST16;
|
|
}
|
|
m16bcst256: m16 is m16 {
|
|
local tmp:2 = m16;
|
|
BCST32[0,16] = tmp; BCST32[16,16] = tmp; BCST32[32,16] = tmp; BCST32[48,16] = tmp;
|
|
BCST32[64,16] = tmp; BCST32[80,16] = tmp; BCST32[96,16] = tmp; BCST32[112,16] = tmp;
|
|
BCST32[128,16] = tmp; BCST32[144,16] = tmp; BCST32[160,16] = tmp; BCST32[176,16] = tmp;
|
|
BCST32[192,16] = tmp; BCST32[208,16] = tmp; BCST32[224,16] = tmp; BCST32[240,16] = tmp;
|
|
export BCST32;
|
|
}
|
|
m16bcst512: m16 is m16 {
|
|
local tmp:2 = m16;
|
|
BCST64[0,16] = tmp; BCST64[16,16] = tmp; BCST64[32,16] = tmp; BCST64[48,16] = tmp;
|
|
BCST64[64,16] = tmp; BCST64[80,16] = tmp; BCST64[96,16] = tmp; BCST64[112,16] = tmp;
|
|
BCST64[128,16] = tmp; BCST64[144,16] = tmp; BCST64[160,16] = tmp; BCST64[176,16] = tmp;
|
|
BCST64[192,16] = tmp; BCST64[208,16] = tmp; BCST64[224,16] = tmp; BCST64[240,16] = tmp;
|
|
BCST64[256,16] = tmp; BCST64[272,16] = tmp; BCST64[288,16] = tmp; BCST64[304,16] = tmp;
|
|
BCST64[320,16] = tmp; BCST64[336,16] = tmp; BCST64[352,16] = tmp; BCST64[368,16] = tmp;
|
|
BCST64[384,16] = tmp; BCST64[400,16] = tmp; BCST64[416,16] = tmp; BCST64[432,16] = tmp;
|
|
BCST64[448,16] = tmp; BCST64[464,16] = tmp; BCST64[480,16] = tmp; BCST64[496,16] = tmp;
|
|
export BCST64;
|
|
}
|
|
|
|
m32bcst64: m32 is m32 { local tmp:4 = m32; BCST8[0,32] = tmp; BCST8[32,32] = tmp; export BCST8; }
|
|
m32bcst128: m32 is m32 { local tmp:4 = m32; BCST16[0,32] = tmp; BCST16[32,32] = tmp; BCST16[64,32] = tmp; BCST16[96,32] = tmp; export BCST16; }
|
|
m32bcst256: m32 is m32 {
|
|
local tmp:4 = m32;
|
|
BCST32[0,32] = tmp; BCST32[32,32] = tmp; BCST32[64,32] = tmp; BCST32[96,32] = tmp;
|
|
BCST32[128,32] = tmp; BCST32[160,32] = tmp; BCST32[192,32] = tmp; BCST32[224,32] = tmp;
|
|
export BCST32;
|
|
}
|
|
m32bcst512: m32 is m32 {
|
|
local tmp:4 = m32;
|
|
BCST64[0,32] = tmp; BCST64[32,32] = tmp; BCST64[64,32] = tmp; BCST64[96,32] = tmp;
|
|
BCST64[128,32] = tmp; BCST64[160,32] = tmp; BCST64[192,32] = tmp; BCST64[224,32] = tmp;
|
|
BCST64[256,32] = tmp; BCST64[288,32] = tmp; BCST64[320,32] = tmp; BCST64[352,32] = tmp;
|
|
BCST64[384,32] = tmp; BCST64[416,32] = tmp; BCST64[448,32] = tmp; BCST64[480,32] = tmp;
|
|
export BCST64;
|
|
}
|
|
|
|
m64bcst128: m64 is m64 { local tmp:8 = m64; BCST16[0,64] = tmp; BCST16[64,64] = tmp; export BCST16; }
|
|
m64bcst256: m64 is m64 { local tmp:8 = m64; BCST32[0,64] = tmp; BCST32[64,64] = tmp; BCST32[128,64] = tmp; BCST32[192,64] = tmp; export BCST32; }
|
|
m64bcst512: m64 is m64 {
|
|
local tmp:8 = m64;
|
|
BCST64[0,64] = tmp; BCST64[64,64] = tmp; BCST64[128,64] = tmp; BCST64[192,64] = tmp;
|
|
BCST64[256,64] = tmp; BCST64[320,64] = tmp; BCST64[384,64] = tmp; BCST64[448,64] = tmp;
|
|
export BCST64;
|
|
}
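# Worked example (illustrative): the mNbcstM constructors implement EVEX
# embedded broadcast, e.g. m32bcst128 reads one dword and replicates it into
# all four 32-bit lanes of BCST16 (assembly syntax "[mem]{1to4}"); the
# *_mNbcst operand tables below select it when evexB=1.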
|
|
|
|
XmmReg2_m32_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m32_m16bcst: m32 is m32 & evexDisp8N { local tmp:16 = zext(m32); export tmp; }
|
|
XmmReg2_m32_m16bcst: m16bcst32 is evexB=1 & m16bcst32 & evexDisp8N { local tmp:16 = zext(m16bcst32); export tmp; }
|
|
|
|
XmmReg2_m64_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m64_m16bcst: m64 is m64 & evexDisp8N { local tmp:16 = zext(m64); export tmp; }
|
|
XmmReg2_m64_m16bcst: m16bcst64 is evexB=1 & m16bcst64 & evexDisp8N { local tmp:16 = zext(m16bcst64); export tmp; }
|
|
|
|
XmmReg2_m64_m32bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m64_m32bcst: m64 is m64 & evexDisp8N { local tmp:16 = zext(m64); export tmp; }
|
|
XmmReg2_m64_m32bcst: m32bcst64 is evexB=1 & m32bcst64 & evexDisp8N { local tmp:16 = zext(m32bcst64); export tmp; }
|
|
|
|
XmmReg2_m128_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m128_m16bcst: m128 is m128 & evexDisp8N { export m128; }
|
|
XmmReg2_m128_m16bcst: m16bcst128 is evexB=1 & m16bcst128 & evexDisp8N { export m16bcst128; }
|
|
|
|
XmmReg2_m128_m32bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m128_m32bcst: m128 is m128 & evexDisp8N { export m128; }
|
|
XmmReg2_m128_m32bcst: m32bcst128 is evexB=1 & m32bcst128 & evexDisp8N { export m32bcst128; }
|
|
|
|
XmmReg2_m128_m64bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
|
|
XmmReg2_m128_m64bcst: m128 is m128 & evexDisp8N { export m128; }
|
|
XmmReg2_m128_m64bcst: m64bcst128 is evexB=1 & m64bcst128 & evexDisp8N { export m64bcst128; }
|
|
|
|
YmmReg2_m256_m16bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
|
|
YmmReg2_m256_m16bcst: m256 is m256 & evexDisp8N { export m256; }
|
|
YmmReg2_m256_m16bcst: m16bcst256 is evexB=1 & m16bcst256 & evexDisp8N { export m16bcst256; }
|
|
|
|
YmmReg2_m256_m32bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
|
|
YmmReg2_m256_m32bcst: m256 is m256 & evexDisp8N { export m256; }
|
|
YmmReg2_m256_m32bcst: m32bcst256 is evexB=1 & m32bcst256 & evexDisp8N { export m32bcst256; }
|
|
|
|
YmmReg2_m256_m64bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
|
|
YmmReg2_m256_m64bcst: m256 is m256 & evexDisp8N { export m256; }
|
|
YmmReg2_m256_m64bcst: m64bcst256 is evexB=1 & m64bcst256 & evexDisp8N { export m64bcst256; }
|
|
|
|
ZmmReg2_m512_m16bcst: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
|
|
ZmmReg2_m512_m16bcst: m512 is m512 & evexDisp8N { export m512; }
|
|
ZmmReg2_m512_m16bcst: m16bcst512 is evexB=1 & m16bcst512 & evexDisp8N { export m16bcst512; }
|
|
|
|
ZmmReg2_m512_m32bcst: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
|
|
ZmmReg2_m512_m32bcst: m512 is m512 & evexDisp8N { export m512; }
|
|
ZmmReg2_m512_m32bcst: m32bcst512 is evexB=1 & m32bcst512 & evexDisp8N { export m32bcst512; }
|
|
|
|
ZmmReg2_m512_m64bcst: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
|
|
ZmmReg2_m512_m64bcst: m512 is m512 & evexDisp8N { export m512; }
|
|
ZmmReg2_m512_m64bcst: m64bcst512 is evexB=1 & m64bcst512 & evexDisp8N { export m64bcst512; }
|
|
|
|
moffs8: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:1 tmp; }
|
|
moffs8: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:1 tmp; }
|
|
moffs8: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:1 imm32; }
|
|
@ifdef IA64
|
|
moffs8: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:1 tmp; }
|
|
moffs8: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:1 imm64; }
|
|
@endif
|
|
moffs16: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:2 tmp; }
|
|
moffs16: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:2 tmp; }
|
|
moffs16: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:2 imm32; }
|
|
@ifdef IA64
|
|
moffs16: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:2 tmp; }
|
|
moffs16: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:2 imm64; }
|
|
@endif
|
|
|
|
moffs32: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:4 tmp; }
|
|
moffs32: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:4 imm32; }
|
|
moffs32: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:4 tmp; }
|
|
@ifdef IA64
|
|
moffs32: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:4 imm64; }
|
|
moffs32: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:4 tmp; }
|
|
@endif
|
|
|
|
@ifdef IA64
|
|
moffs64: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:8 imm64; }
|
|
moffs64: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:8 tmp; }
|
|
moffs64: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:8 imm32; }
|
|
moffs64: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:8 = segWide + imm32; export *:8 tmp; }
|
|
@endif
|
|
# TODO: segment register offset in 64-bit mode might not be right
|
|
|
|
# String memory access
|
|
dseSI1: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 1-2*zext(DF); export *:1 tmp; }
|
|
dseSI1: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 1-2*zext(DF); export *:1 tmp; }
|
|
dseSI2: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 2-4*zext(DF); export *:2 tmp; }
|
|
dseSI2: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 2-4*zext(DF); export *:2 tmp; }
|
|
dseSI4: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 4-8*zext(DF); export *:4 tmp; }
|
|
dseSI4: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 4-8*zext(DF); export *:4 tmp; }
|
|
eseDI1: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 1-2*zext(DF); export *:1 tmp; }
|
|
eseDI1: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI=EDI+1-2*zext(DF); export *:1 tmp; }
|
|
eseDI2: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 2-4*zext(DF); export *:2 tmp; }
|
|
eseDI2: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI=EDI+2-4*zext(DF); export *:2 tmp; }
|
|
eseDI4: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 4-8*zext(DF); export *:4 tmp; }
|
|
eseDI4: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI=EDI+4-8*zext(DF); export *:4 tmp; }
|
|
|
|
@ifdef IA64
|
|
# quadword string functions
|
|
dseSI8: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 8-16*zext(DF); export *:8 tmp; }
|
|
dseSI8: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 8-16*zext(DF); export *:8 tmp; }
|
|
eseDI8: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 8-16*zext(DF); export *:8 tmp; }
|
|
eseDI8: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI=EDI+8-16*zext(DF); export *:8 tmp; }
|
|
|
|
dseSI1: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 1-2*zext(DF); export *:1 tmp; }
|
|
dseSI2: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 2-4*zext(DF); export *:2 tmp; }
|
|
dseSI4: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 4-8*zext(DF); export *:4 tmp; }
|
|
dseSI8: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 8-16*zext(DF); export *:8 tmp; }
|
|
eseDI1: RDI is addrsize=2 & RDI { local tmp = RDI; RDI=RDI+1-2*zext(DF); export *:1 tmp; }
|
|
eseDI2: RDI is addrsize=2 & RDI { local tmp = RDI; RDI=RDI+2-4*zext(DF); export *:2 tmp; }
|
|
eseDI4: RDI is addrsize=2 & RDI { local tmp = RDI; RDI=RDI+4-8*zext(DF); export *:4 tmp; }
|
|
eseDI8: RDI is addrsize=2 & RDI { local tmp = RDI; RDI=RDI+8-16*zext(DF); export *:8 tmp; }
|
|
@endif
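# Worked example (illustrative): dseSI1 yields the byte at the source-string
# address and post-adjusts the index by 1 - 2*zext(DF), i.e. +1 when DF=0 and
# -1 when DF=1; the wider variants step by 2, 4, or 8 in the same way.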
|
|
|
|
rm8: Rmr8 is mod=3 & Rmr8 { export Rmr8; }
|
|
rm8: "byte ptr" Mem is Mem { export *:1 Mem; }
|
|
|
|
rm16: Rmr16 is mod=3 & Rmr16 { export Rmr16; }
|
|
rm16: "word ptr" Mem is Mem { export *:2 Mem; }
|
|
|
|
rm32: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
|
|
rm32: "dword ptr" Mem is Mem { export *:4 Mem; }
|
|
|
|
@ifdef IA64
|
|
rm64: Rmr64 is mod=3 & Rmr64 { export Rmr64; }
|
|
rm64: "qword ptr" Mem is Mem { export *:8 Mem; }
|
|
@endif
|
|
|
|
n1: one is epsilon [ one = 1; ] { export *[const]:1 one; }
|
|
|
|
@ifdef IA64
|
|
# Handle zero extension in 64-bit mode for 32-bit destination registers
|
|
check_Reg32_dest: is rexRprefix=0 & reg32 & reg64 { reg64 = zext(reg32); }
|
|
check_Reg32_dest: is rexRprefix=1 & reg32_x & reg64_x { reg64_x = zext(reg32_x); }
|
|
check_Rmr32_dest: is rexBprefix=0 & r32 & r64 { r64 = zext(r32); }
|
|
check_Rmr32_dest: is rexBprefix=1 & r32_x & r64_x { r64_x = zext(r32_x); }
|
|
check_rm32_dest: is mod=3 & check_Rmr32_dest { build check_Rmr32_dest; }
|
|
check_EAX_dest: is epsilon { RAX = zext(EAX); }
|
|
check_EDX_dest: is epsilon { RDX = zext(EDX); }
|
|
check_vexVVVV_r32_dest: is bit64=1 & vexVVVV_r64 & vexVVVV_r32 { vexVVVV_r64 = zext(vexVVVV_r32); }
|
|
@else
|
|
check_Reg32_dest: is epsilon { }
|
|
check_Rmr32_dest: is epsilon { }
|
|
check_EAX_dest: is epsilon { }
|
|
check_EDX_dest: is epsilon { }
|
|
check_vexVVVV_r32_dest: is epsilon { }
|
|
@endif
|
|
check_rm32_dest: is epsilon { }
|
|
|
|
|
|
ptr1616: reloc is protectedMode=0 & imm16; j16 [ reloc = j16*0x10 + imm16; ] { CS = j16; export *[ram]:4 reloc; }
|
|
ptr1616: reloc is protectedMode=1 & imm16; j16 [ reloc = j16*0x10000 + imm16; ] { CS = j16; export *[ram]:4 reloc; }
|
|
ptr1632: j16":"imm32 is imm32; j16 { CS = j16; export *:4 imm32; }
|
|
|
|
# conditions
|
|
|
|
cc: "O" is cond=0 { export OF; }
|
|
cc: "NO" is cond=1 { local tmp = !OF; export tmp; }
|
|
cc: "C" is cond=2 { export CF; }
|
|
cc: "NC" is cond=3 { local tmp = !CF; export tmp; }
|
|
cc: "Z" is cond=4 { export ZF; }
|
|
cc: "NZ" is cond=5 { local tmp = !ZF; export tmp; }
|
|
cc: "BE" is cond=6 { local tmp = CF || ZF; export tmp; }
|
|
cc: "A" is cond=7 { local tmp = !(CF || ZF); export tmp; }
|
|
cc: "S" is cond=8 { export SF; }
|
|
cc: "NS" is cond=9 { local tmp = !SF; export tmp; }
|
|
cc: "P" is cond=10 { export PF; }
|
|
cc: "NP" is cond=11 { local tmp = !PF; export tmp; }
|
|
cc: "L" is cond=12 { local tmp = OF != SF; export tmp; }
|
|
cc: "GE" is cond=13 { local tmp = OF == SF; export tmp; }
|
|
cc: "LE" is cond=14 { local tmp = ZF || (OF != SF); export tmp; }
|
|
cc: "G" is cond=15 { local tmp = !ZF && (OF == SF); export tmp; }
|
|
|
|
# repeat prefixes
|
|
rep: ".REP" is ((repprefx=1 & repneprefx=0)|(repprefx=0 & repneprefx=1)) & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
|
|
rep: ".REP" is ((repprefx=1 & repneprefx=0)|(repprefx=0 & repneprefx=1)) & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
|
|
@ifdef IA64
|
|
rep: ".REP" is ((repprefx=1 & repneprefx=0)|(repprefx=0 & repneprefx=1)) & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
|
|
@endif
|
|
rep: is repprefx=0 & repneprefx=0 { }
|
|
|
|
reptail: is ((repprefx=1 & repneprefx=0)|(repprefx=0 & repneprefx=1)) { goto inst_start; }
|
|
reptail: is repprefx=0 & repneprefx=0 { }
|
|
|
|
repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
|
|
repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
|
|
@ifdef IA64
|
|
repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
|
|
@endif
|
|
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
|
|
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
|
|
@ifdef IA64
|
|
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
|
|
@endif
|
|
repe: is repprefx=0 & repneprefx=0 { }
|
|
|
|
repetail: is repprefx=1 & repneprefx=0 { if (ZF) goto inst_start; }
|
|
repetail: is repneprefx=1 & repprefx=0 { if (!ZF) goto inst_start; }
|
|
repetail: is repprefx=0 & repneprefx=0 { }
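# Example (illustrative): a repeated string instruction is assembled from a
# prefix and a tail around the element body, roughly rep^BODY^reptail: rep
# exits to inst_next when (E/R)CX is zero and decrements it otherwise, and
# reptail branches back to inst_start; repe/repetail additionally test ZF for
# CMPS/SCAS.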
|
|
|
|
# XACQUIRE/XRELEASE prefix
|
|
xacq_xrel_prefx: ".XACQUIRE" is xacquireprefx=1 & xreleaseprefx=0 { XACQUIRE(); }
|
|
xacq_xrel_prefx: ".XRELEASE" is xacquireprefx=0 & xreleaseprefx=1 { XRELEASE(); }
|
|
xacq_xrel_prefx: is epsilon { }
|
|
|
|
#the XRELEASE prefix can be used with several variants of MOV (without the LOCK prefix)
|
|
xrelease: ".XRELEASE" is xacquireprefx=0 & xreleaseprefx=1 { XRELEASE(); }
|
|
xrelease: is epsilon { }
|
|
|
|
#XCHG with a memory destination asserts a LOCK signal whether or not there is a LOCK prefix (f0)
|
|
#"alwaysLock" constructor will place "LOCK" in the disassembly if the prefix occurs
|
|
alwaysLock: ".LOCK" is lockprefx=1 { LOCK(); }
|
|
alwaysLock: is epsilon { LOCK(); }
|
|
|
|
#check for LOCK prefix and the optional XACQUIRE/XRELEASE
|
|
lockx: xacq_xrel_prefx^".LOCK" is lockprefx=1 & xacq_xrel_prefx { build xacq_xrel_prefx; LOCK(); }
|
|
lockx: is epsilon { }
|
|
|
|
#"unlock" constructor is used to pair every LOCK pcodeop with a matching UNLOCK pcodeop
|
|
unlock: is lockprefx=1 { UNLOCK(); }
|
|
unlock: is epsilon { }
|
|
|
|
KReg_reg: opmaskreg is opmaskreg { export opmaskreg; }
|
|
KReg_rm: opmaskrm is opmaskrm { export opmaskrm; }
|
|
# not used vexVVVV_KReg: evexVopmask is evexVopmask { export evexVopmask; }
|
|
vex1VVV_KReg: evexVopmask is evexVopmask & vexHighV=0 { export evexVopmask; }
|
|
|
|
XmmMaskMode: is evexZ=0 { }
|
|
XmmMaskMode: "{z}" is evexZ=1 { XmmMask=0; }
|
|
|
|
YmmMaskMode: is evexZ=0 { }
|
|
YmmMaskMode: "{z}" is evexZ=1 { YmmMask=0; }
|
|
|
|
ZmmMaskMode: is evexZ=0 { }
|
|
ZmmMaskMode: "{z}" is evexZ=1 { ZmmMask=0; }
|
|
|
|
AVXOpMask: "{"^evexOpmask^"}" is evexOpmask { export evexOpmask; }
|
|
AVXOpMask: is evexOpmask=0 { local tmp:8 = 0xffffffffffffffff; export *[const]:8 tmp; }
|
|
# Z=0: merge masking
|
|
# Z=1: zero masking
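# Example (illustrative, assuming the AVX-512 instruction constructors preload
# XmmMask/YmmMask/ZmmMask with the destination before building these tables):
# with "{k1}" merge masking a lane whose k1 bit is clear keeps its old value
# from the *Mask register, while "{k1}{z}" zeroes that lane because *MaskMode
# clears the *Mask register first.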
|
|
XmmOpMask: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode {
|
|
export AVXOpMask;
|
|
}
|
|
|
|
XmmOpMask8: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(XmmResult[0,8], mask[0,1], XmmResult[0,8], XmmMask[0,8]);
|
|
conditionalAssign(XmmResult[8,8], mask[1,1], XmmResult[8,8], XmmMask[8,8]);
|
|
conditionalAssign(XmmResult[16,8], mask[2,1], XmmResult[16,8], XmmMask[16,8]);
|
|
conditionalAssign(XmmResult[24,8], mask[3,1], XmmResult[24,8], XmmMask[24,8]);
|
|
conditionalAssign(XmmResult[32,8], mask[4,1], XmmResult[32,8], XmmMask[32,8]);
|
|
conditionalAssign(XmmResult[40,8], mask[5,1], XmmResult[40,8], XmmMask[40,8]);
|
|
conditionalAssign(XmmResult[48,8], mask[6,1], XmmResult[48,8], XmmMask[48,8]);
|
|
conditionalAssign(XmmResult[56,8], mask[7,1], XmmResult[56,8], XmmMask[56,8]);
|
|
conditionalAssign(XmmResult[64,8], mask[8,1], XmmResult[64,8], XmmMask[64,8]);
|
|
conditionalAssign(XmmResult[72,8], mask[9,1], XmmResult[72,8], XmmMask[72,8]);
|
|
conditionalAssign(XmmResult[80,8], mask[10,1], XmmResult[80,8], XmmMask[80,8]);
|
|
conditionalAssign(XmmResult[88,8], mask[11,1], XmmResult[88,8], XmmMask[88,8]);
|
|
conditionalAssign(XmmResult[96,8], mask[12,1], XmmResult[96,8], XmmMask[96,8]);
|
|
conditionalAssign(XmmResult[104,8], mask[13,1], XmmResult[104,8], XmmMask[104,8]);
|
|
conditionalAssign(XmmResult[112,8], mask[14,1], XmmResult[112,8], XmmMask[112,8]);
|
|
conditionalAssign(XmmResult[120,8], mask[15,1], XmmResult[120,8], XmmMask[120,8]);
|
|
}
|
|
|
|
XmmOpMask8: is evexOpmask=0 {
|
|
}
|
|
|
|
XmmOpMask16: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(XmmResult[0,16], mask[0,1], XmmResult[0,16], XmmMask[0,16]);
|
|
conditionalAssign(XmmResult[16,16], mask[1,1], XmmResult[16,16], XmmMask[16,16]);
|
|
conditionalAssign(XmmResult[32,16], mask[2,1], XmmResult[32,16], XmmMask[32,16]);
|
|
conditionalAssign(XmmResult[48,16], mask[3,1], XmmResult[48,16], XmmMask[48,16]);
|
|
conditionalAssign(XmmResult[64,16], mask[4,1], XmmResult[64,16], XmmMask[64,16]);
|
|
conditionalAssign(XmmResult[80,16], mask[5,1], XmmResult[80,16], XmmMask[80,16]);
|
|
conditionalAssign(XmmResult[96,16], mask[6,1], XmmResult[96,16], XmmMask[96,16]);
|
|
conditionalAssign(XmmResult[112,16], mask[7,1], XmmResult[112,16], XmmMask[112,16]);
|
|
}
|
|
|
|
XmmOpMask16: is evexOpmask=0 {
|
|
}
|
|
|
|
|
|
XmmOpMask32: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(XmmResult[0,32], mask[0,1], XmmResult[0,32], XmmMask[0,32]);
|
|
conditionalAssign(XmmResult[32,32], mask[1,1], XmmResult[32,32], XmmMask[32,32]);
|
|
conditionalAssign(XmmResult[64,32], mask[2,1], XmmResult[64,32], XmmMask[64,32]);
|
|
conditionalAssign(XmmResult[96,32], mask[3,1], XmmResult[96,32], XmmMask[96,32]);
|
|
}
|
|
|
|
XmmOpMask32: is evexOpmask=0 {
|
|
}
|
|
|
|
XmmOpMask64: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(XmmResult[0,64], mask[0,1], XmmResult[0,64], XmmMask[0,64]);
|
|
conditionalAssign(XmmResult[64,64], mask[1,1], XmmResult[64,64], XmmMask[64,64]);
|
|
}
|
|
|
|
XmmOpMask64: is evexOpmask=0 {
|
|
}
|
|
|
|
YmmOpMask: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode {
|
|
export AVXOpMask;
|
|
}
|
|
|
|
YmmOpMask8: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(YmmResult[0,8], mask[0,1], YmmResult[0,8], YmmMask[0,8]);
|
|
conditionalAssign(YmmResult[8,8], mask[1,1], YmmResult[8,8], YmmMask[8,8]);
|
|
conditionalAssign(YmmResult[16,8], mask[2,1], YmmResult[16,8], YmmMask[16,8]);
|
|
conditionalAssign(YmmResult[24,8], mask[3,1], YmmResult[24,8], YmmMask[24,8]);
|
|
conditionalAssign(YmmResult[32,8], mask[4,1], YmmResult[32,8], YmmMask[32,8]);
|
|
conditionalAssign(YmmResult[40,8], mask[5,1], YmmResult[40,8], YmmMask[40,8]);
|
|
conditionalAssign(YmmResult[48,8], mask[6,1], YmmResult[48,8], YmmMask[48,8]);
|
|
conditionalAssign(YmmResult[56,8], mask[7,1], YmmResult[56,8], YmmMask[56,8]);
|
|
conditionalAssign(YmmResult[64,8], mask[8,1], YmmResult[64,8], YmmMask[64,8]);
|
|
conditionalAssign(YmmResult[72,8], mask[9,1], YmmResult[72,8], YmmMask[72,8]);
|
|
conditionalAssign(YmmResult[80,8], mask[10,1], YmmResult[80,8], YmmMask[80,8]);
|
|
conditionalAssign(YmmResult[88,8], mask[11,1], YmmResult[88,8], YmmMask[88,8]);
|
|
conditionalAssign(YmmResult[96,8], mask[12,1], YmmResult[96,8], YmmMask[96,8]);
|
|
conditionalAssign(YmmResult[104,8], mask[13,1], YmmResult[104,8], YmmMask[104,8]);
|
|
conditionalAssign(YmmResult[112,8], mask[14,1], YmmResult[112,8], YmmMask[112,8]);
|
|
conditionalAssign(YmmResult[120,8], mask[15,1], YmmResult[120,8], YmmMask[120,8]);
|
|
conditionalAssign(YmmResult[128,8], mask[16,1], YmmResult[128,8], YmmMask[128,8]);
|
|
conditionalAssign(YmmResult[136,8], mask[17,1], YmmResult[136,8], YmmMask[136,8]);
|
|
conditionalAssign(YmmResult[144,8], mask[18,1], YmmResult[144,8], YmmMask[144,8]);
|
|
conditionalAssign(YmmResult[152,8], mask[19,1], YmmResult[152,8], YmmMask[152,8]);
|
|
conditionalAssign(YmmResult[160,8], mask[20,1], YmmResult[160,8], YmmMask[160,8]);
|
|
conditionalAssign(YmmResult[168,8], mask[21,1], YmmResult[168,8], YmmMask[168,8]);
|
|
conditionalAssign(YmmResult[176,8], mask[22,1], YmmResult[176,8], YmmMask[176,8]);
|
|
conditionalAssign(YmmResult[184,8], mask[23,1], YmmResult[184,8], YmmMask[184,8]);
|
|
conditionalAssign(YmmResult[192,8], mask[24,1], YmmResult[192,8], YmmMask[192,8]);
|
|
conditionalAssign(YmmResult[200,8], mask[25,1], YmmResult[200,8], YmmMask[200,8]);
|
|
conditionalAssign(YmmResult[208,8], mask[26,1], YmmResult[208,8], YmmMask[208,8]);
|
|
conditionalAssign(YmmResult[216,8], mask[27,1], YmmResult[216,8], YmmMask[216,8]);
|
|
conditionalAssign(YmmResult[224,8], mask[28,1], YmmResult[224,8], YmmMask[224,8]);
|
|
conditionalAssign(YmmResult[232,8], mask[29,1], YmmResult[232,8], YmmMask[232,8]);
conditionalAssign(YmmResult[240,8], mask[30,1], YmmResult[240,8], YmmMask[240,8]);
conditionalAssign(YmmResult[248,8], mask[31,1], YmmResult[248,8], YmmMask[248,8]);
|
|
}
|
|
|
|
YmmOpMask8: is evexOpmask=0 {
|
|
}
|
|
|
|
YmmOpMask16: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(YmmResult[0,16], mask[0,1], YmmResult[0,16], YmmMask[0,16]);
|
|
conditionalAssign(YmmResult[16,16], mask[1,1], YmmResult[16,16], YmmMask[16,16]);
|
|
conditionalAssign(YmmResult[32,16], mask[2,1], YmmResult[32,16], YmmMask[32,16]);
|
|
conditionalAssign(YmmResult[48,16], mask[3,1], YmmResult[48,16], YmmMask[48,16]);
|
|
conditionalAssign(YmmResult[64,16], mask[4,1], YmmResult[64,16], YmmMask[64,16]);
|
|
conditionalAssign(YmmResult[80,16], mask[5,1], YmmResult[80,16], YmmMask[80,16]);
|
|
conditionalAssign(YmmResult[96,16], mask[6,1], YmmResult[96,16], YmmMask[96,16]);
|
|
conditionalAssign(YmmResult[112,16], mask[7,1], YmmResult[112,16], YmmMask[112,16]);
|
|
conditionalAssign(YmmResult[128,16], mask[8,1], YmmResult[128,16], YmmMask[128,16]);
|
|
conditionalAssign(YmmResult[144,16], mask[9,1], YmmResult[144,16], YmmMask[144,16]);
|
|
conditionalAssign(YmmResult[160,16], mask[10,1], YmmResult[160,16], YmmMask[160,16]);
|
|
conditionalAssign(YmmResult[176,16], mask[11,1], YmmResult[176,16], YmmMask[176,16]);
|
|
conditionalAssign(YmmResult[192,16], mask[12,1], YmmResult[192,16], YmmMask[192,16]);
|
|
conditionalAssign(YmmResult[208,16], mask[13,1], YmmResult[208,16], YmmMask[208,16]);
|
|
conditionalAssign(YmmResult[224,16], mask[14,1], YmmResult[224,16], YmmMask[224,16]);
|
|
conditionalAssign(YmmResult[240,16], mask[15,1], YmmResult[240,16], YmmMask[240,16]);
|
|
}
|
|
|
|
YmmOpMask16: is evexOpmask=0 {
|
|
}
|
|
|
|
YmmOpMask32: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(YmmResult[0,32], mask[0,1], YmmResult[0,32], YmmMask[0,32]);
|
|
conditionalAssign(YmmResult[32,32], mask[1,1], YmmResult[32,32], YmmMask[32,32]);
|
|
conditionalAssign(YmmResult[64,32], mask[2,1], YmmResult[64,32], YmmMask[64,32]);
|
|
conditionalAssign(YmmResult[96,32], mask[3,1], YmmResult[96,32], YmmMask[96,32]);
|
|
conditionalAssign(YmmResult[128,32], mask[4,1], YmmResult[128,32], YmmMask[128,32]);
|
|
conditionalAssign(YmmResult[160,32], mask[5,1], YmmResult[160,32], YmmMask[160,32]);
|
|
conditionalAssign(YmmResult[192,32], mask[6,1], YmmResult[192,32], YmmMask[192,32]);
|
|
conditionalAssign(YmmResult[224,32], mask[7,1], YmmResult[224,32], YmmMask[224,32]);
|
|
}
|
|
|
|
YmmOpMask32: is evexOpmask=0 {
|
|
}
|
|
|
|
YmmOpMask64: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(YmmResult[0,64], mask[0,1], YmmResult[0,64], YmmMask[0,64]);
|
|
conditionalAssign(YmmResult[64,64], mask[1,1], YmmResult[64,64], YmmMask[64,64]);
|
|
conditionalAssign(YmmResult[128,64], mask[2,1], YmmResult[128,64], YmmMask[128,64]);
|
|
conditionalAssign(YmmResult[192,64], mask[3,1], YmmResult[192,64], YmmMask[192,64]);
|
|
}
|
|
|
|
YmmOpMask64: is evexOpmask=0 {
|
|
}
|
|
|
|
ZmmOpMask: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode {
|
|
export AVXOpMask;
|
|
}
|
|
|
|
ZmmOpMask8: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(ZmmResult[0,8], mask[0,1], ZmmResult[0,8], ZmmMask[0,8]);
|
|
conditionalAssign(ZmmResult[8,8], mask[1,1], ZmmResult[8,8], ZmmMask[8,8]);
|
|
conditionalAssign(ZmmResult[16,8], mask[2,1], ZmmResult[16,8], ZmmMask[16,8]);
|
|
conditionalAssign(ZmmResult[24,8], mask[3,1], ZmmResult[24,8], ZmmMask[24,8]);
|
|
conditionalAssign(ZmmResult[32,8], mask[4,1], ZmmResult[32,8], ZmmMask[32,8]);
|
|
conditionalAssign(ZmmResult[40,8], mask[5,1], ZmmResult[40,8], ZmmMask[40,8]);
|
|
conditionalAssign(ZmmResult[48,8], mask[6,1], ZmmResult[48,8], ZmmMask[48,8]);
|
|
conditionalAssign(ZmmResult[56,8], mask[7,1], ZmmResult[56,8], ZmmMask[56,8]);
|
|
conditionalAssign(ZmmResult[64,8], mask[8,1], ZmmResult[64,8], ZmmMask[64,8]);
|
|
conditionalAssign(ZmmResult[72,8], mask[9,1], ZmmResult[72,8], ZmmMask[72,8]);
|
|
conditionalAssign(ZmmResult[80,8], mask[10,1], ZmmResult[80,8], ZmmMask[80,8]);
|
|
conditionalAssign(ZmmResult[88,8], mask[11,1], ZmmResult[88,8], ZmmMask[88,8]);
|
|
conditionalAssign(ZmmResult[96,8], mask[12,1], ZmmResult[96,8], ZmmMask[96,8]);
|
|
conditionalAssign(ZmmResult[104,8], mask[13,1], ZmmResult[104,8], ZmmMask[104,8]);
|
|
conditionalAssign(ZmmResult[112,8], mask[14,1], ZmmResult[112,8], ZmmMask[112,8]);
|
|
conditionalAssign(ZmmResult[120,8], mask[15,1], ZmmResult[120,8], ZmmMask[120,8]);
|
|
conditionalAssign(ZmmResult[128,8], mask[16,1], ZmmResult[128,8], ZmmMask[128,8]);
|
|
conditionalAssign(ZmmResult[136,8], mask[17,1], ZmmResult[136,8], ZmmMask[136,8]);
|
|
conditionalAssign(ZmmResult[144,8], mask[18,1], ZmmResult[144,8], ZmmMask[144,8]);
|
|
conditionalAssign(ZmmResult[152,8], mask[19,1], ZmmResult[152,8], ZmmMask[152,8]);
|
|
conditionalAssign(ZmmResult[160,8], mask[20,1], ZmmResult[160,8], ZmmMask[160,8]);
|
|
conditionalAssign(ZmmResult[168,8], mask[21,1], ZmmResult[168,8], ZmmMask[168,8]);
|
|
conditionalAssign(ZmmResult[176,8], mask[22,1], ZmmResult[176,8], ZmmMask[176,8]);
|
|
conditionalAssign(ZmmResult[184,8], mask[23,1], ZmmResult[184,8], ZmmMask[184,8]);
|
|
conditionalAssign(ZmmResult[192,8], mask[24,1], ZmmResult[192,8], ZmmMask[192,8]);
|
|
conditionalAssign(ZmmResult[200,8], mask[25,1], ZmmResult[200,8], ZmmMask[200,8]);
|
|
conditionalAssign(ZmmResult[208,8], mask[26,1], ZmmResult[208,8], ZmmMask[208,8]);
|
|
conditionalAssign(ZmmResult[216,8], mask[27,1], ZmmResult[216,8], ZmmMask[216,8]);
|
|
conditionalAssign(ZmmResult[224,8], mask[28,1], ZmmResult[224,8], ZmmMask[224,8]);
|
|
conditionalAssign(ZmmResult[232,8], mask[29,1], ZmmResult[232,8], ZmmMask[232,8]);
|
|
conditionalAssign(ZmmResult[240,8], mask[30,1], ZmmResult[240,8], ZmmMask[240,8]);
|
|
conditionalAssign(ZmmResult[248,8], mask[31,1], ZmmResult[248,8], ZmmMask[248,8]);
|
|
conditionalAssign(ZmmResult[256,8], mask[32,1], ZmmResult[256,8], ZmmMask[256,8]);
|
|
conditionalAssign(ZmmResult[264,8], mask[33,1], ZmmResult[264,8], ZmmMask[264,8]);
|
|
conditionalAssign(ZmmResult[272,8], mask[34,1], ZmmResult[272,8], ZmmMask[272,8]);
|
|
conditionalAssign(ZmmResult[280,8], mask[35,1], ZmmResult[280,8], ZmmMask[280,8]);
|
|
conditionalAssign(ZmmResult[288,8], mask[36,1], ZmmResult[288,8], ZmmMask[288,8]);
|
|
conditionalAssign(ZmmResult[296,8], mask[37,1], ZmmResult[296,8], ZmmMask[296,8]);
|
|
conditionalAssign(ZmmResult[304,8], mask[38,1], ZmmResult[304,8], ZmmMask[304,8]);
|
|
conditionalAssign(ZmmResult[312,8], mask[39,1], ZmmResult[312,8], ZmmMask[312,8]);
|
|
conditionalAssign(ZmmResult[320,8], mask[40,1], ZmmResult[320,8], ZmmMask[320,8]);
|
|
conditionalAssign(ZmmResult[328,8], mask[41,1], ZmmResult[328,8], ZmmMask[328,8]);
|
|
conditionalAssign(ZmmResult[336,8], mask[42,1], ZmmResult[336,8], ZmmMask[336,8]);
|
|
conditionalAssign(ZmmResult[344,8], mask[43,1], ZmmResult[344,8], ZmmMask[344,8]);
|
|
conditionalAssign(ZmmResult[352,8], mask[44,1], ZmmResult[352,8], ZmmMask[352,8]);
|
|
conditionalAssign(ZmmResult[360,8], mask[45,1], ZmmResult[360,8], ZmmMask[360,8]);
|
|
conditionalAssign(ZmmResult[368,8], mask[46,1], ZmmResult[368,8], ZmmMask[368,8]);
|
|
conditionalAssign(ZmmResult[376,8], mask[47,1], ZmmResult[376,8], ZmmMask[376,8]);
|
|
conditionalAssign(ZmmResult[384,8], mask[48,1], ZmmResult[384,8], ZmmMask[384,8]);
|
|
conditionalAssign(ZmmResult[392,8], mask[49,1], ZmmResult[392,8], ZmmMask[392,8]);
|
|
conditionalAssign(ZmmResult[400,8], mask[50,1], ZmmResult[400,8], ZmmMask[400,8]);
|
|
conditionalAssign(ZmmResult[408,8], mask[51,1], ZmmResult[408,8], ZmmMask[408,8]);
|
|
conditionalAssign(ZmmResult[416,8], mask[52,1], ZmmResult[416,8], ZmmMask[416,8]);
|
|
conditionalAssign(ZmmResult[424,8], mask[53,1], ZmmResult[424,8], ZmmMask[424,8]);
|
|
conditionalAssign(ZmmResult[432,8], mask[54,1], ZmmResult[432,8], ZmmMask[432,8]);
|
|
conditionalAssign(ZmmResult[440,8], mask[55,1], ZmmResult[440,8], ZmmMask[440,8]);
|
|
conditionalAssign(ZmmResult[448,8], mask[56,1], ZmmResult[448,8], ZmmMask[448,8]);
|
|
conditionalAssign(ZmmResult[456,8], mask[57,1], ZmmResult[456,8], ZmmMask[456,8]);
|
|
conditionalAssign(ZmmResult[464,8], mask[58,1], ZmmResult[464,8], ZmmMask[464,8]);
|
|
conditionalAssign(ZmmResult[472,8], mask[59,1], ZmmResult[472,8], ZmmMask[472,8]);
|
|
conditionalAssign(ZmmResult[480,8], mask[60,1], ZmmResult[480,8], ZmmMask[480,8]);
|
|
conditionalAssign(ZmmResult[488,8], mask[61,1], ZmmResult[488,8], ZmmMask[488,8]);
|
|
conditionalAssign(ZmmResult[496,8], mask[62,1], ZmmResult[496,8], ZmmMask[496,8]);
|
|
conditionalAssign(ZmmResult[504,8], mask[63,1], ZmmResult[504,8], ZmmMask[504,8]);
|
|
}
|
|
|
|
ZmmOpMask8: is evexOpmask=0 {
|
|
}
|
|
|
|
ZmmOpMask16: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(ZmmResult[0,16], mask[0,1], ZmmResult[0,16], ZmmMask[0,16]);
|
|
conditionalAssign(ZmmResult[16,16], mask[1,1], ZmmResult[16,16], ZmmMask[16,16]);
|
|
conditionalAssign(ZmmResult[32,16], mask[2,1], ZmmResult[32,16], ZmmMask[32,16]);
|
|
conditionalAssign(ZmmResult[48,16], mask[3,1], ZmmResult[48,16], ZmmMask[48,16]);
|
|
conditionalAssign(ZmmResult[64,16], mask[4,1], ZmmResult[64,16], ZmmMask[64,16]);
|
|
conditionalAssign(ZmmResult[80,16], mask[5,1], ZmmResult[80,16], ZmmMask[80,16]);
|
|
conditionalAssign(ZmmResult[96,16], mask[6,1], ZmmResult[96,16], ZmmMask[96,16]);
|
|
conditionalAssign(ZmmResult[112,16], mask[7,1], ZmmResult[112,16], ZmmMask[112,16]);
|
|
conditionalAssign(ZmmResult[128,16], mask[8,1], ZmmResult[128,16], ZmmMask[128,16]);
|
|
conditionalAssign(ZmmResult[144,16], mask[9,1], ZmmResult[144,16], ZmmMask[144,16]);
|
|
conditionalAssign(ZmmResult[160,16], mask[10,1], ZmmResult[160,16], ZmmMask[160,16]);
|
|
conditionalAssign(ZmmResult[176,16], mask[11,1], ZmmResult[176,16], ZmmMask[176,16]);
|
|
conditionalAssign(ZmmResult[192,16], mask[12,1], ZmmResult[192,16], ZmmMask[192,16]);
|
|
conditionalAssign(ZmmResult[208,16], mask[13,1], ZmmResult[208,16], ZmmMask[208,16]);
|
|
conditionalAssign(ZmmResult[224,16], mask[14,1], ZmmResult[224,16], ZmmMask[224,16]);
|
|
conditionalAssign(ZmmResult[240,16], mask[15,1], ZmmResult[240,16], ZmmMask[240,16]);
|
|
conditionalAssign(ZmmResult[256,16], mask[16,1], ZmmResult[256,16], ZmmMask[256,16]);
|
|
conditionalAssign(ZmmResult[272,16], mask[17,1], ZmmResult[272,16], ZmmMask[272,16]);
|
|
conditionalAssign(ZmmResult[288,16], mask[18,1], ZmmResult[288,16], ZmmMask[288,16]);
|
|
conditionalAssign(ZmmResult[304,16], mask[19,1], ZmmResult[304,16], ZmmMask[304,16]);
|
|
conditionalAssign(ZmmResult[320,16], mask[20,1], ZmmResult[320,16], ZmmMask[320,16]);
|
|
conditionalAssign(ZmmResult[336,16], mask[21,1], ZmmResult[336,16], ZmmMask[336,16]);
|
|
conditionalAssign(ZmmResult[352,16], mask[22,1], ZmmResult[352,16], ZmmMask[352,16]);
|
|
conditionalAssign(ZmmResult[368,16], mask[23,1], ZmmResult[368,16], ZmmMask[368,16]);
|
|
conditionalAssign(ZmmResult[384,16], mask[24,1], ZmmResult[384,16], ZmmMask[384,16]);
|
|
conditionalAssign(ZmmResult[400,16], mask[25,1], ZmmResult[400,16], ZmmMask[400,16]);
|
|
conditionalAssign(ZmmResult[416,16], mask[26,1], ZmmResult[416,16], ZmmMask[416,16]);
|
|
conditionalAssign(ZmmResult[432,16], mask[27,1], ZmmResult[432,16], ZmmMask[432,16]);
|
|
conditionalAssign(ZmmResult[448,16], mask[28,1], ZmmResult[448,16], ZmmMask[448,16]);
|
|
conditionalAssign(ZmmResult[464,16], mask[29,1], ZmmResult[464,16], ZmmMask[464,16]);
|
|
conditionalAssign(ZmmResult[480,16], mask[30,1], ZmmResult[480,16], ZmmMask[480,16]);
|
|
conditionalAssign(ZmmResult[496,16], mask[31,1], ZmmResult[496,16], ZmmMask[496,16]);
|
|
}
|
|
|
|
ZmmOpMask16: is evexOpmask=0 {
|
|
}
|
|
|
|
ZmmOpMask32: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(ZmmResult[0,32], mask[0,1], ZmmResult[0,32], ZmmMask[0,32]);
|
|
conditionalAssign(ZmmResult[32,32], mask[1,1], ZmmResult[32,32], ZmmMask[32,32]);
|
|
conditionalAssign(ZmmResult[64,32], mask[2,1], ZmmResult[64,32], ZmmMask[64,32]);
|
|
conditionalAssign(ZmmResult[96,32], mask[3,1], ZmmResult[96,32], ZmmMask[96,32]);
|
|
conditionalAssign(ZmmResult[128,32], mask[4,1], ZmmResult[128,32], ZmmMask[128,32]);
|
|
conditionalAssign(ZmmResult[160,32], mask[5,1], ZmmResult[160,32], ZmmMask[160,32]);
|
|
conditionalAssign(ZmmResult[192,32], mask[6,1], ZmmResult[192,32], ZmmMask[192,32]);
|
|
conditionalAssign(ZmmResult[224,32], mask[7,1], ZmmResult[224,32], ZmmMask[224,32]);
|
|
conditionalAssign(ZmmResult[256,32], mask[8,1], ZmmResult[256,32], ZmmMask[256,32]);
|
|
conditionalAssign(ZmmResult[288,32], mask[9,1], ZmmResult[288,32], ZmmMask[288,32]);
|
|
conditionalAssign(ZmmResult[320,32], mask[10,1], ZmmResult[320,32], ZmmMask[320,32]);
|
|
conditionalAssign(ZmmResult[352,32], mask[11,1], ZmmResult[352,32], ZmmMask[352,32]);
|
|
conditionalAssign(ZmmResult[384,32], mask[12,1], ZmmResult[384,32], ZmmMask[384,32]);
|
|
conditionalAssign(ZmmResult[416,32], mask[13,1], ZmmResult[416,32], ZmmMask[416,32]);
|
|
conditionalAssign(ZmmResult[448,32], mask[14,1], ZmmResult[448,32], ZmmMask[448,32]);
|
|
conditionalAssign(ZmmResult[480,32], mask[15,1], ZmmResult[480,32], ZmmMask[480,32]);
|
|
}
|
|
|
|
ZmmOpMask32: is evexOpmask=0 {
|
|
}
|
|
|
|
ZmmOpMask64: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode {
|
|
local mask = AVXOpMask;
|
|
conditionalAssign(ZmmResult[0,64], mask[0,1], ZmmResult[0,64], ZmmMask[0,64]);
|
|
conditionalAssign(ZmmResult[64,64], mask[1,1], ZmmResult[64,64], ZmmMask[64,64]);
|
|
conditionalAssign(ZmmResult[128,64], mask[2,1], ZmmResult[128,64], ZmmMask[128,64]);
|
|
conditionalAssign(ZmmResult[192,64], mask[3,1], ZmmResult[192,64], ZmmMask[192,64]);
|
|
conditionalAssign(ZmmResult[256,64], mask[4,1], ZmmResult[256,64], ZmmMask[256,64]);
|
|
conditionalAssign(ZmmResult[320,64], mask[5,1], ZmmResult[320,64], ZmmMask[320,64]);
|
|
conditionalAssign(ZmmResult[384,64], mask[6,1], ZmmResult[384,64], ZmmMask[384,64]);
|
|
conditionalAssign(ZmmResult[448,64], mask[7,1], ZmmResult[448,64], ZmmMask[448,64]);
|
|
}
|
|
|
|
ZmmOpMask64: is evexOpmask=0 {
|
|
}
|
|
|
|
|
|
RegK_m8: KReg_rm is mod=3 & KReg_rm { tmp:1 = KReg_rm[0,8]; export tmp; }
|
|
RegK_m8: m8 is m8 { tmp:1 = m8; export tmp; }
|
|
RegK_m16: KReg_rm is mod=3 & KReg_rm { tmp:2 = KReg_rm[0,16]; export tmp;}
|
|
RegK_m16: m16 is m16 { tmp:2 = m16; export tmp; }
|
|
RegK_m32: KReg_rm is mod=3 & KReg_rm { tmp:4 = KReg_rm[0,32]; export tmp; }
|
|
RegK_m32: m32 is m32 { tmp:4 = m32; export tmp; }
|
|
RegK_m64: KReg_rm is mod=3 & KReg_rm { export KReg_rm; }
|
|
RegK_m64: m64 is m64 { export m64; }
|
|
|
|
# Some macros
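# Naming convention (as used below): pushAB/popAB move a B-byte operand using
# an A-byte stack address, e.g. push22 writes 2 bytes through segmented SS:SP,
# while the push4x/push8x and pop4x/pop8x families adjust $(STACKPTR) directly.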
|
|
|
|
macro ptr2(r,x) {
|
|
r = zext(x);
|
|
}
|
|
|
|
macro ptr4(r,x) {
|
|
@ifdef IA64
|
|
r = zext(x);
|
|
@else
|
|
r = x;
|
|
@endif
|
|
}
|
|
|
|
macro ptr8(r,x) {
|
|
@ifdef IA64
|
|
r = x;
|
|
@else
|
|
r = x:$(SIZE);
|
|
@endif
|
|
}
|
|
|
|
macro push22(x) {
|
|
mysave:2 = x;
|
|
SP = SP - 2;
|
|
tmp:$(SIZE) = segment(SS,SP);
|
|
*:2 tmp = mysave;
|
|
}
|
|
|
|
macro push24(x) {
|
|
mysave:4 = x;
|
|
SP = SP-4;
|
|
tmp:$(SIZE) = segment(SS,SP);
|
|
*:4 tmp = mysave;
|
|
}
|
|
|
|
macro push28(x) {
|
|
mysave:8 = x;
|
|
SP = SP-8;
|
|
tmp:$(SIZE) = segment(SS,SP);
|
|
*:8 tmp = mysave;
|
|
}
|
|
|
|
macro push42(x) {
|
|
mysave:2 = x;
|
|
$(STACKPTR) = $(STACKPTR) - 2;
|
|
*:2 $(STACKPTR) = mysave;
|
|
}
|
|
|
|
macro push44(x) {
|
|
mysave:4 = x;
|
|
$(STACKPTR) = $(STACKPTR) - 4;
|
|
*:4 $(STACKPTR) = mysave;
|
|
}
|
|
|
|
macro pushseg44(x) {
|
|
mysave:2 = x;
|
|
$(STACKPTR) = $(STACKPTR) - 4;
|
|
*:2 $(STACKPTR) = mysave;
|
|
}
|
|
|
|
macro push48(x) {
|
|
mysave:8 = x;
|
|
$(STACKPTR) = $(STACKPTR) - 8;
|
|
*:8 $(STACKPTR) = mysave;
|
|
}
|
|
|
|
@ifdef IA64
|
|
macro push82(x) {
|
|
mysave:2 = x;
|
|
$(STACKPTR) = $(STACKPTR) - 2;
|
|
*:2 $(STACKPTR) = mysave;
|
|
}
|
|
|
|
macro push84(x) {
|
|
mysave:4 = x;
|
|
$(STACKPTR) = $(STACKPTR) - 4;
|
|
*:4 $(STACKPTR) = mysave;
|
|
}
|
|
|
|
macro push88(x) {
|
|
mysave:8 = x;
|
|
$(STACKPTR) = $(STACKPTR) - 8;
|
|
*:8 $(STACKPTR) = mysave;
|
|
}
|
|
|
|
macro pushseg88(x) {
|
|
mysave:8 = zext(x);
|
|
$(STACKPTR) = $(STACKPTR) - 8;
|
|
*:8 $(STACKPTR) = mysave;
|
|
}
|
|
@endif

macro pop22(x) {
  tmp:$(SIZE) = segment(SS,SP);
  x = *:2 tmp;
  SP = SP + 2;
}

macro pop24(x) {
  tmp:$(SIZE) = segment(SS,SP);
  x = *:4 tmp;
  SP = SP + 4;
}

macro pop28(x) {
  tmp:$(SIZE) = segment(SS,SP);
  x = *:8 tmp;
  SP = SP + 8;
}

macro pop42(x) {
  x = *:2 $(STACKPTR);
  ESP = ESP + 2;
}

macro pop44(x) {
  x = *:4 $(STACKPTR);
  ESP = ESP + 4;
}

macro popseg44(x) {
  x = *:2 $(STACKPTR);
  ESP = ESP + 4;
}

macro pop48(x) {
  x = *:8 $(STACKPTR);
  ESP = ESP + 8;
}

@ifdef IA64
macro pop82(x) {
  x = *:2 $(STACKPTR);
  RSP = RSP + 2;
}

macro pop84(x) {
  x = *:4 $(STACKPTR);
  RSP = RSP + 4;
}

macro pop88(x) {
  x = *:8 $(STACKPTR);
  RSP = RSP + 8;
}

macro popseg88(x) {
  x = *:2 $(STACKPTR);
  RSP = RSP + 8;
}
@endif

macro unpackflags(tmp) {
  NT = (tmp & 0x4000) != 0;
# IOPL = (tmp & 0x1000) != 0;
  OF = (tmp & 0x0800) != 0;
  DF = (tmp & 0x0400) != 0;
  IF = (tmp & 0x0200) != 0;
  TF = (tmp & 0x0100) != 0;
  SF = (tmp & 0x0080) != 0;
  ZF = (tmp & 0x0040) != 0;
  AF = (tmp & 0x0010) != 0;
  PF = (tmp & 0x0004) != 0;
  CF = (tmp & 0x0001) != 0;
}

macro unpackeflags(tmp) {
  ID = (tmp & 0x00200000) != 0;
  AC = (tmp & 0x00040000) != 0;
# RF = (tmp & 0x00010000) != 0;
  VIP = 0;
  VIF = 0;
}

macro packflags(tmp) {
  tmp = (0x4000 * zext(NT&1))
# | (0x1000 * zext(IOPL&1))
      | (0x0800 * zext(OF&1))
      | (0x0400 * zext(DF&1)) | (0x0200 * zext(IF&1)) | (0x0100 * zext(TF&1))
      | (0x0080 * zext(SF&1)) | (0x0040 * zext(ZF&1)) | (0x0010 * zext(AF&1))
      | (0x0004 * zext(PF&1)) | (0x0001 * zext(CF&1));
}

macro packeflags(tmp) {
  tmp = tmp | (0x00200000 * zext(ID&1)) | (0x00100000 * zext(VIP&1))
            | (0x00080000 * zext(VIF&1)) | (0x00040000 * zext(AC&1));
}
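
# For reference, the (E)FLAGS bit positions used by the pack/unpack
# masks above: CF=0, PF=2, AF=4, ZF=6, SF=7, TF=8, IF=9, DF=10, OF=11,
# IOPL=12-13, NT=14, RF=16, VM=17, AC=18, VIF=19, VIP=20, ID=21
# (e.g. 0x0800 = 1 << 11 selects OF).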

macro addflags(op1,op2) {
  CF = carry(op1,op2);
  OF = scarry(op1,op2);
}

#
# full-adder carry and overflow calculations
#
macro addCarryFlags ( op1, op2 ) {
  local CFcopy = zext(CF);
  CF = carry( op1, op2 );
  OF = scarry( op1, op2 );
  local result = op1 + op2;
  CF = CF || carry( result, CFcopy );
  OF = OF ^^ scarry( result, CFcopy );
  op1 = result + CFcopy;
  # AF not implemented
}
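
# Worked example (illustrative, 8-bit): ADC with op1=0xFF, op2=0x00, CF=1.
# The first stage computes carry(0xFF,0x00)=0, the second stage ORs in
# carry(0xFF,1)=1, and op1 becomes 0xFF + 0x00 + 1 = 0x00 with CF=1 and
# OF=0, matching a full adder.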

macro subCarryFlags ( op1, op2 ) {
  local CFcopy = zext(CF);
  CF = op1 < op2;
  OF = sborrow( op1, op2 );
  local result = op1 - op2;
  CF = CF || (result < CFcopy);
  OF = OF ^^ sborrow( result, CFcopy );
  op1 = result - CFcopy;
  # AF not implemented
}
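
# Worked example (illustrative, 8-bit): SBB with op1=0x00, op2=0x00, CF=1.
# The subtraction itself does not borrow, but result (0x00) < CFcopy (1)
# re-asserts CF, and op1 becomes 0x00 - 0x00 - 1 = 0xFF.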

macro resultflags(result) {
  SF = result s< 0;
  ZF = result == 0;
  PF = ((popcount(result & 0xff) & 1:1) == 0);
  # AF not implemented
}
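
# Note PF reflects even parity of the low-order byte only: e.g. a result
# of 0x0103 has low byte 0x03 with two bits set, so PF=1.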

macro shiftresultflags(result,count) {
  local notzero = (count != 0);

  local newSF = (result s< 0);
  SF = (!notzero & SF) | (notzero & newSF);

  local newZF = (result == 0);
  ZF = (!notzero & ZF) | (notzero & newZF);

  local newPF = ((popcount(result & 0xff) & 1:1) == 0);
  PF = (!notzero & PF) | (notzero & newPF);
  # AF not implemented
}
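
# The (!notzero & old) | (notzero & new) expressions are branch-free
# selects: a shift by a zero count must leave the flags unchanged, and
# SLEIGH macros cannot contain labels or jumps, so the old and new flag
# values are muxed together with boolean masks instead.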

macro subflags(op1,op2) {
  CF = op1 < op2;
  OF = sborrow(op1,op2);
}

macro negflags(op1) {
  CF = (op1 != 0);
  OF = sborrow(0,op1);
}

macro logicalflags() {
  CF = 0;
  OF = 0;
}

macro imultflags(low,total) {
  CF = sext(low) != total;
  OF = CF;
}

macro multflags(highhalf) {
  CF = highhalf != 0;
  OF = CF;
}

macro rolflags(result,count) {
  local notzero = (count != 0);
  local newCF = ((result & 1) != 0);
  CF = (!notzero & CF) | (notzero & newCF);

  local one = (count == 1);
  local newOF = CF ^ (result s< 0);
  OF = (!one & OF) | (one & newOF);
}

macro rorflags(result,count) {
  local notzero = (count != 0);
  local newCF = (result s< 0);
  CF = (!notzero & CF) | (notzero & newCF);

  local one = (count == 1);
  local newOF = (result s< 0) ^ ((result << 1) s< 0);
  OF = (!one & OF) | (one & newOF);
}

macro shlflags(op1,result,count) { # works for shld also
  local notzero = (count != 0);
  local newCF = ( (op1 << (count - 1)) s< 0 );
  CF = (!notzero & CF) | (notzero & newCF);

  local one = (count == 1);
  local newOF = CF ^ (result s< 0);
  OF = (!one & OF) | (one & newOF);
}

macro sarflags(op1,result,count) {
  local notzero = (count != 0);
  local newCF = ( ( (op1 s>> (count - 1)) & 1 ) != 0 );
  CF = (!notzero & CF) | (notzero & newCF);

  local one = (count == 1);
  OF = (!one & OF);
}

macro shrflags(op1,result,count) {
  local notzero = (count != 0);
  local newCF = ( ( (op1 >> (count - 1)) & 1 ) != 0 );
  CF = (!notzero & CF) | (notzero & newCF);

  local one = (count == 1);
  local newOF = (op1 s< 0);
  OF = (!one & OF) | (one & newOF);
}

macro shrdflags(op1,result,count) {
  local notzero = (count != 0);
  local newCF = ( ( (op1 >> (count - 1)) & 1 ) != 0 );
  CF = (!notzero & CF) | (notzero & newCF);

  local one = (count == 1);
  local newOF = ((op1 s< 0) ^ (result s< 0));
  OF = (!one & OF) | (one & newOF);
}

macro fdec() {
  local tmp = ST7;
  ST7 = ST6;
  ST6 = ST5;
  ST5 = ST4;
  ST4 = ST3;
  ST3 = ST2;
  ST2 = ST1;
  ST1 = ST0;
  ST0 = tmp;
}

macro finc() {
  local tmp = ST0;
  ST0 = ST1;
  ST1 = ST2;
  ST2 = ST3;
  ST3 = ST4;
  ST4 = ST5;
  ST5 = ST6;
  ST6 = ST7;
  ST7 = tmp;
}

macro fpop() {
  ST0 = ST1;
  ST1 = ST2;
  ST2 = ST3;
  ST3 = ST4;
  ST4 = ST5;
  ST5 = ST6;
  ST6 = ST7;
}

macro fpushv(val) {
  ST7 = ST6;
  ST6 = ST5;
  ST5 = ST4;
  ST4 = ST3;
  ST3 = ST2;
  ST2 = ST1;
  ST1 = ST0;
  ST0 = val;
}

macro fpopv(val) {
  val = ST0;
  ST0 = ST1;
  ST1 = ST2;
  ST2 = ST3;
  ST3 = ST4;
  ST4 = ST5;
  ST5 = ST6;
  ST6 = ST7;
}
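
# Note: this spec models the x87 register stack as the fixed registers
# ST0..ST7 rather than via a TOP pointer, so pushes and pops are written
# as rotations/shifts of all eight registers (fdec/finc rotate the
# stack, fpushv/fpopv shift a value in or out at ST0).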

macro fcom(val) {
  C1 = 0;

  C2 = nan(ST0) || nan(val);
  C0 = C2 | ( ST0 f< val );
  C3 = C2 | ( ST0 f== val );

  FPUStatusWord = (zext(C0)<<8) | (zext(C1)<<9) | (zext(C2)<<10) | (zext(C3)<<14);
}
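
# The x87 condition codes sit in the FPU status word at C0=bit 8,
# C1=bit 9, C2=bit 10 and C3=bit 14, hence the shift amounts above; an
# unordered compare (either operand NaN) sets C0, C2 and C3 together.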

macro fcomi(val) {
  PF = nan(ST0) || nan(val);
  ZF = PF | ( ST0 f== val );
  CF = PF | ( ST0 f< val );

  OF = 0;
  AF = 0;
  SF = 0;

  FPUStatusWord = FPUStatusWord & 0xfdff; # Clear C1
  C1 = 0;
}

# floating point NaN comparison into EFLAGS
macro fucompe(val1, val2) {
  PF = nan(val1) || nan(val2);
  ZF = PF | ( val1 f== val2 );
  CF = PF | ( val1 f< val2 );

  OF = 0;
  AF = 0;
  SF = 0;
}

# The base level constructors
# The prefixes

:^instruction is instrPhase=0 & over=0x2e; instruction [ segover=1; ] {} # CS override
:^instruction is instrPhase=0 & over=0x36; instruction [ segover=2; ] {} # SS override
:^instruction is instrPhase=0 & over=0x3e; instruction [ segover=3; ] {} # DS override
:^instruction is instrPhase=0 & over=0x26; instruction [ segover=4; ] {} # ES override
:^instruction is instrPhase=0 & over=0x64; instruction [ segover=5; ] {} # FS override
:^instruction is instrPhase=0 & over=0x65; instruction [ segover=6; ] {} # GS override
:^instruction is instrPhase=0 & over=0x66; instruction [ opsize=opsize $xor 1; mandover = mandover $xor 1; ] {} # Operand size override
:^instruction is instrPhase=0 & over=0x67; instruction [ addrsize=addrsize $xor 1; ] {} # Address size override
:^instruction is instrPhase=0 & over=0xf2; instruction [ repneprefx=1; repprefx=0; ] {}
:^instruction is instrPhase=0 & over=0xf3; instruction [ repneprefx=0; repprefx=1; ] {}
:^instruction is instrPhase=0 & over=0xf0; instruction [ lockprefx=1; ] {}
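
# Each prefix constructor above consumes one byte, records the prefix in
# a context variable, and then matches `instruction` again, so a string
# of prefixes is peeled off recursively one byte at a time. For example,
# for the bytes 64 66 0F ... the FS-override constructor fires first
# (setting segover=5), then the operand-size constructor (toggling
# opsize and mandover), and finally the 0F-escaped opcode is matched.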
|
|
@ifdef IA64
|
|
|
|
#
|
|
# REX opcode extension prefixes
|
|
#
|
|
|
|
# REX prefix present
|
|
# Specification is "REX"
|
|
@define REX "longMode=1 & rexprefix=1 & rexWprefix=0"
|
|
|
|
# Specification is "REX.W"
|
|
@define REX_W "longMode=1 & rexprefix=1 & rexWprefix=1"
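
# For reference: a REX prefix is any byte 0x40-0x4F (row=0x4 below),
# laid out as 0100WRXB. W selects 64-bit operand size; R, X and B extend
# the ModR/M reg, SIB index, and ModR/M r/m (or SIB base) fields.
# E.g. 0x48 is REX.W and 0x44 is REX.R.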

# TODO: the next two lines should be unreachable, because a 0x66 or 0x67
# prefix must come before the REX prefix for the REX prefix to take effect
:^instruction is $(LONGMODE_ON) & instrPhase=0 & over=0x66 & opsize=2; instruction [ opsize=0; mandover=mandover $xor 1; ] {} # Operand size override
:^instruction is $(LONGMODE_ON) & instrPhase=0 & over=0x67 & addrsize=2; instruction [ addrsize=1; ] {} # Address size override

:^instruction is $(LONGMODE_ON) & instrPhase=0 & row=0x4 & rexw=0 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=1; rexWprefix=0; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {}
:^instruction is $(LONGMODE_ON) & instrPhase=0 & row=0x4 & rexw=1 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=2; rexWprefix=1; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {}
:^instruction is $(LONGMODE_ON) & instrPhase=0 & opsize=0 & row=0x4 & rexw=0 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=0; rexWprefix=0; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {}
:^instruction is $(LONGMODE_ON) & instrPhase=0 & opsize=0 & row=0x4 & rexw=1 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=2; rexWprefix=1; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {}

# If long mode is off (a 64-bit processor running in 32-bit compatibility
# mode), there is no 64-bit addressing; make sure addrsize=2 is turned off
# before parsing.
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & addrsize=2 & instruction [ addrsize=1; ] {}
@endif

#
# VEX definitions: one from each group must be present in the decoding,
# following the specification in the manual.
#

# VEX encoding for the type of VEX data flow.
# Specification is "VEX.", "VEX.NDS", "VEX.NDD", or "VEX.DDS". If only "VEX." is present, then "VEX_NONE" must be used.
@define VEX_NONE "vexMode=1"
@define VEX_NDS "vexMode=1"
@define VEX_NDD "vexMode=1"
@define VEX_DDS "vexMode=1"

# Specification is "LIG", "LZ", "128", or "256".
@define VEX_LIG "vexL"
@define VEX_LZ "vexL=0"
@define VEX_L128 "vexL=0"
@define VEX_L256 "vexL=1"
@define EVEX_L512 "evexLp=1 & vexL=0"
@define EVEX_LLIG "evexLp & vexL"

# These are only to be used with VEX or EVEX decoding, where only one "mandatory" prefix is encoded in the VEX or EVEX.
# If no prefix is specified, then VEX_PRE_NONE must be used.
# No other "physical" prefixes are allowed.
# Specification is "(empty)", "66", "F3", or "F2". If none of these are present (empty), then "VEX_PRE_NONE" must be used.
@define VEX_PRE_NONE "mandover=0"
@define VEX_PRE_66 "mandover=1"
@define VEX_PRE_F3 "mandover=2"
@define VEX_PRE_F2 "mandover=4"

# Specification is "0F", "0F38", or "0F3A".
@define VEX_0F "vexMMMMM=1"
@define VEX_0F38 "vexMMMMM=2"
@define VEX_0F3A "vexMMMMM=3"
@define VEX_MAP4 "vexMMMMM=4"
@define VEX_MAP5 "vexMMMMM=5"
@define VEX_MAP6 "vexMMMMM=6"

# Specification is "WIG", "W0", or "W1".
@define VEX_WIG "rexWprefix"
@define VEX_W0 "rexWprefix=0"
@define VEX_W1 "rexWprefix=1"
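
# Illustrative example of how these defines are combined: a manual entry
# written as "VEX.128.66.0F38.W0" corresponds to the pattern
#   $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0)
# and is satisfied by, e.g., the 3-byte prefix C4 E2 79: byte two (0xE2)
# holds inverted R/X/B = 111 and mmmmm = 00010 (the 0F38 map); byte
# three (0x79) holds W=0, inverted vvvv = 1111 (unused), L=0 (128-bit)
# and pp = 01 (the 66 prefix).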
|
|
|
|
@define EVEX_NONE "vexMode=2"
|
|
@define EVEX_NDS "vexMode=2"
|
|
@define EVEX_NDD "vexMode=2"
|
|
@define EVEX_DDS "vexMode=2"
|
|
|
|
@ifdef IA64
|
|
|
|
# 64-bit 3-byte VEX
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=0; instruction
|
|
[ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=1; instruction
|
|
[ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_66=1; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=2; instruction
|
|
[ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f3=1; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=3; instruction
|
|
[ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f2=1; ] {}
|
|
|
|
# 64-bit 2-byte VEX
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=0; instruction
|
|
[ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=1; instruction
|
|
[ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_66=1; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=2; instruction
|
|
[ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f3=1; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=3; instruction
|
|
[ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f2=1; ] {}
|
|
|
|
# 4-byte EVEX prefix
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r & vex_x & vex_b & evex_rp & evex_res=0 & evex_mmm;
|
|
vex_w & vex_vvvv & evex_res2=1 & vex_pp=0; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction
|
|
[ instrPhase=1; vexMode=2; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexMMMMM=evex_mmm;
|
|
evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r & vex_x & vex_b & evex_rp & evex_res=0 & evex_mmm;
|
|
vex_w & vex_vvvv & evex_res2=1 & vex_pp=1; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction
|
|
[ instrPhase=1; vexMode=2; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexMMMMM=evex_mmm;
|
|
evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; prefix_66=1; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r & vex_x & vex_b & evex_rp & evex_res=0 & evex_mmm;
|
|
vex_w & vex_vvvv & evex_res2=1 & vex_pp=2; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction
|
|
[ instrPhase=1; vexMode=2; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexMMMMM=evex_mmm;
|
|
evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; prefix_f3=1; ] {}
|
|
:^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r & vex_x & vex_b & evex_rp & evex_res=0 & evex_mmm;
|
|
vex_w & vex_vvvv & evex_res2=1 & vex_pp=3; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction
|
|
[ instrPhase=1; vexMode=2; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexMMMMM=evex_mmm;
|
|
evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; prefix_f2=1; ] {}
|
|
@endif
|
|
|
|

# 32-bit 3-byte VEX
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=0; instruction
    [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=1; instruction
    [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_66=1; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=2; instruction
    [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f3=1; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=3; instruction
    [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f2=1; ] {}

# 32-bit 2-byte VEX
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_x=1 & vex_vvvv & vex_l & vex_pp=0; instruction
    [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_x=1 & vex_vvvv & vex_l & vex_pp=1; instruction
    [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_66=1; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_x=1 & vex_vvvv & vex_l & vex_pp=2; instruction
    [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f3=1; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_x=1 & vex_vvvv & vex_l & vex_pp=3; instruction
    [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f2=1; ] {}

:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r=1 & vex_x=1 & vex_b & evex_rp & evex_res=0 & evex_mmm;
    vex_w & vex_vvvv & evex_res2=1 & vex_pp=0; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction
    [ instrPhase=1; vexMode=2; vexVVVV=~vex_vvvv; rexBprefix=~vex_b; rexWprefix=vex_w; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; vexMMMMM=evex_mmm; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r=1 & vex_x=1 & vex_b & evex_rp & evex_res=0 & evex_mmm;
    vex_w & vex_vvvv & evex_res2=1 & vex_pp=1; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction
    [ instrPhase=1; vexMode=2; vexVVVV=~vex_vvvv; rexBprefix=~vex_b; rexWprefix=vex_w; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; vexMMMMM=evex_mmm; prefix_66=1; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r=1 & vex_x=1 & vex_b & evex_rp & evex_res=0 & evex_mmm;
    vex_w & vex_vvvv & evex_res2=1 & vex_pp=2; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction
    [ instrPhase=1; vexMode=2; vexVVVV=~vex_vvvv; rexBprefix=~vex_b; rexWprefix=vex_w; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; vexMMMMM=evex_mmm; prefix_f3=1; ] {}
:^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r=1 & vex_x=1 & vex_b & evex_rp & evex_res=0 & evex_mmm;
    vex_w & vex_vvvv & evex_res2=1 & vex_pp=3; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction
    [ instrPhase=1; vexMode=2; vexVVVV=~vex_vvvv; rexBprefix=~vex_b; rexWprefix=vex_w; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; vexMMMMM=evex_mmm; prefix_f2=1; ] {}

# Many of the multimedia instructions have a "mandatory" prefix (0x66, 0xf2, or 0xf3)
# where the prefix really becomes part of the encoding. We collect the three possible
# prefixes of this sort in the mandover context variable so that all three can be
# matched with a single pattern.

# 3DNow pre-parse to isolate suffix byte into context (suffix3D)
# - general format: 0x0f 0x0f <modR/M> [sib] [displacement] <suffix3D-byte>
# - must determine number of bytes consumed by addressing modes
# TODO: determine supported prefixes? (e.g., 0x26)

Suffix3D: imm8 is imm8 [ suffix3D=imm8; ] { }

:^instruction is instrPhase=0 & (byte=0x0f; byte=0x0f; XmmReg ... & m64; Suffix3D) ... & instruction ... [ instrPhase=1; ] { }
:^instruction is instrPhase=0 & (byte=0x0f; byte=0x0f; mmxmod=3; Suffix3D) ... & instruction ... [ instrPhase=1; ] { }
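
# Illustrative example: the 3DNow! instruction PFADD MM0,MM1 is encoded
# as 0F 0F C1 9E -- the operation (suffix byte 0x9E) follows the ModR/M
# byte, which is why the suffix must be captured into context here before
# normal opcode matching can run.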

# Instructions in alphabetical order

# See 'lockable.sinc' file for instructions that are lockable
with : lockprefx=0 {

:AAA is vexMode=0 & bit64=0 & byte=0x37 { local car = ((AL & 0xf) > 9) | AF; AL = (AL+6*car)&0xf; AH=AH+car; CF=car; AF=car; }
:AAD imm8 is vexMode=0 & bit64=0 & byte=0xd5; imm8 { AL = AL + imm8*AH; AH=0; resultflags(AX); }
:AAM imm8 is vexMode=0 & bit64=0 & byte=0xd4; imm8 { AH = AL/imm8; AL = AL % imm8; resultflags(AX); }
:AAS is vexMode=0 & bit64=0 & byte=0x3f { local car = ((AL & 0xf) > 9) | AF; AL = (AL-6*car)&0xf; AH=AH-car; CF=car; AF=car; }
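
# Worked example (illustrative): AAM with the default immediate 0x0A and
# AL=0x3F (63) leaves AH=6, AL=3; AAD with the same immediate reverses
# it: AL = 3 + 10*6 = 63.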

# See 'lockable.sinc' for memory destination, lockable variants
:ADC AL,imm8 is vexMode=0 & byte=0x14; AL & imm8 { addCarryFlags( AL, imm8:1 ); resultflags( AL ); }
:ADC AX,imm16 is vexMode=0 & opsize=0 & byte=0x15; AX & imm16 { addCarryFlags( AX, imm16:2 ); resultflags( AX ); }
:ADC EAX,imm32 is vexMode=0 & opsize=1 & byte=0x15; EAX & check_EAX_dest & imm32 { addCarryFlags( EAX, imm32:4 ); build check_EAX_dest; resultflags( EAX ); }
@ifdef IA64
:ADC RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x15; RAX & simm32 { addCarryFlags( RAX, simm32 ); resultflags( RAX ); }
@endif
:ADC Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=2; imm8 { addCarryFlags( Rmr8, imm8:1 ); resultflags( Rmr8 ); }
:ADC Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=2; imm16 { addCarryFlags( Rmr16, imm16:2 ); resultflags( Rmr16 ); }
:ADC Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=2; imm32 { addCarryFlags( Rmr32, imm32:4 ); build check_Rmr32_dest; resultflags( Rmr32 ); }
@ifdef IA64
:ADC Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=2; simm32 { addCarryFlags( Rmr64, simm32 ); resultflags( Rmr64 ); }
@endif
:ADC Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=2; simm8_16 { addCarryFlags( Rmr16, simm8_16 ); resultflags( Rmr16 ); }
:ADC Rmr32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=2; simm8_32 { addCarryFlags( Rmr32, simm8_32 ); build check_Rmr32_dest; resultflags( Rmr32 ); }
@ifdef IA64
:ADC Rmr64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=2; simm8_64 { addCarryFlags( Rmr64, simm8_64 ); resultflags( Rmr64 ); }
@endif
:ADC Rmr8,Reg8 is vexMode=0 & byte=0x10; mod=3 & Rmr8 & Reg8 { addCarryFlags( Rmr8, Reg8 ); resultflags( Rmr8 ); }
:ADC Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x11; mod=3 & Rmr16 & Reg16 { addCarryFlags( Rmr16, Reg16 ); resultflags( Rmr16 ); }
:ADC Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x11; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { addCarryFlags( Rmr32, Reg32 ); build check_Rmr32_dest; resultflags( Rmr32 ); }
@ifdef IA64
:ADC Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x11; mod=3 & Rmr64 & Reg64 { addCarryFlags( Rmr64, Reg64 ); resultflags( Rmr64 ); }
@endif
:ADC Reg8,rm8 is vexMode=0 & byte=0x12; rm8 & Reg8 ... { addCarryFlags( Reg8, rm8 ); resultflags( Reg8 ); }
:ADC Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x13; rm16 & Reg16 ... { addCarryFlags( Reg16, rm16 ); resultflags( Reg16 ); }
:ADC Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x13; rm32 & Reg32 ... & check_Reg32_dest ... { addCarryFlags( Reg32, rm32 ); build check_Reg32_dest; resultflags( Reg32 ); }
@ifdef IA64
:ADC Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x13; rm64 & Reg64 ... { addCarryFlags( Reg64, rm64 ); resultflags( Reg64 ); }
@endif

# See 'lockable.sinc' for memory destination, lockable variants
:ADD AL,imm8 is vexMode=0 & byte=0x4; AL & imm8 { addflags( AL,imm8 ); AL = AL + imm8; resultflags( AL); }
:ADD AX,imm16 is vexMode=0 & opsize=0 & byte=0x5; AX & imm16 { addflags( AX,imm16); AX = AX + imm16; resultflags( AX); }
:ADD EAX,imm32 is vexMode=0 & opsize=1 & byte=0x5; EAX & check_EAX_dest & imm32 { addflags( EAX,imm32); EAX = EAX + imm32; build check_EAX_dest; resultflags( EAX); }
@ifdef IA64
:ADD RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x5; RAX & simm32 { addflags( RAX,simm32); RAX = RAX + simm32; resultflags( RAX); }
@endif
:ADD Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=0; imm8 { addflags( Rmr8,imm8 ); Rmr8 = Rmr8 + imm8; resultflags( Rmr8); }
:ADD Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=0; imm16 { addflags( Rmr16,imm16); Rmr16 = Rmr16 + imm16; resultflags( Rmr16); }
:ADD Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=0; imm32 { addflags( Rmr32,imm32); Rmr32 = Rmr32 + imm32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:ADD Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=0; simm32 { addflags( Rmr64,simm32); Rmr64 = Rmr64 + simm32; resultflags( Rmr64); }
@endif
:ADD Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=0; simm8_16 { addflags( Rmr16,simm8_16); Rmr16 = Rmr16 + simm8_16; resultflags( Rmr16); }
:ADD Rmr32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=0; simm8_32 { addflags( Rmr32,simm8_32); Rmr32 = Rmr32 + simm8_32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:ADD Rmr64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=0; simm8_64 { addflags( Rmr64,simm8_64); Rmr64 = Rmr64 + simm8_64; resultflags( Rmr64); }
@endif
:ADD Rmr8,Reg8 is vexMode=0 & byte=0x00; mod=3 & Rmr8 & Reg8 { addflags( Rmr8,Reg8 ); Rmr8 = Rmr8 + Reg8; resultflags( Rmr8); }
:ADD Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x1; mod=3 & Rmr16 & Reg16 { addflags( Rmr16,Reg16); Rmr16 = Rmr16 + Reg16; resultflags( Rmr16); }
:ADD Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x1; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { addflags( Rmr32,Reg32); Rmr32 = Rmr32 + Reg32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:ADD Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x1; mod=3 & Rmr64 & Reg64 { addflags( Rmr64,Reg64); Rmr64 = Rmr64 + Reg64; resultflags( Rmr64); }
@endif
:ADD Reg8,rm8 is vexMode=0 & byte=0x2; rm8 & Reg8 ... { addflags( Reg8,rm8 ); Reg8 = Reg8 + rm8; resultflags( Reg8); }
:ADD Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x3; rm16 & Reg16 ... { addflags(Reg16,rm16 ); Reg16 = Reg16 + rm16; resultflags(Reg16); }
:ADD Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x3; rm32 & Reg32 ... & check_Reg32_dest ... { addflags(Reg32,rm32 ); Reg32 = Reg32 + rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:ADD Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x3; rm64 & Reg64 ... { addflags(Reg64,rm64 ); Reg64 = Reg64 + rm64; resultflags(Reg64); }
@endif

# See 'lockable.sinc' for memory destination, lockable variants
:AND AL,imm8 is vexMode=0 & byte=0x24; AL & imm8 { logicalflags(); AL = AL & imm8; resultflags( AL); }
:AND AX,imm16 is vexMode=0 & opsize=0 & byte=0x25; AX & imm16 { logicalflags(); AX = AX & imm16; resultflags( AX); }
:AND EAX,imm32 is vexMode=0 & opsize=1 & byte=0x25; EAX & check_EAX_dest & imm32 { logicalflags(); EAX = EAX & imm32; build check_EAX_dest; resultflags( EAX); }
@ifdef IA64
:AND RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x25; RAX & simm32 { logicalflags(); RAX = RAX & simm32; resultflags( RAX); }
@endif
:AND Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=4; imm8 { logicalflags(); Rmr8 = Rmr8 & imm8; resultflags( Rmr8); }
:AND Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=4; imm16 { logicalflags(); Rmr16 = Rmr16 & imm16; resultflags( Rmr16); }
:AND Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=4; imm32 { logicalflags(); Rmr32 = Rmr32 & imm32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:AND Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=4; simm32 { logicalflags(); Rmr64 = Rmr64 & simm32; resultflags( Rmr64); }
@endif
:AND Rmr16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=4; usimm8_16 { logicalflags(); Rmr16 = Rmr16 & usimm8_16; resultflags( Rmr16); }
:AND Rmr32,usimm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=4; usimm8_32 { logicalflags(); Rmr32 = Rmr32 & usimm8_32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:AND Rmr64,usimm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=4; usimm8_64 { logicalflags(); Rmr64 = Rmr64 & usimm8_64; resultflags( Rmr64); }
@endif
:AND Rmr8,Reg8 is vexMode=0 & byte=0x20; mod=3 & Rmr8 & Reg8 { logicalflags(); Rmr8 = Rmr8 & Reg8; resultflags( Rmr8); }
:AND Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x21; mod=3 & Rmr16 & Reg16 { logicalflags(); Rmr16 = Rmr16 & Reg16; resultflags( Rmr16); }
:AND Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x21; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { logicalflags(); Rmr32 = Rmr32 & Reg32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:AND Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x21; mod=3 & Rmr64 & Reg64 { logicalflags(); Rmr64 = Rmr64 & Reg64; resultflags( Rmr64); }
@endif
:AND Reg8,rm8 is vexMode=0 & byte=0x22; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 & rm8; resultflags( Reg8); }
:AND Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x23; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 & rm16; resultflags(Reg16); }
:AND Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x23; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 & rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:AND Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x23; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 & rm64; resultflags(Reg64); }
@endif

# ARPL is not encodable in 64-bit mode
:ARPL rm16,Reg16 is $(LONGMODE_OFF) & vexMode=0 & bit64=0 & byte=0x63; rm16 & Reg16 ... { local rpldest=rm16&3; local rplsrc=Reg16&3; local rpldiff=rplsrc-rpldest;
                                                                                          ZF = rpldiff s> 0; rm16 = rm16 + (zext(ZF) * rpldiff); }

:BOUND Reg16,m16 is $(LONGMODE_OFF) & vexMode=0 & bit64=0 & opsize=0 & byte=0x62; m16 & Reg16 ... { }
:BOUND Reg32,m32 is $(LONGMODE_OFF) & vexMode=0 & bit64=0 & opsize=1 & byte=0x62; m32 & Reg32 ... { }

#:BSF Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbc; rm16 & Reg16 ... { ZF = rm16 == 0;
# choose = 0xffff * (zext((0xff & rm16) == 0));
# mask = (0xf00 & choose) | (0xf | ~choose);
# pos = 8 & choose;
# choose = 0xffff * (zext((mask & rm16) == 0));
# mask1 = (mask << 2) & (mask << 4);
# mask2 = (mask >> 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (4 & choose);
# choose = 0xffff * (zext((mask & rm16) == 0));
# mask1 = (mask << 1) & (mask << 2);
# mask2 = (mask >> 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (2 & choose);
# choose = zext((mask & rm16) == 0);
# Reg16 = pos + choose; }

:BSF Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbc; rm16 & Reg16 ...
{
  bitIndex:2 = 0;

  ZF = ( rm16 == 0 );

  if ( ZF == 1 ) goto <done>;

  <start>
  if ( ((rm16 >> bitIndex) & 0x0001) != 0 ) goto <done>;
  bitIndex = bitIndex + 1;
  goto <start>;

  <done>
  Reg16 = bitIndex;
}

#:BSF Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbc; rm32 & Reg32 ... & check_Reg32_dest ... { ZF = rm32 == 0;
# choose = 0xffffffff * (zext((0xffff & rm32) == 0));
# mask = (0xff0000 & choose) | (0xff | ~choose);
# pos = 16 & choose;
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask << 4) & (mask << 8);
# mask2 = (mask >> 4) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (8 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask << 2) & (mask << 4);
# mask2 = (mask >> 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (4 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask << 1) & (mask << 2);
# mask2 = (mask >> 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (2 & choose);
# choose = zext((mask & rm32) == 0);
# Reg32 = pos + choose;
# build check_Reg32_dest; }

:BSF Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbc; rm32 & Reg32 ... & check_Reg32_dest ...
{
  bitIndex:4 = 0;

  ZF = ( rm32 == 0 );

  if ( ZF == 1 ) goto <done>;

  <start>
  if ( ((rm32 >> bitIndex) & 0x00000001) != 0 ) goto <done>;
  bitIndex = bitIndex + 1;
  goto <start>;

  <done>
  Reg32 = bitIndex;
  build check_Reg32_dest;
}

@ifdef IA64
#:BSF Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbc; rm64 & Reg64 ... { ZF = rm64 == 0;
## TODO: NEED TO EXTEND THIS TO 64bit op
# choose = 0xffffffff * (zext((0xffff & rm64) == 0));
# mask = (0xff0000 & choose) | (0xff | ~choose);
# pos = 16 & choose;
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask << 4) & (mask << 8);
# mask2 = (mask >> 4) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (8 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask << 2) & (mask << 4);
# mask2 = (mask >> 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (4 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask << 1) & (mask << 2);
# mask2 = (mask >> 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (2 & choose);
# choose = zext((mask & rm64) == 0);
# Reg64 = pos + choose; }

:BSF Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbc; rm64 & Reg64 ...
{
  bitIndex:8 = 0;

  ZF = ( rm64 == 0 );

  if ( ZF == 1 ) goto <done>;

  <start>
  if ( ((rm64 >> bitIndex) & 0x0000000000000001) != 0 ) goto <done>;
  bitIndex = bitIndex + 1;
  goto <start>;

  <done>
  Reg64 = bitIndex;
}
@endif

#:BSR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbd; rm16 & Reg16 ... { ZF = rm16 == 0;
# choose = 0xffff * (zext((0xff00 & rm16) == 0));
# mask = (0xf000 & ~choose) | (0xf0 | choose);
# pos = 16 - (8 & choose);
# choose = 0xffff * (zext((mask & rm16) == 0));
# mask1 = (mask >> 2) & (mask >> 4);
# mask2 = (mask << 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (4 & choose);
# choose = 0xffff * (zext((mask & rm16) == 0));
# mask1 = (mask >> 1) & (mask >> 2);
# mask2 = (mask << 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (2 & choose);
# choose = zext((mask & rm16) == 0);
# Reg16 = pos - choose; }

:BSR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbd; rm16 & Reg16 ...
{
  bitIndex:2 = 15;

  ZF = ( rm16 == 0 );

  if ( ZF == 1 ) goto <done>;

  <start>
  if ( (rm16 >> bitIndex) != 0 ) goto <done>;
  bitIndex = bitIndex - 1;
  goto <start>;

  <done>
  Reg16 = bitIndex;
}

#:BSR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbd; rm32 & Reg32 ... & check_Reg32_dest ... { ZF = rm32 == 0;
# choose = 0xffffffff * (zext((0xffff0000 & rm32) == 0));
# mask = (0xff000000 & ~choose) | (0xff00 | choose);
# pos = 32 - (16 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask >> 4) & (mask >> 8);
# mask2 = (mask << 4) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (8 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask >> 2) & (mask >> 4);
# mask2 = (mask << 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (4 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask >> 1) & (mask >> 2);
# mask2 = (mask << 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (2 & choose);
# choose = zext((mask & rm32) == 0);
# Reg32 = pos - choose;
# build check_Reg32_dest; }

:BSR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbd; rm32 & Reg32 ... & check_Reg32_dest ...
{
  bitIndex:4 = 31;

  ZF = ( rm32 == 0 );

  if ( ZF == 1 ) goto <done>;

  <start>
  if ( (rm32 >> bitIndex) != 0 ) goto <done>;
  bitIndex = bitIndex - 1;
  goto <start>;

  <done>
  Reg32 = bitIndex;
  build check_Reg32_dest;
}
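
# Worked example (illustrative): for a source value of 0x12 (binary
# 10010), BSF returns bit index 1 (lowest set bit) and BSR returns 4
# (highest set bit). If the source is zero, only ZF is set and the loop
# is skipped, leaving the destination holding the initial bitIndex.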

@ifdef IA64
#:BSR Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbd; rm64 & Reg64 ... { ZF = rm64 == 0;
## TODO: NEED TO EXTEND THIS TO 64bit op
# choose = 0xffffffff * (zext((0xffff0000 & rm64) == 0));
# mask = (0xff000000 & ~choose) | (0xff00 | choose);
# pos = 32 - (16 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask >> 4) & (mask >> 8);
# mask2 = (mask << 4) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (8 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask >> 2) & (mask >> 4);
# mask2 = (mask << 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (4 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask >> 1) & (mask >> 2);
# mask2 = (mask << 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (2 & choose);
# choose = zext((mask & rm64) == 0);
# Reg64 = pos - choose; }

:BSR Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbd; rm64 & Reg64 ...
{
  bitIndex:8 = 63;

  ZF = ( rm64 == 0 );

  if ( ZF == 1 ) goto <done>;

  <start>
  if ( (rm64 >> bitIndex) != 0 ) goto <done>;
  bitIndex = bitIndex - 1;
  goto <start>;

  <done>
  Reg64 = bitIndex;
}
@endif

:BSWAP Rmr32 is vexMode=0 & byte=0xf; row=12 & page=1 & Rmr32 & check_Rmr32_dest
{
  local tmp = (Rmr32 & 0xff000000) >> 24;
  tmp = tmp | ((Rmr32 & 0x00ff0000) >> 8);
  tmp = tmp | ((Rmr32 & 0x0000ff00) << 8);
  Rmr32 = tmp | ((Rmr32 & 0x000000ff) << 24);
  build check_Rmr32_dest;
}
@ifdef IA64
:BSWAP Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; row=12 & page=1 & Rmr64
{
  local tmp = (Rmr64 & 0xff00000000000000) >> 56;
  tmp = tmp | ((Rmr64 & 0x00ff000000000000) >> 40);
  tmp = tmp | ((Rmr64 & 0x0000ff0000000000) >> 24);
  tmp = tmp | ((Rmr64 & 0x000000ff00000000) >> 8);
  tmp = tmp | ((Rmr64 & 0x00000000ff000000) << 8);
  tmp = tmp | ((Rmr64 & 0x0000000000ff0000) << 24);
  tmp = tmp | ((Rmr64 & 0x000000000000ff00) << 40);
  Rmr64 = tmp | ((Rmr64 & 0x00000000000000ff) << 56);
}
@endif
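
# Worked example (illustrative): BSWAP of 0x12345678 yields 0x78563412
# (byte order reversed).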

:BT Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xa3; mod=3 & Rmr16 & Reg16 { CF = ((Rmr16 >> (Reg16 & 0xf)) & 1) != 0; }
:BT Mem,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xa3; Mem & Reg16 ... { local ptr = Mem + (sext(Reg16) s>> 3);
                                                                               CF = ((*:1 ptr >> (Reg16 & 0x7)) & 1) != 0; }
:BT Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xa3; mod=3 & Rmr32 & Reg32 { CF = ((Rmr32 >> (Reg32 & 0x1f)) & 1) != 0; }
:BT Mem,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xa3; Mem & Reg32 ... {
@ifdef IA64
  local ptr = Mem + (sext(Reg32) s>> 3);
@else
  local ptr = Mem + (Reg32 s>> 3);
@endif
  CF = ((*:1 ptr >> (Reg32 & 0x7)) & 1) != 0;
}
@ifdef IA64
:BT Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xa3; mod=3 & Rmr64 & Reg64 { CF = ((Rmr64 >> (Reg64 & 0x3f)) & 1) != 0; }
:BT Mem,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xa3; Mem & Reg64 ... { local ptr = Mem + (Reg64 s>> 3);
                                                                                               CF = ((*:1 ptr >> (Reg64 & 0x7)) & 1) != 0; }
@endif
:BT rm16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; (rm16 & reg_opcode=4 ...); imm8 { CF = ((rm16 >> (imm8 & 0x0f)) & 1) != 0; }
:BT rm32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; (rm32 & reg_opcode=4 ...); imm8 { CF = ((rm32 >> (imm8 & 0x1f)) & 1) != 0; }
@ifdef IA64
:BT rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xba; (rm64 & reg_opcode=4 ...); imm8 { CF = ((rm64 >> (imm8 & 0x3f)) & 1) != 0; }
@endif
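
# Note the asymmetry in the register-index forms above: against a
# register operand the bit offset is reduced modulo the operand width,
# but against a memory operand the byte at Mem + (offset >> 3) is
# addressed, so e.g. BT [mem],EAX with EAX=35 tests bit 3 of the byte at
# mem+4. The immediate forms always mask the offset.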

# See 'lockable.sinc' for memory destination, lockable variants
:BTC Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbb; mod=3 & Rmr16 & Reg16 { local bit=Reg16&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16^(1<<bit); CF=(val!=0); }
:BTC Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbb; mod=3 & Rmr32 & Reg32 & check_Rmr32_dest { local bit=Reg32&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32^(1<<bit); build check_Rmr32_dest; }
@ifdef IA64
:BTC Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbb; mod=3 & Rmr64 & Reg64 { local bit=Reg64&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64^(1<<bit); CF=(val!=0); }
@endif
:BTC Rmr16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; mod=3 & Rmr16 & reg_opcode=7; imm8 { local bit=imm8&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16^(1<<bit); CF=(val!=0); }
:BTC Rmr32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=7; imm8 { local bit=imm8&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32^(1<<bit); build check_Rmr32_dest; }
@ifdef IA64
:BTC Rmr64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xba; mod=3 & Rmr64 & reg_opcode=7; imm8 { local bit=imm8&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64^(1<<bit); CF=(val!=0); }
@endif

# See 'lockable.sinc' for memory destination, lockable variants
:BTR Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb3; mod=3 & Rmr16 & Reg16 { local bit=Reg16&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16 & ~(1<<bit); CF=(val!=0); }
:BTR Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb3; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { local bit=Reg32&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32 & ~(1<<bit); build check_Rmr32_dest; }
@ifdef IA64
:BTR Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xb3; mod=3 & Rmr64 & Reg64 { local bit=Reg64&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64 & ~(1<<bit); CF=(val!=0); }
@endif
:BTR Rmr16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; mod=3 & Rmr16 & reg_opcode=6; imm8 { local bit=imm8&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16 & ~(1<<bit); CF=(val!=0); }
:BTR Rmr32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; mod=3 & Rmr32 & reg_opcode=6 & check_Rmr32_dest; imm8 { local bit=imm8&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32 & ~(1<<bit); build check_Rmr32_dest; }
@ifdef IA64
:BTR Rmr64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xba; mod=3 & Rmr64 & reg_opcode=6; imm8 { local bit=imm8&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64 & ~(1<<bit); CF=(val!=0); }
@endif

# See 'lockable.sinc' for memory destination, lockable variants
:BTS Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xab; mod=3 & Rmr16 & Reg16 { local bit=Reg16&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16 | (1<<bit); CF=(val!=0); }
:BTS Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xab; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { local bit=Reg32&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32 | (1<<bit); build check_Rmr32_dest; }
@ifdef IA64
:BTS Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xab; mod=3 & Rmr64 & Reg64 { local bit=Reg64&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64 | (1<<bit); CF=(val!=0); }
@endif
:BTS Rmr16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; mod=3 & Rmr16 & reg_opcode=5; imm8 { local bit=imm8&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16 | (1<<bit); CF=(val!=0); }
:BTS Rmr32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; mod=3 & Rmr32 & reg_opcode=5 & check_Rmr32_dest; imm8 { local bit=imm8&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32 | (1<<bit); build check_Rmr32_dest; }
@ifdef IA64
:BTS Rmr64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xba; mod=3 & Rmr64 & reg_opcode=5; imm8 { local bit=imm8&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64 | (1<<bit); CF=(val!=0); }
@endif

:CALL rel16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xe8; rel16 { push22(&:2 inst_next); call rel16; }
:CALL rel16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xe8; rel16 { push42(&:2 inst_next); call rel16; }
@ifdef IA64
:CALL rel16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xe8; rel16 { push88(&:8 inst_next); call rel16; }
@endif

# A CALL whose displacement is zero targets the very next instruction: it
# only pushes the return address (a common position-independent-code idiom),
# so model it as a push and a branch rather than a true call.
:CALL rel16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xe8; simm16=0 & rel16 { push22(&:2 inst_next); goto rel16; }
:CALL rel16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xe8; simm16=0 & rel16 { push42(&:2 inst_next); goto rel16; }
@ifdef IA64
:CALL rel16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xe8; simm16=0 & rel16 { push88(&:8 inst_next); goto rel16; }
@endif

:CALL rel32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xe8; rel32 { push24(&:4 inst_next); call rel32; }
:CALL rel32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xe8; rel32 { push44(&:4 inst_next); call rel32; }
@ifdef IA64
:CALL rel32 is $(LONGMODE_ON) & vexMode=0 & (opsize=1 | opsize=2) & byte=0xe8; rel32 { push88(&:8 inst_next); call rel32; }
@endif

# Same displacement-zero special case for the 32-bit form.
:CALL rel32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xe8; simm32=0 & rel32 { push24(&:4 inst_next); goto rel32; }
:CALL rel32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xe8; simm32=0 & rel32 { push44(&:4 inst_next); goto rel32; }
@ifdef IA64
:CALL rel32 is $(LONGMODE_ON) & vexMode=0 & (opsize=1 | opsize=2) & byte=0xe8; simm32=0 & rel32 { push88(&:8 inst_next); goto rel32; }
@endif

:CALL rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xff & currentCS; rm16 & reg_opcode=2 ... { local dest:4 = segment(currentCS,rm16); push22(&:2 inst_next); call [dest]; }
:CALL rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=2 ... { local dest:2 = rm16; push42(&:2 inst_next); call [dest]; }
@ifdef IA64
:CALL rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xff; rm16 & reg_opcode=2 ... { local dest:8 = inst_next + zext(rm16); push88(&:8 inst_next); call [dest]; }
@endif

:CALL rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xff; rm32 & reg_opcode=2 ... { local dest:4 = rm32; push24(&:4 inst_next); call [dest]; }
:CALL rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; rm32 & reg_opcode=2 ... { local dest:4 = rm32; push44(&:4 inst_next); call [dest]; }
@ifdef IA64
:CALL rm64 is $(LONGMODE_ON) & vexMode=0 & (opsize=1 | opsize=2) & byte=0xff; rm64 & reg_opcode=2 ... { local dest:8 = rm64; push88(&:8 inst_next); call [dest]; }
@endif
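
# Illustrative example of the displacement-0 idiom special-cased above:
#   E8 00 00 00 00   CALL next    ; pushes the address of 'next'
#   58               POP EAX      ; EAX now holds that address
# Modeling such a CALL as a push plus a branch avoids treating this
# get-EIP sequence as a real function call.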

# direct far calls generate an undefined-opcode exception (#UD) in x86-64
:CALLF ptr1616 is vexMode=0 & addrsize=0 & opsize=0 & byte=0x9a; ptr1616 { push22(CS); build ptr1616; push22(&:2 inst_next); call ptr1616; }
:CALLF ptr1616 is vexMode=0 & addrsize=1 & opsize=0 & byte=0x9a; ptr1616 { push42(CS); build ptr1616; push42(&:2 inst_next); call ptr1616; }
:CALLF ptr1632 is vexMode=0 & addrsize=0 & opsize=1 & byte=0x9a; ptr1632 { push22(CS); build ptr1632; push24(&:4 inst_next); call ptr1632; }
:CALLF ptr1632 is vexMode=0 & addrsize=1 & opsize=1 & byte=0x9a; ptr1632 { pushseg44(CS); build ptr1632; push44(&:4 inst_next); call ptr1632; }
:CALLF addr16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xff; addr16 & reg_opcode=3 ... { local ptr:$(SIZE) = segment(DS,addr16); local addrptr:$(SIZE) = segment(*:2 (ptr+2),*:2 ptr);
                                                                                            push22(CS); push22(&:2 inst_next); call [addrptr]; }
:CALLF addr32 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; addr32 & reg_opcode=3 ... { local dest:4 = addr32; push42(CS); push42(&:2 inst_next); call [dest]; }
@ifdef IA64
:CALLF addr64 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=0 & byte=0xff; addr64 & reg_opcode=3 ... { local dest:8 = addr64; push82(CS); push82(&:2 inst_next); call [dest]; }
@endif

:CALLF addr16 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xff; addr16 & reg_opcode=3 ... { local dest:2 = addr16; push22(CS); push24(&:4 inst_next); call [dest]; }
:CALLF addr32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; addr32 & reg_opcode=3 ... { local dest:4 = addr32; pushseg44(CS); push44(&:4 inst_next); call [dest]; }
@ifdef IA64
:CALLF addr32 is $(LONGMODE_ON) & vexMode=0 & addrsize=1 & opsize=2 & byte=0xff; addr32 & reg_opcode=3 ... { local dest:4 = addr32; pushseg88(CS); push88(&:8 inst_next); call [dest]; }
:CALLF addr64 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=1 & byte=0xff; addr64 & reg_opcode=3 ... { local dest:8 = addr64; pushseg44(CS); push84(&:4 inst_next); call [dest]; }
:CALLF addr64 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=2 & byte=0xff; addr64 & reg_opcode=3 ... { local dest:8 = addr64; pushseg88(CS); push88(&:8 inst_next); call [dest]; }
@endif

:CBW is vexMode=0 & opsize=0 & byte=0x98 { AX = sext(AL); }
:CWDE is vexMode=0 & opsize=1 & byte=0x98 & check_EAX_dest { EAX = sext(AX); build check_EAX_dest; }
@ifdef IA64
:CDQE is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x98 { RAX = sext(EAX); }
@endif

:CWD is vexMode=0 & opsize=0 & byte=0x99 { tmp:4 = sext(AX); DX = tmp(2); }
:CDQ is vexMode=0 & opsize=1 & byte=0x99 & check_EDX_dest { tmp:8 = sext(EAX); EDX = tmp(4); build check_EDX_dest; }
@ifdef IA64
:CQO is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x99 { tmp:16 = sext(RAX); RDX = tmp(8); }
@endif
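
# Worked example (illustrative): CWD sign-extends AX into DX:AX, so with
# AX=0x8000 the result is DX=0xFFFF with AX unchanged; tmp(2) extracts
# the high word (bytes 2-3) of the 4-byte sign-extended temporary.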

define pcodeop clflush;
:CLFLUSH m8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=7 ) ... & m8 {
  clflush(m8);
}

:CLAC is vexMode=0 & byte=0x0F; byte=0x01; byte=0xCA { AC = 0; }

:CLC is vexMode=0 & byte=0xf8 { CF = 0; }
:CLD is vexMode=0 & byte=0xfc { DF = 0; }

# MFL: AMD instruction
# TODO: define the action.
# CLGI: clears the global interrupt flag (GIF); while GIF is zero, all external interrupts are disabled.
:CLGI is vexMode=0 & byte=0x0f; byte=0x01; byte=0xDD { clgi(); }

:CLI is vexMode=0 & byte=0xfa { IF = 0; }

define pcodeop clts;
:CLTS is vexMode=0 & byte=0x0f; byte=0x06 { CR0 = CR0 & ~(0x8); }

define pcodeop clzero;

:CLZERO is vexMode=0 & opsize=0 & byte=0x0F; byte=0x01; byte=0xFC { clzero(AX); }
:CLZERO is vexMode=0 & opsize=1 & byte=0x0F; byte=0x01; byte=0xFC { clzero(EAX); }
@ifdef IA64
:CLZERO is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0x01; byte=0xFC { clzero(RAX); }
@endif

:CMC is vexMode=0 & byte=0xf5 { CF = CF==0; }

:CMOV^cc Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; row=4 & cc; rm16 & Reg16 ... { local tmp = rm16; if (!cc) goto inst_next; Reg16 = tmp; }
:CMOV^cc Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; row=4 & cc; rm32 & Reg32 ... & check_Reg32_dest ... { local tmp = rm32; build check_Reg32_dest; if (!cc) goto inst_next; Reg32 = tmp; }
@ifdef IA64
:CMOV^cc Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; row=4 & cc; rm64 & Reg64 ... { local tmp = rm64; if (!cc) goto inst_next; Reg64 = tmp; }
@endif

:CMP AL,imm8 is vexMode=0 & byte=0x3c; AL & imm8 { subflags( AL,imm8 ); local tmp = AL - imm8; resultflags(tmp); }
:CMP AX,imm16 is vexMode=0 & opsize=0 & byte=0x3d; AX & imm16 { subflags( AX,imm16); local tmp = AX - imm16; resultflags(tmp); }
:CMP EAX,imm32 is vexMode=0 & opsize=1 & byte=0x3d; EAX & imm32 { subflags( EAX,imm32); local tmp = EAX - imm32; resultflags(tmp); }
@ifdef IA64
:CMP RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x3d; RAX & simm32 { subflags( RAX,simm32); local tmp = RAX - simm32; resultflags(tmp); }
@endif
:CMP rm8,imm8 is vexMode=0 & $(BYTE_80_82); rm8 & reg_opcode=7 ...; imm8 { local temp:1 = rm8; subflags(temp,imm8 ); local diff = temp - imm8; resultflags(diff); }
:CMP rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; rm16 & reg_opcode=7 ...; imm16 { local temp:2 = rm16; subflags(temp,imm16); local diff = temp - imm16; resultflags(diff); }
:CMP rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; rm32 & reg_opcode=7 ...; imm32 { local temp:4 = rm32; subflags(temp,imm32); local diff = temp - imm32; resultflags(diff); }
@ifdef IA64
:CMP rm64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; rm64 & reg_opcode=7 ...; simm32 { local temp:8 = rm64; subflags(temp,simm32); local diff = temp - simm32; resultflags(diff); }
@endif
:CMP rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; rm16 & reg_opcode=7 ...; simm8_16 { local temp:2 = rm16; subflags(temp,simm8_16); local diff = temp - simm8_16; resultflags(diff); }
:CMP rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; rm32 & reg_opcode=7 ...; simm8_32 { local temp:4 = rm32; subflags(temp,simm8_32); local diff = temp - simm8_32; resultflags(diff); }
@ifdef IA64
:CMP rm64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; rm64 & reg_opcode=7 ...; simm8_64 { local temp:8 = rm64; subflags(temp,simm8_64); local diff = temp - simm8_64; resultflags(diff); }
@endif
:CMP rm8,Reg8 is vexMode=0 & byte=0x38; rm8 & Reg8 ... { local temp:1 = rm8; subflags(temp,Reg8); local diff = temp - Reg8; resultflags(diff); }
:CMP rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x39; rm16 & Reg16 ... { local temp:2 = rm16; subflags(temp,Reg16); local diff = temp - Reg16; resultflags(diff); }
:CMP rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x39; rm32 & Reg32 ... { local temp:4 = rm32; subflags(temp,Reg32 ); local diff = temp - Reg32; resultflags(diff); }
@ifdef IA64
:CMP rm64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x39; rm64 & Reg64 ... { local temp:8 = rm64; subflags(temp,Reg64); local diff = temp - Reg64; resultflags(diff); }
@endif
:CMP Reg8,rm8 is vexMode=0 & byte=0x3a; rm8 & Reg8 ... { local temp:1 = rm8; subflags(Reg8,temp); local diff = Reg8 - temp; resultflags(diff); }
:CMP Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x3b; rm16 & Reg16 ... { local temp:2 = rm16; subflags(Reg16,temp); local diff = Reg16 - temp; resultflags(diff); }
:CMP Reg32,Rmr32 is vexMode=0 & opsize=1 & byte=0x3b; Reg32 & mod=3 & Rmr32 { local temp:4 = Rmr32; subflags(Reg32,temp); local diff = Reg32 - temp; resultflags(diff); }
:CMP Reg32,m32 is vexMode=0 & opsize=1 & byte=0x3b; Reg32 ... & m32 { local temp:4 = m32; subflags(Reg32, temp); local diff = Reg32 - temp; resultflags(diff); }
@ifdef IA64
:CMP Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x3b; rm64 & Reg64 ... { local temp:8 = rm64; subflags(Reg64,temp); local diff = Reg64 - temp; resultflags(diff); }
@endif
:CMPSB^repe^repetail eseDI1,dseSI1 is vexMode=0 & repe & repetail & byte=0xa6 & dseSI1 & eseDI1 { build repe; build eseDI1; build dseSI1; local temp_DI1:1 = eseDI1; local temp_SI1:1 = dseSI1; subflags(temp_SI1,temp_DI1); local diff=temp_SI1 - temp_DI1; resultflags(diff); build repetail; }
:CMPSW^repe^repetail eseDI2,dseSI2 is vexMode=0 & repe & repetail & opsize=0 & byte=0xa7 & dseSI2 & eseDI2 { build repe; build eseDI2; build dseSI2; local temp_DI2:2 = eseDI2; local temp_SI2:2 = dseSI2; subflags(temp_SI2,temp_DI2); local diff=temp_SI2 - temp_DI2; resultflags(diff); build repetail; }
:CMPSD^repe^repetail eseDI4,dseSI4 is vexMode=0 & repe & repetail & opsize=1 & byte=0xa7 & dseSI4 & eseDI4 { build repe; build eseDI4; build dseSI4; local temp_DI4:4 = eseDI4; local temp_SI4:4 = dseSI4; subflags(temp_SI4,temp_DI4); local diff=temp_SI4 - temp_DI4; resultflags(diff); build repetail; }
@ifdef IA64
:CMPSD^repe^repetail eseDI8,dseSI8 is $(LONGMODE_ON) & vexMode=0 & repe & repetail & opsize=2 & byte=0xa7 & dseSI8 & eseDI8 { build repe; build eseDI8; build dseSI8; local temp_DI8:8 = eseDI8; local temp_SI8:8 = dseSI8; subflags(temp_SI8,temp_DI8); local diff=temp_SI8-temp_DI8; resultflags(diff); build repetail; }
@endif

# See 'lockable.sinc' for memory destination, lockable variants

:CMPXCHG Rmr8,Reg8 is vexMode=0 & byte=0xf; byte=0xb0; mod=3 & Rmr8 & Reg8
{
    local dest = Rmr8;
    subflags(AL,dest);
    local diff = AL-dest;
    resultflags(diff);
    if (ZF) goto <equal>;
    AL = dest;
    goto inst_next;
    <equal>
    Rmr8 = Reg8;
}

:CMPXCHG Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb1; mod=3 & Rmr16 & Reg16
{
    local dest = Rmr16;
    subflags(AX,dest);
    local diff = AX-dest;
    resultflags(diff);
    if (ZF) goto <equal>;
    AX = dest;
    goto inst_next;
    <equal>
    Rmr16 = Reg16;
}

:CMPXCHG Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb1; mod=3 & Rmr32 & Reg32 & check_EAX_dest & check_Rmr32_dest
{
    # this instruction writes to either EAX or Rmr32;
    # in 64-bit mode, a 32-bit register that is written to
    # (and only the register that is written to)
    # must be zero-extended to 64 bits
    local dest = Rmr32;
    subflags(EAX,dest);
    local diff = EAX-dest;
    resultflags(diff);
    if (ZF) goto <equal>;
    EAX = dest;
    build check_EAX_dest;
    goto inst_next;
    <equal>
    Rmr32 = Reg32;
    build check_Rmr32_dest;
}

@ifdef IA64
:CMPXCHG Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xb1; mod=3 & Rmr64 & Reg64
{
    local dest = Rmr64;
    subflags(RAX,dest);
    local diff = RAX-dest;
    resultflags(diff);
    if (ZF) goto <equal>;
    RAX = dest;
    goto inst_next;
    <equal>
    Rmr64 = Reg64;
}
@endif

# CMPXCHG8B See 'lockable.sinc' for memory destination, lockable variants

# This "bad_CMPXCHG8B" instruction encoding was not meant to be part of the x86 language.
|
|
# It was allowed by a toolchain (at Intel) and was encoded into at least one library.
|
|
# GCC does not recognize it. It does not make any semantic sense.
|
|
define pcodeop bad_CMPXCHG8B;
|
|
:bad_CMPXCHG8B r32 is vexMode=0 & byte=0xf; byte=0xc7; ( mod = 0b11 & reg_opcode=0b001 ) & r32 {
|
|
r32 = bad_CMPXCHG8B(r32);
|
|
}
|
|
|
|
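# For example, the byte sequence 0F C7 C8 (ModRM = mod 0b11, reg 0b001, r/m
# 0b000) has no architecturally defined meaning, but decodes here as
# bad_CMPXCHG8B EAX so binaries containing it can still be disassembled.
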
# CMPXCHG16B See 'lockable.sinc' for memory destination, lockable variants

define pcodeop cpuid;
define pcodeop cpuid_basic_info;
define pcodeop cpuid_Version_info;
define pcodeop cpuid_cache_tlb_info;
define pcodeop cpuid_serial_info;
define pcodeop cpuid_Deterministic_Cache_Parameters_info;
define pcodeop cpuid_MONITOR_MWAIT_Features_info;
define pcodeop cpuid_Thermal_Power_Management_info;
define pcodeop cpuid_Extended_Feature_Enumeration_info;
define pcodeop cpuid_Direct_Cache_Access_info;
define pcodeop cpuid_Architectural_Performance_Monitoring_info;
define pcodeop cpuid_Extended_Topology_info;
define pcodeop cpuid_Processor_Extended_States_info;
define pcodeop cpuid_Quality_of_Service_info;
define pcodeop cpuid_brand_part1_info;
define pcodeop cpuid_brand_part2_info;
define pcodeop cpuid_brand_part3_info;

# CPUID is very difficult to implement correctly.
# The side-effects of the call will show up, but not the correct values.

:CPUID is vexMode=0 & byte=0xf; byte=0xa2 {
    tmpptr:$(SIZE) = 0;
    if (EAX == 0) goto <basic_info>;
    if (EAX == 1) goto <Version_info>;
    if (EAX == 2) goto <cache_tlb_info>;
    if (EAX == 3) goto <serial_info>;
    if (EAX == 0x4) goto <Deterministic_Cache_Parameters_info>;
    if (EAX == 0x5) goto <MONITOR_MWAIT_Features_info>;
    if (EAX == 0x6) goto <Thermal_Power_Management_info>;
    if (EAX == 0x7) goto <Extended_Feature_Enumeration_info>;
    if (EAX == 0x9) goto <Direct_Cache_Access_info>;
    if (EAX == 0xa) goto <Architectural_Performance_Monitoring_info>;
    if (EAX == 0xb) goto <Extended_Topology_info>;
    if (EAX == 0xd) goto <Processor_Extended_States_info>;
    if (EAX == 0xf) goto <Quality_of_Service_info>;
    if (EAX == 0x80000002) goto <brand_part1_info>;
    if (EAX == 0x80000003) goto <brand_part2_info>;
    if (EAX == 0x80000004) goto <brand_part3_info>;
    tmpptr = cpuid(EAX);
    goto <finish>;
    <basic_info>
    tmpptr = cpuid_basic_info(EAX);
    goto <finish>;
    <Version_info>
    tmpptr = cpuid_Version_info(EAX);
    goto <finish>;
    <cache_tlb_info>
    tmpptr = cpuid_cache_tlb_info(EAX);
    goto <finish>;
    <serial_info>
    tmpptr = cpuid_serial_info(EAX);
    goto <finish>;
    <Deterministic_Cache_Parameters_info>
    tmpptr = cpuid_Deterministic_Cache_Parameters_info(EAX);
    goto <finish>;
    <MONITOR_MWAIT_Features_info>
    tmpptr = cpuid_MONITOR_MWAIT_Features_info(EAX);
    goto <finish>;
    <Thermal_Power_Management_info>
    tmpptr = cpuid_Thermal_Power_Management_info(EAX);
    goto <finish>;
    <Extended_Feature_Enumeration_info>
    tmpptr = cpuid_Extended_Feature_Enumeration_info(EAX);
    goto <finish>;
    <Direct_Cache_Access_info>
    tmpptr = cpuid_Direct_Cache_Access_info(EAX);
    goto <finish>;
    <Architectural_Performance_Monitoring_info>
    tmpptr = cpuid_Architectural_Performance_Monitoring_info(EAX);
    goto <finish>;
    <Extended_Topology_info>
    tmpptr = cpuid_Extended_Topology_info(EAX);
    goto <finish>;
    <Processor_Extended_States_info>
    tmpptr = cpuid_Processor_Extended_States_info(EAX);
    goto <finish>;
    <Quality_of_Service_info>
    tmpptr = cpuid_Quality_of_Service_info(EAX);
    goto <finish>;
    <brand_part1_info>
    tmpptr = cpuid_brand_part1_info(EAX);
    goto <finish>;
    <brand_part2_info>
    tmpptr = cpuid_brand_part2_info(EAX);
    goto <finish>;
    <brand_part3_info>
    tmpptr = cpuid_brand_part3_info(EAX);
    goto <finish>;
    <finish>
@ifdef IA64
    RAX = zext(*:4 (tmpptr));
    RBX = zext(*:4 (tmpptr + 4));
    RDX = zext(*:4 (tmpptr + 8));
    RCX = zext(*:4 (tmpptr + 12));
@else
    EAX = *tmpptr;
    EBX = *(tmpptr + 4);
    EDX = *(tmpptr + 8);
    ECX = *(tmpptr + 12);
@endif
}

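# For example, executing CPUID with EAX=0 dispatches to <basic_info>, so the
# decompiler sees cpuid_basic_info(EAX); on real hardware that leaf returns
# the maximum supported leaf in EAX and the vendor identification string in
# EBX, EDX and ECX, values this model deliberately does not reproduce.
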
:DAA is vexMode=0 & bit64=0 & byte=0x27 { local car = ((AL & 0xf) > 9) | AF;
    AL = AL + 6 * car;
    CF = CF | car * carry(AL,6);
    AF = car;
    car = ((AL & 0xf0) > 0x90) | CF;
    AL = AL + 0x60 * car;
    CF = car; }

:DAS is vexMode=0 & bit64=0 & byte=0x2f { local car = ((AL & 0xf) > 9) | AF;
    AL = AL - 6 * car;
    CF = CF | car * (AL < 6);
    AF = car;
    car = (AL > 0x9f) | CF;
    AL = AL - 0x60 * car;
    CF = car; }

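# A worked example of the DAA adjustment above, assuming packed BCD operands:
# after ADD AL,0x12 with AL=0x19, AL holds 0x2B; the low nibble 0xB exceeds 9,
# so 6 is added and AL becomes 0x31, the BCD encoding of 19 + 12 = 31.
# DAS performs the symmetric correction after a subtraction.
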
# See 'lockable.sinc' for memory destination, lockable variants

:DEC Rmr8 is vexMode=0 & byte=0xfe; mod=3 & Rmr8 & reg_opcode=1 { OF = sborrow(Rmr8,1); Rmr8 = Rmr8 - 1; resultflags( Rmr8); }
:DEC Rmr16 is vexMode=0 & opsize=0 & byte=0xff; mod=3 & Rmr16 & reg_opcode=1 { OF = sborrow(Rmr16,1); Rmr16 = Rmr16 - 1; resultflags(Rmr16); }
:DEC Rmr32 is vexMode=0 & opsize=1 & byte=0xff; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=1 { OF = sborrow(Rmr32,1); Rmr32 = Rmr32 - 1; build check_rm32_dest; resultflags(Rmr32); }
@ifdef IA64
:DEC Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xff; mod=3 & Rmr64 & reg_opcode=1 { OF = sborrow(Rmr64,1); Rmr64 = Rmr64 - 1; resultflags(Rmr64); }
@endif

:DEC Rmr16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & row=4 & page=1 & Rmr16 { OF = sborrow(Rmr16,1); Rmr16 = Rmr16 - 1; resultflags( Rmr16); }
:DEC Rmr32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & row=4 & page=1 & Rmr32 & check_Rmr32_dest { OF = sborrow(Rmr32,1); Rmr32 = Rmr32 - 1; build check_Rmr32_dest; resultflags( Rmr32); }

:DIV rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=6 ... { rm8ext:2 = zext(rm8);
    local quotient = AX / rm8ext; # DE exception if quotient doesn't fit in AL
    local rem = AX % rm8ext;
    AL = quotient:1;
    AH = rem:1; }

:DIV rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=6 ... { rm16ext:4 = zext(rm16);
    tmp:4 = (zext(DX) << 16) | zext(AX); # DE exception if quotient doesn't fit in AX
    local quotient = tmp / rm16ext;
    AX = quotient:2;
    local rem = tmp % rm16ext;
    DX = rem:2; }

:DIV rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EDX_dest ... & check_EAX_dest ... & reg_opcode=6 ... { rm32ext:8 = zext(rm32);
    tmp:8 = (zext(EDX) << 32) | zext(EAX); # DE exception if quotient doesn't fit in EAX
    local quotient = tmp / rm32ext;
    EAX = quotient:4;
    build check_EAX_dest;
    local rem = tmp % rm32ext;
    EDX = rem:4;
    build check_EDX_dest; }

@ifdef IA64
:DIV rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=6 ... { rm64ext:16 = zext(rm64);
    tmp:16 = (zext(RDX) << 64) | zext(RAX); # DE exception if quotient doesn't fit in RAX
    local quotient = tmp / rm64ext;
    RAX = quotient:8;
    local rem = tmp % rm64ext;
    RDX = rem:8; }
@endif

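# A worked example of the unsigned divides above: with AX = 100 (0x64) and an
# 8-bit divisor of 7, quotient:1 = 14 (0x0E) lands in AL and the remainder 2
# in AH.  A quotient too large for its destination (e.g. AX=0x1000 divided by
# 2) raises #DE on hardware; that trap is not modeled in the p-code.
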
enterFrames: low5 is low5 { tmp:1 = low5; export tmp; }

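# ENTER's nesting-level operand is taken modulo 32: only the low five bits of
# the immediate byte reach the semantics, which is why the subtable above
# exports low5.
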
:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc8; imm16; enterFrames & low5=0x00 {
    push44(EBP);
    EBP = ESP;
    ESP = ESP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc8; imm16; enterFrames & low5=0x01 {
    push44(EBP);
    frameTemp:4 = ESP;

    push44(frameTemp);
    EBP = frameTemp;
    ESP = ESP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc8; imm16; enterFrames {
    push44(EBP);
    frameTemp:4 = ESP;
@ifdef IA64
    ESPt:$(SIZE) = zext(ESP);
    EBPt:$(SIZE) = zext(EBP);
@else
    ESPt:$(SIZE) = ESP;
    EBPt:$(SIZE) = EBP;
@endif

    ii:1 = enterFrames - 1;
    <loop>
    EBPt = EBPt - 4;
    ESPt = ESPt - 4;
    *:4 ESPt = *:4 EBPt;
    ii = ii - 1;
    if (ii s> 0) goto <loop>;

    tmp_offset:4 = 4 * zext(enterFrames - 1);
    ESP = ESP - tmp_offset;
    EBP = EBP - tmp_offset;

    push44(frameTemp);
    EBP = frameTemp;
    ESP = ESP - imm16;
}

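# A sketch of what the general (nesting level > 1) variant above models:
# for ENTER 0x10,3 the processor pushes EBP, copies the two enclosing frame
# pointers *(EBP-4) and *(EBP-8) onto the stack to rebuild the display,
# pushes the new frame pointer, and finally reserves 0x10 bytes of locals by
# subtracting the first immediate from ESP.
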
:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc8; imm16; enterFrames & low5=0x00 {
    push42(BP);
    BP = SP;
    SP = SP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc8; imm16; enterFrames & low5=0x01 {
    push42(BP);
    frameTemp:2 = SP;

    push42(frameTemp);
    BP = frameTemp;
    SP = SP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc8; imm16; enterFrames {
    push42(BP);
    frameTemp:2 = SP;
@ifdef IA64
    ESPt:$(SIZE) = zext(ESP);
    EBPt:$(SIZE) = zext(EBP);
@else
    ESPt:$(SIZE) = ESP;
    EBPt:$(SIZE) = EBP;
@endif

    ii:1 = enterFrames - 1;
    <loop>
    EBPt = EBPt - 2;
    ESPt = ESPt - 2;
    *:2 ESPt = *:2 EBPt;
    ii = ii - 1;
    if (ii s> 0) goto <loop>;

    tmp_offset:4 = 2 * zext(enterFrames - 1);
    ESP = ESP - tmp_offset;
    EBP = EBP - tmp_offset;

    push42(frameTemp);
    BP = frameTemp;
    SP = SP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xc8; imm16; enterFrames & low5=0x00 {
    push22(BP);
    BP = SP;
    SP = SP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xc8; imm16; enterFrames & low5=0x01 {
    push22(BP);
    frameTemp:2 = SP;

    push22(frameTemp);
    BP = frameTemp;
    SP = SP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & seg16 & addrsize=0 & opsize=1 & byte=0xc8; imm16; enterFrames {
    push24(zext(BP));
    frameTemp:2 = SP;

    SPt:2 = SP;
    BPt:2 = BP;
    ii:1 = enterFrames - 1;
    <loop>
    BPt = BPt - 4;
    tmp2:$(SIZE) = segment(seg16,BPt);
    SPt = SPt - 4;
    tmp:$(SIZE) = segment(SS,SPt);
    *:4 tmp = *:4 tmp2;
    ii = ii - 1;
    if (ii s> 0) goto <loop>;

    tmp_offset:2 = 4 * zext(enterFrames - 1);
    SP = SP - tmp_offset;
    BP = BP - tmp_offset;

    push24(zext(frameTemp));
    BP = frameTemp;
    SP = SP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & seg16 & addrsize=0 & opsize=0 & byte=0xc8; imm16; enterFrames {
    push22(BP);
    frameTemp:2 = SP;

    SPt:2 = SP;
    BPt:2 = BP;
    ii:1 = enterFrames - 1;
    <loop>
    BPt = BPt - 2;
    tmp2:$(SIZE) = segment(seg16,BPt);
    SPt = SPt - 2;
    tmp:$(SIZE) = segment(SS,SPt);
    *:2 tmp = *:2 tmp2;
    ii = ii - 1;
    if (ii s> 0) goto <loop>;

    tmp_offset:2 = 2 * zext(enterFrames - 1);
    SP = SP - tmp_offset;
    BP = BP - tmp_offset;

    push22(frameTemp);
    BP = frameTemp;
    SP = SP - imm16;
}

@ifdef IA64
:ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & byte=0xc8; imm16; enterFrames & low5=0x00 {
    push88(RBP);
    RBP = RSP;
    RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & byte=0xc8; imm16; enterFrames & low5=0x01 {
    push88(RBP);
    frameTemp:8 = RSP;

    push88(frameTemp);
    RBP = frameTemp;
    RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & byte=0xc8; imm16; enterFrames {
    push88(RBP);
    frameTemp:8 = RSP;

    RSPt:$(SIZE) = RSP;
    RBPt:$(SIZE) = RBP;
    ii:1 = enterFrames - 1;
    <loop>
    RBPt = RBPt - 8;
    RSPt = RSPt - 8;
    *:8 RSPt = *:8 RBPt;
    ii = ii - 1;
    if (ii s> 0) goto <loop>;

    tmp_offset:8 = 8 * zext(enterFrames - 1);
    RSP = RSP - tmp_offset;
    RBP = RBP - tmp_offset;

    push88(frameTemp);
    RBP = frameTemp;
    RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xc8; imm16; enterFrames & low5=0x00 {
    push82(BP);
    RBP = RSP;
    RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xc8; imm16; enterFrames & low5=0x01 {
    push82(BP);
    frameTemp:2 = SP;

    push82(frameTemp);
    BP = frameTemp;
    RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xc8; imm16; enterFrames {
    push82(BP);
    frameTemp:2 = SP;

    RSPt:$(SIZE) = RSP;
    RBPt:$(SIZE) = RBP;
    ii:1 = enterFrames - 1;
    <loop>
    RBPt = RBPt - 2;
    RSPt = RSPt - 2;
    *:2 RSPt = *:2 RBPt;
    ii = ii - 1;
    if (ii s> 0) goto <loop>;

    tmp_offset:8 = 2 * zext(enterFrames - 1);
    RSP = RSP - tmp_offset;
    RBP = RBP - tmp_offset;

    push82(frameTemp);
    BP = frameTemp;
    RSP = RSP - imm16;
}
@endif

# Informs the 80287 coprocessor of the switch to protected mode; treated as a NOP by the 80387 and later.
# We used to model this with a pseudo-op, but since this legacy instruction is now explicitly
# documented as a NOP, we treat it as a NOP as well.
:FSETPM is vexMode=0 & byte=0xdb; byte=0xe4 { } # 80287 set protected mode

:HLT is vexMode=0 & byte=0xf4 { goto inst_start; }

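# HLT is modeled as a branch back to the start of the instruction: execution
# effectively spins in place, which is the closest single-instruction
# approximation of the halt state available in p-code.
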
:IDIV rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=7 ... { rm8ext:2 = sext(rm8);
    local quotient = AX s/ rm8ext; # DE exception if quotient doesn't fit in AL
    local rem = AX s% rm8ext;
    AL = quotient:1;
    AH = rem:1; }

:IDIV rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=7 ... { rm16ext:4 = sext(rm16);
    tmp:4 = (zext(DX) << 16) | zext(AX); # DE exception if quotient doesn't fit in AX
    local quotient = tmp s/ rm16ext;
    AX = quotient:2;
    local rem = tmp s% rm16ext;
    DX = rem:2; }

:IDIV rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=7 ... { rm32ext:8 = sext(rm32);
    tmp:8 = (zext(EDX) << 32) | zext(EAX); # DE exception if quotient doesn't fit in EAX
    local quotient = tmp s/ rm32ext;
    EAX = quotient:4;
    build check_EAX_dest;
    local rem = tmp s% rm32ext;
    EDX = rem:4;
    build check_EDX_dest; }

@ifdef IA64
:IDIV rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=7 ... { rm64ext:16 = sext(rm64);
    tmp:16 = (zext(RDX) << 64) | zext(RAX); # DE exception if quotient doesn't fit in RAX
    local quotient = tmp s/ rm64ext;
    RAX = quotient:8;
    local rem = tmp s% rm64ext;
    RDX = rem:8; }
@endif

:IMUL rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=5 ... { AX = sext(AL) * sext(rm8); imultflags(AL,AX); }
:IMUL rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=5 ... { tmp:4 = sext(AX) * sext(rm16);
    DX = tmp(2); AX = tmp(0); imultflags(AX,tmp); }
:IMUL rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=5 ... { tmp:8 = sext(EAX) * sext(rm32);
    EDX = tmp(4); build check_EDX_dest; EAX = tmp(0); build check_EAX_dest; imultflags(EAX,tmp); }
@ifdef IA64
# We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct
:IMUL rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=5 ... { tmp:16 = sext(RAX) * sext(rm64);
    RAX = RAX * rm64; RDX = tmp(8); imultflags(RAX,tmp); }
@endif

:IMUL Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xaf; rm16 & Reg16 ... { tmp:4 = sext(Reg16) * sext(rm16);
    Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp); }
:IMUL Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xaf; rm32 & Reg32 ... & check_Reg32_dest ... { tmp:8 = sext(Reg32) * sext(rm32);
    Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; }
@ifdef IA64
# We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct
:IMUL Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xaf; rm64 & Reg64 ... { tmp:16 = sext(Reg64) * sext(rm64);
    Reg64 = Reg64 * rm64; high:8 = tmp(8); imultflags(Reg64,tmp); }
@endif

:IMUL Reg16,rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x6b; (rm16 & Reg16 ...) ; simm8_16 { tmp:4 = sext(rm16) * sext(simm8_16);
    Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp); }
:IMUL Reg32,rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x6b; (rm32 & Reg32 ... & check_Reg32_dest ... ) ; simm8_32 { tmp:8 = sext(rm32) * sext(simm8_32);
    Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; }
@ifdef IA64
# We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct
:IMUL Reg64,rm64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x6b; (rm64 & Reg64 ...) ; simm8_64 { tmp:16 = sext(rm64) * sext(simm8_64);
    Reg64 = rm64 * simm8_64; high:8 = tmp(8); imultflags(Reg64,tmp); }
@endif

:IMUL Reg16,rm16,simm16_16 is vexMode=0 & opsize=0 & byte=0x69; (rm16 & Reg16 ...) ; simm16_16 { tmp:4 = sext(rm16) * sext(simm16_16);
    Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp); }
:IMUL Reg32,rm32,simm32_32 is vexMode=0 & opsize=1 & byte=0x69; (rm32 & Reg32 ... & check_Reg32_dest ...) ; simm32_32 { tmp:8 = sext(rm32) * sext(simm32_32);
    Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; }
@ifdef IA64
:IMUL Reg64,rm64,simm32_32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x69; (rm64 & Reg64 ...) ; simm32_32 { tmp:16 = sext(rm64) * sext(simm32_32);
    Reg64 = rm64 * sext(simm32_32); high:8 = tmp(8); imultflags(Reg64,tmp); }
@endif

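# Note on the flag helper above: imultflags (defined elsewhere in this spec)
# receives the truncated destination together with the full double-width
# product, so e.g. a 16-bit IMUL of 0x0100 by 0x0100 yields tmp = 0x00010000
# but AX = 0; the discarded high half is what lets CF/OF report the signed
# overflow.
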
# these appear in intelman2.pdf, but do they really exist?
#:IMUL Reg16,simm8_16 is vexMode=0 & opsize=0 & byte=0x6b; Reg16; simm8_16
#:IMUL Reg32,simm8_32 is vexMode=0 & opsize=1 & byte=0x6b; Reg32; simm8_32
#:IMUL Reg16,simm16 is vexMode=0 & opsize=0 & byte=0x69; Reg16; simm16
#:IMUL Reg32,simm32 is vexMode=0 & opsize=1 & byte=0x69; Reg32; simm32

:IN AL, imm8 is vexMode=0 & AL & (byte=0xe4; imm8) { tmp:1 = imm8; AL = in(tmp); }
:IN AX, imm8 is vexMode=0 & opsize=0 & AX & (byte=0xe5; imm8) { tmp:1 = imm8; AX = in(tmp); }
:IN EAX, imm8 is vexMode=0 & opsize=1 & EAX & check_EAX_dest & (byte=0xe5; imm8) { tmp:1 = imm8; EAX = in(tmp); build check_EAX_dest; }
@ifdef IA64
:IN RAX, imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & RAX & (byte=0xe5; imm8) { tmp:1 = imm8; RAX = in(tmp); }
@endif

:IN AL, DX is vexMode=0 & AL & DX & (byte=0xec) { AL = in(DX); }
:IN AX, DX is vexMode=0 & opsize=0 & AX & DX & (byte=0xed) { AX = in(DX); }
:IN EAX, DX is vexMode=0 & opsize=1 & EAX & check_EAX_dest & DX & (byte=0xed) { EAX = in(DX); build check_EAX_dest; }
@ifdef IA64
:IN RAX, DX is $(LONGMODE_ON) & vexMode=0 & opsize=2 & RAX & DX & (byte=0xed) { RAX = in(DX); }
@endif

# See 'lockable.sinc' for memory destination, lockable variants

:INC Rmr8 is vexMode=0 & byte=0xfe; mod=3 & Rmr8 & reg_opcode=0 { OF = scarry(Rmr8,1); Rmr8 = Rmr8 + 1; resultflags( Rmr8); }
:INC Rmr16 is vexMode=0 & opsize=0 & byte=0xff; mod=3 & Rmr16 & reg_opcode=0 { OF = scarry(Rmr16,1); Rmr16 = Rmr16 + 1; resultflags(Rmr16); }
:INC Rmr32 is vexMode=0 & opsize=1 & byte=0xff; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=0 { OF = scarry(Rmr32,1); Rmr32 = Rmr32 + 1; build check_Rmr32_dest; resultflags(Rmr32); }
@ifdef IA64
:INC Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xff; mod=3 & Rmr64 & reg_opcode=0 { OF = scarry(Rmr64,1); Rmr64 = Rmr64 + 1; resultflags(Rmr64); }
@endif

:INC Rmr16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & row=4 & page=0 & Rmr16 { OF = scarry(Rmr16,1); Rmr16 = Rmr16 + 1; resultflags( Rmr16); }
:INC Rmr32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & row=4 & page=0 & Rmr32 { OF = scarry(Rmr32,1); Rmr32 = Rmr32 + 1; resultflags( Rmr32); }

:INSB^rep^reptail eseDI1,DX is vexMode=0 & rep & reptail & byte=0x6c & eseDI1 & DX { build rep; build eseDI1; eseDI1 = in(DX); build reptail; }
:INSW^rep^reptail eseDI2,DX is vexMode=0 & rep & reptail & opsize=0 & byte=0x6d & eseDI2 & DX { build rep; build eseDI2; eseDI2 = in(DX); build reptail; }
:INSD^rep^reptail eseDI4,DX is vexMode=0 & rep & reptail & opsize=1 & byte=0x6d & eseDI4 & DX { build rep; build eseDI4; eseDI4 = in(DX); build reptail; }
:INSD^rep^reptail eseDI4,DX is vexMode=0 & rep & reptail & opsize=2 & byte=0x6d & eseDI4 & DX { build rep; build eseDI4; eseDI4 = in(DX); build reptail; }

:INT1 is vexMode=0 & byte=0xf1 { tmp:1 = 0x1; intloc:$(SIZE) = swi(tmp); call [intloc]; return [0:1]; }
:INT3 is vexMode=0 & byte=0xcc { tmp:1 = 0x3; intloc:$(SIZE) = swi(tmp); call [intloc]; return [0:1]; }
:INT imm8 is vexMode=0 & byte=0xcd; imm8 { tmp:1 = imm8; intloc:$(SIZE) = swi(tmp); call [intloc]; }

:INTO is vexMode=0 & byte=0xce & bit64=0
{
    tmp:1 = 0x4;
    intloc:$(SIZE) = swi(tmp);

    if (OF != 1) goto <no_overflow>;
    call [intloc];
    <no_overflow>
}

:INVD is vexMode=0 & byte=0xf; byte=0x8 {}
:INVLPG Mem is vexMode=0 & byte=0xf; byte=0x1; ( reg_opcode=7 ) ... & Mem { invlpg(Mem); }

:INVLPGA is vexMode=0 & addrsize=0 & byte=0xf; byte=0x1; byte=0xDF { invlpga(AX,ECX); }
:INVLPGA is vexMode=0 & addrsize=1 & byte=0xf; byte=0x1; byte=0xDF { invlpga(EAX,ECX); }
@ifdef IA64
:INVLPGA is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0x1; byte=0xDF { invlpga(RAX,ECX); }
@endif

:INVPCID r32, m128 is vexMode=0 & addrsize=1 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x82; r32 ... & m128 { invpcid(r32, m128); }
@ifdef IA64
:INVPCID r64, m128 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x82; r64 ... & m128 { invpcid(r64, m128); }
@endif

:IRET is vexMode=0 & addrsize=0 & opsize=0 & byte=0xcf { pop22(IP); EIP=zext(IP); pop22(CS); pop22(flags); return [EIP]; }
:IRET is vexMode=0 & addrsize=1 & opsize=0 & byte=0xcf { pop42(IP); EIP=zext(IP); pop42(CS); pop42(flags); return [EIP]; }
@ifdef IA64
:IRET is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=0 & byte=0xcf { pop82(IP); RIP=zext(IP); pop82(CS); pop82(flags); return [RIP]; }
@endif
:IRETD is vexMode=0 & addrsize=0 & opsize=1 & byte=0xcf { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); pop24(tmp); flags=tmp(0); return [EIP]; }
:IRETD is vexMode=0 & addrsize=1 & opsize=1 & byte=0xcf { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); pop44(eflags); return [EIP]; }
@ifdef IA64
:IRETD is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=1 & byte=0xcf { pop84(RIP); tmp:8=0; pop84(tmp); CS=tmp(0); pop84(eflags); return [RIP]; }
:IRETQ is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=2 & byte=0xcf { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); pop88(rflags); return [RIP]; }
@endif

:J^cc rel8 is vexMode=0 & row=7 & cc; rel8 { if (cc) goto rel8; }
:J^cc rel16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; row=8 & cc; rel16 { if (cc) goto rel16; }
:J^cc rel32 is vexMode=0 & opsize=1 & byte=0xf; row=8 & cc; rel32 { if (cc) goto rel32; }
:J^cc rel32 is vexMode=0 & opsize=2 & byte=0xf; row=8 & cc; rel32 { if (cc) goto rel32; }
# The following is picked up by the line above; rel32 works for both 32- and 64-bit mode.
#@ifdef IA64
#:J^cc rel32 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; row=8 & cc; rel32 { if (cc) goto rel32; }
#@endif

:JCXZ rel8 is vexMode=0 & addrsize=0 & byte=0xe3; rel8 { if (CX==0) goto rel8; }
:JECXZ rel8 is vexMode=0 & addrsize=1 & byte=0xe3; rel8 { if (ECX==0) goto rel8; }
@ifdef IA64
:JRCXZ rel8 is $(LONGMODE_ON) & addrsize=2 & vexMode=0 & byte=0xe3; rel8 { if (RCX==0) goto rel8; }
@endif

:JMP rel8 is vexMode=0 & byte=0xeb; rel8 { goto rel8; }
:JMP rel16 is vexMode=0 & opsize=0 & byte=0xe9; rel16 { goto rel16; }
:JMP rel32 is vexMode=0 & opsize=1 & byte=0xe9; rel32 { goto rel32; }
:JMP rel32 is vexMode=0 & opsize=2 & byte=0xe9; rel32 { goto rel32; }

:JMP rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xff & currentCS; rm16 & reg_opcode=4 ... { target:4 = segment(currentCS,rm16); goto [target]; }
:JMP rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=4 ... { goto [rm16]; }
:JMP rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; rm32 & reg_opcode=4 ... { goto [rm32]; }
@ifdef IA64
:JMP rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xff & currentCS; rm16 & reg_opcode=4 ... { goto [rm16]; }
:JMP rm64 is $(LONGMODE_ON) & vexMode=0 & byte=0xff; rm64 & reg_opcode=4 ... { goto [rm64]; }
@endif

:JMPF ptr1616 is vexMode=0 & opsize=0 & byte=0xea; ptr1616 { goto ptr1616; }
:JMPF ptr1632 is vexMode=0 & opsize=1 & byte=0xea; ptr1632 { goto ptr1632; }
:JMPF Mem is vexMode=0 & opsize=0 & byte=0xff; Mem & reg_opcode=5 ... { target:$(SIZE) = zext(*:2 Mem); goto [target]; }
:JMPF Mem is vexMode=0 & opsize=1 & byte=0xff; Mem & reg_opcode=5 ... {
@ifdef IA64
    target:$(SIZE) = zext(*:4 Mem);
@else
    target:$(SIZE) = *:4 Mem;
@endif
    goto [target];
}
@ifdef IA64
:JMPF Mem is vexMode=0 & opsize=2 & byte=0xff; Mem & reg_opcode=5 ... { target:$(SIZE) = *:8 Mem; goto [target]; }
@endif

# Initially disallowed in 64-bit mode, but later reintroduced
:LAHF is vexMode=0 & byte=0x9f { AH=(SF<<7)|(ZF<<6)|(AF<<4)|(PF<<2)|2|CF; }

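# The LAHF line above packs the low status byte of EFLAGS into AH as
# SF:ZF:0:AF:0:PF:1:CF; the constant 2 supplies the always-set bit 1.
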
:LAR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x2; rm16 & Reg16 ... { Reg16 = rm16 & 0xff00; ZF=1; }
:LAR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x2; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32 & 0xffff00; build check_Reg32_dest; ZF=1; }
@ifdef IA64
:LAR Reg64,rm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x2; rm32 & Reg64 ... { Reg64 = zext( rm32 & 0xffff00 ); ZF=1; }
@endif

:LDMXCSR m32 is vexMode=0 & byte=0xf; byte=0xae; ( mod != 0b11 & reg_opcode=2 ) ... & m32 { MXCSR = m32; }

# 16 & 32-bit only
|
|
:LDS Reg16,Mem is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xC5; Mem & Reg16 ... { tmp:4 = *Mem; DS = tmp(2); Reg16 = tmp(0); }
|
|
:LDS Reg32,Mem is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xC5 & bit64=0; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; DS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
|
|
|
|
:LSS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB2; Mem & Reg16 ... { tmp:4 = *Mem; SS = tmp(2); Reg16 = tmp(0); }
|
|
:LSS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB2; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; SS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
|
|
@ifdef IA64
|
|
:LSS Reg64,Mem is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0xB2; Mem & Reg64 ... { tmp:10 = *Mem; SS = tmp(8); Reg64 = tmp(0); }
|
|
@endif
|
|
|
|
# 16 & 32-bit only
|
|
:LES Reg16,Mem is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xC4; Mem & Reg16 ... { tmp:4 = *Mem; ES = tmp(2); Reg16 = tmp(0); }
|
|
:LES Reg32,Mem is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xC4 & bit64=0; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; ES = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
|
|
|
|
:LFS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB4; Mem & Reg16 ... { tmp:4 = *Mem; FS = tmp(2); Reg16 = tmp(0); }
|
|
:LFS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB4; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; FS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
|
|
@ifdef IA64
|
|
:LFS Reg64,Mem is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0xB4; Mem & Reg64 ... { tmp:10 = *Mem; FS = tmp(8); Reg64 = tmp(0); }
|
|
@endif
|
|
:LGS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB5; Mem & Reg16 ... { tmp:4 = *Mem; GS = tmp(2); Reg16 = tmp(0); }
|
|
:LGS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB5; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; GS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
|
|
@ifdef IA64
|
|
:LGS Reg64,Mem is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0xB5; Mem & Reg64 ... { tmp:10 = *Mem; GS = tmp(8); Reg64 = tmp(0); }
|
|
@endif
|
|
|
|
# in 64-bit mode an address size of 16 is not encodable
:LEA Reg16,addr16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & addrsize=0 & byte=0x8D; addr16 & Reg16 ... { Reg16 = addr16; }
:LEA Reg32,addr16 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & addrsize=0 & byte=0x8D; addr16 & Reg32 ... { Reg32 = zext(addr16); }

:LEA Reg16,addr32 is vexMode=0 & opsize=0 & addrsize=1 & byte=0x8D; addr32 & Reg16 ... { Reg16 = addr32(0); }
:LEA Reg32,addr32 is vexMode=0 & opsize=1 & addrsize=1 & byte=0x8D; addr32 & Reg32 ... & check_Reg32_dest ... {
    Reg32 = addr32;
    build check_Reg32_dest;
}

@ifdef IA64
:LEA Reg16,addr64 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & addrsize=2 & byte=0x8D; addr64 & Reg16 ... { Reg16 = addr64(0); }
:LEA Reg32,addr64 is $(LONGMODE_ON) & vexMode=0 & opsize=1 & addrsize=2 & byte=0x8D; addr64 & Reg32 ... & check_Reg32_dest ... {
    Reg32 = addr64(0);
    build check_Reg32_dest;
}
:LEA Reg64,addr32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & addrsize=1 & byte=0x8D; addr32 & Reg64 ... { Reg64 = zext(addr32); }
:LEA Reg64,addr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & addrsize=2 & byte=0x8D; addr64 & Reg64 ... { Reg64 = addr64; }
@endif

:LEAVE is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xc9 { SP = BP; pop22(BP); }
:LEAVE is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xc9 { ESP = EBP; pop24(EBP); }
:LEAVE is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc9 { ESP = EBP; pop44(EBP); }
:LEAVE is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc9 { ESP = EBP; pop42(BP); }
@ifdef IA64
:LEAVE is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xc9 { RSP = RBP; pop82(BP); }
:LEAVE is $(LONGMODE_ON) & vexMode=0 & byte=0xc9 { RSP = RBP; pop88(RBP); }
@endif

define pcodeop GlobalDescriptorTableRegister;
:LGDT m16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m16
{
    GlobalDescriptorTableRegister(m16);
}

:LGDT m32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m32
{
    GlobalDescriptorTableRegister(m32);
}
@ifdef IA64
:LGDT m64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m64
{
    GlobalDescriptorTableRegister(m64);
}
@endif

define pcodeop InterruptDescriptorTableRegister;
:LIDT m16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m16
{
    InterruptDescriptorTableRegister(m16);
}

:LIDT m32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m32
{
    InterruptDescriptorTableRegister(m32);
}
@ifdef IA64
:LIDT m64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m64
{
    InterruptDescriptorTableRegister(m64);
}
@endif

define pcodeop LocalDescriptorTableRegister;
:LLDT rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=2 ...
{
    LocalDescriptorTableRegister(rm16);
}

@ifdef IA64
:LMSW rm16 is vexMode=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=6 ...
{
    CR0 = (CR0 & 0xFFFFFFFFFFFFFFF0) | zext(rm16 & 0x000F);
}
@else
:LMSW rm16 is vexMode=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=6 ...
{
    CR0 = (CR0 & 0xFFFFFFF0) | zext(rm16 & 0x000F);
}
@endif

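# LMSW only replaces the low four CR0 bits (PE, MP, EM, TS); the masks above
# keep the remaining CR0 bits intact, with the mask width matching the
# control-register size of each build.
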
:LODSB^rep^reptail dseSI1 is vexMode=0 & rep & reptail & byte=0xAC & dseSI1 { build rep; build dseSI1; AL=dseSI1; build reptail; }
:LODSW^rep^reptail dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xAD & dseSI2 { build rep; build dseSI2; AX=dseSI2; build reptail; }
:LODSD^rep^reptail dseSI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xAD & dseSI4 { build rep; build dseSI4; EAX=dseSI4; build reptail; }
@ifdef IA64
:LODSQ^rep^reptail dseSI8 is $(LONGMODE_ON) & vexMode=0 & rep & reptail & opsize=2 & byte=0xAD & dseSI8 { build rep; build dseSI8; RAX=dseSI8; build reptail; }
@endif

:LOOP rel8 is vexMode=0 & addrsize=0 & byte=0xE2; rel8 { CX = CX - 1; if (CX!=0) goto rel8; }
:LOOP rel8 is vexMode=0 & addrsize=1 & byte=0xE2; rel8 { ECX = ECX - 1; if (ECX!=0) goto rel8; }
@ifdef IA64
:LOOP rel8 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xE2; rel8 { RCX = RCX - 1; if (RCX!=0) goto rel8; }
@endif

:LOOPZ rel8 is vexMode=0 & addrsize=0 & byte=0xE1; rel8 { CX = CX - 1; if (CX!=0 && ZF!=0) goto rel8; }
:LOOPZ rel8 is vexMode=0 & addrsize=1 & byte=0xE1; rel8 { ECX = ECX - 1; if (ECX!=0 && ZF!=0) goto rel8; }
@ifdef IA64
:LOOPZ rel8 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xE1; rel8 { RCX = RCX - 1; if (RCX!=0 && ZF!=0) goto rel8; }
@endif

:LOOPNZ rel8 is vexMode=0 & addrsize=0 & byte=0xE0; rel8 { CX = CX - 1; if (CX!=0 && ZF==0) goto rel8; }
:LOOPNZ rel8 is vexMode=0 & addrsize=1 & byte=0xE0; rel8 { ECX = ECX - 1; if (ECX!=0 && ZF==0) goto rel8; }
@ifdef IA64
:LOOPNZ rel8 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xE0; rel8 { RCX = RCX - 1; if (RCX!=0 && ZF==0) goto rel8; }
@endif

define pcodeop SegmentLimit;
:LSL Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x3; rm16 & Reg16 ...
{
    tmp:3 = SegmentLimit(rm16);
    Reg16 = tmp:2;
    ZF = tmp[16,1];
}

:LSL Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x3; rm32 & Reg32 ...
{
    tmp:3 = SegmentLimit(rm32);
    Reg32 = zext(tmp:2);
    ZF = tmp[16,1];
}

@ifdef IA64
:LSL Reg64,rm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x3; rm32 & Reg64 ...
{
    tmp:3 = SegmentLimit(rm32);
    Reg64 = zext(tmp:2);
    ZF = tmp[16,1];
}
@endif

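# In the LSL constructors above, SegmentLimit is modeled as returning a 3-byte
# value: bits 0-15 carry the segment limit and bit 16 stands in for the
# success indication that LSL reports through ZF.
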
define pcodeop TaskRegister;
:LTR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=3 ... { TaskRegister(rm16); }

:MOV Rmr8,Reg8 is vexMode=0 & byte=0x88; mod=3 & Rmr8 & Reg8 { Rmr8=Reg8; }

:MOV^xrelease m8,Reg8 is vexMode=0 & xrelease & byte=0x88; m8 & Reg8 ...
{
    build xrelease;
    build m8;
    m8=Reg8;
}

:MOV Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x89; mod=3 & Rmr16 & Reg16 { Rmr16=Reg16; }

:MOV^xrelease m16,Reg16 is vexMode=0 & xrelease & opsize=0 & byte=0x89; m16 & Reg16 ...
{
    build xrelease;
    build m16;
    m16=Reg16;
}

:MOV Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x89; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { Rmr32=Reg32; build check_Rmr32_dest; }

:MOV^xrelease m32,Reg32 is vexMode=0 & xrelease & opsize=1 & byte=0x89; m32 & Reg32 ...
{
    build xrelease;
    build m32;
    m32=Reg32;
}

@ifdef IA64
:MOV Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x89; mod=3 & Rmr64 & Reg64 { Rmr64=Reg64; }

:MOV^xrelease m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & xrelease & opsize=2 & byte=0x89; m64 & Reg64 ...
{
    build xrelease;
    build m64;
    m64=Reg64;
}
@endif

:MOV Reg8,rm8 is vexMode=0 & byte=0x8a; rm8 & Reg8 ... { Reg8 = rm8; }
:MOV Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x8b; rm16 & Reg16 ... { Reg16 = rm16; }
:MOV Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x8b; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32; build check_Reg32_dest; }
@ifdef IA64
:MOV Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x8b; rm64 & Reg64 ... { Reg64 = rm64; }
@endif
:MOV rm16,Sreg is vexMode=0 & byte=0x8c; rm16 & Sreg ... { rm16 = Sreg; }
:MOV Sreg,rm16 is vexMode=0 & byte=0x8e; rm16 & Sreg ... { Sreg=rm16; }
:MOV AL,moffs8 is vexMode=0 & byte=0xa0; AL & moffs8 { AL=moffs8; }
:MOV AX,moffs16 is vexMode=0 & opsize=0 & byte=0xa1; AX & moffs16 { AX=moffs16; }
:MOV EAX,moffs32 is vexMode=0 & opsize=1 & byte=0xa1; EAX & check_EAX_dest & moffs32 { EAX=moffs32; build check_EAX_dest; }
@ifdef IA64
:MOV RAX,moffs64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xa1; RAX & moffs64 { RAX=moffs64; }
@endif
:MOV moffs8,AL is vexMode=0 & byte=0xa2; AL & moffs8 { moffs8=AL; }
:MOV moffs16,AX is vexMode=0 & opsize=0 & byte=0xa3; AX & moffs16 { moffs16=AX; }
:MOV moffs32,EAX is vexMode=0 & opsize=1 & byte=0xa3; EAX & moffs32 { moffs32=EAX; }
@ifdef IA64
:MOV moffs64,RAX is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xa3; RAX & moffs64 { moffs64=RAX; }
@endif
:MOV CRmr8,imm8 is vexMode=0 & row=11 & page=0 & CRmr8; imm8 { CRmr8 = imm8; }
:MOV CRmr16,imm16 is vexMode=0 & opsize=0 & row=11 & page=1 & CRmr16; imm16 { CRmr16 = imm16; }
:MOV CRmr32,imm32 is vexMode=0 & opsize=1 & row=11 & page=1 & CRmr32; imm32 { CRmr32 = imm32; }
@ifdef IA64
:MOV Rmr64,imm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & row=11 & page=1 & Rmr64; imm64 { Rmr64 = imm64; }
@endif

:MOV Rmr8,imm8 is vexMode=0 & byte=0xc6; (mod=3 & Rmr8 & reg_opcode=0); imm8 { Rmr8 = imm8; }

:MOV^xrelease m8,imm8 is vexMode=0 & xrelease & byte=0xc6; m8 & reg_opcode=0 ...; imm8
{
    build xrelease;
    build m8;
    m8 = imm8;
}

:MOV Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0xc7; (mod=3 & Rmr16 & reg_opcode=0); imm16 { Rmr16 = imm16; }

:MOV^xrelease m16,imm16 is vexMode=0 & xrelease & opsize=0 & byte=0xc7; m16 & reg_opcode=0 ...; imm16
{
    build xrelease;
    build m16;
    m16 = imm16;
}

:MOV Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0xc7; (mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=0); imm32 { Rmr32 = imm32; build check_Rmr32_dest; }

:MOV^xrelease m32,imm32 is vexMode=0 & xrelease & opsize=1 & byte=0xc7; (m32 & reg_opcode=0 ...); imm32
{
    build xrelease;
    build m32;
    m32 = imm32;
}

@ifdef IA64
:MOV Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xc7; (mod=3 & Rmr64 & reg_opcode=0); simm32 { Rmr64 = simm32; }

:MOV^xrelease m64,simm32 is $(LONGMODE_ON) & vexMode=0 & xrelease & opsize=2 & byte=0xc7; (m64 & reg_opcode=0 ...); simm32
{
    build xrelease;
    build m64;
    m64 = simm32;
}
@endif

:MOV creg, Rmr32 is vexMode=0 & byte=0xf; byte=0x22; Rmr32 & creg {
@ifdef IA64
    creg=zext(Rmr32);
@else
    creg=Rmr32;
@endif
}
@ifdef IA64
:MOV creg, Rmr64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x22; Rmr64 & creg { creg=Rmr64; }
:MOV creg_x, Rmr64 is $(LONGMODE_ON) & vexMode=0 & rexRprefix=1 & byte=0xf; byte=0x22; Rmr64 & creg_x { creg_x=Rmr64; }
@endif

:MOV Rmr32, creg is $(LONGMODE_OFF) & vexMode=0 & byte=0xf; byte=0x20; Rmr32 & creg {
@ifdef IA64
    Rmr32 = creg:4;
@else
    Rmr32 = creg;
@endif
}
@ifdef IA64
:MOV Rmr64, creg is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x20; Rmr64 & creg { Rmr64 = creg; }
:MOV Rmr64, creg_x is $(LONGMODE_ON) & vexMode=0 & rexRprefix=1 & byte=0xf; byte=0x20; Rmr64 & creg_x { Rmr64 = creg_x; }
@endif

:MOV Rmr32, debugreg is $(LONGMODE_OFF) & vexMode=0 & byte=0xf; byte=0x21; Rmr32 & debugreg {
@ifdef IA64
    Rmr32 = debugreg:4;
@else
    Rmr32 = debugreg;
@endif
}
@ifdef IA64
:MOV Rmr64, debugreg is $(LONGMODE_ON) & vexMode=0 & bit64=1 & byte=0xf; byte=0x21; Rmr64 & debugreg { Rmr64 = debugreg; }
:MOV Rmr64, debugreg_x is $(LONGMODE_ON) & vexMode=0 & bit64=1 & rexRprefix=1 & byte=0xf; byte=0x21; Rmr64 & debugreg_x { Rmr64 = debugreg_x; }
@endif

:MOV debugreg, Rmr32 is $(LONGMODE_OFF) & vexMode=0 & byte=0xf; byte=0x23; Rmr32 & debugreg {
@ifdef IA64
    debugreg = zext(Rmr32);
@else
    debugreg = Rmr32;
@endif
}
@ifdef IA64
:MOV debugreg, Rmr64 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & byte=0xf; byte=0x23; Rmr64 & debugreg & mod=3 { debugreg = Rmr64; }
:MOV debugreg_x, Rmr64 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & rexRprefix=1 & byte=0xf; byte=0x23; Rmr64 & debugreg_x & mod=3 { debugreg_x = Rmr64; }
@endif

@ifndef IA64
# These are obsolete instructions after the 486 generation.
:MOV r32, testreg is vexMode=0 & byte=0xf; byte=0x24; r32 & testreg & mod=3 { r32 = testreg; }
:MOV testreg, r32 is vexMode=0 & byte=0xf; byte=0x26; r32 & testreg & mod=3 { testreg = r32; }
@endif

define pcodeop swap_bytes;
:MOVBE Reg16, m16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x38; byte=0xf0; Reg16 ... & m16 { Reg16 = swap_bytes( m16 ); }
:MOVBE Reg32, m32 is vexMode=0 & opsize=1 & mandover=0 & byte=0xf; byte=0x38; byte=0xf0; Reg32 ... & m32 { Reg32 = swap_bytes( m32 ); }
:MOVBE m16, Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x38; byte=0xf1; Reg16 ... & m16 { m16 = swap_bytes( Reg16 ); }
:MOVBE m32, Reg32 is vexMode=0 & opsize=1 & mandover=0 & byte=0xf; byte=0x38; byte=0xf1; Reg32 ... & m32 { m32 = swap_bytes( Reg32 ); }
@ifdef IA64
:MOVBE Reg64, m64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & mandover=0 & byte=0xf; byte=0x38; byte=0xf0; Reg64 ... & m64 { Reg64 = swap_bytes( m64 ); }
:MOVBE m64, Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & mandover=0 & byte=0xf; byte=0x38; byte=0xf1; Reg64 ... & m64 { m64 = swap_bytes( Reg64 ); }
@endif

:MOVNTI Mem,Reg32 is vexMode=0 & opsize=1; byte=0xf; byte=0xc3; Mem & Reg32 ... { *Mem = Reg32; }
@ifdef IA64
:MOVNTI Mem,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0xf; byte=0xc3; Mem & Reg64 ... { *Mem = Reg64; }
@endif

:MOVSB^rep^reptail eseDI1,dseSI1 is vexMode=0 & rep & reptail & byte=0xa4 & eseDI1 & dseSI1 { build rep; build eseDI1; build dseSI1; eseDI1 = dseSI1; build reptail; }
:MOVSW^rep^reptail eseDI2,dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xa5 & eseDI2 & dseSI2 { build rep; build eseDI2; build dseSI2; eseDI2 = dseSI2; build reptail; }
:MOVSD^rep^reptail eseDI4,dseSI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xa5 & eseDI4 & dseSI4 { build rep; build eseDI4; build dseSI4; eseDI4 = dseSI4; build reptail; }
@ifdef IA64
:MOVSQ^rep^reptail eseDI8,dseSI8 is $(LONGMODE_ON) & vexMode=0 & rep & reptail & opsize=2 & byte=0xa5 & eseDI8 & dseSI8 { build rep; build eseDI8; build dseSI8; eseDI8 = dseSI8; build reptail; }
@endif

:MOVSX Reg16,rm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbe; rm8 & Reg16 ... { Reg16 = sext(rm8); }
:MOVSX Reg32,rm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbe; rm8 & Reg32 ... & check_Reg32_dest ... { Reg32 = sext(rm8); build check_Reg32_dest; }
@ifdef IA64
:MOVSX Reg64,rm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbe; rm8 & Reg64 ... { Reg64 = sext(rm8); }
@endif
:MOVSX Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbf; rm16 & Reg16 ... { Reg16 = rm16; }
:MOVSX Reg32,rm16 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbf; rm16 & Reg32 ... & check_Reg32_dest ... { Reg32 = sext(rm16); build check_Reg32_dest; }
@ifdef IA64
:MOVSX Reg64,rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbf; rm16 & Reg64 ... { Reg64 = sext(rm16); }
@endif

:MOVSXD Reg32,rm32 is vexMode=0 & bit64=1 & opsize=1 & byte=0x63; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32; build check_Reg32_dest; }
@ifdef IA64
:MOVSXD Reg64,rm32 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & opsize=2 & byte=0x63; rm32 & Reg64 ... { Reg64 = sext(rm32); }
@endif

:MOVZX Reg16,rm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb6; rm8 & Reg16 ... { Reg16 = zext(rm8); }
:MOVZX Reg32,rm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb6; rm8 & Reg32 ... & check_Reg32_dest ... { Reg32 = zext(rm8); build check_Reg32_dest; }
@ifdef IA64
:MOVZX Reg64,rm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xb6; rm8 & Reg64 ... { Reg64 = zext(rm8); }
@endif
:MOVZX Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb7; rm16 & Reg16 ... { Reg16 = rm16; }
:MOVZX Reg32,rm16 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb7; rm16 & Reg32 ... & check_Reg32_dest ... { Reg32 = zext(rm16); build check_Reg32_dest; }
@ifdef IA64
:MOVZX Reg64,rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xb7; rm16 & Reg64 ... { Reg64 = zext(rm16); }
@endif

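# Note: the Reg16,rm16 forms of MOVSX and MOVZX above degenerate to a plain
# 16-bit copy, since extending a 16-bit value into a 16-bit destination
# leaves it unchanged.
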
:MUL rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=4 ... { AX=zext(AL)*zext(rm8); multflags(AH); }
:MUL rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=4 ... { tmp:4=zext(AX)*zext(rm16); DX=tmp(2); AX=tmp(0); multflags(DX); }
:MUL rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=4 ... { tmp:8=zext(EAX)*zext(rm32); EDX=tmp(4); build check_EDX_dest; multflags(EDX); EAX=tmp(0); build check_EAX_dest; }
@ifdef IA64
:MUL rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=4 ... { tmp:16=zext(RAX)*zext(rm64); RDX=tmp(8); RAX=tmp(0); multflags(RDX); }
@endif

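# A worked example of the unsigned multiply flags: MUL of AL=0x40 by 0x10
# gives AX=0x0400, so AH=0x04 is non-zero and multflags records through
# CF/OF that the product overflowed the low half.
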
:MWAIT is vexMode=0 & byte=0x0f; byte=0x01; byte=0xC9 { mwait(); }
:MWAITX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xFB { mwaitx(); }
:MONITOR is vexMode=0 & byte=0x0f; byte=0x01; byte=0xC8 { monitor(); }
:MONITORX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xFA { monitorx(); }

# See 'lockable.sinc' for memory destination, lockable variants

:NEG Rmr8 is vexMode=0 & byte=0xf6; mod=3 & Rmr8 & reg_opcode=3 { negflags(Rmr8); Rmr8 = -Rmr8; resultflags(Rmr8 ); }
:NEG Rmr16 is vexMode=0 & opsize=0 & byte=0xf7; mod=3 & Rmr16 & reg_opcode=3 { negflags(Rmr16); Rmr16 = -Rmr16; resultflags(Rmr16); }
:NEG Rmr32 is vexMode=0 & opsize=1 & byte=0xf7; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=3 { negflags(Rmr32); Rmr32 = -Rmr32; resultflags(Rmr32); build check_Rmr32_dest; }
@ifdef IA64
:NEG Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; mod=3 & Rmr64 & reg_opcode=3 { negflags(Rmr64); Rmr64 = -Rmr64; resultflags(Rmr64); }
@endif

:NOP is vexMode=0 & byte=0x90 & (mandover=0 | mandover=4 | mandover=1) & (rexprefix=0 | rexWRXBprefix=8) { }
:NOP rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; high5=3; rm16 ... { }
:NOP rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; high5=3; rm32 ... { }
:NOP^"/reserved" rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; byte=0x18; rm16 & reg_opcode_hb=1 ... { }
:NOP^"/reserved" rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; byte=0x18; rm32 & reg_opcode_hb=1 ... { }
:NOP rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; byte=0x1f; rm16 & reg_opcode=0 ... { }
:NOP rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; byte=0x1f; rm32 & reg_opcode=0 ... { }

# See 'lockable.sinc' for memory destination, lockable variants

:NOT Rmr8 is vexMode=0 & byte=0xf6; mod=3 & Rmr8 & reg_opcode=2 { Rmr8 = ~Rmr8; }
:NOT Rmr16 is vexMode=0 & opsize=0 & byte=0xf7; mod=3 & Rmr16 & reg_opcode=2 { Rmr16 = ~Rmr16; }
:NOT Rmr32 is vexMode=0 & opsize=1 & byte=0xf7; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=2 { Rmr32 = ~Rmr32; build check_Rmr32_dest; }
@ifdef IA64
:NOT Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; mod=3 & Rmr64 & reg_opcode=2 { Rmr64 = ~Rmr64; }
@endif

# See 'lockable.sinc' for memory destination, lockable variants

:OR AL,imm8 is vexMode=0 & byte=0x0c; AL & imm8 { logicalflags(); AL = AL | imm8; resultflags( AL); }
:OR AX,imm16 is vexMode=0 & opsize=0 & byte=0xd; AX & imm16 { logicalflags(); AX = AX | imm16; resultflags( AX); }
:OR EAX,imm32 is vexMode=0 & opsize=1 & byte=0xd; EAX & check_EAX_dest & imm32 { logicalflags(); EAX = EAX | imm32; build check_EAX_dest; resultflags( EAX); }
@ifdef IA64
:OR RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xd; RAX & simm32 { logicalflags(); RAX = RAX | simm32; resultflags( RAX); }
@endif
:OR Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=1; imm8 { logicalflags(); Rmr8 = Rmr8 | imm8; resultflags( Rmr8); }
:OR Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=1; imm16 { logicalflags(); Rmr16 = Rmr16 | imm16; resultflags( Rmr16); }
:OR Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=1; imm32 { logicalflags(); Rmr32 = Rmr32 | imm32; build check_rm32_dest; resultflags( Rmr32); }
@ifdef IA64
:OR Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=1; simm32 { logicalflags(); tmp:8 = Rmr64; Rmr64 = tmp | simm32; resultflags( Rmr64); }
@endif
:OR Rmr16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=1; usimm8_16 { logicalflags(); Rmr16 = Rmr16 | usimm8_16; resultflags( Rmr16); }
:OR Rmr32,usimm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=1; usimm8_32 { logicalflags(); Rmr32 = Rmr32 | usimm8_32; build check_rm32_dest; resultflags( Rmr32); }
@ifdef IA64
:OR Rmr64,usimm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=1; usimm8_64 { logicalflags(); Rmr64 = Rmr64 | usimm8_64; resultflags( Rmr64); }
@endif
:OR Rmr8,Reg8 is vexMode=0 & byte=0x8; mod=3 & Rmr8 & Reg8 { logicalflags(); Rmr8 = Rmr8 | Reg8; resultflags( Rmr8); }
:OR Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x9; mod=3 & Rmr16 & Reg16 { logicalflags(); Rmr16 = Rmr16 | Reg16; resultflags( Rmr16); }
:OR Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x9; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { logicalflags(); Rmr32 = Rmr32 | Reg32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:OR Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x9; mod=3 & Rmr64 & Reg64 { logicalflags(); Rmr64 = Rmr64 | Reg64; resultflags( Rmr64); }
@endif
:OR Reg8,rm8 is vexMode=0 & byte=0xa; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 | rm8; resultflags( Reg8); }
:OR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xb; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 | rm16; resultflags(Reg16); }
:OR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xb; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 | rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:OR Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xb; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 | rm64; resultflags(Reg64); }
@endif

:OUT imm8,AL is vexMode=0 & byte=0xe6; imm8 & AL { tmp:1 = imm8; out(tmp,AL); }
:OUT imm8,AX is vexMode=0 & opsize=0 & byte=0xe7; imm8 & AX { tmp:1 = imm8; out(tmp,AX); }
:OUT imm8,EAX is vexMode=0 & byte=0xe7; imm8 & EAX { tmp:1 = imm8; out(tmp,EAX); }
:OUT DX,AL is vexMode=0 & byte=0xee & DX & AL { out(DX,AL); }
:OUT DX,AX is vexMode=0 & opsize=0 & byte=0xef & DX & AX { out(DX,AX); }
:OUT DX,EAX is vexMode=0 & byte=0xef & DX & EAX { out(DX,EAX); }

:OUTSB^rep^reptail DX,dseSI1 is vexMode=0 & rep & reptail & byte=0x6e & DX & dseSI1 { build rep; build dseSI1; out(dseSI1,DX); build reptail; }
:OUTSW^rep^reptail DX,dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0x6f & DX & dseSI2 { build rep; build dseSI2; out(dseSI2,DX); build reptail; }
:OUTSD^rep^reptail DX,dseSI4 is vexMode=0 & rep & reptail & byte=0x6f & DX & dseSI4 { build rep; build dseSI4; out(dseSI4,DX); build reptail; }

:PAUSE is vexMode=0 & opsize=0 & $(PRE_F3) & byte=0x90 { }
:PAUSE is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x90 { }

:POP rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { local val:2 = 0; pop22(val); build rm16; rm16 = val; }
:POP rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { local val:2 = 0; pop42(val); build rm16; rm16 = val; }
:POP rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x8f; rm32 & reg_opcode=0 ... { local val:4 = 0; pop24(val); build rm32; rm32 = val; }
:POP rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x8f; rm32 & reg_opcode=0 ... { local val:4 = 0; pop44(val); build rm32; rm32 = val; }
@ifdef IA64
:POP rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { local val:2 = 0; pop82(val); build rm16; rm16 = val; }
:POP rm64 is $(LONGMODE_ON) & vexMode=0 & byte=0x8f; rm64 & reg_opcode=0 ... { local val:8 = 0; pop88(val); build rm64; rm64 = val; }
@endif

:POP Rmr16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & row=5 & page=1 & Rmr16 { local val:2 = 0; pop22(val); Rmr16 = val; }
:POP Rmr16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & row=5 & page=1 & Rmr16 { local val:2 = 0; pop42(val); Rmr16 = val; }
:POP Rmr32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & row=5 & page=1 & Rmr32 { local val:4 = 0; pop24(val); Rmr32 = val; }
:POP Rmr32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & row=5 & page=1 & Rmr32 { local val:4 = 0; pop44(val); Rmr32 = val; }
@ifdef IA64
:POP Rmr16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & row=5 & page=1 & Rmr16 { local val:2 = 0; pop82(val); Rmr16 = val; }
:POP Rmr64 is $(LONGMODE_ON) & vexMode=0 & row=5 & page=1 & Rmr64 { local val:8 = 0; pop88(val); Rmr64 = val; }
@endif

:POP DS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x1f & DS { pop22(DS); }
:POP DS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x1f & DS { popseg44(DS); }
:POP ES is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x7 & ES { pop22(ES); }
:POP ES is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x7 & ES { popseg44(ES); }
:POP SS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x17 & SS { pop22(SS); }
:POP SS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x17 & SS { popseg44(SS); }
:POP FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xf; byte=0xa1 & FS { pop22(FS); }
:POP FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xf; byte=0xa1 & FS { popseg44(FS); }
@ifdef IA64
:POP FS is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0xa1 & FS { popseg88(FS); }
@endif
:POP GS is vexMode=0 & addrsize=0 & byte=0xf; byte=0xa9 & GS { pop22(GS); }
:POP GS is vexMode=0 & addrsize=1 & byte=0xf; byte=0xa9 & GS { popseg44(GS); }
@ifdef IA64
:POP GS is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0xa9 & GS { popseg88(GS); }
@endif

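# POPA/POPAD pop the saved SP/ESP image into a scratch temporary and discard it;
# the stack pointer is recovered implicitly by the eight pops themselves.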
:POPA is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x61 { pop22(DI); pop22(SI); pop22(BP); tmp:2=0; pop22(tmp); pop22(BX); pop22(DX); pop22(CX); pop22(AX); }
:POPA is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x61 { pop42(DI); pop42(SI); pop42(BP); tmp:2=0; pop42(tmp); pop42(BX); pop42(DX); pop42(CX); pop42(AX); }
:POPAD is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x61 { pop24(EDI); pop24(ESI); pop24(EBP); tmp:4=0; pop24(tmp); pop24(EBX); pop24(EDX); pop24(ECX); pop24(EAX); }
:POPAD is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x61 { pop44(EDI); pop44(ESI); pop44(EBP); tmp:4=0; pop44(tmp); pop44(EBX); pop44(EDX); pop44(ECX); pop44(EAX); }

:POPF is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x9d { pop22(flags); unpackflags(flags); }
:POPF is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x9d { pop42(flags); unpackflags(flags); }
:POPFD is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x9d { pop24(eflags); unpackflags(eflags); unpackeflags(eflags); }
:POPFD is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x9d { pop44(eflags); unpackflags(eflags); unpackeflags(eflags); }
@ifdef IA64
:POPF is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x9d { pop82(flags); unpackflags(flags); }
:POPFQ is $(LONGMODE_ON) & vexMode=0 & byte=0x9d { pop88(rflags); unpackflags(rflags); unpackeflags(rflags); }
@endif

:PREFETCH m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=0 ... { }
:PREFETCH m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode ... { } # rest aliased to /0
:PREFETCHW m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=1 ... { }
:PREFETCHWT1 m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=2 ... { }

:PREFETCHT0 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=1 ) ... & m8 { }
:PREFETCHT1 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=2 ) ... & m8 { }
:PREFETCHT2 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=3 ) ... & m8 { }
:PREFETCHNTA m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=0 ) ... & m8 { }

define pcodeop ptwrite;

:PTWRITE rm32 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0xae; rm32 & reg_opcode=4 ... { ptwrite(rm32); }
@ifdef IA64
:PTWRITE rm64 is vexMode=0 & $(PRE_F3) & opsize=2 & byte=0x0f; byte=0xae; rm64 & reg_opcode=4 ... { ptwrite(rm64); }
@endif

:PUSH rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push22(rm16); }
:PUSH rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push42(rm16); }
:PUSH rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xff; rm32 & reg_opcode=6 ... { push24(rm32); }
:PUSH rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; rm32 & reg_opcode=6 ... { push44(rm32); }
@ifdef IA64
:PUSH rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push82(rm16); }
:PUSH rm64 is $(LONGMODE_ON) & vexMode=0 & byte=0xff; rm64 & reg_opcode=6 ... { push88(rm64); }
@endif

:PUSH Rmr16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & row=5 & page=0 & Rmr16 { push22(Rmr16); }
:PUSH Rmr16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & row=5 & page=0 & Rmr16 { push42(Rmr16); }
:PUSH Rmr32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & row=5 & page=0 & Rmr32 { push24(Rmr32); }
:PUSH Rmr32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & row=5 & page=0 & Rmr32 { push44(Rmr32); }
@ifdef IA64
:PUSH Rmr16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & row=5 & page=0 & Rmr16 { push82(Rmr16); }
:PUSH Rmr64 is $(LONGMODE_ON) & vexMode=0 & row=5 & page=0 & Rmr64 { push88(Rmr64); }
@endif

:PUSH simm8_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push22(tmp); }
:PUSH simm8_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push42(tmp); }
:PUSH simm8_32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push24(tmp); }
:PUSH simm8_32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push44(tmp); }
@ifdef IA64
:PUSH simm8_16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push82(tmp); }
:PUSH simm8_64 is $(LONGMODE_ON) & vexMode=0 & byte=0x6a; simm8_64 { tmp:8=simm8_64; push88(tmp); }
@endif

:PUSH simm16_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push22(tmp); }
:PUSH simm16_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push42(tmp); }
:PUSH imm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x68; imm32 { tmp:4=imm32; push24(tmp); }
:PUSH imm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x68; imm32 { tmp:4=imm32; push44(tmp); }
@ifdef IA64
:PUSH simm16_16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push82(tmp); }
:PUSH simm32_64 is $(LONGMODE_ON) & vexMode=0 & byte=0x68; simm32_64 { tmp:8=simm32_64; push88(tmp); }
@endif

:PUSH CS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xe & CS { push22(CS); }
:PUSH CS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xe & CS { pushseg44(CS); }
:PUSH SS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x16 & SS { push22(SS); }
:PUSH SS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x16 & SS { pushseg44(SS); }
:PUSH DS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x1e & DS { push22(DS); }
:PUSH DS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x1e & DS { pushseg44(DS); }
:PUSH ES is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x6 & ES { push22(ES); }
:PUSH ES is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x6 & ES { pushseg44(ES); }
:PUSH FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xf; byte=0xa0 & FS { push22(FS); }
:PUSH FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xf; byte=0xa0 & FS { pushseg44(FS); }
@ifdef IA64
:PUSH FS is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xf; byte=0xa0 & FS { push82(FS); }
:PUSH FS is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0xa0 & FS { pushseg88(FS); }
@endif
:PUSH GS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xf; byte=0xa8 & GS { push22(GS); }
:PUSH GS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xf; byte=0xa8 & GS { pushseg44(GS); }
@ifdef IA64
:PUSH GS is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xf; byte=0xa8 & GS { push82(GS); }
:PUSH GS is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0xa8 & GS { pushseg88(GS); }
@endif

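# PUSHA/PUSHAD push the stack pointer value as it was before the first push,
# hence the copy into a temporary.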
:PUSHA is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x60 { local tmp=SP; push22(AX); push22(CX); push22(DX); push22(BX); push22(tmp); push22(BP); push22(SI); push22(DI); }
:PUSHA is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x60 { local tmp=SP; push42(AX); push42(CX); push42(DX); push42(BX); push42(tmp); push42(BP); push42(SI); push42(DI); }
:PUSHAD is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x60 { local tmp=ESP; push24(EAX); push24(ECX); push24(EDX); push24(EBX); push24(tmp); push24(EBP); push24(ESI); push24(EDI); }
:PUSHAD is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x60 { local tmp=ESP; push44(EAX); push44(ECX); push44(EDX); push44(EBX); push44(tmp); push44(EBP); push44(ESI); push44(EDI); }

:PUSHF is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x9c { packflags(flags); push22(flags); }
:PUSHF is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x9c { packflags(flags); push42(flags); }
:PUSHFD is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x9c { packflags(eflags); packeflags(eflags); push24(eflags); }
:PUSHFD is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x9c { packflags(eflags); packeflags(eflags); push44(eflags); }
@ifdef IA64
:PUSHF is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x9c { packflags(flags); push82(flags); }
:PUSHFQ is $(LONGMODE_ON) & vexMode=0 & byte=0x9c { packflags(rflags); packeflags(rflags); push88(rflags); }
@endif

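# RCL/RCR rotate through CF, so the effective rotate is one bit wider than the
# operand (9, 17, 33 or 65 bits); counts are reduced modulo 9/17 for the 8/16-bit
# forms and the wider forms build the rotate in a larger temporary.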
:RCL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=2 ... { local tmpCF = CF; CF = rm8 s< 0; rm8 = (rm8 << 1) | tmpCF; OF = CF ^ (rm8 s< 0); }
:RCL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=2 ... { local cnt=(CL&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp<<cnt)|(tmp>>(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; }
:RCL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=2 ... ; imm8 { local cnt=(imm8&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp<<cnt)|(tmp>>(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; }
:RCL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=2 ... { local tmpCF = CF; CF = rm16 s< 0; rm16 = (rm16 << 1) | zext(tmpCF); OF = CF ^ (rm16 s< 0);}
:RCL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=2 ... { local cnt=(CL&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp<<cnt)|(tmp>>(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; }
:RCL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=2 ... ; imm8 { local cnt=(imm8&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp<<cnt)|(tmp>>(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; }
:RCL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=2 ... { local tmpCF=CF; CF=rm32 s< 0; rm32=(rm32<<1)|zext(tmpCF); OF=CF^(rm32 s< 0); build check_rm32_dest; }
:RCL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=2 ... { local cnt=CL&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp<<cnt)|(tmp>>(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; }
:RCL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=2 ... ; imm8 { local cnt=imm8&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp<<cnt)|(tmp>>(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; }
@ifdef IA64
:RCL rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=2 ... { local tmpCF=CF; CF=rm64 s< 0; rm64=(rm64<<1)|zext(tmpCF); OF=CF^(rm64 s< 0);}

:RCL rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=2 ...
{
    local cnt:1 = CL & 0x3f;
    local rm64_copy:8 = rm64;
    local CF_copy:1 = CF;
    rotated:8 = rm64_copy << cnt;
    rotated = rotated | (rm64_copy >> (65 - cnt));
    local CF_bit:8 = zext(CF_copy) << (cnt - 1);
    rotated = rotated | CF_bit;
    conditionalAssign(CF, cnt == 0:1, CF_copy, ((1:8 << (64 - cnt)) & rm64_copy) != 0);
    rm64 = rotated;
}

:RCL rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=2 ... ; imm8
{
    local cnt:1 = imm8 & 0x3f;
    local rm64_copy:8 = rm64;
    local CF_copy:1 = CF;
    rotated:8 = rm64_copy << cnt;
    rotated = rotated | (rm64_copy >> (65 - cnt));
    local CF_bit:8 = zext(CF_copy) << (cnt - 1);
    rotated = rotated | CF_bit;
    conditionalAssign(CF, cnt == 0:1, CF_copy, ((1:8 << (64 - cnt)) & rm64_copy) != 0);
    rm64 = rotated;
}
@endif

:RCR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm8 s< 0; CF=(rm8&1)!=0; rm8=(rm8>>1)|(tmpCF<<7); OF=OF^(rm8 s< 0); }
:RCR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=3 ... { local cnt=(CL&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp>>cnt)|(tmp<<(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; }
:RCR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=3 ... ; imm8 { local cnt=(imm8&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp>>cnt)|(tmp<<(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; }
:RCR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm16 s< 0; CF=(rm16&1)!=0; rm16=(rm16>>1)|(zext(tmpCF)<<15); OF=OF^(rm16 s< 0); }
:RCR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=3 ... { local cnt=(CL&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp>>cnt)|(tmp<<(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; }
:RCR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=3 ... ; imm8 { local cnt=(imm8&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp>>cnt)|(tmp<<(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; }
:RCR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=3 ... { local tmpCF=CF; OF=rm32 s< 0; CF=(rm32&1)!=0; rm32=(rm32>>1)|(zext(tmpCF)<<31); OF=OF^(rm32 s< 0); build check_rm32_dest; }
:RCR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=3 ... { local cnt=CL&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp>>cnt)|(tmp<<(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; }
:RCR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=3 ... ; imm8 { local cnt=imm8&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp>>cnt)|(tmp<<(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; }
@ifdef IA64
:RCR rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm64 s< 0; CF=(rm64&1)!=0; rm64=(rm64>>1)|(zext(tmpCF)<<63); OF=OF^(rm64 s< 0); }

:RCR rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=3 ...
{
    local cnt:1 = CL & 0x3f;
    local rm64_copy:8 = rm64;
    local CF_copy:1 = CF;
    rotated:8 = rm64_copy >> cnt;
    rotated = rotated | (rm64_copy << (65 - cnt));
    local CF_bit:8 = zext(CF_copy) << (64 - cnt);
    rotated = rotated | CF_bit;
    conditionalAssign(CF, cnt == 0:1, CF_copy, ((1:8 << (cnt - 1)) & rm64_copy) != 0);
    rm64 = rotated;
}

:RCR rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=3 ... ; imm8
{
    local cnt:1 = imm8 & 0x3f;
    local rm64_copy:8 = rm64;
    local CF_copy:1 = CF;
    rotated:8 = rm64_copy >> cnt;
    rotated = rotated | (rm64_copy << (65 - cnt));
    local CF_bit:8 = zext(CF_copy) << (64 - cnt);
    rotated = rotated | CF_bit;
    conditionalAssign(CF, cnt == 0:1, CF_copy, ((1:8 << (cnt - 1)) & rm64_copy) != 0);
    rm64 = rotated;
}
@endif

@ifdef IA64
define pcodeop readfsbase;
:RDFSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=0 & r32 { r32 = readfsbase(); }
:RDFSBASE r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=0 & r64 { r64 = readfsbase(); }

define pcodeop readgsbase;
:RDGSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=1 & r32 { r32 = readgsbase(); }
:RDGSBASE r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=1 & r64 { r64 = readgsbase(); }
@endif

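# RDMSR returns the 64-bit MSR selected by ECX in EDX:EAX.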
define pcodeop rdmsr;
:RDMSR is vexMode=0 & byte=0xf; byte=0x32 & check_EAX_dest & check_EDX_dest {
    tmp:8 = rdmsr(ECX);
    EDX = tmp(4); build check_EDX_dest;
    EAX = tmp(0); build check_EAX_dest;
}

define pcodeop readPID;
:RDPID r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xc7; reg_opcode=7 & r32 { r32 = readPID(); }
@ifdef IA64
:RDPID r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xc7; reg_opcode=7 & r64 { r64 = readPID(); }
@endif

define pcodeop rdpkru_u32;
:RDPKRU is vexMode=0 & byte=0x0f; byte=0x01; byte=0xee { EAX = rdpkru_u32(); }

define pcodeop rdpmc;
:RDPMC is vexMode=0 & byte=0xf; byte=0x33 { tmp:8 = rdpmc(ECX); EDX = tmp(4); EAX = tmp(0); }

define pcodeop rdtsc;
:RDTSC is vexMode=0 & byte=0xf; byte=0x31 { tmp:8 = rdtsc(); EDX = tmp(4); EAX = tmp(0); }

:RET is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xc3 { pop22(IP); EIP=segment(CS,IP); return [EIP]; }
:RET is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc3 { pop42(IP); EIP=zext(IP); return [EIP]; }
:RET is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xc3 { pop24(EIP); return [EIP]; }
:RET is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc3 { pop44(EIP); return [EIP]; }
@ifdef IA64
:RET is $(LONGMODE_ON) & vexMode=0 & byte=0xc3 { pop88(RIP); return [RIP]; }
@endif

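# Far returns pop the return offset first, then the CS selector (widened to the
# operand size).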
:RETF is vexMode=0 & addrsize=0 & opsize=0 & byte=0xcb { pop22(IP); pop22(CS); EIP = segment(CS,IP); return [EIP]; }
:RETF is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xcb { pop42(IP); EIP=zext(IP); pop42(CS); return [EIP]; }
@ifdef IA64
:RETF is $(LONGMODE_ON) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xcb { pop82(IP); RIP=zext(IP); pop82(CS); return [RIP]; }
:RETF is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=0 & byte=0xcb { pop82(IP); RIP=zext(IP); pop82(CS); return [RIP]; }
@endif
:RETF is vexMode=0 & addrsize=0 & opsize=1 & byte=0xcb { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); return [EIP]; }
:RETF is vexMode=0 & addrsize=1 & opsize=1 & byte=0xcb { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); return [EIP]; }
@ifdef IA64
:RETF is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=1 & byte=0xcb { pop84(EIP); RIP=zext(EIP); tmp:4=0; pop84(tmp); CS=tmp(0); return [RIP]; }
:RETF is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=2 & byte=0xcb { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); return [RIP]; }
@endif

:RET imm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xc2; imm16 { pop22(IP); EIP=zext(IP); SP=SP+imm16; return [EIP]; }
:RET imm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc2; imm16 { pop42(IP); EIP=zext(IP); ESP=ESP+imm16; return [EIP]; }
:RET imm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xc2; imm16 { pop24(EIP); SP=SP+imm16; return [EIP]; }
:RET imm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc2; imm16 { pop44(EIP); ESP=ESP+imm16; return [EIP]; }
@ifdef IA64
:RET imm16 is $(LONGMODE_ON) & vexMode=0 & byte=0xc2; imm16 { pop88(RIP); RSP=RSP+imm16; return [RIP]; }
@endif

:RETF imm16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xca; imm16 { pop22(IP); EIP=zext(IP); pop22(CS); SP=SP+imm16; return [EIP]; }
:RETF imm16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xca; imm16 { pop42(IP); EIP=zext(IP); pop42(CS); ESP=ESP+imm16; return [EIP]; }
@ifdef IA64
:RETF imm16 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=0 & byte=0xca; imm16 { pop82(IP); RIP=zext(IP); pop82(CS); RSP=RSP+imm16; return [RIP]; }
@endif

:RETF imm16 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xca; imm16 { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); SP=SP+imm16; return [EIP]; }
:RETF imm16 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xca; imm16 { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); ESP=ESP+imm16; return [EIP]; }
@ifdef IA64
:RETF imm16 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=1 & byte=0xca; imm16 { pop84(EIP); tmp:4=0; pop84(tmp); RIP=zext(EIP); CS=tmp(0); RSP=RSP+imm16; return [RIP]; }
:RETF imm16 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=2 & byte=0xca; imm16 { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); RSP=RSP+imm16; return [RIP]; }
@endif

:ROL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=0 ... { CF = rm8 s< 0; rm8 = (rm8 << 1) | CF; OF = CF ^ (rm8 s< 0); }
:ROL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=0 ... { local cnt = CL & 0x7; local count_and_mask = CL & 0x1f; rm8 = (rm8 << cnt) | (rm8 >> (8 - cnt)); rolflags(rm8, count_and_mask); }
:ROL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x7; rm8 = (rm8 << cnt) | (rm8 >> (8 - cnt)); rolflags(rm8,imm8 & 0x1f:1); }
:ROL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=0 ... { CF = rm16 s< 0; rm16 = (rm16 << 1) | zext(CF); OF = CF ^ (rm16 s< 0); }
:ROL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=0 ... { local cnt = CL & 0xf; local count_and_mask = CL & 0x1f; rm16 = (rm16 << cnt) | (rm16 >> (16 - cnt)); rolflags(rm16,count_and_mask); }
:ROL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0xf; rm16 = (rm16 << cnt) | (rm16 >> (16 - cnt)); rolflags(rm16,imm8 & 0x1f:1); }
:ROL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=0 ... { CF = rm32 s< 0; rm32 = (rm32 << 1) | zext(CF); OF = CF ^ (rm32 s< 0); build check_rm32_dest; }
:ROL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=0 ... { local cnt = CL & 0x1f; rm32 = (rm32 << cnt) | (rm32 >> (32 - cnt)); rolflags(rm32,cnt); build check_rm32_dest; }
:ROL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x1f; rm32 = (rm32 << cnt) | (rm32 >> (32 - cnt)); rolflags(rm32,cnt); build check_rm32_dest; }
@ifdef IA64
:ROL rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=0 ... { CF = rm64 s< 0; rm64 = (rm64 << 1) | zext(CF); OF = CF ^ (rm64 s< 0); }
:ROL rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=0 ... { local cnt = CL & 0x3f; rm64 = (rm64 << cnt) | (rm64 >> (64 - cnt)); rolflags(rm64,cnt); }
:ROL rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x3f; rm64 = (rm64 << cnt) | (rm64 >> (64 - cnt)); rolflags(rm64,cnt); }
@endif

:ROR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=1 ... { CF = rm8 & 1; rm8 = (rm8 >> 1) | (CF << 7); OF = ((rm8 & 0x40) != 0) ^ (rm8 s< 0); }
:ROR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=1 ... { local cnt = CL & 0x7; local count_and_mask = CL & 0x1f; rm8 = (rm8 >> cnt) | (rm8 << (8 - cnt)); rorflags(rm8,count_and_mask); }
:ROR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0x7; rm8 = (rm8 >> cnt) | (rm8 << (8 - cnt)); rorflags(rm8,imm8 & 0x1f:1); }
:ROR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=1 ... { CF=(rm16 & 1)!=0; rm16=(rm16>>1)|(zext(CF)<<15); OF=((rm16 & 0x4000) != 0) ^ (rm16 s< 0); }
:ROR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=1 ... { local cnt = CL & 0xf; local count_and_mask = CL & 0x1f; rm16 = (rm16 >> cnt) | (rm16 << (16 - cnt)); rorflags(rm16,count_and_mask); }
:ROR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0xf; rm16 = (rm16 >> cnt) | (rm16 << (16 - cnt)); rorflags(rm16,imm8 & 0x1f:1); }
:ROR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=1 ... { CF=(rm32&1)!=0; rm32=(rm32>>1)|(zext(CF)<<31); OF=((rm32&0x40000000)!=0) ^ (rm32 s< 0); build check_rm32_dest; }
:ROR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=1 ... { local cnt = CL & 0x1f; rm32 = (rm32 >> cnt) | (rm32 << (32 - cnt)); rorflags(rm32,cnt); build check_rm32_dest; }
:ROR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0x1f; rm32 = (rm32 >> cnt) | (rm32 << (32 - cnt)); rorflags(rm32,cnt); build check_rm32_dest; }
@ifdef IA64
:ROR rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=1 ... { CF=(rm64&1)!=0; rm64=(rm64>>1)|(zext(CF)<<63); OF=((rm64&0x4000000000000000)!=0) ^ (rm64 s< 0); }
:ROR rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=1 ... { local cnt = CL & 0x3f; rm64 = (rm64 >> cnt) | (rm64 << (64 - cnt)); rorflags(rm64,cnt); }
:ROR rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0x3f; rm64 = (rm64 >> cnt) | (rm64 << (64 - cnt)); rorflags(rm64,cnt); }
@endif

define pcodeop smm_restore_state;
:RSM is vexMode=0 & byte=0xf; byte=0xaa { tmp:4 = smm_restore_state(); return [tmp]; }

# Initially disallowed in 64bit mode, but later reintroduced
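# SAHF loads SF, ZF, AF, PF and CF from AH bits 7, 6, 4, 2 and 0 respectively.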
:SAHF is vexMode=0 & byte=0x9e { SF = (AH & 0x80) != 0;
                                 ZF = (AH & 0x40) != 0;
                                 AF = (AH & 0x10) != 0;
                                 PF = (AH & 0x04) != 0;
                                 CF = (AH & 0x01) != 0; }

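# SALC is undocumented: it sets AL from the carry flag (AL = CF ? 0xFF : 0x00).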
:SALC is vexMode=0 & bit64=0 & byte=0xd6 { AL = CF * 0xff; }

:SAR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=7 ... { CF = rm8 & 1; OF = 0; rm8 = rm8 s>> 1; resultflags(rm8); }
:SAR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=7 ... { local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 s>> count;
        sarflags(tmp, rm8,count); shiftresultflags(rm8,count); }
:SAR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 s>> count;
        sarflags(tmp, rm8,count); shiftresultflags(rm8,count); }
:SAR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=7 ... { CF = (rm16 & 1) != 0; OF = 0; rm16 = rm16 s>> 1; resultflags(rm16); }
:SAR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=7 ... { local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 s>> count;
        sarflags(tmp, rm16,count); shiftresultflags(rm16,count); }
:SAR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 s>> count;
        sarflags(tmp, rm16,count); shiftresultflags(rm16,count); }
:SAR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=7 ... { CF = (rm32 & 1) != 0; OF = 0; rm32 = rm32 s>> 1; build check_rm32_dest; resultflags(rm32); }
:SAR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=7 ... { local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 s>> count; build check_rm32_dest;
        sarflags(tmp, rm32,count); shiftresultflags(rm32,count); }
:SAR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 s>> count; build check_rm32_dest;
        sarflags(tmp, rm32,count); shiftresultflags(rm32,count); }
@ifdef IA64
:SAR rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=7 ... { CF = (rm64 & 1) != 0; OF = 0; rm64 = rm64 s>> 1; resultflags(rm64); }
:SAR rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=7 ... { local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 s>> count;
        sarflags(tmp, rm64,count); shiftresultflags(rm64,count); }
:SAR rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 s>> count;
        sarflags(tmp, rm64,count); shiftresultflags(rm64,count); }
@endif

# See 'lockable.sinc' for memory destination, lockable variants
:SBB AL,imm8 is vexMode=0 & byte=0x1c; AL & imm8 { subCarryFlags( AL, imm8 ); resultflags(AL); }
:SBB AX,imm16 is vexMode=0 & opsize=0 & byte=0x1d; AX & imm16 { subCarryFlags( AX, imm16 ); resultflags(AX); }
:SBB EAX,imm32 is vexMode=0 & opsize=1 & byte=0x1d; EAX & check_EAX_dest & imm32 { subCarryFlags( EAX, imm32 ); build check_EAX_dest; resultflags(EAX); }
@ifdef IA64
:SBB RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x1d; RAX & simm32 { subCarryFlags( RAX, simm32 ); resultflags(RAX); }
@endif
:SBB Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=3; imm8 { subCarryFlags( Rmr8, imm8 ); resultflags(Rmr8); }
:SBB Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=3; imm16 { subCarryFlags( Rmr16, imm16 ); resultflags(Rmr16); }
:SBB Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=3; imm32 { subCarryFlags( Rmr32, imm32 ); build check_Rmr32_dest; resultflags(Rmr32); }
@ifdef IA64
:SBB Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=3; simm32 { subCarryFlags( Rmr64, simm32 ); resultflags(Rmr64); }
@endif

:SBB Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=3; simm8_16 { subCarryFlags( Rmr16, simm8_16 ); resultflags(Rmr16); }
:SBB Rmr32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=3; simm8_32 { subCarryFlags( Rmr32, simm8_32 ); build check_Rmr32_dest; resultflags(Rmr32); }
@ifdef IA64
:SBB Rmr64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=3; simm8_64 { subCarryFlags( Rmr64, simm8_64 ); resultflags(Rmr64); }
@endif

:SBB Rmr8,Reg8 is vexMode=0 & byte=0x18; mod=3 & Rmr8 & Reg8 { subCarryFlags( Rmr8, Reg8 ); resultflags(Rmr8); }
:SBB Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x19; mod=3 & Rmr16 & Reg16 { subCarryFlags( Rmr16, Reg16 ); resultflags(Rmr16); }
:SBB Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x19; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { subCarryFlags( Rmr32, Reg32 ); build check_Rmr32_dest; resultflags(Rmr32); }
@ifdef IA64
:SBB Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x19; mod=3 & Rmr64 & Reg64 { subCarryFlags( Rmr64, Reg64 ); resultflags(Rmr64); }
@endif

:SBB Reg8,rm8 is vexMode=0 & byte=0x1a; rm8 & Reg8 ... { subCarryFlags( Reg8, rm8 ); resultflags(Reg8); }
:SBB Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x1b; rm16 & Reg16 ... { subCarryFlags( Reg16, rm16 ); resultflags(Reg16); }
:SBB Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x1b; rm32 & Reg32 ... & check_Reg32_dest ... { subCarryFlags( Reg32, rm32 ); build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:SBB Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x1b; rm64 & Reg64 ... { subCarryFlags( Reg64, rm64 ); resultflags(Reg64); }
@endif

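# SCAS compares the accumulator with ES:[(E/R)DI] and sets the flags as CMP
# would; with a REPE/REPNE prefix it becomes a scan loop.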
:SCASB^repe^repetail eseDI1 is vexMode=0 & repe & repetail & byte=0xae & eseDI1 { build repe; build eseDI1; subflags(AL,eseDI1); local diff=AL-eseDI1; resultflags(diff); build repetail; }
:SCASW^repe^repetail eseDI2 is vexMode=0 & repe & repetail & opsize=0 & byte=0xaf & eseDI2 { build repe; build eseDI2; subflags(AX,eseDI2); local diff=AX-eseDI2; resultflags(diff); build repetail; }
:SCASD^repe^repetail eseDI4 is vexMode=0 & repe & repetail & opsize=1 & byte=0xaf & eseDI4 { build repe; build eseDI4; subflags(EAX,eseDI4); local diff=EAX-eseDI4; resultflags(diff); build repetail; }
@ifdef IA64
:SCASQ^repe^repetail eseDI8 is $(LONGMODE_ON) & vexMode=0 & repe & repetail & opsize=2 & byte=0xaf & eseDI8 { build repe; build eseDI8; subflags(RAX,eseDI8); local diff=RAX-eseDI8; resultflags(diff); build repetail; }
@endif

:SET^cc rm8 is vexMode=0 & byte=0xf; row=9 & cc; rm8 { rm8 = cc; }

# manual is not consistent on operands
:SGDT m16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m16
{
    m16 = GlobalDescriptorTableRegister();
}

:SGDT m32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m32
{
    m32 = GlobalDescriptorTableRegister();
}

@ifdef IA64
:SGDT m64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m64
{
    m64 = GlobalDescriptorTableRegister();
}
@endif

:SHL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & (reg_opcode=4|reg_opcode=6) ... { CF = rm8 s< 0; rm8 = rm8 << 1; OF = CF ^ (rm8 s< 0); resultflags(rm8); }
:SHL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 << count;
        shlflags(tmp, rm8,count); shiftresultflags(rm8,count); }
:SHL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 << count;
        shlflags(tmp, rm8,count); shiftresultflags(rm8,count); }
:SHL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & (reg_opcode=4|reg_opcode=6) ... { CF = rm16 s< 0; rm16 = rm16 << 1; OF = CF ^ (rm16 s< 0); resultflags(rm16); }
:SHL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 << count;
        shlflags(tmp, rm16,count); shiftresultflags(rm16,count); }
:SHL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 << count;
        shlflags(tmp, rm16,count); shiftresultflags(rm16,count); }
:SHL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... { CF = rm32 s< 0; rm32 = rm32 << 1; OF = CF ^ (rm32 s< 0); build check_rm32_dest; resultflags(rm32); }
:SHL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 << count; build check_rm32_dest;
        shlflags(tmp, rm32,count); shiftresultflags(rm32,count); }
:SHL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 << count; build check_rm32_dest;
        shlflags(tmp, rm32,count); shiftresultflags(rm32,count); }
@ifdef IA64
:SHL rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & (reg_opcode=4|reg_opcode=6) ... { CF = rm64 s< 0; rm64 = rm64 << 1; OF = CF ^ (rm64 s< 0); resultflags(rm64); }
:SHL rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 << count;
        shlflags(tmp, rm64,count); shiftresultflags(rm64,count); }
:SHL rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 << count;
        shlflags(tmp, rm64,count); shiftresultflags(rm64,count); }
@endif

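# SHLD/SHRD shift the destination while filling the vacated bits from the second
# register operand; counts at or above the operand width are architecturally
# undefined and get no special handling here.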
:SHLD rm16,Reg16,imm8 is vexMode=0 & opsize=0; byte=0x0F; byte=0xA4; rm16 & Reg16 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16;
        rm16 = (rm16 << count) | (Reg16 >> (16 - count));
        shlflags(tmp,rm16,count); shiftresultflags(rm16,count); }
:SHLD rm16,Reg16,CL is vexMode=0 & opsize=0; byte=0x0F; byte=0xA5; CL & rm16 & Reg16 ... { local count = CL & 0x1f; local tmp = rm16;
        rm16 = (rm16 << count) | (Reg16 >> (16 - count));
        shlflags(tmp,rm16,count); shiftresultflags(rm16,count); }
:SHLD rm32,Reg32,imm8 is vexMode=0 & opsize=1; byte=0x0F; byte=0xA4; rm32 & check_rm32_dest ... & Reg32 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32;
        rm32 = (rm32 << count) | (Reg32 >> (32 - count)); build check_rm32_dest;
        shlflags(tmp,rm32,count); shiftresultflags(rm32,count); }
:SHLD rm32,Reg32,CL is vexMode=0 & opsize=1; byte=0x0F; byte=0xA5; CL & rm32 & check_rm32_dest ... & Reg32 ... { local count = CL & 0x1f; local tmp = rm32;
        rm32 = (rm32 << count) | (Reg32 >> (32 - count)); build check_rm32_dest;
        shlflags(tmp,rm32,count); shiftresultflags(rm32,count); }
@ifdef IA64
:SHLD rm64,Reg64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x0F; byte=0xA4; rm64 & Reg64 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64;
        rm64 = (rm64 << count) | (Reg64 >> (64 - count));
        shlflags(tmp,rm64,count); shiftresultflags(rm64,count); }
:SHLD rm64,Reg64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x0F; byte=0xA5; CL & rm64 & Reg64 ... { local count = CL & 0x3f; local tmp = rm64;
        rm64 = (rm64 << count) | (Reg64 >> (64 - count));
        shlflags(tmp,rm64,count); shiftresultflags(rm64,count); }
@endif

:SHRD rm16,Reg16,imm8 is vexMode=0 & opsize=0; byte=0x0F; byte=0xAC; rm16 & Reg16 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16;
        rm16 = (rm16 >> count) | (Reg16 << (16 - count));
        shrdflags(tmp,rm16,count); shiftresultflags(rm16,count); }
:SHRD rm16,Reg16,CL is vexMode=0 & opsize=0; byte=0x0F; byte=0xAD; CL & rm16 & Reg16 ... { local count = CL & 0x1f; local tmp = rm16;
        rm16 = (rm16 >> count) | (Reg16 << (16 - count));
        shrdflags(tmp,rm16,count); shiftresultflags(rm16,count); }
:SHRD rm32,Reg32,imm8 is vexMode=0 & opsize=1; byte=0x0F; byte=0xAC; rm32 & check_rm32_dest ... & Reg32 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32;
        rm32 = (rm32 >> count) | (Reg32 << (32 - count)); build check_rm32_dest;
        shrdflags(tmp,rm32,count); shiftresultflags(rm32,count); }
:SHRD rm32,Reg32,CL is vexMode=0 & opsize=1; byte=0x0F; byte=0xAD; CL & rm32 & check_rm32_dest ... & Reg32 ... { local count = CL & 0x1f; local tmp = rm32;
        rm32 = (rm32 >> count) | (Reg32 << (32 - count)); build check_rm32_dest;
        shrdflags(tmp,rm32,count); shiftresultflags(rm32,count); }
@ifdef IA64
:SHRD rm64,Reg64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x0F; byte=0xAC; rm64 & Reg64 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64;
        rm64 = (rm64 >> count) | (Reg64 << (64 - count));
        shrdflags(tmp,rm64,count); shiftresultflags(rm64,count); }
:SHRD rm64,Reg64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x0F; byte=0xAD; CL & rm64 & Reg64 ... { local count = CL & 0x3f; local tmp = rm64;
        rm64 = (rm64 >> count) | (Reg64 << (64 - count));
        shrdflags(tmp,rm64,count); shiftresultflags(rm64,count); }
@endif

:SHR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=5 ... { CF = rm8 & 1; OF = 0; rm8 = rm8 >> 1; resultflags(rm8); }
:SHR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=5 ... { local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 >> count;
        shrflags(tmp, rm8,count); shiftresultflags(rm8,count); }
:SHR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=5 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 >> count;
        shrflags(tmp, rm8,count); shiftresultflags(rm8,count); }
:SHR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=5 ... { CF = (rm16 & 1) != 0; OF = 0; rm16 = rm16 >> 1; resultflags(rm16); }
:SHR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=5 ... { local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 >> count;
        shrflags(tmp, rm16,count); shiftresultflags(rm16,count); }
:SHR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=5 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 >> count;
        shrflags(tmp, rm16,count); shiftresultflags(rm16,count); }
:SHR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=5 ... { CF = (rm32 & 1) != 0; OF = 0; rm32 = rm32 >> 1; build check_rm32_dest; resultflags(rm32); }
:SHR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=5 ... { local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 >> count; build check_rm32_dest;
        shrflags(tmp, rm32,count); shiftresultflags(rm32,count); }
:SHR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=5 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 >> count; build check_rm32_dest;
        shrflags(tmp, rm32,count); shiftresultflags(rm32,count); }
@ifdef IA64
:SHR rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=5 ... { CF = (rm64 & 1) != 0; OF = 0; rm64 = rm64 >> 1; resultflags(rm64); }
:SHR rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=5 ... { local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 >> count;
        shrflags(tmp, rm64,count); shiftresultflags(rm64,count); }
:SHR rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=5 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 >> count;
        shrflags(tmp, rm64,count); shiftresultflags(rm64,count); }
@endif

:SIDT m16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m16
{
    m16 = InterruptDescriptorTableRegister();
}

:SIDT m32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m32
{
    m32 = InterruptDescriptorTableRegister();
}
@ifdef IA64
:SIDT m64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m64
{
    m64 = InterruptDescriptorTableRegister();
}
@endif

define pcodeop skinit;
:SKINIT EAX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xde & EAX { skinit(EAX); }

:SLDT rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=0 ...
{
    rm16 = LocalDescriptorTableRegister();
}
:SLDT rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x0; rm32 & reg_opcode=0 ...
{
    rm32 = LocalDescriptorTableRegister();
}
@ifdef IA64
:SLDT rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x0; rm64 & reg_opcode=0 ...
{
    rm64 = LocalDescriptorTableRegister();
}
@endif

:SMSW rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=4 ... { rm16 = CR0:2; }
:SMSW rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x01; rm32 & reg_opcode=4 ... { rm32 = zext(CR0:2); }
@ifdef IA64
:SMSW rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x01; rm64 & reg_opcode=4 ... { rm64 = CR0; }
@endif

:STAC is vexMode=0 & byte=0x0f; byte=0x01; byte=0xcb { AC = 1; }
:STC is vexMode=0 & byte=0xf9 { CF = 1; }
:STD is vexMode=0 & byte=0xfd { DF = 1; }
# MFL: AMD instruction
# TODO: define the action.
# STGI: set global interrupt flag (GIF); while GIF is zero, all external interrupts are disabled.
:STGI is vexMode=0 & byte=0x0f; byte=0x01; byte=0xdc { stgi(); }
:STI is vexMode=0 & byte=0xfb { IF = 1; }

:STMXCSR m32 is vexMode=0 & byte=0xf; byte=0xae; ( mod != 0b11 & reg_opcode=3 ) ... & m32 { m32 = MXCSR; }

:STOSB^rep^reptail eseDI1 is vexMode=0 & rep & reptail & byte=0xaa & eseDI1 { build rep; build eseDI1; eseDI1=AL; build reptail; }
:STOSW^rep^reptail eseDI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xab & eseDI2 { build rep; build eseDI2; eseDI2=AX; build reptail; }
:STOSD^rep^reptail eseDI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xab & eseDI4 { build rep; build eseDI4; eseDI4=EAX; build reptail; }
@ifdef IA64
:STOSQ^rep^reptail eseDI8 is $(LONGMODE_ON) & vexMode=0 & rep & reptail & opsize=2 & byte=0xab & eseDI8 { build rep; build eseDI8; eseDI8=RAX; build reptail; }
@endif

:STR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=1 ... { rm16 = TaskRegister(); }
:STR Rmr32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x0; Rmr32 & mod=3 & reg_opcode=1 & check_Rmr32_dest {
    local tmp:2 = TaskRegister();
    Rmr32 = zext(tmp);
    build check_Rmr32_dest;
}
@ifdef IA64
:STR Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x0; Rmr64 & mod=3 & reg_opcode=1 {
    local tmp:2 = TaskRegister();
    Rmr64 = zext(tmp);
}
@endif

# See 'lockable.sinc' for memory destination, lockable variants
:SUB AL,imm8 is vexMode=0 & byte=0x2c; AL & imm8 { subflags( AL,imm8 ); AL = AL - imm8; resultflags( AL); }
:SUB AX,imm16 is vexMode=0 & opsize=0 & byte=0x2d; AX & imm16 { subflags( AX,imm16); AX = AX - imm16; resultflags( AX); }
:SUB EAX,imm32 is vexMode=0 & opsize=1 & byte=0x2d; EAX & check_EAX_dest & imm32 { subflags( EAX,imm32); EAX = EAX - imm32; build check_EAX_dest; resultflags( EAX); }
@ifdef IA64
:SUB RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x2d; RAX & simm32 { subflags( RAX,simm32); RAX = RAX - simm32; resultflags( RAX); }
@endif
:SUB Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=5; imm8 { subflags( Rmr8,imm8 ); Rmr8 = Rmr8 - imm8; resultflags( Rmr8); }
:SUB Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=5; imm16 { subflags( Rmr16,imm16); Rmr16 = Rmr16 - imm16; resultflags( Rmr16); }
:SUB Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=5; imm32 { subflags( Rmr32,imm32); Rmr32 = Rmr32 - imm32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:SUB Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=5; simm32 { subflags( Rmr64,simm32); Rmr64 = Rmr64 - simm32; resultflags( Rmr64); }
@endif
:SUB Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=5; simm8_16 { subflags( Rmr16,simm8_16); Rmr16 = Rmr16 - simm8_16; resultflags( Rmr16); }
:SUB Rmr32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=5; simm8_32 { subflags( Rmr32,simm8_32); Rmr32 = Rmr32 - simm8_32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:SUB Rmr64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=5; simm8_64 { subflags( Rmr64,simm8_64); Rmr64 = Rmr64 - simm8_64; resultflags( Rmr64); }
@endif
:SUB Rmr8,Reg8 is vexMode=0 & byte=0x28; mod=3 & Rmr8 & Reg8 { subflags( Rmr8,Reg8 ); Rmr8 = Rmr8 - Reg8; resultflags( Rmr8); }
:SUB Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x29; mod=3 & Rmr16 & Reg16 { subflags( Rmr16,Reg16); Rmr16 = Rmr16 - Reg16; resultflags( Rmr16); }
:SUB Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x29; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { subflags( Rmr32,Reg32); Rmr32 = Rmr32 - Reg32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:SUB Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x29; mod=3 & Rmr64 & Reg64 { subflags( Rmr64,Reg64); Rmr64 = Rmr64 - Reg64; resultflags( Rmr64); }
@endif
:SUB Reg8,rm8 is vexMode=0 & byte=0x2a; rm8 & Reg8 ... { subflags( Reg8,rm8 ); Reg8 = Reg8 - rm8; resultflags( Reg8); }
:SUB Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x2b; rm16 & Reg16 ... { subflags(Reg16,rm16 ); Reg16 = Reg16 - rm16; resultflags(Reg16); }
:SUB Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x2b; rm32 & Reg32 ... & check_Reg32_dest ... { subflags(Reg32,rm32 ); Reg32 = Reg32 - rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:SUB Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x2b; rm64 & Reg64 ... { subflags(Reg64,rm64 ); Reg64 = Reg64 - rm64; resultflags(Reg64); }
@endif

:SYSENTER is vexMode=0 & byte=0x0f; byte=0x34 { sysenter(); }
:SYSEXIT is vexMode=0 & byte=0x0f; byte=0x35 { sysexit();
@ifdef IA64
    RIP=RCX; return [RIP];
@endif
}

:SYSCALL is vexMode=0 & byte=0x0f; byte=0x05 { syscall(); }

# returning to 32bit mode loads ECX
# returning to 64bit mode loads RCX
:SYSRET is vexMode=0 & byte=0x0f; byte=0x07 { sysret();
@ifdef IA64
    RIP=RCX; return [RIP];
@endif
}

:SWAPGS is vexMode=0 & bit64=1 & byte=0x0f; byte=0x01; byte=0xf8 { swapgs(); }

:RDTSCP is vexMode=0 & bit64=1 & byte=0x0f; byte=0x01; byte=0xf9 { rdtscp(); }

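# TEST performs a logical AND purely for its flag effects; the result is
# discarded.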
:TEST AL,imm8 is vexMode=0 & byte=0xA8; AL & imm8 { logicalflags(); local tmp = AL & imm8; resultflags(tmp); }
:TEST AX,imm16 is vexMode=0 & opsize=0; byte=0xA9; AX & imm16 { logicalflags(); local tmp = AX & imm16; resultflags(tmp); }
:TEST EAX,imm32 is vexMode=0 & opsize=1; byte=0xA9; EAX & imm32 { logicalflags(); local tmp = EAX & imm32; resultflags(tmp); }
@ifdef IA64
:TEST RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0xA9; RAX & simm32 { logicalflags(); local tmp = RAX & simm32; resultflags(tmp); }
@endif
:TEST rm8,imm8 is vexMode=0 & byte=0xF6; rm8 & (reg_opcode=0 | reg_opcode=1) ... ; imm8 { logicalflags(); local tmp = rm8 & imm8; resultflags(tmp); }
:TEST rm16,imm16 is vexMode=0 & opsize=0; byte=0xF7; rm16 & (reg_opcode=0 | reg_opcode=1) ... ; imm16 { logicalflags(); local tmp = rm16 & imm16; resultflags(tmp); }
:TEST rm32,imm32 is vexMode=0 & opsize=1; byte=0xF7; rm32 & (reg_opcode=0 | reg_opcode=1) ... ; imm32 { logicalflags(); local tmp = rm32 & imm32; resultflags(tmp); }
@ifdef IA64
:TEST rm64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0xF7; rm64 & (reg_opcode=0 | reg_opcode=1) ... ; simm32 { logicalflags(); local tmp = rm64 & simm32; resultflags(tmp); }
@endif
:TEST rm8,Reg8 is vexMode=0 & byte=0x84; rm8 & Reg8 ... { logicalflags(); local tmp = rm8 & Reg8; resultflags(tmp); }
:TEST rm16,Reg16 is vexMode=0 & opsize=0; byte=0x85; rm16 & Reg16 ... { logicalflags(); local tmp = rm16 & Reg16; resultflags(tmp); }
:TEST rm32,Reg32 is vexMode=0 & opsize=1; byte=0x85; rm32 & Reg32 ... { logicalflags(); local tmp = rm32 & Reg32; resultflags(tmp); }
@ifdef IA64
:TEST rm64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x85; rm64 & Reg64 ... { logicalflags(); local tmp = rm64 & Reg64; resultflags(tmp); }
@endif

define pcodeop invalidInstructionException;
:UD0 Reg32, rm32 is vexMode=0 & byte=0x0f; byte=0xff; rm32 & Reg32 ... { local target:$(SIZE) = invalidInstructionException(); goto [target]; }
:UD1 Reg32, rm32 is vexMode=0 & byte=0x0f; byte=0xb9; rm32 & Reg32 ... { local target:$(SIZE) = invalidInstructionException(); goto [target]; }
:UD2 is vexMode=0 & byte=0xf; byte=0xb { local target:$(SIZE) = invalidInstructionException(); goto [target]; }

define pcodeop verr;
define pcodeop verw;
:VERR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=4 ... { ZF = verr(rm16); }
:VERW rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=5 ... { ZF = verw(rm16); }

# MFL added VMX opcodes
#
# AMD hardware assisted virtualization opcodes
:VMLOAD EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xda & EAX { vmload(EAX); }
@ifdef IA64
:VMLOAD RAX is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xda & RAX { vmload(RAX); }
@endif
:VMMCALL is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd9 { vmmcall(); }
# Limiting the effective address size to 32 and 64 bit. Surely we're not expecting a 16-bit VM address, are we?
:VMRUN EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xd8 & EAX { vmrun(EAX); }
@ifdef IA64
:VMRUN RAX is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xd8 & RAX { vmrun(RAX); }
@endif
# Limiting the effective address size to 32 and 64 bit. Surely we're not expecting a 16-bit VM address, are we?
:VMSAVE EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xdb & EAX { vmsave(EAX); }
@ifdef IA64
:VMSAVE RAX is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xdb & RAX { vmsave(RAX); }
@endif
#

#
# Intel hardware assisted virtualization opcodes
@ifdef IA64
:INVEPT Reg64, m128 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x80; Reg64 ... & m128 { invept(Reg64, m128); }
@endif
:INVEPT Reg32, m128 is vexMode=0 & bit64=0 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x80; Reg32 ... & m128 { invept(Reg32, m128); }
@ifdef IA64
:INVVPID Reg64, m128 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x81; Reg64 ... & m128 { invvpid(Reg64, m128); }
@endif
:INVVPID Reg32, m128 is vexMode=0 & bit64=0 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x81; Reg32 ... & m128 { invvpid(Reg32, m128); }
:VMCALL is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc1 { vmcall(); }
@ifdef IA64
:VMCLEAR m64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... & m64 { vmclear(m64); }
@endif
#TODO: invokes a VM function specified in EAX
:VMFUNC EAX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd4 & EAX { vmfunc(EAX); }
#TODO: this launches the VM managed by the current VMCS. How is the VMCS expressed for the emulator? For Ghidra analysis?
:VMLAUNCH is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc2 { vmlaunch(); }
#TODO: this resumes the VM managed by the current VMCS. How is the VMCS expressed for the emulator? For Ghidra analysis?
:VMRESUME is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc3 { vmresume(); }
#TODO: this loads the VMCS pointer from the m64 memory address and makes the VMCS pointer valid; how to express
# this for analysis and emulation?
:VMPTRLD m64 is vexMode=0 & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... & m64 { vmptrld(m64); }
#TODO: stores the current VMCS pointer into the specified 64-bit memory address; how to express this for analysis and emulation?
#TODO: note that the Intel manual does not specify m64 (which it does for VMPTRLD), yet it does state that "the operand
# of this instruction is always 64-bits and is always in memory". Is it an error that the "Instruction" entry in the
# box giving the definition does not specify m64?
:VMPTRST m64 is vexMode=0 & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=7 ) ... & m64 { vmptrst(m64); }
:VMREAD rm32, Reg32 is vexMode=0 & opsize=1 & byte=0x0f; byte=0x78; rm32 & check_rm32_dest ... & Reg32 ... { rm32 = vmread(Reg32); build check_rm32_dest; }
@ifdef IA64
:VMREAD rm64, Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0f; byte=0x78; rm64 & Reg64 ... { rm64 = vmread(Reg64); }
@endif
:VMWRITE Reg32, rm32 is vexMode=0 & opsize=1 & byte=0x0f; byte=0x79; rm32 & Reg32 ... & check_Reg32_dest ... { vmwrite(rm32,Reg32); build check_Reg32_dest; }
@ifdef IA64
:VMWRITE Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0f; byte=0x79; rm64 & Reg64 ... { vmwrite(rm64,Reg64); }
@endif
:VMXOFF is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc4 { vmxoff(); }
# NB: this opcode is incorrect in the 2005 edition of the Intel manual. Opcode below is taken from the 2008 version.
:VMXON m64 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... & m64 { vmxon(m64); }

#END of changes for VMX opcodes

:WAIT is vexMode=0 & byte=0x9b { }
:WBINVD is vexMode=0 & byte=0xf; byte=0x9 { }

@ifdef IA64
define pcodeop writefsbase;
:WRFSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=2 & r32 { tmp:8 = zext(r32); writefsbase(tmp); }
:WRFSBASE r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=2 & r64 { writefsbase(r64); }

define pcodeop writegsbase;
:WRGSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=3 & r32 { tmp:8 = zext(r32); writegsbase(tmp); }
:WRGSBASE r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=3 & r64 { writegsbase(r64); }
@endif

define pcodeop wrpkru;
:WRPKRU is vexMode=0 & byte=0x0F; byte=0x01; byte=0xEF { wrpkru(EAX); }

define pcodeop wrmsr;
:WRMSR is vexMode=0 & byte=0xf; byte=0x30 { tmp:8 = (zext(EDX) << 32) | zext(EAX); wrmsr(ECX,tmp); }

# See 'lockable.sinc' for memory destination, lockable variants
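# XADD exchanges the two operands and stores their sum in the destination.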
:XADD Rmr8,Reg8 is vexMode=0 & byte=0x0F; byte=0xC0; mod=3 & Rmr8 & Reg8 { addflags( Rmr8,Reg8 ); local tmp = Rmr8 + Reg8; Reg8 = Rmr8; Rmr8 = tmp; resultflags(tmp); }
|
|
:XADD Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x0F; byte=0xC1; mod=3 & Rmr16 & Reg16 { addflags(Rmr16,Reg16); local tmp = Rmr16 + Reg16; Reg16 = Rmr16; Rmr16 = tmp; resultflags(tmp); }
|
|
:XADD Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x0F; byte=0xC1; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 & check_Reg32_dest { addflags(Rmr32,Reg32); local tmp = Rmr32 + Reg32; Reg32 = Rmr32; Rmr32 = tmp; build check_Rmr32_dest; build check_Reg32_dest; resultflags(tmp); }
|
|
@ifdef IA64
|
|
:XADD Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0xC1; mod=3 & Rmr64 & Reg64 { addflags(Rmr64,Reg64); local tmp = Rmr64 + Reg64; Reg64 = Rmr64; Rmr64 = tmp; resultflags(tmp); }
|
|
@endif
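
# A minimal C sketch of the exchange-and-add performed by XADD above
# (illustrative only; 'dst' and 'src' stand in for the Rmr/Reg operands):
#
#   uint32_t tmp = dst + src;   /* sum of the original values            */
#   src = dst;                  /* source receives the old destination   */
#   dst = tmp;                  /* destination receives the sum          */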

define pcodeop xabort;

:XABORT imm8 is vexMode=0 & byte=0xc6; byte=0xf8; imm8 { tmp:1 = imm8; xabort(tmp); }

define pcodeop xbegin;
define pcodeop xend;

:XBEGIN rel16 is vexMode=0 & opsize=0 & byte=0xc7; byte=0xf8; rel16 { xbegin(&:$(SIZE) rel16); }
:XBEGIN rel32 is vexMode=0 & (opsize=1 | opsize=2) & byte=0xc7; byte=0xf8; rel32 { xbegin(&:$(SIZE) rel32); }

:XEND is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd5 { xend(); }

# See 'lockable.sinc' for memory destination, lockable variants
:XCHG AX,Rmr16 is vexMode=0 & opsize=0 & row = 9 & page = 0 & AX & Rmr16 { local tmp = AX; AX = Rmr16; Rmr16 = tmp; }
:XCHG EAX,Rmr32 is vexMode=0 & opsize=1 & row = 9 & page = 0 & EAX & check_EAX_dest & Rmr32 & check_Rmr32_dest { local tmp = EAX; EAX = Rmr32; build check_EAX_dest; Rmr32 = tmp; build check_Rmr32_dest; }
@ifdef IA64
:XCHG RAX,Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & row = 9 & page = 0 & RAX & Rmr64 { local tmp = RAX; RAX = Rmr64; Rmr64 = tmp; }
@endif

:XCHG Rmr8,Reg8 is vexMode=0 & byte=0x86; mod=3 & Rmr8 & Reg8 { local tmp = Rmr8; Rmr8 = Reg8; Reg8 = tmp; }
:XCHG Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x87; mod=3 & Rmr16 & Reg16 { local tmp = Rmr16; Rmr16 = Reg16; Reg16 = tmp; }
:XCHG Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x87; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 & check_Reg32_dest { local tmp = Rmr32; Rmr32 = Reg32; build check_Rmr32_dest; Reg32 = tmp; build check_Reg32_dest; }
@ifdef IA64
:XCHG Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x87; mod=3 & Rmr64 & Reg64 { local tmp = Rmr64; Rmr64 = Reg64; Reg64 = tmp; }
@endif

:XLAT seg16^BX is vexMode=0 & addrsize=0 & seg16 & byte=0xd7; BX { tmp:$(SIZE) = 0; ptr2(tmp,BX+zext(AL)); AL = *tmp; }
:XLAT segWide^EBX is vexMode=0 & addrsize=1 & segWide & byte=0xd7; EBX { tmp:$(SIZE) = 0; ptr4(tmp,EBX+zext(AL)); AL = *tmp; }
@ifdef IA64
:XLAT segWide^RBX is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & segWide & byte=0xd7; RBX { tmp:$(SIZE) = 0; ptr8(tmp,RBX+zext(AL)); AL = *tmp; }
@endif
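
# A minimal C sketch of the XLAT table lookup modeled above (illustrative
# only; 'table' stands in for the segmented (E/R)BX table pointer):
#
#   uint8_t al;
#   al = table[al];             /* AL = *(table + zext(AL)) */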

# See 'lockable.sinc' for memory destination, lockable variants
:XOR AL,imm8 is vexMode=0 & byte=0x34; AL & imm8 { logicalflags(); AL = AL ^ imm8; resultflags(AL); }
:XOR AX,imm16 is vexMode=0 & opsize=0 & byte=0x35; AX & imm16 { logicalflags(); AX = AX ^ imm16; resultflags(AX); }
:XOR EAX,imm32 is vexMode=0 & opsize=1 & byte=0x35; EAX & imm32 & check_EAX_dest { logicalflags(); EAX = EAX ^ imm32; build check_EAX_dest; resultflags(EAX); }
@ifdef IA64
:XOR RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x35; RAX & simm32 { logicalflags(); RAX = RAX ^ simm32; resultflags(RAX); }
@endif

:XOR Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=6; imm8 { logicalflags(); Rmr8 = Rmr8 ^ imm8; resultflags(Rmr8); }
:XOR Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=6; imm16 { logicalflags(); Rmr16 = Rmr16 ^ imm16; resultflags(Rmr16); }
:XOR Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=6; imm32 { logicalflags(); Rmr32 = Rmr32 ^ imm32; build check_rm32_dest; resultflags(Rmr32); }
@ifdef IA64
:XOR Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=6; simm32 { logicalflags(); Rmr64 = Rmr64 ^ simm32; resultflags(Rmr64); }
@endif

:XOR Rmr16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=6; usimm8_16 { logicalflags(); Rmr16 = Rmr16 ^ usimm8_16; resultflags(Rmr16); }
:XOR Rmr32,usimm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=6; usimm8_32 { logicalflags(); Rmr32 = Rmr32 ^ usimm8_32; build check_rm32_dest; resultflags(Rmr32); }
@ifdef IA64
:XOR Rmr64,usimm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=6; usimm8_64 { logicalflags(); Rmr64 = Rmr64 ^ usimm8_64; resultflags(Rmr64); }
@endif

:XOR Rmr8,Reg8 is vexMode=0 & byte=0x30; mod=3 & Rmr8 & Reg8 { logicalflags(); Rmr8 = Rmr8 ^ Reg8; resultflags(Rmr8); }
:XOR Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x31; mod=3 & Rmr16 & Reg16 { logicalflags(); Rmr16 = Rmr16 ^ Reg16; resultflags(Rmr16); }
:XOR Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x31; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { logicalflags(); Rmr32 = Rmr32 ^ Reg32; build check_Rmr32_dest; resultflags(Rmr32); }
@ifdef IA64
:XOR Rmr64,Reg64 is vexMode=0 & $(LONGMODE_ON) & opsize=2 & byte=0x31; mod=3 & Rmr64 & Reg64 { logicalflags(); Rmr64 = Rmr64 ^ Reg64; resultflags(Rmr64); }
@endif

:XOR Reg8,rm8 is vexMode=0 & byte=0x32; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 ^ rm8; resultflags(Reg8); }
:XOR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x33; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 ^ rm16; resultflags(Reg16); }
:XOR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x33; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 ^ rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:XOR Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x33; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 ^ rm64; resultflags(Reg64); }
@endif

:XGETBV is vexMode=0 & byte=0x0F; byte=0x01; byte=0xD0 { local tmp = XCR0 >> 32; EDX = tmp:4; EAX = XCR0:4; }
:XSETBV is vexMode=0 & byte=0x0F; byte=0x01; byte=0xD1 { XCR0 = (zext(EDX) << 32) | zext(EAX); }
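
# A minimal C sketch of the EDX:EAX split/pack used by XGETBV/XSETBV above
# (WRMSR uses the same packing); 'xcr0' stands in for the XCR0 register:
#
#   uint32_t eax = (uint32_t)xcr0;                   /* low 32 bits   */
#   uint32_t edx = (uint32_t)(xcr0 >> 32);           /* high 32 bits  */
#   uint64_t packed = ((uint64_t)edx << 32) | eax;   /* XSETBV input  */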

define pcodeop xsave;
define pcodeop xsave64;
define pcodeop xsavec;
define pcodeop xsavec64;
define pcodeop xsaveopt;
define pcodeop xsaveopt64;
define pcodeop xsaves;
define pcodeop xsaves64;
define pcodeop xrstor;
define pcodeop xrstor64;
define pcodeop xrstors;
define pcodeop xrstors64;
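
# Each constructor below passes 512 as a size hint to its pcodeop: 512 bytes
# covers the legacy (FXSAVE-compatible) region of the XSAVE area. The true
# extent also depends on the requested feature mask in EDX:EAX, which is not
# modeled here.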

:XRSTOR Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xrstor(Mem, tmp); }
@ifdef IA64
:XRSTOR64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xrstor64(Mem, tmp); }
@endif

:XRSTORS Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=3 ) ... & Mem { tmp:4 = 512; xrstors(Mem, tmp); }
@ifdef IA64
:XRSTORS64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=3 ) ... & Mem { tmp:4 = 512; xrstors64(Mem, tmp); }
@endif

:XSAVE Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsave(Mem, tmp); }
@ifdef IA64
:XSAVE64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsave64(Mem, tmp); }
@endif

:XSAVEC Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsavec(Mem, tmp); }
@ifdef IA64
:XSAVEC64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsavec64(Mem, tmp); }
@endif

:XSAVEOPT Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=6 ) ... & Mem { tmp:4 = 512; xsaveopt(Mem, tmp); }
@ifdef IA64
:XSAVEOPT64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=6 ) ... & Mem { tmp:4 = 512; xsaveopt64(Mem, tmp); }
@endif

:XSAVES Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xsaves(Mem, tmp); }
@ifdef IA64
:XSAVES64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xsaves64(Mem, tmp); }
@endif

define pcodeop xtest;
:XTEST is byte=0x0F; byte=0x01; byte=0xD6 { ZF = xtest(); }

:LFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=5 & r_m=0 { }
:MFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=6 & r_m=0 { }
:SFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=7 & r_m=0 { }

#
# floating point instructions
#

define pcodeop f2xm1;
:F2XM1 is vexMode=0 & byte=0xD9; byte=0xF0
{
  FPUInstructionPointer = inst_start;
  ST0 = f2xm1(ST0);
} # compute 2^x-1

:FABS is vexMode=0 & byte=0xD9; byte=0xE1
{
  FPUInstructionPointer = inst_start;
  ST0 = abs(ST0);
}

:FADD m32fp is vexMode=0 & byte=0xD8; reg_opcode=0 ... & m32fp
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f+ float2float(m32fp);
}

:FADD m64fp is vexMode=0 & byte=0xDC; reg_opcode=0 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f+ float2float(m64fp);
}

:FADD ST0, freg is vexMode=0 & byte=0xD8; frow=12 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f+ freg;
}

:FADD freg, ST0 is vexMode=0 & byte=0xDC; frow=12 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = freg f+ ST0;
}

:FADDP is vexMode=0 & byte=0xDE; byte=0xC1
{
  FPUInstructionPointer = inst_start;
  ST1 = ST0 f+ ST1;
  fpop();
}

:FADDP freg, ST0 is vexMode=0 & byte=0xDE; frow=12 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = ST0 f+ freg;
  fpop();
}

:FIADD m32 is vexMode=0 & byte=0xDA; reg_opcode=0 ... & m32
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f+ int2float(m32);
}

:FIADD m16 is vexMode=0 & byte=0xDE; reg_opcode=0 ... & m16
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f+ int2float(m16);
}

define pcodeop from_bcd;
:FBLD m80 is vexMode=0 & byte=0xDF; reg_opcode=4 ... & m80
{
  FPUInstructionPointer = inst_start;
  fdec();
  ST0 = from_bcd(m80);
}

define pcodeop to_bcd;
:FBSTP m80 is vexMode=0 & byte=0xDF; reg_opcode=6 ... & m80
{
  FPUInstructionPointer = inst_start;
  m80 = to_bcd(ST0);
  fpop();
}

:FCHS is vexMode=0 & byte=0xD9; byte=0xE0
{
  FPUInstructionPointer = inst_start;
  ST0 = f- ST0;
}

:FCLEX is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE2
{
  FPUStatusWord[0,8] = 0;
  FPUStatusWord[15,1] = 0;
}

:FNCLEX is vexMode=0 & byte=0xDB; byte=0xE2
{
  FPUStatusWord[0,8] = 0;
  FPUStatusWord[15,1] = 0;
}

:FCMOVB ST0, freg is vexMode=0 & byte=0xDA; frow=12 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  if ( !CF ) goto inst_next;
  ST0 = freg;
}

:FCMOVE ST0, freg is vexMode=0 & byte=0xDA; frow=12 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  if ( !ZF ) goto inst_next;
  ST0 = freg;
}

:FCMOVBE ST0, freg is vexMode=0 & byte=0xDA; frow=13 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  if ( !CF & !ZF ) goto inst_next;
  ST0 = freg;
}

:FCMOVU ST0, freg is vexMode=0 & byte=0xDA; frow=13 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  if ( !PF ) goto inst_next;
  ST0 = freg;
}

:FCMOVNB ST0, freg is vexMode=0 & byte=0xDB; frow=12 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  if ( CF ) goto inst_next;
  ST0 = freg;
}

:FCMOVNE ST0, freg is vexMode=0 & byte=0xDB; frow=12 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  if ( ZF ) goto inst_next;
  ST0 = freg;
}

:FCMOVNBE ST0, freg is vexMode=0 & byte=0xDB; frow=13 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  if ( CF | ZF ) goto inst_next;   # NBE: move only when CF=0 and ZF=0
  ST0 = freg;
}

:FCMOVNU ST0, freg is vexMode=0 & byte=0xDB; frow=13 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  if ( PF ) goto inst_next;
  ST0 = freg;
}

:FCOM m32fp is vexMode=0 & byte=0xD8; reg_opcode=2 ... & m32fp
{
  FPUInstructionPointer = inst_start;
  local tmp = float2float(m32fp);
  fcom(tmp);
}

:FCOM m64fp is vexMode=0 & byte=0xDC; reg_opcode=2 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  local tmp = float2float(m64fp);
  fcom(tmp);
}

:FCOM freg is vexMode=0 & byte=0xD8; frow=13 & fpage=0 & freg
{
  FPUInstructionPointer = inst_start;
  fcom(freg);
}

:FCOM is vexMode=0 & byte=0xD8; byte=0xD1
{
  FPUInstructionPointer = inst_start;
  fcom(ST1);
}

:FCOMP m32fp is vexMode=0 & byte=0xD8; reg_opcode=3 ... & m32fp
{
  FPUInstructionPointer = inst_start;
  local tmp = float2float(m32fp);
  fcom(tmp);
  fpop();
}

:FCOMP m64fp is vexMode=0 & byte=0xDC; reg_opcode=3 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  local tmp = float2float(m64fp);
  fcom(tmp);
  fpop();
}

:FCOMP freg is vexMode=0 & byte=0xD8; frow=13 & fpage=1 & freg
{
  FPUInstructionPointer = inst_start;
  fcom(freg);
  fpop();
}

:FCOMP is vexMode=0 & byte=0xD8; byte=0xD9
{
  FPUInstructionPointer = inst_start;
  fcom(ST1);
  fpop();
}

:FCOMPP is vexMode=0 & byte=0xDE; byte=0xD9
{
  FPUInstructionPointer = inst_start;
  fcom(ST1);
  fpop();
  fpop();
}

:FCOMI ST0, freg is vexMode=0 & byte=0xDB; frow=15 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  fcomi(freg);
}

:FCOMIP ST0, freg is vexMode=0 & byte=0xDF; frow=15 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  fcomi(freg);
  fpop();
}

:FUCOMI ST0, freg is vexMode=0 & byte=0xDB; frow=14 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  fcomi(freg);
}

:FUCOMIP ST0, freg is vexMode=0 & byte=0xDF; frow=14 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  fcomi(freg);
  fpop();
}

define pcodeop fcos;
:FCOS is vexMode=0 & byte=0xD9; byte=0xFF
{
  FPUInstructionPointer = inst_start;
  ST0 = fcos(ST0);
}

:FDECSTP is vexMode=0 & byte=0xD9; byte=0xF6
{
  FPUInstructionPointer = inst_start;
  fdec();
  FPUStatusWord = FPUStatusWord & 0xfdff;
  C1 = 0; # Clear C1 (per the SDM; C0, C2, and C3 are undefined)
}

# Legacy 8087 instructions. Still valid but treated as NOP instructions.
:FDISI is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE1 {}
:FNDISI is vexMode=0 & byte=0xDB; byte=0xE1 {}
:FENI is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE0 {}
:FNENI is vexMode=0 & byte=0xDB; byte=0xE0 {}

:FDIV m32fp is vexMode=0 & byte=0xD8; reg_opcode=6 ... & m32fp
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f/ float2float(m32fp);
}

:FDIV m64fp is vexMode=0 & byte=0xDC; reg_opcode=6 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f/ float2float(m64fp);
}

:FDIV ST0,freg is vexMode=0 & byte=0xD8; frow=15 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f/ freg;
}

:FDIV freg,ST0 is vexMode=0 & byte=0xDC; frow=15 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = freg f/ ST0;
}

:FDIVP freg,ST0 is vexMode=0 & byte=0xDE; frow=15 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = freg f/ ST0;
  fpop();
}

:FDIVP is vexMode=0 & byte=0xDE; byte=0xF9
{
  FPUInstructionPointer = inst_start;
  ST1 = ST1 f/ ST0;
  fpop();
}

:FIDIV m32 is vexMode=0 & byte=0xDA; reg_opcode=6 ... & m32
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f/ int2float(m32);
}

:FIDIV m16 is vexMode=0 & byte=0xDE; reg_opcode=6 ... & m16
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f/ int2float(m16);
}

:FDIVR m32fp is vexMode=0 & byte=0xD8; reg_opcode=7 ... & m32fp
{
  FPUInstructionPointer = inst_start;
  ST0 = float2float(m32fp) f/ ST0;
}

:FDIVR m64fp is vexMode=0 & byte=0xDC; reg_opcode=7 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  ST0 = float2float(m64fp) f/ ST0;
}

:FDIVR ST0,freg is vexMode=0 & byte=0xD8; frow=15 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  ST0 = freg f/ ST0;
}

:FDIVR freg,ST0 is vexMode=0 & byte=0xDC; frow=15 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = ST0 f/ freg;
}

:FDIVRP freg,ST0 is vexMode=0 & byte=0xDE; frow=15 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = ST0 f/ freg;
  fpop();
}

:FDIVRP is vexMode=0 & byte=0xDE; byte=0xF1
{
  FPUInstructionPointer = inst_start;
  ST1 = ST0 f/ ST1;
  fpop();
}

:FIDIVR m32 is vexMode=0 & byte=0xDA; reg_opcode=7 ... & m32
{
  FPUInstructionPointer = inst_start;
  ST0 = int2float(m32) f/ ST0;
}

:FIDIVR m16 is vexMode=0 & byte=0xDE; reg_opcode=7 ... & m16
{
  FPUInstructionPointer = inst_start;
  ST0 = int2float(m16) f/ ST0;
}

define pcodeop ffree;
:FFREE freg is vexMode=0 & byte=0xDD; frow=12 & fpage=0 & freg
{
  FPUInstructionPointer = inst_start;
  FPUTagWord = ffree(freg); # Set freg to invalid value
}

:FFREEP freg is vexMode=0 & byte=0xDF; frow=12 & fpage=0 & freg
{
  FPUInstructionPointer = inst_start;
  FPUTagWord = ffree(freg);
  fpop(); # FFREE and pop
}

:FICOM m16 is vexMode=0 & byte=0xDE; reg_opcode=2 ... & m16
{
  FPUInstructionPointer = inst_start;
  local tmp = int2float(m16);
  fcom(tmp);
}

:FICOM m32 is vexMode=0 & byte=0xDA; reg_opcode=2 ... & m32
{
  FPUInstructionPointer = inst_start;
  local tmp = int2float(m32);
  fcom(tmp);
}

:FICOMP m16 is vexMode=0 & byte=0xDE; (mod != 0b11 & reg_opcode=3) ... & m16
{
  FPUInstructionPointer = inst_start;
  local tmp = int2float(m16);
  fcom(tmp);
  fpop();
}

:FICOMP m32 is vexMode=0 & byte=0xDA; reg_opcode=3 ... & m32
{
  FPUInstructionPointer = inst_start;
  local tmp = int2float(m32);
  fcom(tmp);
  fpop();
}

:FILD m16 is vexMode=0 & byte=0xDF; reg_opcode=0 ... & m16
{
  FPUInstructionPointer = inst_start;
  fdec();
  ST0 = int2float(m16);
}

:FILD m32 is vexMode=0 & byte=0xDB; reg_opcode=0 ... & m32
{
  FPUInstructionPointer = inst_start;
  fdec();
  ST0 = int2float(m32);
}

:FILD m64 is vexMode=0 & byte=0xDF; reg_opcode=5 ... & m64
{
  FPUInstructionPointer = inst_start;
  fdec();
  ST0 = int2float(m64);
}

:FINCSTP is vexMode=0 & byte=0xD9; byte=0xF7
{
  FPUInstructionPointer = inst_start;
  finc();
}

:FINIT is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE3
{
  FPUControlWord = 0x037f;
  FPUStatusWord = 0x0000;
  FPUTagWord = 0xffff;
  FPUDataPointer = 0x00000000;
  FPUInstructionPointer = 0x00000000;
  FPULastInstructionOpcode = 0x0000;
  C0 = 0;
  C1 = 0;
  C2 = 0;
  C3 = 0;
}

:FNINIT is vexMode=0 & byte=0xDB; byte=0xE3
{
  FPUControlWord = 0x037f;
  FPUStatusWord = 0x0000;
  FPUTagWord = 0xffff;
  FPUDataPointer = 0x00000000;
  FPUInstructionPointer = 0x00000000;
  FPULastInstructionOpcode = 0x0000;
  C0 = 0;
  C1 = 0;
  C2 = 0;
  C3 = 0;
}

:FIST m16 is vexMode=0 & byte=0xDF; (mod != 0b11 & reg_opcode=2) ... & m16
{
  FPUInstructionPointer = inst_start;
  tmp:10 = round(ST0);
  m16 = trunc(tmp);
}

:FIST m32 is vexMode=0 & byte=0xDB; (mod != 0b11 & reg_opcode=2) ... & m32
{
  FPUInstructionPointer = inst_start;
  tmp:10 = round(ST0);
  m32 = trunc(tmp);
}

:FISTP m16 is vexMode=0 & byte=0xDF; reg_opcode=3 ... & m16
{
  FPUInstructionPointer = inst_start;
  tmp:10 = round(ST0);
  fpop();
  m16 = trunc(tmp);
}

:FISTP m32 is vexMode=0 & byte=0xDB; reg_opcode=3 ... & m32
{
  FPUInstructionPointer = inst_start;
  tmp:10 = round(ST0);
  fpop();
  m32 = trunc(tmp);
}

:FISTP m64 is vexMode=0 & byte=0xDF; reg_opcode=7 ... & m64
{
  FPUInstructionPointer = inst_start;
  tmp:10 = round(ST0);
  fpop();
  m64 = trunc(tmp);
}

:FISTTP m16 is vexMode=0 & byte=0xDF; reg_opcode=1 ... & m16
{
  FPUInstructionPointer = inst_start;
  m16 = trunc(ST0);
  fpop();
}

:FISTTP m32 is vexMode=0 & byte=0xDB; reg_opcode=1 ... & m32
{
  FPUInstructionPointer = inst_start;
  m32 = trunc(ST0);
  fpop();
}

:FISTTP m64 is vexMode=0 & byte=0xDD; reg_opcode=1 ... & m64
{
  FPUInstructionPointer = inst_start;
  m64 = trunc(ST0);
  fpop();
}

:FLD m32fp is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=0) ... & m32fp
{
  FPUInstructionPointer = inst_start;
  fdec();
  ST0 = float2float(m32fp);
}

:FLD m64fp is vexMode=0 & byte=0xDD; reg_opcode=0 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  fdec();
  ST0 = float2float(m64fp);
}

:FLD m80fp is vexMode=0 & byte=0xDB; reg_opcode=5 ... & m80fp
{
  FPUInstructionPointer = inst_start;
  fpushv(m80fp);
}

# Be careful not to clobber freg during fpushv; a temporary must hold the value
:FLD freg is vexMode=0 & byte=0xD9; frow=12 & fpage=0 & freg { tmp:10 = freg; fpushv(tmp); }

:FLD1 is vexMode=0 & byte=0xD9; byte=0xE8
{
  FPUInstructionPointer = inst_start;
  one:4 = 1;
  tmp:10 = int2float(one);
  fpushv(tmp);
}

:FLDL2T is vexMode=0 & byte=0xD9; byte=0xE9
{
  FPUInstructionPointer = inst_start;
  src:8 = 0x400a934f0979a371;
  tmp:10 = float2float(src);
  fpushv(tmp);
}

:FLDL2E is vexMode=0 & byte=0xD9; byte=0xEA
{
  FPUInstructionPointer = inst_start;
  src:8 = 0x3ff71547652b82fe;
  tmp:10 = float2float(src);
  fpushv(tmp);
}

:FLDPI is vexMode=0 & byte=0xD9; byte=0xEB
{
  FPUInstructionPointer = inst_start;
  src:8 = 0x400921fb54442d18;
  tmp:10 = float2float(src);
  fpushv(tmp);
}

:FLDLG2 is vexMode=0 & byte=0xD9; byte=0xEC
{
  FPUInstructionPointer = inst_start;
  src:8 = 0x3fd34413509f79ff;
  tmp:10 = float2float(src);
  fpushv(tmp);
}

:FLDLN2 is vexMode=0 & byte=0xD9; byte=0xED
{
  FPUInstructionPointer = inst_start;
  src:8 = 0x3fe62e42fefa39ef;
  tmp:10 = float2float(src);
  fpushv(tmp);
}
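
# For reference, the IEEE-754 double constants pushed above decode to:
#   FLDL2T: 0x400a934f0979a371 = log2(10) ~ 3.3219280948873623
#   FLDL2E: 0x3ff71547652b82fe = log2(e)  ~ 1.4426950408889634
#   FLDPI:  0x400921fb54442d18 = pi       ~ 3.141592653589793
#   FLDLG2: 0x3fd34413509f79ff = log10(2) ~ 0.3010299956639812
#   FLDLN2: 0x3fe62e42fefa39ef = ln(2)    ~ 0.6931471805599453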

:FLDZ is vexMode=0 & byte=0xD9; byte=0xEE
{
  FPUInstructionPointer = inst_start;
  zero:4 = 0;
  tmp:10 = int2float(zero);
  fpushv(tmp);
}

:FLDCW m16 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=5) ... & m16
{
  FPUControlWord = m16;
}

define pcodeop fldenv;
:FLDENV Mem is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=4) ... & Mem
{
  FPUControlWord = *:2 (Mem);
  FPUStatusWord = *:2 (Mem + 4);
  FPUTagWord = *:2 (Mem + 8);
  FPUDataPointer = *:4 (Mem + 20);
  FPUInstructionPointer = *:4 (Mem + 12);
  FPULastInstructionOpcode = *:2 (Mem + 18);
}
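
# The offsets above follow the 28-byte 32-bit protected-mode FPU environment:
# +0 control word, +4 status word, +8 tag word, +12 FIP, +18 last opcode,
# +20 FDP. FRSTOR/FSAVE below reuse this header, with ST0-ST7 stored as
# ten-byte values starting at offset +28.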

:FMUL m32fp is vexMode=0 & byte=0xD8; reg_opcode=1 ... & m32fp
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f* float2float(m32fp);
}

:FMUL m64fp is vexMode=0 & byte=0xDC; reg_opcode=1 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f* float2float(m64fp);
}

:FMUL freg is vexMode=0 & byte=0xD8; frow=12 & fpage=1 & freg
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f* freg;
}

:FMUL freg is vexMode=0 & byte=0xDC; frow=12 & fpage=1 & freg
{
  FPUInstructionPointer = inst_start;
  freg = freg f* ST0;
}

:FMULP freg is vexMode=0 & byte=0xDE; frow=12 & fpage=1 & freg
{
  FPUInstructionPointer = inst_start;
  freg = ST0 f* freg;
  fpop();
}

:FMULP is vexMode=0 & byte=0xDE; byte=0xC9
{
  FPUInstructionPointer = inst_start;
  ST1 = ST0 f* ST1;
  fpop();
}

:FIMUL m32 is vexMode=0 & byte=0xDA; reg_opcode=1 ... & m32
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f* int2float(m32);
}

:FIMUL m16 is vexMode=0 & byte=0xDE; reg_opcode=1 ... & m16
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f* int2float(m16);
}

:FNOP is vexMode=0 & byte=0xD9; byte=0xD0
{
  FPUInstructionPointer = inst_start;
}

define pcodeop fpatan;
:FPATAN is vexMode=0 & byte=0xD9; byte=0xF3
{
  FPUInstructionPointer = inst_start;
  ST1 = fpatan(ST1, ST0);
  fpop();
}

:FPREM is vexMode=0 & byte=0xD9; byte=0xF8
{
  FPUInstructionPointer = inst_start;
  local quot = ST0 f/ ST1;
  local iquot:8 = trunc(quot);  # FPREM chops the quotient toward zero
  quot = int2float(iquot);
  ST0 = ST0 f- (quot f* ST1);
}

:FPREM1 is vexMode=0 & byte=0xD9; byte=0xF5
{
  FPUInstructionPointer = inst_start;
  local quot = ST0 f/ ST1;
  quot = round(quot);           # FPREM1 rounds the quotient to nearest
  ST0 = ST0 f- (quot f* ST1);
}
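
# FPREM and FPREM1 differ only in how the quotient is made integral: FPREM
# chops it toward zero while FPREM1 rounds it to nearest. A C sketch of the
# same computation (illustrative only):
#
#   double fprem(double x, double y)  { return x - trunc(x / y) * y; }
#   double fprem1(double x, double y) { return x - nearbyint(x / y) * y; }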

define pcodeop fptan;
:FPTAN is vexMode=0 & byte=0xD9; byte=0xF2
{
  FPUInstructionPointer = inst_start;
  ST0 = fptan(ST0);
  one:4 = 1;
  tmp:10 = int2float(one);
  fpushv(tmp);
}

:FRNDINT is vexMode=0 & byte=0xD9; byte=0xFC
{
  FPUInstructionPointer = inst_start;
  local tmp = round(ST0);
  ST0 = tmp;
}

:FRSTOR Mem is vexMode=0 & byte=0xDD; reg_opcode=4 ... & Mem
{
  FPUControlWord = *:2 (Mem);
  FPUStatusWord = *:2 (Mem + 4);
  FPUTagWord = *:2 (Mem + 8);
  FPUDataPointer = *:4 (Mem + 20);
  FPUInstructionPointer = *:4 (Mem + 12);
  FPULastInstructionOpcode = *:2 (Mem + 18);

  ST0 = *:10 (Mem + 28);
  ST1 = *:10 (Mem + 38);
  ST2 = *:10 (Mem + 48);
  ST3 = *:10 (Mem + 58);
  ST4 = *:10 (Mem + 68);
  ST5 = *:10 (Mem + 78);
  ST6 = *:10 (Mem + 88);
  ST7 = *:10 (Mem + 98);
}

:FSAVE Mem is vexMode=0 & byte=0x9B; byte=0xDD; reg_opcode=6 ... & Mem
{
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;

  *:10 (Mem + 28) = ST0;
  *:10 (Mem + 38) = ST1;
  *:10 (Mem + 48) = ST2;
  *:10 (Mem + 58) = ST3;
  *:10 (Mem + 68) = ST4;
  *:10 (Mem + 78) = ST5;
  *:10 (Mem + 88) = ST6;
  *:10 (Mem + 98) = ST7;

  FPUControlWord = 0x037f;
  FPUStatusWord = 0x0000;
  FPUTagWord = 0xffff;
  FPUDataPointer = 0x00000000;
  FPUInstructionPointer = 0x00000000;
  FPULastInstructionOpcode = 0x0000;
}

:FNSAVE Mem is vexMode=0 & byte=0xDD; reg_opcode=6 ... & Mem
{
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;

  *:10 (Mem + 28) = ST0;
  *:10 (Mem + 38) = ST1;
  *:10 (Mem + 48) = ST2;
  *:10 (Mem + 58) = ST3;
  *:10 (Mem + 68) = ST4;
  *:10 (Mem + 78) = ST5;
  *:10 (Mem + 88) = ST6;
  *:10 (Mem + 98) = ST7;

  FPUControlWord = 0x037f;
  FPUStatusWord = 0x0000;
  FPUTagWord = 0xffff;
  FPUDataPointer = 0x00000000;
  FPUInstructionPointer = 0x00000000;
  FPULastInstructionOpcode = 0x0000;
}

define pcodeop fscale;
:FSCALE is vexMode=0 & byte=0xD9; byte=0xFD
{
  FPUInstructionPointer = inst_start;
  ST0 = fscale(ST0, ST1);
}

define pcodeop fsin;
:FSIN is vexMode=0 & byte=0xD9; byte=0xFE
{
  FPUInstructionPointer = inst_start;
  ST0 = fsin(ST0);
}
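
# FSINCOS below pushes the cosine after replacing ST0 with the sine, so on
# completion ST0 holds cos(x) and ST1 holds sin(x) of the original ST0 value.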

:FSINCOS is vexMode=0 & byte=0xD9; byte=0xFB
{
  FPUInstructionPointer = inst_start;
  tmp:10 = fcos(ST0);
  ST0 = fsin(ST0);
  fpushv(tmp);
}

:FSQRT is vexMode=0 & byte=0xD9; byte=0xFA
{
  FPUInstructionPointer = inst_start;
  ST0 = sqrt(ST0);
}

:FST m32fp is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=2) ... & m32fp
{
  FPUInstructionPointer = inst_start;
  m32fp = float2float(ST0);
}

:FST m64fp is vexMode=0 & byte=0xDD; reg_opcode=2 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  m64fp = float2float(ST0);
}

:FST freg is vexMode=0 & byte=0xDD; frow=13 & fpage=0 & freg
{
  FPUInstructionPointer = inst_start;
  freg = ST0;
}

:FSTP m32fp is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=3) ... & m32fp
{
  FPUInstructionPointer = inst_start;
  m32fp = float2float(ST0);
  fpop();
}

:FSTP m64fp is vexMode=0 & byte=0xDD; reg_opcode=3 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  m64fp = float2float(ST0);
  fpop();
}

:FSTP m80fp is vexMode=0 & byte=0xDB; reg_opcode=7 ... & m80fp
{
  FPUInstructionPointer = inst_start;
  fpopv(m80fp);
}

:FSTP freg is vexMode=0 & byte=0xDD; frow=13 & fpage=1 & freg
{
  FPUInstructionPointer = inst_start;
  fpopv(freg);
}

:FSTCW m16 is vexMode=0 & byte=0x9B; byte=0xD9; (mod != 0b11 & reg_opcode=7) ... & m16
{
  m16 = FPUControlWord;
}

:FNSTCW m16 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=7) ... & m16
{
  m16 = FPUControlWord;
}

:FSTENV Mem is vexMode=0 & byte=0x9B; byte=0xD9; (mod != 0b11 & reg_opcode=6) ... & Mem
{
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;
}

:FNSTENV Mem is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=6) ... & Mem
{
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;
}

:FSTSW m16 is vexMode=0 & byte=0x9B; byte=0xDD; reg_opcode=7 ... & m16
{
  m16 = FPUStatusWord;
}

:FSTSW AX is vexMode=0 & byte=0x9B; byte=0xDF; byte=0xE0 & AX
{
  AX = FPUStatusWord;
}

:FNSTSW m16 is vexMode=0 & byte=0xDD; reg_opcode=7 ... & m16
{
  m16 = FPUStatusWord;
}

:FNSTSW AX is vexMode=0 & byte=0xDF; byte=0xE0 & AX
{
  AX = FPUStatusWord;
}

:FSUB m32fp is vexMode=0 & byte=0xD8; reg_opcode=4 ... & m32fp
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f- float2float(m32fp);
}

:FSUB m64fp is vexMode=0 & byte=0xDC; reg_opcode=4 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f- float2float(m64fp);
}

:FSUB ST0,freg is vexMode=0 & byte=0xD8; frow=14 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f- freg;
}

:FSUB freg,ST0 is vexMode=0 & byte=0xDC; frow=14 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = freg f- ST0;
}

:FSUBP is vexMode=0 & byte=0xDE; byte=0xE9
{
  FPUInstructionPointer = inst_start;
  ST1 = ST1 f- ST0;
  fpop();
}

:FSUBP freg,ST0 is vexMode=0 & byte=0xDE; frow=14 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = freg f- ST0;
  fpop();
}

:FISUB m32 is vexMode=0 & byte=0xDA; (mod != 0b11 & reg_opcode=4) ... & m32
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f- int2float(m32);
}

:FISUB m16 is vexMode=0 & byte=0xDE; reg_opcode=4 ... & m16
{
  FPUInstructionPointer = inst_start;
  ST0 = ST0 f- int2float(m16);
}

:FSUBR m32fp is vexMode=0 & byte=0xD8; reg_opcode=5 ... & m32fp
{
  FPUInstructionPointer = inst_start;
  ST0 = float2float(m32fp) f- ST0;
}

:FSUBR m64fp is vexMode=0 & byte=0xDC; reg_opcode=5 ... & m64fp
{
  FPUInstructionPointer = inst_start;
  ST0 = float2float(m64fp) f- ST0;
}

:FSUBR ST0,freg is vexMode=0 & byte=0xD8; frow=14 & fpage=1 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  ST0 = freg f- ST0;
}

:FSUBR freg,ST0 is vexMode=0 & byte=0xDC; frow=14 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = ST0 f- freg;
}

:FSUBRP is vexMode=0 & byte=0xDE; byte=0xE1
{
  FPUInstructionPointer = inst_start;
  ST1 = ST0 f- ST1;
  fpop();
}

:FSUBRP freg,ST0 is vexMode=0 & byte=0xDE; frow=14 & fpage=0 & freg & ST0
{
  FPUInstructionPointer = inst_start;
  freg = ST0 f- freg;
  fpop();
}

:FISUBR m32 is vexMode=0 & byte=0xDA; reg_opcode=5 ... & m32
{
  FPUInstructionPointer = inst_start;
  ST0 = int2float(m32) f- ST0;
}

:FISUBR m16 is vexMode=0 & byte=0xDE; reg_opcode=5 ... & m16
{
  FPUInstructionPointer = inst_start;
  ST0 = int2float(m16) f- ST0;
}

:FTST is vexMode=0 & byte=0xD9; byte=0xE4
{
  FPUInstructionPointer = inst_start;
  zero:4 = 0;
  tmp:10 = int2float(zero);
  fcom(tmp);
}

:FUCOM freg is vexMode=0 & byte=0xDD; frow=14 & fpage=0 & freg
{
  FPUInstructionPointer = inst_start;
  fcom(freg);
}

:FUCOM is vexMode=0 & byte=0xDD; byte=0xE1
{
  FPUInstructionPointer = inst_start;
  fcom(ST1);
}

:FUCOMP freg is vexMode=0 & byte=0xDD; frow=14 & fpage=1 & freg
{
  FPUInstructionPointer = inst_start;
  fcom(freg);
  fpop();
}

:FUCOMP is vexMode=0 & byte=0xDD; byte=0xE9
{
  FPUInstructionPointer = inst_start;
  fcom(ST1);
  fpop();
}

:FUCOMPP is vexMode=0 & byte=0xDA; byte=0xE9
{
  FPUInstructionPointer = inst_start;
  fcom(ST1);
  fpop();
  fpop();
}
:FXAM is vexMode=0 & byte=0xD9; byte=0xE5
{
  FPUInstructionPointer = inst_start;
  # this is not an exact implementation, but gets the sign and zero tests right
  izero:4 = 0;
  fzero:10 = int2float(izero);

  # infinity and empty are not tested for here; only NaN is flagged
  C0 = nan(ST0);

  # sign of ST0
  C1 = ( ST0 f< fzero );

  # assume normal if not zero
  C2 = ( ST0 f!= fzero );

  # equal to zero
  C3 = ( ST0 f== fzero );

  FPUStatusWord = (zext(C0)<<8) | (zext(C1)<<9) | (zext(C2)<<10) | (zext(C3)<<14);
}
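
# Status word bit positions used above: C0 = bit 8, C1 = bit 9, C2 = bit 10,
# C3 = bit 14. For example, examining ST0 == +0.0 gives C3=1 and C0=C1=C2=0,
# i.e. FPUStatusWord = 0x4000.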

:FXCH freg is vexMode=0 & byte=0xD9; frow=12 & fpage=1 & freg
{
  FPUInstructionPointer = inst_start;
  local tmp = ST0;
  ST0 = freg;
  freg = tmp;
}

:FXCH is vexMode=0 & byte=0xD9; byte=0xC9
{
  FPUInstructionPointer = inst_start;
  local tmp = ST0;
  ST0 = ST1;
  ST1 = tmp;
}

# fxsave and fxrstor
define pcodeop _fxsave;
define pcodeop _fxrstor;
@ifdef IA64
define pcodeop _fxsave64;
define pcodeop _fxrstor64;
@endif

# this saves the FPU state into 512 bytes of memory
:FXSAVE Mem is $(LONGMODE_OFF) & vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=0 ) ... & Mem
{
  _fxsave(Mem);
}

:FXRSTOR Mem is $(LONGMODE_OFF) & vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=1 ) ... & Mem
{
  _fxrstor(Mem);
}

@ifdef IA64
# this saves the FPU state into 512 bytes of memory, just as in 32-bit mode
:FXSAVE Mem is $(LONGMODE_ON) & vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=0 ) ... & Mem
{
  _fxsave(Mem);
}

:FXSAVE64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=0 ) ... & Mem
{
  _fxsave64(Mem);
}

:FXRSTOR Mem is $(LONGMODE_ON) & vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=1 ) ... & Mem
{
  _fxrstor(Mem);
}

:FXRSTOR64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=1 ) ... & Mem
{
  _fxrstor64(Mem);
}
@endif

define pcodeop extract_significand;
define pcodeop extract_exponent;
:FXTRACT is vexMode=0 & byte=0xD9; byte=0xF4
{
  FPUInstructionPointer = inst_start;
  significand:10 = extract_significand(ST0);
  exponent:10 = extract_exponent(ST0);
  ST0 = exponent;
  fpushv(significand);
}

:FYL2X is vexMode=0 & byte=0xD9; byte=0xF1
{
  FPUInstructionPointer = inst_start;
  local log2st0 = ST0;   # log2 is not modeled; ST0 stands in for log2(ST0)
  ST1 = ST1 f* log2st0;
  fpop();
}

:FYL2XP1 is vexMode=0 & byte=0xD9; byte=0xF9
{
  FPUInstructionPointer = inst_start;
  one:4 = 1;
  tmp:10 = int2float(one);
  log2st0:10 = ST0 f+ tmp;   # stands in for log2(ST0 + 1); log2 is not modeled
  ST1 = ST1 f* log2st0;
  fpop();
}

#
# MMX and SSE instructions
#

:ADDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x58; XmmReg ... & m128
{
  XmmReg[0,64] = XmmReg[0,64] f+ m128[0,64];
  XmmReg[64,64] = XmmReg[64,64] f+ m128[64,64];
}

:ADDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] f+ XmmReg2[64,64];
}

:ADDPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x58; m128 & XmmReg ...
{
  local m:16 = m128; # Guarantee value is in a fixed location
  XmmReg[0,32] = XmmReg[0,32] f+ m[0,32];
  XmmReg[32,32] = XmmReg[32,32] f+ m[32,32];
  XmmReg[64,32] = XmmReg[64,32] f+ m[64,32];
  XmmReg[96,32] = XmmReg[96,32] f+ m[96,32];
}

:ADDPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] f+ XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] f+ XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] f+ XmmReg2[96,32];
}

:ADDSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x58; m64 & XmmReg ...
{
  XmmReg[0,64] = XmmReg[0,64] f+ m64;
}

:ADDSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg2[0,64];
}

:ADDSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x58; m32 & XmmReg ...
{
  XmmReg[0,32] = XmmReg[0,32] f+ m32;
}

:ADDSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg2[0,32];
}

:ADDSUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD0; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f- m[0,64];
  XmmReg[64,64] = XmmReg[64,64] f+ m[64,64];
}

:ADDSUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD0; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] f+ XmmReg2[64,64];
}

:ADDSUBPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD0; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f- m[0,32];
  XmmReg[32,32] = XmmReg[32,32] f+ m[32,32];
  XmmReg[64,32] = XmmReg[64,32] f- m[64,32];
  XmmReg[96,32] = XmmReg[96,32] f+ m[96,32];
}

:ADDSUBPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD0; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] f+ XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] f- XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] f+ XmmReg2[96,32];
}

# special FLOATING POINT bitwise AND
:ANDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x54; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] & m[0,64];
  XmmReg[64,64] = XmmReg[64,64] & m[64,64];
}

# special FLOATING POINT bitwise AND
:ANDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x54; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] & XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] & XmmReg2[64,64];
}

# special FLOATING POINT bitwise AND
:ANDPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x54; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] & m[0,32];
  XmmReg[32,32] = XmmReg[32,32] & m[32,32];
  XmmReg[64,32] = XmmReg[64,32] & m[64,32];
  XmmReg[96,32] = XmmReg[96,32] & m[96,32];
}

# special FLOATING POINT bitwise AND
:ANDPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x54; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] & XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] & XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] & XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] & XmmReg2[96,32];
}

# special FLOATING POINT bitwise AND NOT
:ANDNPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x55; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = ~XmmReg[0,64] & m[0,64];
  XmmReg[64,64] = ~XmmReg[64,64] & m[64,64];
}

:ANDNPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x55; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = ~XmmReg1[0,64] & XmmReg2[0,64];
  XmmReg1[64,64] = ~XmmReg1[64,64] & XmmReg2[64,64];
}

# special FLOATING POINT bitwise AND NOT
:ANDNPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x55; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = ~XmmReg[0,32] & m[0,32];
  XmmReg[32,32] = ~XmmReg[32,32] & m[32,32];
  XmmReg[64,32] = ~XmmReg[64,32] & m[64,32];
  XmmReg[96,32] = ~XmmReg[96,32] & m[96,32];
}

:ANDNPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x55; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = ~XmmReg1[0,32] & XmmReg2[0,32];
  XmmReg1[32,32] = ~XmmReg1[32,32] & XmmReg2[32,32];
  XmmReg1[64,32] = ~XmmReg1[64,32] & XmmReg2[64,32];
  XmmReg1[96,32] = ~XmmReg1[96,32] & XmmReg2[96,32];
}

# predicate mnemonics for "CMP...PD" opcode
XmmCondPD: "EQ" is imm8=0 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f== xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( xmmTmp1_Qb f== xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "LT" is imm8=1 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f< xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( xmmTmp1_Qb f< xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "LE" is imm8=2 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f<= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( xmmTmp1_Qb f<= xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "UNORD" is imm8=3 {
  xmmTmp1_Qa = zext( nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( nan(xmmTmp1_Qb) || nan(xmmTmp2_Qb) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "NEQ" is imm8=4 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f!= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( xmmTmp1_Qb f!= xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "NLT" is imm8=5 {
  xmmTmp1_Qa = zext( !(xmmTmp1_Qa f< xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( !(xmmTmp1_Qb f< xmmTmp2_Qb) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "NLE" is imm8=6 {
  xmmTmp1_Qa = zext( !(xmmTmp1_Qa f<= xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( !(xmmTmp1_Qb f<= xmmTmp2_Qb) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "ORD" is imm8=7 {
  xmmTmp1_Qa = zext( !(nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa)) ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( !(nan(xmmTmp1_Qb) || nan(xmmTmp2_Qb)) ) * 0xFFFFFFFFFFFFFFFF;
}

define pcodeop cmppd;
XmmCondPD: is imm8 {
  xmmTmp1_Qa = cmppd(xmmTmp1_Qa, xmmTmp2_Qa, imm8:1);
  xmmTmp1_Qb = cmppd(xmmTmp1_Qb, xmmTmp2_Qb, imm8:1);
}
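
# The zext(...) * 0xFF..FF idiom above widens a 1-bit comparison result into
# an all-ones or all-zeros element mask. A C sketch of the same trick
# (illustrative only):
#
#   uint64_t mask = (uint64_t)(a < b) * 0xFFFFFFFFFFFFFFFFull;  /* cond ? ~0 : 0 */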

# immediate operand for "CMP...PD" opcode
# note: normally blank; the raw imm8 is displayed for all out-of-range cases
CMPPD_OPERAND: is imm8<8 { }
CMPPD_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondPD^"PD" XmmReg,m128^CMPPD_OPERAND is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC2; (m128 & XmmReg ...); XmmCondPD & CMPPD_OPERAND
{
  local m:16 = m128;
  xmmTmp1_Qa = XmmReg[0,64];
  xmmTmp1_Qb = XmmReg[64,64];

  xmmTmp2_Qa = m[0,64];
  xmmTmp2_Qb = m[64,64];

  build XmmCondPD;

  XmmReg[0,64] = xmmTmp1_Qa;
  XmmReg[64,64] = xmmTmp1_Qb;
}

:CMP^XmmCondPD^"PD" XmmReg1,XmmReg2^CMPPD_OPERAND is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondPD & CMPPD_OPERAND
{
  xmmTmp1_Qa = XmmReg1[0,64];
  xmmTmp1_Qb = XmmReg1[64,64];

  xmmTmp2_Qa = XmmReg2[0,64];
  xmmTmp2_Qb = XmmReg2[64,64];

  build XmmCondPD;

  XmmReg1[0,64] = xmmTmp1_Qa;
  XmmReg1[64,64] = xmmTmp1_Qb;
}

# predicate mnemonics for "CMP...PS" opcode
XmmCondPS: "EQ" is imm8=0 {
  xmmTmp1_Da = zext( xmmTmp1_Da f== xmmTmp2_Da ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( xmmTmp1_Db f== xmmTmp2_Db ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( xmmTmp1_Dc f== xmmTmp2_Dc ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( xmmTmp1_Dd f== xmmTmp2_Dd ) * 0xFFFFFFFF;
}

XmmCondPS: "LT" is imm8=1 {
  xmmTmp1_Da = zext( xmmTmp1_Da f< xmmTmp2_Da ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( xmmTmp1_Db f< xmmTmp2_Db ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( xmmTmp1_Dc f< xmmTmp2_Dc ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( xmmTmp1_Dd f< xmmTmp2_Dd ) * 0xFFFFFFFF;
}

XmmCondPS: "LE" is imm8=2 {
  xmmTmp1_Da = zext( xmmTmp1_Da f<= xmmTmp2_Da ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( xmmTmp1_Db f<= xmmTmp2_Db ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( xmmTmp1_Dc f<= xmmTmp2_Dc ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( xmmTmp1_Dd f<= xmmTmp2_Dd ) * 0xFFFFFFFF;
}

XmmCondPS: "UNORD" is imm8=3 {
  xmmTmp1_Da = zext( nan(xmmTmp1_Da) || nan(xmmTmp2_Da) ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( nan(xmmTmp1_Db) || nan(xmmTmp2_Db) ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( nan(xmmTmp1_Dc) || nan(xmmTmp2_Dc) ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( nan(xmmTmp1_Dd) || nan(xmmTmp2_Dd) ) * 0xFFFFFFFF;
}

XmmCondPS: "NEQ" is imm8=4 {
  xmmTmp1_Da = zext( xmmTmp1_Da f!= xmmTmp2_Da ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( xmmTmp1_Db f!= xmmTmp2_Db ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( xmmTmp1_Dc f!= xmmTmp2_Dc ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( xmmTmp1_Dd f!= xmmTmp2_Dd ) * 0xFFFFFFFF;
}

XmmCondPS: "NLT" is imm8=5 {
  xmmTmp1_Da = zext( !(xmmTmp1_Da f< xmmTmp2_Da) ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( !(xmmTmp1_Db f< xmmTmp2_Db) ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( !(xmmTmp1_Dc f< xmmTmp2_Dc) ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( !(xmmTmp1_Dd f< xmmTmp2_Dd) ) * 0xFFFFFFFF;
}

XmmCondPS: "NLE" is imm8=6 {
  xmmTmp1_Da = zext( !(xmmTmp1_Da f<= xmmTmp2_Da) ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( !(xmmTmp1_Db f<= xmmTmp2_Db) ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( !(xmmTmp1_Dc f<= xmmTmp2_Dc) ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( !(xmmTmp1_Dd f<= xmmTmp2_Dd) ) * 0xFFFFFFFF;
}

XmmCondPS: "ORD" is imm8=7 {
  xmmTmp1_Da = zext( !(nan(xmmTmp1_Da) || nan(xmmTmp2_Da)) ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( !(nan(xmmTmp1_Db) || nan(xmmTmp2_Db)) ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( !(nan(xmmTmp1_Dc) || nan(xmmTmp2_Dc)) ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( !(nan(xmmTmp1_Dd) || nan(xmmTmp2_Dd)) ) * 0xFFFFFFFF;
}

define pcodeop cmpps;
XmmCondPS: is imm8 {
  xmmTmp1_Da = cmpps(xmmTmp1_Da, xmmTmp2_Da, imm8:1);
  xmmTmp1_Db = cmpps(xmmTmp1_Db, xmmTmp2_Db, imm8:1);
  xmmTmp1_Dc = cmpps(xmmTmp1_Dc, xmmTmp2_Dc, imm8:1);
  xmmTmp1_Dd = cmpps(xmmTmp1_Dd, xmmTmp2_Dd, imm8:1);
}

# immediate operand for "CMP...PS" opcode
# note: normally blank; the raw imm8 is displayed for all out-of-range cases
CMPPS_OPERAND: is imm8<8 { }
CMPPS_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondPS^"PS" XmmReg,m128^CMPPS_OPERAND is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC2; (m128 & XmmReg ...); XmmCondPS & CMPPS_OPERAND
{
  local m:16 = m128;
  xmmTmp1_Da = XmmReg[0,32];
  xmmTmp1_Db = XmmReg[32,32];
  xmmTmp1_Dc = XmmReg[64,32];
  xmmTmp1_Dd = XmmReg[96,32];

  xmmTmp2_Da = m[0,32];
  xmmTmp2_Db = m[32,32];
  xmmTmp2_Dc = m[64,32];
  xmmTmp2_Dd = m[96,32];

  build XmmCondPS;

  XmmReg[0,32] = xmmTmp1_Da;
  XmmReg[32,32] = xmmTmp1_Db;
  XmmReg[64,32] = xmmTmp1_Dc;
  XmmReg[96,32] = xmmTmp1_Dd;
}

:CMP^XmmCondPS^"PS" XmmReg1,XmmReg2^CMPPS_OPERAND is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondPS & CMPPS_OPERAND
{
  xmmTmp1_Da = XmmReg1[0,32];
  xmmTmp1_Db = XmmReg1[32,32];
  xmmTmp1_Dc = XmmReg1[64,32];
  xmmTmp1_Dd = XmmReg1[96,32];

  xmmTmp2_Da = XmmReg2[0,32];
  xmmTmp2_Db = XmmReg2[32,32];
  xmmTmp2_Dc = XmmReg2[64,32];
  xmmTmp2_Dd = XmmReg2[96,32];

  build XmmCondPS;

  XmmReg1[0,32] = xmmTmp1_Da;
  XmmReg1[32,32] = xmmTmp1_Db;
  XmmReg1[64,32] = xmmTmp1_Dc;
  XmmReg1[96,32] = xmmTmp1_Dd;
}

# predicate mnemonics for "CMP...SD" opcode
XmmCondSD: "EQ" is imm8=0 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f== xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "LT" is imm8=1 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f< xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "LE" is imm8=2 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f<= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "UNORD" is imm8=3 {
  xmmTmp1_Qa = zext( nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "NEQ" is imm8=4 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f!= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "NLT" is imm8=5 {
  xmmTmp1_Qa = zext( !(xmmTmp1_Qa f< xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "NLE" is imm8=6 {
  xmmTmp1_Qa = zext( !(xmmTmp1_Qa f<= xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "ORD" is imm8=7 {
  xmmTmp1_Qa = zext( !(nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa)) ) * 0xFFFFFFFFFFFFFFFF;
}

define pcodeop cmpsd;
XmmCondSD: is imm8 {
  xmmTmp1_Qa = cmpsd(xmmTmp1_Qa, xmmTmp2_Qa, imm8:1);
}

# immediate operand for "CMP...SD" opcode
# note: normally blank; the raw imm8 is displayed for all out-of-range cases
CMPSD_OPERAND: is imm8<8 { }
CMPSD_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondSD^"SD" XmmReg, m64^CMPSD_OPERAND is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xC2; (m64 & XmmReg ...); XmmCondSD & CMPSD_OPERAND
{
  xmmTmp1_Qa = XmmReg[0,64];
  xmmTmp2_Qa = m64;
  build XmmCondSD;
  XmmReg[0,64] = xmmTmp1_Qa;
}

:CMP^XmmCondSD^"SD" XmmReg1, XmmReg2^CMPSD_OPERAND is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondSD & CMPSD_OPERAND
{
  xmmTmp1_Qa = XmmReg1[0,64];
  xmmTmp2_Qa = XmmReg2[0,64];
  build XmmCondSD;
  XmmReg1[0,64] = xmmTmp1_Qa;
}

# predicate mnemonics for "CMP...SS" opcode
XmmCondSS: "EQ" is imm8=0 {
  xmmTmp1_Da = zext( xmmTmp1_Da f== xmmTmp2_Da ) * 0xFFFFFFFF;
}

XmmCondSS: "LT" is imm8=1 {
  xmmTmp1_Da = zext( xmmTmp1_Da f< xmmTmp2_Da ) * 0xFFFFFFFF;
}

XmmCondSS: "LE" is imm8=2 {
  xmmTmp1_Da = zext( xmmTmp1_Da f<= xmmTmp2_Da ) * 0xFFFFFFFF;
}

XmmCondSS: "UNORD" is imm8=3 {
  xmmTmp1_Da = zext( nan(xmmTmp1_Da) || nan(xmmTmp2_Da) ) * 0xFFFFFFFF;
}

XmmCondSS: "NEQ" is imm8=4 {
  xmmTmp1_Da = zext( xmmTmp1_Da f!= xmmTmp2_Da ) * 0xFFFFFFFF;
}

XmmCondSS: "NLT" is imm8=5 {
  xmmTmp1_Da = zext( !(xmmTmp1_Da f< xmmTmp2_Da) ) * 0xFFFFFFFF;
}

XmmCondSS: "NLE" is imm8=6 {
  xmmTmp1_Da = zext( !(xmmTmp1_Da f<= xmmTmp2_Da) ) * 0xFFFFFFFF;
}

XmmCondSS: "ORD" is imm8=7 {
  xmmTmp1_Da = zext( !(nan(xmmTmp1_Da) || nan(xmmTmp2_Da)) ) * 0xFFFFFFFF;
}

define pcodeop cmpss;
XmmCondSS: is imm8 {
  xmmTmp1_Da = cmpss(xmmTmp1_Da, xmmTmp2_Da, imm8:1);
}

# immediate operand for "CMP...SS" opcode
# note: normally blank; the raw imm8 is displayed for all out-of-range cases
CMPSS_OPERAND: is imm8<8 { }
CMPSS_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondSS^"SS" XmmReg, m32^CMPSS_OPERAND is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xC2; (m32 & XmmReg ...); XmmCondSS & CMPSS_OPERAND
{
  xmmTmp1_Da = XmmReg[0,32];
  xmmTmp2_Da = m32;
  build XmmCondSS;
  XmmReg[0,32] = xmmTmp1_Da;
}

:CMP^XmmCondSS^"SS" XmmReg1, XmmReg2^CMPSS_OPERAND is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondSS & CMPSS_OPERAND
{
  xmmTmp1_Da = XmmReg1[0,32];
  xmmTmp2_Da = XmmReg2[0,32];
  build XmmCondSS;
  XmmReg1[0,32] = xmmTmp1_Da;
}

:COMISD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2F; m64 & XmmReg ...
{
  fucompe(XmmReg[0,64], m64);
}

:COMISD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2F; xmmmod=3 & XmmReg1 & XmmReg2
{
  fucompe(XmmReg1[0,64], XmmReg2[0,64]);
}

:COMISS XmmReg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2F; m32 & XmmReg ...
{
  fucompe(XmmReg[0,32], m32);
}

:COMISS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2F; xmmmod=3 & XmmReg1 & XmmReg2
{
  fucompe(XmmReg1[0,32], XmmReg2[0,32]);
}

:CVTDQ2PD XmmReg, m64 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xE6; m64 & XmmReg ...
{
  local m:8 = m64;
  XmmReg[0,64] = int2float( m[0,32] );
  XmmReg[64,64] = int2float( m[32,32] );
}

:CVTDQ2PD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2
{
  local tmp:8 = XmmReg2[0,64];
  XmmReg1[0,64] = int2float( tmp[0,32] );
  XmmReg1[64,64] = int2float( tmp[32,32] );
}

:CVTDQ2PS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5B; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = int2float( m[0,32] );
  XmmReg[32,32] = int2float( m[32,32] );
  XmmReg[64,32] = int2float( m[64,32] );
  XmmReg[96,32] = int2float( m[96,32] );
}

:CVTDQ2PS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = int2float( XmmReg2[0,32] );
  XmmReg1[32,32] = int2float( XmmReg2[32,32] );
  XmmReg1[64,32] = int2float( XmmReg2[64,32] );
  XmmReg1[96,32] = int2float( XmmReg2[96,32] );
}
|
|
|
|
:CVTPD2DQ XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xE6; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,32] = trunc( m[0,64] );
|
|
XmmReg[32,32] = trunc( m[64,64] );
|
|
XmmReg[64,32] = 0;
|
|
XmmReg[96,32] = 0;
|
|
}
|
|
|
|
:CVTPD2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,32] = trunc( XmmReg2[0,64] );
|
|
XmmReg1[32,32] = trunc( XmmReg2[64,64] );
|
|
XmmReg1[64,32] = 0;
|
|
XmmReg1[96,32] = 0;
|
|
}
|
|
|
|
:CVTPD2PI mmxreg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2D; mmxreg ... & m128
|
|
{
|
|
local m:16 = m128;
|
|
mmxreg[0,32] = trunc( m[0,64] );
|
|
mmxreg[32,32] = trunc( m[64,64] );
|
|
}
|
|
|
|
:CVTPD2PI mmxreg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2D; xmmmod=3 & mmxreg1 & XmmReg2
|
|
{
|
|
mmxreg1[0,32] = trunc( XmmReg2[0,64] );
|
|
mmxreg1[32,32] = trunc( XmmReg2[64,64] );
|
|
}
|
|
|
|
:CVTPD2PS XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5A; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,32] = float2float( m[0,64] );
|
|
XmmReg[32,32] = float2float( m[64,64] );
|
|
XmmReg[64,32] = 0;
|
|
XmmReg[96,32] = 0;
|
|
}
|
|
|
|
:CVTPD2PS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,32] = float2float( XmmReg2[0,64] );
|
|
XmmReg1[32,32] = float2float( XmmReg2[64,64] );
|
|
XmmReg1[64,32] = 0;
|
|
XmmReg1[96,32] = 0;
|
|
}

:CVTPI2PD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2A; m64 & XmmReg ...
{
  local m:8 = m64;
  XmmReg[0,64] = int2float(m[0,32]);
  XmmReg[64,64] = int2float(m[32,32]);
}

:CVTPI2PD XmmReg1, mmxreg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2A; xmmmod=3 & XmmReg1 & mmxreg2
{
  XmmReg1[0,64] = int2float(mmxreg2[0,32]);
  XmmReg1[64,64] = int2float(mmxreg2[32,32]);
}

:CVTPI2PS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2A; m64 & XmmReg ...
{
  local m:8 = m64;
  XmmReg[0,32] = int2float(m[0,32]);
  XmmReg[32,32] = int2float(m[32,32]);
}

:CVTPI2PS XmmReg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2A; xmmmod=3 & XmmReg1 & mmxreg2
{
  XmmReg1[0,32] = int2float(mmxreg2[0,32]);
  XmmReg1[32,32] = int2float(mmxreg2[32,32]);
}

:CVTPS2DQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5B; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = trunc( round(m[0,32]) );
  XmmReg[32,32] = trunc( round(m[32,32]) );
  XmmReg[64,32] = trunc( round(m[64,32]) );
  XmmReg[96,32] = trunc( round(m[96,32]) );
}

:CVTPS2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = trunc( round(XmmReg2[0,32]) );
  XmmReg1[32,32] = trunc( round(XmmReg2[32,32]) );
  XmmReg1[64,32] = trunc( round(XmmReg2[64,32]) );
  XmmReg1[96,32] = trunc( round(XmmReg2[96,32]) );
}

:CVTPS2PD XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5A; m64 & XmmReg ...
{
  local m:8 = m64;
  XmmReg[0,64] = float2float( m[0,32] );
  XmmReg[64,64] = float2float( m[32,32] );
}

:CVTPS2PD XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2
{
  local tmp:8 = XmmReg2[0,64];
  XmmReg1[0,64] = float2float( tmp[0,32] );
  XmmReg1[64,64] = float2float( tmp[32,32] );
}

:CVTPS2PI mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2D; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = trunc(round(m[0,32]));
  mmxreg[32,32] = trunc(round(m[32,32]));
  FPUTagWord = 0x0000;
}

:CVTPS2PI mmxreg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2D; xmmmod=3 & mmxreg1 & XmmReg2
{
  mmxreg1[0,32] = trunc(round(XmmReg2[0,32]));
  mmxreg1[32,32] = trunc(round(XmmReg2[32,32]));
  FPUTagWord = 0x0000;
}

:CVTSD2SI Reg32, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2D; (Reg32 & check_Reg32_dest) ... & m64
{
  Reg32 = trunc(round(m64));
  build check_Reg32_dest;
}

:CVTSD2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg32 & check_Reg32_dest & XmmReg2
{
  Reg32 = trunc(round(XmmReg2[0,64]));
  build check_Reg32_dest;
}

@ifdef IA64
:CVTSD2SI Reg64, m64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2D; Reg64 ... & m64
{
  Reg64 = trunc(round(m64));
}

:CVTSD2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg64 & XmmReg2
{
  Reg64 = trunc(round(XmmReg2[0,64]));
}
@endif

:CVTSD2SS XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5A; m64 & XmmReg ...
{
  XmmReg[0,32] = float2float(m64);
}

:CVTSD2SS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = float2float(XmmReg2[0,64]);
}

:CVTSI2SD XmmReg, rm32 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2A; rm32 & XmmReg ...
{
  XmmReg[0,64] = int2float(rm32);
}

@ifdef IA64
:CVTSI2SD XmmReg, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2A; rm64 & XmmReg ...
{
  XmmReg[0,64] = int2float(rm64);
}
@endif

:CVTSI2SS XmmReg, rm32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2A; rm32 & XmmReg ...
{
  XmmReg[0,32] = int2float(rm32);
}

@ifdef IA64
:CVTSI2SS XmmReg, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2A; rm64 & XmmReg ...
{
  XmmReg[0,32] = int2float(rm64);
}
@endif

:CVTSS2SD XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5A; m32 & XmmReg ...
{
  XmmReg[0,64] = float2float(m32);
}

:CVTSS2SD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = float2float(XmmReg2[0,32]);
}

:CVTSS2SI Reg32, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2D; (Reg32 & check_Reg32_dest) ... & m32
{
  Reg32 = trunc(round(m32));
  build check_Reg32_dest;
}

:CVTSS2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg32 & check_Reg32_dest & XmmReg2
{
  Reg32 = trunc(round(XmmReg2[0,32]));
  build check_Reg32_dest;
}

@ifdef IA64
:CVTSS2SI Reg64, m32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2D; Reg64 ... & m32
{
  Reg64 = trunc(round(m32));
}

:CVTSS2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg64 & XmmReg2
{
  Reg64 = trunc(round(XmmReg2[0,32]));
}
@endif

:CVTTPD2PI mmxreg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2C; mmxreg ... & m128
{
  local m:16 = m128;
  mmxreg[0,32] = trunc(m[0,64]);
  mmxreg[32,32] = trunc(m[64,64]);
  FPUTagWord = 0x0000;
}

:CVTTPD2PI mmxreg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2C; xmmmod=3 & mmxreg1 & XmmReg2
{
  mmxreg1[0,32] = trunc(XmmReg2[0,64]);
  mmxreg1[32,32] = trunc(XmmReg2[64,64]);
  FPUTagWord = 0x0000;
}

:CVTTPD2DQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE6; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = trunc(m[0,64]);
  XmmReg[32,32] = trunc(m[64,64]);
  XmmReg[64,32] = 0;
  XmmReg[96,32] = 0;
}

:CVTTPD2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = trunc(XmmReg2[0,64]);
  XmmReg1[32,32] = trunc(XmmReg2[64,64]);
  XmmReg1[64,32] = 0;
  XmmReg1[96,32] = 0;
}

:CVTTPS2DQ XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5B; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = trunc(m[0,32]);
  XmmReg[32,32] = trunc(m[32,32]);
  XmmReg[64,32] = trunc(m[64,32]);
  XmmReg[96,32] = trunc(m[96,32]);
}

:CVTTPS2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = trunc(XmmReg2[0,32]);
  XmmReg1[32,32] = trunc(XmmReg2[32,32]);
  XmmReg1[64,32] = trunc(XmmReg2[64,32]);
  XmmReg1[96,32] = trunc(XmmReg2[96,32]);
}

:CVTTPS2PI mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2C; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = trunc(m[0,32]);
  mmxreg[32,32] = trunc(m[32,32]);
  FPUTagWord = 0x0000;
}

:CVTTPS2PI mmxreg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2C; xmmmod=3 & mmxreg1 & XmmReg2
{
  mmxreg1[0,32] = trunc(XmmReg2[0,32]);
  mmxreg1[32,32] = trunc(XmmReg2[32,32]);
  FPUTagWord = 0x0000;
}

:CVTTSD2SI Reg32, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2C; (Reg32 & check_Reg32_dest) ... & m64
{
  Reg32 = trunc(m64);
  build check_Reg32_dest;
}

:CVTTSD2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg32 & check_Reg32_dest & XmmReg2
{
  Reg32 = trunc(XmmReg2[0,64]);
  build check_Reg32_dest;
}

@ifdef IA64
:CVTTSD2SI Reg64, m64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2C; Reg64 ... & m64
{
  Reg64 = trunc(m64);
}

:CVTTSD2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg64 & XmmReg2
{
  Reg64 = trunc(XmmReg2[0,64]);
}
@endif

:CVTTSS2SI Reg32, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2C; (Reg32 & check_Reg32_dest) ... & m32
{
  Reg32 = trunc(m32);
  build check_Reg32_dest;
}

:CVTTSS2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg32 & check_Reg32_dest & XmmReg2
{
  Reg32 = trunc(XmmReg2[0,32]);
  build check_Reg32_dest;
}

@ifdef IA64
:CVTTSS2SI Reg64, m32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2C; Reg64 ... & m32
{
  Reg64 = trunc(m32);
}

:CVTTSS2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg64 & XmmReg2
{
  Reg64 = trunc(XmmReg2[0,32]);
}
@endif

define pcodeop divpd;
:DIVPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5E; XmmReg ... & m128 { XmmReg = divpd(XmmReg, m128); }
:DIVPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = divpd(XmmReg1, XmmReg2); }

define pcodeop divps;
:DIVPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5E; XmmReg ... & m128 { XmmReg = divps(XmmReg, m128); }
:DIVPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = divps(XmmReg1, XmmReg2); }

:DIVSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5E; m64 & XmmReg ...
{
  XmmReg[0,64] = XmmReg[0,64] f/ m64;
}

:DIVSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f/ XmmReg2[0,64];
}

:DIVSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5E; m32 & XmmReg ...
{
  XmmReg[0,32] = XmmReg[0,32] f/ m32;
}

:DIVSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f/ XmmReg2[0,32];
}

:EMMS is vexMode=0 & byte=0x0F; byte=0x77 { FPUTagWord = 0xFFFF; }
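
# EMMS marks every x87 tag-word entry as empty (each 2-bit tag = 11b, hence
# 0xFFFF), handing the register stack back to the x87 FPU after MMX use.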

:HADDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7C; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f+ XmmReg[64,64];
  XmmReg[64,64] = m[0,64] f+ m[64,64];
}

:HADDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7C; xmmmod=3 & XmmReg1 & XmmReg2
{
  local tmp:16 = XmmReg2;
  XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg1[64,64];
  XmmReg1[64,64] = tmp[0,64] f+ tmp[64,64];
}

:HADDPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7C; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f+ XmmReg[32,32];
  XmmReg[32,32] = XmmReg[64,32] f+ XmmReg[96,32];
  XmmReg[64,32] = m[0,32] f+ m[32,32];
  XmmReg[96,32] = m[64,32] f+ m[96,32];
}

:HADDPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7C; xmmmod=3 & XmmReg1 & XmmReg2
{
  local tmp:16 = XmmReg2;
  XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg1[32,32];
  XmmReg1[32,32] = XmmReg1[64,32] f+ XmmReg1[96,32];
  XmmReg1[64,32] = tmp[0,32] f+ tmp[32,32];
  XmmReg1[96,32] = tmp[64,32] f+ tmp[96,32];
}

:HSUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7D; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f- XmmReg[64,64];
  XmmReg[64,64] = m[0,64] f- m[64,64];
}

:HSUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7D; xmmmod=3 & XmmReg1 & XmmReg2
{
  local tmp:16 = XmmReg2;
  XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg1[64,64];
  XmmReg1[64,64] = tmp[0,64] f- tmp[64,64];
}

:HSUBPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7D; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f- XmmReg[32,32];
  XmmReg[32,32] = XmmReg[64,32] f- XmmReg[96,32];
  XmmReg[64,32] = m[0,32] f- m[32,32];
  XmmReg[96,32] = m[64,32] f- m[96,32];
}

:HSUBPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7D; xmmmod=3 & XmmReg1 & XmmReg2
{
  local tmp:16 = XmmReg2;
  XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg1[32,32];
  XmmReg1[32,32] = XmmReg1[64,32] f- XmmReg1[96,32];
  XmmReg1[64,32] = tmp[0,32] f- tmp[32,32];
  XmmReg1[96,32] = tmp[64,32] f- tmp[96,32];
}

#--------------------
#SSE3...
#--------------------

define pcodeop lddqu;
:LDDQU XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xF0; XmmReg ... & m128 { XmmReg = lddqu(XmmReg, m128); }

define pcodeop maskmovdqu;
:MASKMOVDQU XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF7; XmmReg1 & XmmReg2 { XmmReg1 = maskmovdqu(XmmReg1, XmmReg2); }

define pcodeop maxpd;
:MAXPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5F; XmmReg ... & m128 { XmmReg = maxpd(XmmReg, m128); }
:MAXPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = maxpd(XmmReg1, XmmReg2); }

define pcodeop maxps;
:MAXPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5F; XmmReg ... & m128 { XmmReg = maxps(XmmReg, m128); }
:MAXPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = maxps(XmmReg1, XmmReg2); }

:MAXSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5F; XmmReg ... & m64
{
  local tmp:8 = m64;
  if (tmp f< XmmReg[0,64]) goto inst_next;
  XmmReg[0,64] = tmp;
}

:MAXSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2
{
  if (XmmReg2[0,64] f< XmmReg1[0,64]) goto inst_next;
  XmmReg1[0,64] = XmmReg2[0,64];
}

:MAXSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5F; XmmReg ... & m32
{
  local tmp:4 = m32;
  if (tmp f< XmmReg[0,32]) goto inst_next;
  XmmReg[0,32] = tmp;
}

:MAXSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2
{
  if (XmmReg2[0,32] f< XmmReg1[0,32]) goto inst_next;
  XmmReg1[0,32] = XmmReg2[0,32];
}

define pcodeop minpd;
:MINPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5D; XmmReg ... & m128 { XmmReg = minpd(XmmReg, m128); }
:MINPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = minpd(XmmReg1, XmmReg2); }

define pcodeop minps;
:MINPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5D; XmmReg ... & m128 { XmmReg = minps(XmmReg, m128); }
:MINPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = minps(XmmReg1, XmmReg2); }

:MINSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5D; XmmReg ... & m64
{
  local tmp:8 = m64;
  if (XmmReg[0,64] f< tmp) goto inst_next;
  XmmReg[0,64] = tmp;
}

:MINSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2
{
  if (XmmReg1[0,64] f< XmmReg2[0,64]) goto inst_next;
  XmmReg1[0,64] = XmmReg2[0,64];
}

:MINSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5D; XmmReg ... & m32
{
  local tmp:4 = m32;
  if (XmmReg[0,32] f< tmp) goto inst_next;
  XmmReg[0,32] = tmp;
}

:MINSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2
{
  if (XmmReg1[0,32] f< XmmReg2[0,32]) goto inst_next;
  XmmReg1[0,32] = XmmReg2[0,32];
}
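
# Note on the MAXS*/MINS* branch idiom above: the destination keeps its value
# only when the guard comparison is true; otherwise the source is copied in.
# Since f< is false for unordered (NaN) operands and for equal values
# (including +0.0 vs -0.0), the fall-through writes the second operand in the
# same cases where the x86 pseudocode specifies DEST <- SRC2.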

:MOVAPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x28; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = m[0,64];
  XmmReg[64,64] = m[64,64];
}

:MOVAPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg2[64,64];
}

:MOVAPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x29; m128 & XmmReg ...
{
  m128 = XmmReg;
}

:MOVAPD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,64] = XmmReg1[0,64];
  XmmReg2[64,64] = XmmReg1[64,64];
}

:MOVAPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x28; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = m[0,32];
  XmmReg[32,32] = m[32,32];
  XmmReg[64,32] = m[64,32];
  XmmReg[96,32] = m[96,32];
}

:MOVAPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg2[96,32];
}

:MOVAPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x29; m128 & XmmReg ...
{
  m128 = XmmReg;
}

:MOVAPS XmmReg2, XmmReg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,32] = XmmReg1[0,32];
  XmmReg2[32,32] = XmmReg1[32,32];
  XmmReg2[64,32] = XmmReg1[64,32];
  XmmReg2[96,32] = XmmReg1[96,32];
}

:MOVD mmxreg, rm32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6E; rm32 & mmxreg ... { mmxreg = zext(rm32); }
:MOVD rm32, mmxreg is vexMode=0 & rexWprefix=0 & mandover=0 & byte=0x0F; byte=0x7E; rm32 & check_rm32_dest ... & mmxreg ... { rm32 = mmxreg(0); build check_rm32_dest; }
:MOVD XmmReg, rm32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6E; rm32 & XmmReg ... { XmmReg = zext(rm32); }
:MOVD rm32, XmmReg is vexMode=0 & $(PRE_66) & rexWprefix=0 & byte=0x0F; byte=0x7E; rm32 & check_rm32_dest ... & XmmReg ... { rm32 = XmmReg(0); build check_rm32_dest; }
@ifdef IA64
:MOVQ mmxreg, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & mandover=0 & byte=0x0F; byte=0x6E; rm64 & mmxreg ... { mmxreg = rm64; }
:MOVQ rm64, mmxreg is $(LONGMODE_ON) & vexMode=0 & opsize=2 & mandover=0 & byte=0x0F; byte=0x7E; rm64 & mmxreg ... { rm64 = mmxreg; }
:MOVQ XmmReg, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_66) & byte=0x0F; byte=0x6E; rm64 & XmmReg ... { XmmReg = zext(rm64); }
:MOVQ rm64, XmmReg is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_66) & byte=0x0F; byte=0x7E; rm64 & XmmReg ... { rm64 = XmmReg(0); }
@endif

:MOVDIRI Mem,Reg32 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0x38; byte=0xF9; Mem & Reg32 ... { *Mem = Reg32; }
@ifdef IA64
:MOVDIRI Mem,Reg64 is vexMode=0 & $(PRE_NO) & $(REX_W) & byte=0x0F; byte=0x38; byte=0xF9; Mem & Reg64 ... { *Mem = Reg64; }
@endif

define pcodeop movdir64b;
:MOVDIR64B Reg16, m512 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_66) & addrsize=0 & byte=0x0F; byte=0x38; byte=0xF8; Reg16 ... & m512 {
  movdir64b(Reg16, m512);
}
:MOVDIR64B Reg32, m512 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_66) & addrsize=1 & byte=0x0F; byte=0x38; byte=0xF8; Reg32 ... & m512 {
  movdir64b(Reg32, m512);
}

@ifdef IA64
:MOVDIR64B Reg32, m512 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & addrsize=1 & byte=0x0F; byte=0x38; byte=0xF8; Reg32 ... & m512 {
  movdir64b(Reg32, m512);
}

:MOVDIR64B Reg64, m512 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xF8; Reg64 ... & m512 {
  movdir64b(Reg64, m512);
}
@endif

:MOVDDUP XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x12; m64 & XmmReg ...
{
  XmmReg[0,64] = m64;
  XmmReg[64,64] = m64;
}

:MOVDDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg2[0,64];
}

:MOVSHDUP XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x16; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = m[32,32];
  XmmReg[32,32] = m[32,32];
  XmmReg[64,32] = m[96,32];
  XmmReg[96,32] = m[96,32];
}

:MOVSHDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x16; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[32,32];
  XmmReg1[32,32] = XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg2[96,32];
  XmmReg1[96,32] = XmmReg2[96,32];
}

:MOVSLDUP XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x12; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = m[0,32];
  XmmReg[32,32] = m[0,32];
  XmmReg[64,32] = m[64,32];
  XmmReg[96,32] = m[64,32];
}

:MOVSLDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg2[0,32];
  XmmReg1[64,32] = XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg2[64,32];
}

:MOVDQA XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6F; XmmReg ... & m128 { XmmReg = m128; }
:MOVDQA XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg2; }
:MOVDQA m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7F; XmmReg ... & m128 { m128 = XmmReg; }
:MOVDQA XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg2 = XmmReg1; }

:MOVDQU XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x6F; XmmReg ... & m128 { XmmReg = m128; }
:MOVDQU XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x6F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg2; }
:MOVDQU m128, XmmReg is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7F; XmmReg ... & m128 { m128 = XmmReg; }
:MOVDQU XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg2 = XmmReg1; }

# TODO: the vexMode=0 constraint here is potentially wrong

define pcodeop movdq2q;
:MOVDQ2Q mmxreg2, XmmReg1 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD6; XmmReg1 & mmxreg2 { mmxreg2 = movdq2q(mmxreg2, XmmReg1); }

:MOVHLPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg2[64,64]; }

:MOVHPD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x16; XmmReg ... & m64 { XmmReg[64,64] = m64; }
:MOVHPD m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x17; XmmReg ... & m64 { m64 = XmmReg[64,64]; }

:MOVHPS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x16; XmmReg ... & m64 { XmmReg[64,64] = m64; }
:MOVHPS m64, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x17; XmmReg ... & m64 { m64 = XmmReg[64,64]; }

:MOVLHPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x16; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[64,64] = XmmReg2[0,64]; }

:MOVLPD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x12; XmmReg ... & m64 { XmmReg[0,64] = m64; }
:MOVLPD m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x13; XmmReg ... & m64 { m64 = XmmReg[0,64]; }

:MOVLPS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x12; XmmReg ... & m64 { XmmReg[0,64] = m64; }
:MOVLPS m64, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x13; XmmReg ... & m64 { m64 = XmmReg[0,64]; }

define pcodeop movmskpd;
:MOVMSKPD Reg32, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x50; XmmReg2 & Reg32 { Reg32 = movmskpd(Reg32, XmmReg2); }

define pcodeop movmskps;
:MOVMSKPS Reg32, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x50; XmmReg2 & Reg32 { Reg32 = movmskps(Reg32, XmmReg2); }

:MOVNTQ m64, mmxreg is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE7; mmxreg ... & m64 { m64 = mmxreg; }

:MOVNTDQ m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE7; XmmReg ... & m128 { m128 = XmmReg; }

:MOVNTPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2B; XmmReg ... & m128 { m128 = XmmReg; }

:MOVNTPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2B; XmmReg ... & m128 { m128 = XmmReg; }

:MOVQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6F; mmxreg ... & m64 { mmxreg = m64; }
:MOVQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg2; }
:MOVQ m64, mmxreg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x7F; mmxreg ... & m64 { m64 = mmxreg; }
:MOVQ mmxreg2, mmxreg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x7F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg2 = mmxreg1; }

:MOVQ XmmReg, m64 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7E; XmmReg ... & m64
{
  XmmReg = zext(m64);
}

:MOVQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7E; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1 = zext(XmmReg2[0,64]);
}

:MOVQ m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD6; m64 & XmmReg ...
{
  m64 = XmmReg[0,64];
}

:MOVQ XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD6; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2 = zext(XmmReg1[0,64]);
}

:MOVQ2DQ XmmReg, mmxreg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xD6; XmmReg & mmxreg2
{
  XmmReg = zext(mmxreg2);
  # may need to model x87 FPU state changes too ?????
}

:MOVSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x10; m64 & XmmReg ...
{
  XmmReg[0,64] = m64;
  XmmReg[64,64] = 0;
}

:MOVSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg2[0,64];
}

:MOVSD m64, XmmReg is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x11; m64 & XmmReg ...
{
  m64 = XmmReg[0,64];
}

:MOVSD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,64] = XmmReg1[0,64];
}

:MOVSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x10; m32 & XmmReg ...
{
  XmmReg[0,32] = m32;
  XmmReg[32,32] = 0;
  XmmReg[64,32] = 0;
  XmmReg[96,32] = 0;
}

:MOVSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[0,32];
}

:MOVSS m32, XmmReg is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x11; m32 & XmmReg ...
{
  m32 = XmmReg[0,32];
}

:MOVSS XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,32] = XmmReg1[0,32];
}

:MOVUPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x10; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = m[0,64];
  XmmReg[64,64] = m[64,64];
}

:MOVUPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg2[64,64];
}

:MOVUPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x11; m128 & XmmReg ...
{
  m128 = XmmReg;
}

:MOVUPD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,64] = XmmReg1[0,64];
  XmmReg2[64,64] = XmmReg1[64,64];
}

# Not sure why someone had done it this way ?????
#Xmm2m128: m128 is vexMode=0 & m128 { export m128; }
#Xmm2m128: XmmReg2 is vexMode=0 & xmmmod=3 & XmmReg2 { export XmmReg2; }
#
#define pcodeop movups;
##:MOVUPS XmmReg, m128 is vexMode=0 & byte=0x0F; byte=0x10; XmmReg ... & m128 { XmmReg = movups(XmmReg, m128); }
##:MOVUPS XmmReg1, XmmReg2 is vexMode=0 & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = movups(XmmReg1, XmmReg2); }
#
#:MOVUPS XmmReg,Xmm2m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x10; XmmReg ... & Xmm2m128 { XmmReg = movups(XmmReg, Xmm2m128); }

:MOVUPS XmmReg, m128 is vexMode=0 & byte=0x0F; byte=0x10; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = m[0,32];
  XmmReg[32,32] = m[32,32];
  XmmReg[64,32] = m[64,32];
  XmmReg[96,32] = m[96,32];
}

:MOVUPS XmmReg1, XmmReg2 is vexMode=0 & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg2[96,32];
}

:MOVUPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x11; m128 & XmmReg ...
{
  m128 = XmmReg;
}

:MOVUPS XmmReg2, XmmReg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,32] = XmmReg1[0,32];
  XmmReg2[32,32] = XmmReg1[32,32];
  XmmReg2[64,32] = XmmReg1[64,32];
  XmmReg2[96,32] = XmmReg1[96,32];
}

:MULPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x59; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f* m[0,64];
  XmmReg[64,64] = XmmReg[64,64] f* m[64,64];
}

:MULPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f* XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] f* XmmReg2[64,64];
}

:MULPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x59; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f* m[0,32];
  XmmReg[32,32] = XmmReg[32,32] f* m[32,32];
  XmmReg[64,32] = XmmReg[64,32] f* m[64,32];
  XmmReg[96,32] = XmmReg[96,32] f* m[96,32];
}

:MULPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f* XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] f* XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] f* XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] f* XmmReg2[96,32];
}

:MULSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x59; m64 & XmmReg ...
{
  XmmReg[0,64] = XmmReg[0,64] f* m64;
}

:MULSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f* XmmReg2[0,64];
}

:MULSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x59; m32 & XmmReg ...
{
  XmmReg[0,32] = XmmReg[0,32] f* m32;
}

:MULSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f* XmmReg2[0,32];
}

:ORPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x56; XmmReg ... & m128 { XmmReg = XmmReg | m128; }
:ORPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x56; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; }

:ORPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x56; XmmReg ... & m128 { XmmReg = XmmReg | m128; }
:ORPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x56; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; }

# what about these ?????
define pcodeop packsswb;
:PACKSSWB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x63; mmxreg ... & m64 { mmxreg = packsswb(mmxreg, m64); }
:PACKSSWB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x63; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = packsswb(mmxreg1, mmxreg2); }

define pcodeop packssdw;
:PACKSSDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6B; mmxreg ... & m64 { mmxreg = packssdw(mmxreg, m64); }
:PACKSSDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6B; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = packssdw(mmxreg1, mmxreg2); }

:PACKSSWB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x63; XmmReg ... & m128 { XmmReg = packsswb(XmmReg, m128); }
:PACKSSWB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x63; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = packsswb(XmmReg1, XmmReg2); }
:PACKSSDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6B; XmmReg ... & m128 { XmmReg = packssdw(XmmReg, m128); }
:PACKSSDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6B; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = packssdw(XmmReg1, XmmReg2); }

# Saturate a signed word to an unsigned byte:
#   sword < 0    : ubyte = 0
#   sword > 0xff : ubyte = 0xff
#   otherwise    : ubyte = sword
macro sswub(sword, ubyte) {
  ubyte = (sword s> 0xff:2) * 0xff:1;
  ubyte = ubyte + (sword s> 0:2) * (sword s<= 0xff:2) * sword:1;
}
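
# Worked examples (each comparison yields a 1-byte boolean, so each product
# contributes either zero or the selected byte):
#   sword = 0x0123 (291)  -> 291 s> 255, so ubyte = 0xff
#   sword = 0xff80 (-128) -> both guards false, so ubyte = 0x00
#   sword = 0x007f (127)  -> 127 s> 0 and 127 s<= 255, so ubyte = sword:1 = 0x7f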

:PACKUSWB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x67; mmxreg ... & m64
{
  local dest_copy:8 = mmxreg;
  local src_copy:8 = m64;
  local ubyte:1 = 0;
  sswub(dest_copy[0,16],ubyte);
  mmxreg[0,8] = ubyte;
  sswub(dest_copy[16,16],ubyte);
  mmxreg[8,8] = ubyte;
  sswub(dest_copy[32,16],ubyte);
  mmxreg[16,8] = ubyte;
  sswub(dest_copy[48,16],ubyte);
  mmxreg[24,8] = ubyte;
  sswub(src_copy[0,16],ubyte);
  mmxreg[32,8] = ubyte;
  sswub(src_copy[16,16],ubyte);
  mmxreg[40,8] = ubyte;
  sswub(src_copy[32,16],ubyte);
  mmxreg[48,8] = ubyte;
  sswub(src_copy[48,16],ubyte);
  mmxreg[56,8] = ubyte;
}

:PACKUSWB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x67; mmxmod = 3 & mmxreg1 & mmxreg2
{
  local dest_copy:8 = mmxreg1;
  local src_copy:8 = mmxreg2;
  local ubyte:1 = 0;
  sswub(dest_copy[0,16],ubyte);
  mmxreg1[0,8] = ubyte;
  sswub(dest_copy[16,16],ubyte);
  mmxreg1[8,8] = ubyte;
  sswub(dest_copy[32,16],ubyte);
  mmxreg1[16,8] = ubyte;
  sswub(dest_copy[48,16],ubyte);
  mmxreg1[24,8] = ubyte;
  sswub(src_copy[0,16],ubyte);
  mmxreg1[32,8] = ubyte;
  sswub(src_copy[16,16],ubyte);
  mmxreg1[40,8] = ubyte;
  sswub(src_copy[32,16],ubyte);
  mmxreg1[48,8] = ubyte;
  sswub(src_copy[48,16],ubyte);
  mmxreg1[56,8] = ubyte;
}

:PACKUSWB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x67; XmmReg ... & m128
{
  local dest_copy:16 = XmmReg;
  local src_copy:16 = m128;
  local ubyte:1 = 0;
  sswub(dest_copy[0,16],ubyte);
  XmmReg[0,8] = ubyte;
  sswub(dest_copy[16,16],ubyte);
  XmmReg[8,8] = ubyte;
  sswub(dest_copy[32,16],ubyte);
  XmmReg[16,8] = ubyte;
  sswub(dest_copy[48,16],ubyte);
  XmmReg[24,8] = ubyte;
  sswub(dest_copy[64,16],ubyte);
  XmmReg[32,8] = ubyte;
  sswub(dest_copy[80,16],ubyte);
  XmmReg[40,8] = ubyte;
  sswub(dest_copy[96,16],ubyte);
  XmmReg[48,8] = ubyte;
  sswub(dest_copy[112,16],ubyte);
  XmmReg[56,8] = ubyte;

  sswub(src_copy[0,16],ubyte);
  XmmReg[64,8] = ubyte;
  sswub(src_copy[16,16],ubyte);
  XmmReg[72,8] = ubyte;
  sswub(src_copy[32,16],ubyte);
  XmmReg[80,8] = ubyte;
  sswub(src_copy[48,16],ubyte);
  XmmReg[88,8] = ubyte;
  sswub(src_copy[64,16],ubyte);
  XmmReg[96,8] = ubyte;
  sswub(src_copy[80,16],ubyte);
  XmmReg[104,8] = ubyte;
  sswub(src_copy[96,16],ubyte);
  XmmReg[112,8] = ubyte;
  sswub(src_copy[112,16],ubyte);
  XmmReg[120,8] = ubyte;
}

:PACKUSWB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x67; xmmmod = 3 & XmmReg1 & XmmReg2
{
  local dest_copy:16 = XmmReg1;
  local src_copy:16 = XmmReg2;
  local ubyte:1 = 0;
  sswub(dest_copy[0,16],ubyte);
  XmmReg1[0,8] = ubyte;
  sswub(dest_copy[16,16],ubyte);
  XmmReg1[8,8] = ubyte;
  sswub(dest_copy[32,16],ubyte);
  XmmReg1[16,8] = ubyte;
  sswub(dest_copy[48,16],ubyte);
  XmmReg1[24,8] = ubyte;
  sswub(dest_copy[64,16],ubyte);
  XmmReg1[32,8] = ubyte;
  sswub(dest_copy[80,16],ubyte);
  XmmReg1[40,8] = ubyte;
  sswub(dest_copy[96,16],ubyte);
  XmmReg1[48,8] = ubyte;
  sswub(dest_copy[112,16],ubyte);
  XmmReg1[56,8] = ubyte;

  sswub(src_copy[0,16],ubyte);
  XmmReg1[64,8] = ubyte;
  sswub(src_copy[16,16],ubyte);
  XmmReg1[72,8] = ubyte;
  sswub(src_copy[32,16],ubyte);
  XmmReg1[80,8] = ubyte;
  sswub(src_copy[48,16],ubyte);
  XmmReg1[88,8] = ubyte;
  sswub(src_copy[64,16],ubyte);
  XmmReg1[96,8] = ubyte;
  sswub(src_copy[80,16],ubyte);
  XmmReg1[104,8] = ubyte;
  sswub(src_copy[96,16],ubyte);
  XmmReg1[112,8] = ubyte;
  sswub(src_copy[112,16],ubyte);
  XmmReg1[120,8] = ubyte;
}

define pcodeop pabsb;
:PABSB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1c; mmxreg ... & m64 { mmxreg=pabsb(mmxreg,m64); }
:PABSB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1c; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pabsb(mmxreg1,mmxreg2); }
:PABSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1c; XmmReg ... & m128 { XmmReg=pabsb(XmmReg,m128); }
:PABSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1c; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pabsb(XmmReg1,XmmReg2); }

define pcodeop pabsw;
:PABSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1d; mmxreg ... & m64 { mmxreg=pabsw(mmxreg,m64); }
:PABSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1d; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pabsw(mmxreg1,mmxreg2); }
:PABSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1d; XmmReg ... & m128 { XmmReg=pabsw(XmmReg,m128); }
:PABSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1d; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pabsw(XmmReg1,XmmReg2); }

define pcodeop pabsd;
:PABSD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1e; mmxreg ... & m64 { mmxreg=pabsd(mmxreg,m64); }
:PABSD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1e; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pabsd(mmxreg1,mmxreg2); }
:PABSD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1e; XmmReg ... & m128 { XmmReg=pabsd(XmmReg,m128); }
:PABSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1e; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pabsd(XmmReg1,XmmReg2); }

:PADDB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFC; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,8] = mmxreg[0,8] + m[0,8];
  mmxreg[8,8] = mmxreg[8,8] + m[8,8];
  mmxreg[16,8] = mmxreg[16,8] + m[16,8];
  mmxreg[24,8] = mmxreg[24,8] + m[24,8];
  mmxreg[32,8] = mmxreg[32,8] + m[32,8];
  mmxreg[40,8] = mmxreg[40,8] + m[40,8];
  mmxreg[48,8] = mmxreg[48,8] + m[48,8];
  mmxreg[56,8] = mmxreg[56,8] + m[56,8];
}

:PADDB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFC; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = mmxreg1[0,8] + mmxreg2[0,8];
  mmxreg1[8,8] = mmxreg1[8,8] + mmxreg2[8,8];
  mmxreg1[16,8] = mmxreg1[16,8] + mmxreg2[16,8];
  mmxreg1[24,8] = mmxreg1[24,8] + mmxreg2[24,8];
  mmxreg1[32,8] = mmxreg1[32,8] + mmxreg2[32,8];
  mmxreg1[40,8] = mmxreg1[40,8] + mmxreg2[40,8];
  mmxreg1[48,8] = mmxreg1[48,8] + mmxreg2[48,8];
  mmxreg1[56,8] = mmxreg1[56,8] + mmxreg2[56,8];
}

:PADDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFD; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = mmxreg[0,16] + m[0,16];
  mmxreg[16,16] = mmxreg[16,16] + m[16,16];
  mmxreg[32,16] = mmxreg[32,16] + m[32,16];
  mmxreg[48,16] = mmxreg[48,16] + m[48,16];
}

:PADDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFD; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = mmxreg1[0,16] + mmxreg2[0,16];
  mmxreg1[16,16] = mmxreg1[16,16] + mmxreg2[16,16];
  mmxreg1[32,16] = mmxreg1[32,16] + mmxreg2[32,16];
  mmxreg1[48,16] = mmxreg1[48,16] + mmxreg2[48,16];
}

:PADDD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFE; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = mmxreg[0,32] + m[0,32];
  mmxreg[32,32] = mmxreg[32,32] + m[32,32];
}

:PADDD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFE; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,32] = mmxreg1[0,32] + mmxreg2[0,32];
  mmxreg1[32,32] = mmxreg1[32,32] + mmxreg2[32,32];
}

:PADDB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,8] = XmmReg[0,8] + m[0,8];
  XmmReg[8,8] = XmmReg[8,8] + m[8,8];
  XmmReg[16,8] = XmmReg[16,8] + m[16,8];
  XmmReg[24,8] = XmmReg[24,8] + m[24,8];
  XmmReg[32,8] = XmmReg[32,8] + m[32,8];
  XmmReg[40,8] = XmmReg[40,8] + m[40,8];
  XmmReg[48,8] = XmmReg[48,8] + m[48,8];
  XmmReg[56,8] = XmmReg[56,8] + m[56,8];
  XmmReg[64,8] = XmmReg[64,8] + m[64,8];
  XmmReg[72,8] = XmmReg[72,8] + m[72,8];
  XmmReg[80,8] = XmmReg[80,8] + m[80,8];
  XmmReg[88,8] = XmmReg[88,8] + m[88,8];
  XmmReg[96,8] = XmmReg[96,8] + m[96,8];
  XmmReg[104,8] = XmmReg[104,8] + m[104,8];
  XmmReg[112,8] = XmmReg[112,8] + m[112,8];
  XmmReg[120,8] = XmmReg[120,8] + m[120,8];
}

## example of bitfield solution
#:PADDB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; xmmmod = 3 & XmmReg1 & XmmReg2
#{
# XmmReg1[ 0,8] = XmmReg1[ 0,8] + XmmReg2[ 0,8];
# XmmReg1[ 8,8] = XmmReg1[ 8,8] + XmmReg2[ 8,8];
# XmmReg1[ 16,8] = XmmReg1[ 16,8] + XmmReg2[ 16,8];
# XmmReg1[ 24,8] = XmmReg1[ 24,8] + XmmReg2[ 24,8];
# XmmReg1[ 32,8] = XmmReg1[ 32,8] + XmmReg2[ 32,8];
# XmmReg1[ 40,8] = XmmReg1[ 40,8] + XmmReg2[ 40,8];
# XmmReg1[ 48,8] = XmmReg1[ 48,8] + XmmReg2[ 48,8];
# XmmReg1[ 56,8] = XmmReg1[ 56,8] + XmmReg2[ 56,8];
## XmmReg1[ 64,8] = XmmReg1[ 64,8] + XmmReg2[ 64,8];
## XmmReg1[ 72,8] = XmmReg1[ 72,8] + XmmReg2[ 72,8];
## XmmReg1[ 80,8] = XmmReg1[ 80,8] + XmmReg2[ 80,8];
## XmmReg1[ 88,8] = XmmReg1[ 88,8] + XmmReg2[ 88,8];
## XmmReg1[ 96,8] = XmmReg1[ 96,8] + XmmReg2[ 96,8];
## XmmReg1[104,8] = XmmReg1[104,8] + XmmReg2[104,8];
## XmmReg1[112,8] = XmmReg1[112,8] + XmmReg2[112,8];
## XmmReg1[120,8] = XmmReg1[120,8] + XmmReg2[120,8];
#}

# full set of XMM byte registers
:PADDB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = XmmReg1[0,8] + XmmReg2[0,8];
  XmmReg1[8,8] = XmmReg1[8,8] + XmmReg2[8,8];
  XmmReg1[16,8] = XmmReg1[16,8] + XmmReg2[16,8];
  XmmReg1[24,8] = XmmReg1[24,8] + XmmReg2[24,8];
  XmmReg1[32,8] = XmmReg1[32,8] + XmmReg2[32,8];
  XmmReg1[40,8] = XmmReg1[40,8] + XmmReg2[40,8];
  XmmReg1[48,8] = XmmReg1[48,8] + XmmReg2[48,8];
  XmmReg1[56,8] = XmmReg1[56,8] + XmmReg2[56,8];
  XmmReg1[64,8] = XmmReg1[64,8] + XmmReg2[64,8];
  XmmReg1[72,8] = XmmReg1[72,8] + XmmReg2[72,8];
  XmmReg1[80,8] = XmmReg1[80,8] + XmmReg2[80,8];
  XmmReg1[88,8] = XmmReg1[88,8] + XmmReg2[88,8];
  XmmReg1[96,8] = XmmReg1[96,8] + XmmReg2[96,8];
  XmmReg1[104,8] = XmmReg1[104,8] + XmmReg2[104,8];
  XmmReg1[112,8] = XmmReg1[112,8] + XmmReg2[112,8];
  XmmReg1[120,8] = XmmReg1[120,8] + XmmReg2[120,8];
}

:PADDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFD; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = XmmReg[0,16] + m[0,16];
  XmmReg[16,16] = XmmReg[16,16] + m[16,16];
  XmmReg[32,16] = XmmReg[32,16] + m[32,16];
  XmmReg[48,16] = XmmReg[48,16] + m[48,16];
  XmmReg[64,16] = XmmReg[64,16] + m[64,16];
  XmmReg[80,16] = XmmReg[80,16] + m[80,16];
  XmmReg[96,16] = XmmReg[96,16] + m[96,16];
  XmmReg[112,16] = XmmReg[112,16] + m[112,16];
}

:PADDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFD; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = XmmReg1[0,16] + XmmReg2[0,16];
  XmmReg1[16,16] = XmmReg1[16,16] + XmmReg2[16,16];
  XmmReg1[32,16] = XmmReg1[32,16] + XmmReg2[32,16];
  XmmReg1[48,16] = XmmReg1[48,16] + XmmReg2[48,16];
  XmmReg1[64,16] = XmmReg1[64,16] + XmmReg2[64,16];
  XmmReg1[80,16] = XmmReg1[80,16] + XmmReg2[80,16];
  XmmReg1[96,16] = XmmReg1[96,16] + XmmReg2[96,16];
  XmmReg1[112,16] = XmmReg1[112,16] + XmmReg2[112,16];
}

:PADDD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFE; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] + m[0,32];
  XmmReg[32,32] = XmmReg[32,32] + m[32,32];
  XmmReg[64,32] = XmmReg[64,32] + m[64,32];
  XmmReg[96,32] = XmmReg[96,32] + m[96,32];
}

:PADDD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFE; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] + XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] + XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] + XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] + XmmReg2[96,32];
}

:PADDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD4; mmxreg ... & m64
{
  mmxreg = mmxreg + m64;
}

:PADDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD4; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1 = mmxreg1 + mmxreg2;
}

:PADDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD4; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] + m[0,64];
  XmmReg[64,64] = XmmReg[64,64] + m[64,64];
}

:PADDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD4; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] + XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] + XmmReg2[64,64];
}

define pcodeop paddsb;
:PADDSB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEC; mmxreg1 ... & mmxreg2_m64 { mmxreg1 = paddsb(mmxreg1, mmxreg2_m64); }

define pcodeop paddsw;
:PADDSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xED; mmxreg1 ... & mmxreg2_m64 { mmxreg1 = paddsw(mmxreg1, mmxreg2_m64); }

:PADDSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEC; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = paddsb(XmmReg1, XmmReg2_m128); }
:PADDSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xED; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = paddsw(XmmReg1, XmmReg2_m128); }

define pcodeop paddusb;
:PADDUSB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDC; mmxreg1 ... & mmxreg2_m64 { mmxreg1 = paddusb(mmxreg1, mmxreg2_m64); }

define pcodeop paddusw;
:PADDUSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDD; mmxreg1 ... & mmxreg2_m64 { mmxreg1 = paddusw(mmxreg1, mmxreg2_m64); }

:PADDUSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDC; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = paddusb(XmmReg1, XmmReg2_m128); }
:PADDUSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDD; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = paddusw(XmmReg1, XmmReg2_m128); }
:PALIGNR mmxreg, m64, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x3A; byte=0x0F; m64 & mmxreg ...; imm8
{
  temp:16 = ( ( zext(mmxreg) << 64 ) | zext( m64 ) ) >> ( imm8 * 8 );
  mmxreg = temp:8;
}

:PALIGNR mmxreg1, mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x3A; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2; imm8
{
  temp:16 = ( ( zext(mmxreg1) << 64 ) | zext( mmxreg2 ) ) >> ( imm8 * 8 );
  mmxreg1 = temp:8;
}

:PALIGNR XmmReg1, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0F; m128 & XmmReg1 ...; imm8
{
  temp:32 = ( ( zext(XmmReg1) << 128 ) | zext( m128 ) ) >> ( imm8 * 8 );
  XmmReg1 = temp:16;
}

:PALIGNR XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0F; xmmmod=3 & XmmReg1 & XmmReg2; imm8
{
  temp:32 = ( ( zext(XmmReg1) << 128 ) | zext( XmmReg2 ) ) >> ( imm8 * 8 );
  XmmReg1 = temp:16;
}
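
# PALIGNR concatenates destination:source into one double-width value, shifts
# right by imm8 bytes, and keeps the low half. For the XMM form with imm8 = 4,
# for example, the result is source bytes 4..15 followed by destination
# bytes 0..3.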

:PAND mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDB; mmxreg ... & m64 { mmxreg = mmxreg & m64; }
:PAND mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 & mmxreg2; }
:PAND XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDB; XmmReg ... & m128 { XmmReg = XmmReg & m128; }
:PAND XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDB; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 & XmmReg2; }

:PANDN mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDF; mmxreg ... & m64 { mmxreg = ~mmxreg & m64; }
:PANDN mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDF; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = ~mmxreg1 & mmxreg2; }
:PANDN XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDF; XmmReg ... & m128 { XmmReg = ~XmmReg & m128; }
:PANDN XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDF; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = ~XmmReg1 & XmmReg2; }
define pcodeop pavgb;
:PAVGB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE0; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,8] = pavgb(mmxreg[0,8], m[0,8]);
  mmxreg[8,8] = pavgb(mmxreg[8,8], m[8,8]);
  mmxreg[16,8] = pavgb(mmxreg[16,8], m[16,8]);
  mmxreg[24,8] = pavgb(mmxreg[24,8], m[24,8]);
  mmxreg[32,8] = pavgb(mmxreg[32,8], m[32,8]);
  mmxreg[40,8] = pavgb(mmxreg[40,8], m[40,8]);
  mmxreg[48,8] = pavgb(mmxreg[48,8], m[48,8]);
  mmxreg[56,8] = pavgb(mmxreg[56,8], m[56,8]);
}

:PAVGB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE0; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = pavgb(mmxreg1[0,8], mmxreg2[0,8]);
  mmxreg1[8,8] = pavgb(mmxreg1[8,8], mmxreg2[8,8]);
  mmxreg1[16,8] = pavgb(mmxreg1[16,8], mmxreg2[16,8]);
  mmxreg1[24,8] = pavgb(mmxreg1[24,8], mmxreg2[24,8]);
  mmxreg1[32,8] = pavgb(mmxreg1[32,8], mmxreg2[32,8]);
  mmxreg1[40,8] = pavgb(mmxreg1[40,8], mmxreg2[40,8]);
  mmxreg1[48,8] = pavgb(mmxreg1[48,8], mmxreg2[48,8]);
  mmxreg1[56,8] = pavgb(mmxreg1[56,8], mmxreg2[56,8]);
}

define pcodeop pavgw;
:PAVGW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE3; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = pavgw(mmxreg[0,16], m[0,16]);
  mmxreg[16,16] = pavgw(mmxreg[16,16], m[16,16]);
  mmxreg[32,16] = pavgw(mmxreg[32,16], m[32,16]);
  mmxreg[48,16] = pavgw(mmxreg[48,16], m[48,16]);
}

:PAVGW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE3; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = pavgw(mmxreg1[0,16], mmxreg2[0,16]);
  mmxreg1[16,16] = pavgw(mmxreg1[16,16], mmxreg2[16,16]);
  mmxreg1[32,16] = pavgw(mmxreg1[32,16], mmxreg2[32,16]);
  mmxreg1[48,16] = pavgw(mmxreg1[48,16], mmxreg2[48,16]);
}

:PAVGB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE0; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,8] = pavgb(XmmReg[0,8], m[0,8]);
  XmmReg[8,8] = pavgb(XmmReg[8,8], m[8,8]);
  XmmReg[16,8] = pavgb(XmmReg[16,8], m[16,8]);
  XmmReg[24,8] = pavgb(XmmReg[24,8], m[24,8]);
  XmmReg[32,8] = pavgb(XmmReg[32,8], m[32,8]);
  XmmReg[40,8] = pavgb(XmmReg[40,8], m[40,8]);
  XmmReg[48,8] = pavgb(XmmReg[48,8], m[48,8]);
  XmmReg[56,8] = pavgb(XmmReg[56,8], m[56,8]);
  XmmReg[64,8] = pavgb(XmmReg[64,8], m[64,8]);
  XmmReg[72,8] = pavgb(XmmReg[72,8], m[72,8]);
  XmmReg[80,8] = pavgb(XmmReg[80,8], m[80,8]);
  XmmReg[88,8] = pavgb(XmmReg[88,8], m[88,8]);
  XmmReg[96,8] = pavgb(XmmReg[96,8], m[96,8]);
  XmmReg[104,8] = pavgb(XmmReg[104,8], m[104,8]);
  XmmReg[112,8] = pavgb(XmmReg[112,8], m[112,8]);
  XmmReg[120,8] = pavgb(XmmReg[120,8], m[120,8]);
}

# full set of XMM byte registers
:PAVGB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE0; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = pavgb(XmmReg1[0,8], XmmReg2[0,8]);
  XmmReg1[8,8] = pavgb(XmmReg1[8,8], XmmReg2[8,8]);
  XmmReg1[16,8] = pavgb(XmmReg1[16,8], XmmReg2[16,8]);
  XmmReg1[24,8] = pavgb(XmmReg1[24,8], XmmReg2[24,8]);
  XmmReg1[32,8] = pavgb(XmmReg1[32,8], XmmReg2[32,8]);
  XmmReg1[40,8] = pavgb(XmmReg1[40,8], XmmReg2[40,8]);
  XmmReg1[48,8] = pavgb(XmmReg1[48,8], XmmReg2[48,8]);
  XmmReg1[56,8] = pavgb(XmmReg1[56,8], XmmReg2[56,8]);
  XmmReg1[64,8] = pavgb(XmmReg1[64,8], XmmReg2[64,8]);
  XmmReg1[72,8] = pavgb(XmmReg1[72,8], XmmReg2[72,8]);
  XmmReg1[80,8] = pavgb(XmmReg1[80,8], XmmReg2[80,8]);
  XmmReg1[88,8] = pavgb(XmmReg1[88,8], XmmReg2[88,8]);
  XmmReg1[96,8] = pavgb(XmmReg1[96,8], XmmReg2[96,8]);
  XmmReg1[104,8] = pavgb(XmmReg1[104,8], XmmReg2[104,8]);
  XmmReg1[112,8] = pavgb(XmmReg1[112,8], XmmReg2[112,8]);
  XmmReg1[120,8] = pavgb(XmmReg1[120,8], XmmReg2[120,8]);
}

:PAVGW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE3; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = pavgw(XmmReg[0,16], m[0,16]);
  XmmReg[16,16] = pavgw(XmmReg[16,16], m[16,16]);
  XmmReg[32,16] = pavgw(XmmReg[32,16], m[32,16]);
  XmmReg[48,16] = pavgw(XmmReg[48,16], m[48,16]);
  XmmReg[64,16] = pavgw(XmmReg[64,16], m[64,16]);
  XmmReg[80,16] = pavgw(XmmReg[80,16], m[80,16]);
  XmmReg[96,16] = pavgw(XmmReg[96,16], m[96,16]);
  XmmReg[112,16] = pavgw(XmmReg[112,16], m[112,16]);
}

:PAVGW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE3; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = pavgw(XmmReg1[0,16], XmmReg2[0,16]);
  XmmReg1[16,16] = pavgw(XmmReg1[16,16], XmmReg2[16,16]);
  XmmReg1[32,16] = pavgw(XmmReg1[32,16], XmmReg2[32,16]);
  XmmReg1[48,16] = pavgw(XmmReg1[48,16], XmmReg2[48,16]);
  XmmReg1[64,16] = pavgw(XmmReg1[64,16], XmmReg2[64,16]);
  XmmReg1[80,16] = pavgw(XmmReg1[80,16], XmmReg2[80,16]);
  XmmReg1[96,16] = pavgw(XmmReg1[96,16], XmmReg2[96,16]);
  XmmReg1[112,16] = pavgw(XmmReg1[112,16], XmmReg2[112,16]);
}
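
# In the PCMPEQ*/PCMPGT* semantics below, each lane comparison yields a 1-byte
# boolean (0 or 1) which is multiplied (zero-extended first for 16- and 32-bit
# lanes) by the all-ones constant, producing the all-ones/all-zeros lane mask.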

:PCMPEQB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x74; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,8] = (mmxreg[0,8] == m[0,8]) * 0xFF;
  mmxreg[8,8] = (mmxreg[8,8] == m[8,8]) * 0xFF;
  mmxreg[16,8] = (mmxreg[16,8] == m[16,8]) * 0xFF;
  mmxreg[24,8] = (mmxreg[24,8] == m[24,8]) * 0xFF;
  mmxreg[32,8] = (mmxreg[32,8] == m[32,8]) * 0xFF;
  mmxreg[40,8] = (mmxreg[40,8] == m[40,8]) * 0xFF;
  mmxreg[48,8] = (mmxreg[48,8] == m[48,8]) * 0xFF;
  mmxreg[56,8] = (mmxreg[56,8] == m[56,8]) * 0xFF;
}

:PCMPEQB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x74; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = (mmxreg1[0,8] == mmxreg2[0,8]) * 0xFF;
  mmxreg1[8,8] = (mmxreg1[8,8] == mmxreg2[8,8]) * 0xFF;
  mmxreg1[16,8] = (mmxreg1[16,8] == mmxreg2[16,8]) * 0xFF;
  mmxreg1[24,8] = (mmxreg1[24,8] == mmxreg2[24,8]) * 0xFF;
  mmxreg1[32,8] = (mmxreg1[32,8] == mmxreg2[32,8]) * 0xFF;
  mmxreg1[40,8] = (mmxreg1[40,8] == mmxreg2[40,8]) * 0xFF;
  mmxreg1[48,8] = (mmxreg1[48,8] == mmxreg2[48,8]) * 0xFF;
  mmxreg1[56,8] = (mmxreg1[56,8] == mmxreg2[56,8]) * 0xFF;
}

:PCMPEQW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x75; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = zext(mmxreg[0,16] == m[0,16]) * 0xFFFF;
  mmxreg[16,16] = zext(mmxreg[16,16] == m[16,16]) * 0xFFFF;
  mmxreg[32,16] = zext(mmxreg[32,16] == m[32,16]) * 0xFFFF;
  mmxreg[48,16] = zext(mmxreg[48,16] == m[48,16]) * 0xFFFF;
}

:PCMPEQW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x75; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = zext(mmxreg1[0,16] == mmxreg2[0,16]) * 0xFFFF;
  mmxreg1[16,16] = zext(mmxreg1[16,16] == mmxreg2[16,16]) * 0xFFFF;
  mmxreg1[32,16] = zext(mmxreg1[32,16] == mmxreg2[32,16]) * 0xFFFF;
  mmxreg1[48,16] = zext(mmxreg1[48,16] == mmxreg2[48,16]) * 0xFFFF;
}

:PCMPEQD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x76; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = zext(mmxreg[0,32] == m[0,32]) * 0xFFFFFFFF;
  mmxreg[32,32] = zext(mmxreg[32,32] == m[32,32]) * 0xFFFFFFFF;
}

:PCMPEQD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x76; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,32] = zext(mmxreg1[0,32] == mmxreg2[0,32]) * 0xFFFFFFFF;
  mmxreg1[32,32] = zext(mmxreg1[32,32] == mmxreg2[32,32]) * 0xFFFFFFFF;
}

:PCMPEQB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x74; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,8] = (XmmReg[0,8] == m[0,8]) * 0xFF;
  XmmReg[8,8] = (XmmReg[8,8] == m[8,8]) * 0xFF;
  XmmReg[16,8] = (XmmReg[16,8] == m[16,8]) * 0xFF;
  XmmReg[24,8] = (XmmReg[24,8] == m[24,8]) * 0xFF;
  XmmReg[32,8] = (XmmReg[32,8] == m[32,8]) * 0xFF;
  XmmReg[40,8] = (XmmReg[40,8] == m[40,8]) * 0xFF;
  XmmReg[48,8] = (XmmReg[48,8] == m[48,8]) * 0xFF;
  XmmReg[56,8] = (XmmReg[56,8] == m[56,8]) * 0xFF;
  XmmReg[64,8] = (XmmReg[64,8] == m[64,8]) * 0xFF;
  XmmReg[72,8] = (XmmReg[72,8] == m[72,8]) * 0xFF;
  XmmReg[80,8] = (XmmReg[80,8] == m[80,8]) * 0xFF;
  XmmReg[88,8] = (XmmReg[88,8] == m[88,8]) * 0xFF;
  XmmReg[96,8] = (XmmReg[96,8] == m[96,8]) * 0xFF;
  XmmReg[104,8] = (XmmReg[104,8] == m[104,8]) * 0xFF;
  XmmReg[112,8] = (XmmReg[112,8] == m[112,8]) * 0xFF;
  XmmReg[120,8] = (XmmReg[120,8] == m[120,8]) * 0xFF;
}

# full set of XMM byte registers
:PCMPEQB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x74; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = (XmmReg1[0,8] == XmmReg2[0,8]) * 0xFF;
  XmmReg1[8,8] = (XmmReg1[8,8] == XmmReg2[8,8]) * 0xFF;
  XmmReg1[16,8] = (XmmReg1[16,8] == XmmReg2[16,8]) * 0xFF;
  XmmReg1[24,8] = (XmmReg1[24,8] == XmmReg2[24,8]) * 0xFF;
  XmmReg1[32,8] = (XmmReg1[32,8] == XmmReg2[32,8]) * 0xFF;
  XmmReg1[40,8] = (XmmReg1[40,8] == XmmReg2[40,8]) * 0xFF;
  XmmReg1[48,8] = (XmmReg1[48,8] == XmmReg2[48,8]) * 0xFF;
  XmmReg1[56,8] = (XmmReg1[56,8] == XmmReg2[56,8]) * 0xFF;
  XmmReg1[64,8] = (XmmReg1[64,8] == XmmReg2[64,8]) * 0xFF;
  XmmReg1[72,8] = (XmmReg1[72,8] == XmmReg2[72,8]) * 0xFF;
  XmmReg1[80,8] = (XmmReg1[80,8] == XmmReg2[80,8]) * 0xFF;
  XmmReg1[88,8] = (XmmReg1[88,8] == XmmReg2[88,8]) * 0xFF;
  XmmReg1[96,8] = (XmmReg1[96,8] == XmmReg2[96,8]) * 0xFF;
  XmmReg1[104,8] = (XmmReg1[104,8] == XmmReg2[104,8]) * 0xFF;
  XmmReg1[112,8] = (XmmReg1[112,8] == XmmReg2[112,8]) * 0xFF;
  XmmReg1[120,8] = (XmmReg1[120,8] == XmmReg2[120,8]) * 0xFF;
}

:PCMPEQW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x75; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = zext(XmmReg[0,16] == m[0,16]) * 0xFFFF;
  XmmReg[16,16] = zext(XmmReg[16,16] == m[16,16]) * 0xFFFF;
  XmmReg[32,16] = zext(XmmReg[32,16] == m[32,16]) * 0xFFFF;
  XmmReg[48,16] = zext(XmmReg[48,16] == m[48,16]) * 0xFFFF;
  XmmReg[64,16] = zext(XmmReg[64,16] == m[64,16]) * 0xFFFF;
  XmmReg[80,16] = zext(XmmReg[80,16] == m[80,16]) * 0xFFFF;
  XmmReg[96,16] = zext(XmmReg[96,16] == m[96,16]) * 0xFFFF;
  XmmReg[112,16] = zext(XmmReg[112,16] == m[112,16]) * 0xFFFF;
}

:PCMPEQW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x75; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = zext(XmmReg1[0,16] == XmmReg2[0,16]) * 0xFFFF;
  XmmReg1[16,16] = zext(XmmReg1[16,16] == XmmReg2[16,16]) * 0xFFFF;
  XmmReg1[32,16] = zext(XmmReg1[32,16] == XmmReg2[32,16]) * 0xFFFF;
  XmmReg1[48,16] = zext(XmmReg1[48,16] == XmmReg2[48,16]) * 0xFFFF;
  XmmReg1[64,16] = zext(XmmReg1[64,16] == XmmReg2[64,16]) * 0xFFFF;
  XmmReg1[80,16] = zext(XmmReg1[80,16] == XmmReg2[80,16]) * 0xFFFF;
  XmmReg1[96,16] = zext(XmmReg1[96,16] == XmmReg2[96,16]) * 0xFFFF;
  XmmReg1[112,16] = zext(XmmReg1[112,16] == XmmReg2[112,16]) * 0xFFFF;
}

:PCMPEQD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x76; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = zext(XmmReg[0,32] == m[0,32]) * 0xFFFFFFFF;
  XmmReg[32,32] = zext(XmmReg[32,32] == m[32,32]) * 0xFFFFFFFF;
  XmmReg[64,32] = zext(XmmReg[64,32] == m[64,32]) * 0xFFFFFFFF;
  XmmReg[96,32] = zext(XmmReg[96,32] == m[96,32]) * 0xFFFFFFFF;
}

:PCMPEQD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x76; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = zext(XmmReg1[0,32] == XmmReg2[0,32]) * 0xFFFFFFFF;
  XmmReg1[32,32] = zext(XmmReg1[32,32] == XmmReg2[32,32]) * 0xFFFFFFFF;
  XmmReg1[64,32] = zext(XmmReg1[64,32] == XmmReg2[64,32]) * 0xFFFFFFFF;
  XmmReg1[96,32] = zext(XmmReg1[96,32] == XmmReg2[96,32]) * 0xFFFFFFFF;
}
|
|
|
|
:PCMPGTB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x64; mmxreg ... & m64
{
local m:8 = m64;
mmxreg[0,8] = (mmxreg[0,8] s> m[0,8]) * 0xFF;
mmxreg[8,8] = (mmxreg[8,8] s> m[8,8]) * 0xFF;
mmxreg[16,8] = (mmxreg[16,8] s> m[16,8]) * 0xFF;
mmxreg[24,8] = (mmxreg[24,8] s> m[24,8]) * 0xFF;
mmxreg[32,8] = (mmxreg[32,8] s> m[32,8]) * 0xFF;
mmxreg[40,8] = (mmxreg[40,8] s> m[40,8]) * 0xFF;
mmxreg[48,8] = (mmxreg[48,8] s> m[48,8]) * 0xFF;
mmxreg[56,8] = (mmxreg[56,8] s> m[56,8]) * 0xFF;
}

:PCMPGTB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x64; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1[0,8] = (mmxreg1[0,8] s> mmxreg2[0,8]) * 0xFF;
mmxreg1[8,8] = (mmxreg1[8,8] s> mmxreg2[8,8]) * 0xFF;
mmxreg1[16,8] = (mmxreg1[16,8] s> mmxreg2[16,8]) * 0xFF;
mmxreg1[24,8] = (mmxreg1[24,8] s> mmxreg2[24,8]) * 0xFF;
mmxreg1[32,8] = (mmxreg1[32,8] s> mmxreg2[32,8]) * 0xFF;
mmxreg1[40,8] = (mmxreg1[40,8] s> mmxreg2[40,8]) * 0xFF;
mmxreg1[48,8] = (mmxreg1[48,8] s> mmxreg2[48,8]) * 0xFF;
mmxreg1[56,8] = (mmxreg1[56,8] s> mmxreg2[56,8]) * 0xFF;
}

:PCMPGTW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x65; mmxreg ... & m64
{
local m:8 = m64;
mmxreg[0,16] = zext(mmxreg[0,16] s> m[0,16]) * 0xFFFF;
mmxreg[16,16] = zext(mmxreg[16,16] s> m[16,16]) * 0xFFFF;
mmxreg[32,16] = zext(mmxreg[32,16] s> m[32,16]) * 0xFFFF;
mmxreg[48,16] = zext(mmxreg[48,16] s> m[48,16]) * 0xFFFF;
}

:PCMPGTW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x65; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1[0,16] = zext(mmxreg1[0,16] s> mmxreg2[0,16]) * 0xFFFF;
mmxreg1[16,16] = zext(mmxreg1[16,16] s> mmxreg2[16,16]) * 0xFFFF;
mmxreg1[32,16] = zext(mmxreg1[32,16] s> mmxreg2[32,16]) * 0xFFFF;
mmxreg1[48,16] = zext(mmxreg1[48,16] s> mmxreg2[48,16]) * 0xFFFF;
}

:PCMPGTD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x66; mmxreg ... & m64
{
local m:8 = m64;
mmxreg[0,32] = zext(mmxreg[0,32] s> m[0,32]) * 0xFFFFFFFF;
mmxreg[32,32] = zext(mmxreg[32,32] s> m[32,32]) * 0xFFFFFFFF;
}

:PCMPGTD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x66; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1[0,32] = zext(mmxreg1[0,32] s> mmxreg2[0,32]) * 0xFFFFFFFF;
mmxreg1[32,32] = zext(mmxreg1[32,32] s> mmxreg2[32,32]) * 0xFFFFFFFF;
}

:PCMPGTB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x64; m128 & XmmReg ...
{
local m:16 = m128;
XmmReg[0,8] = (XmmReg[0,8] s> m[0,8]) * 0xFF;
XmmReg[8,8] = (XmmReg[8,8] s> m[8,8]) * 0xFF;
XmmReg[16,8] = (XmmReg[16,8] s> m[16,8]) * 0xFF;
XmmReg[24,8] = (XmmReg[24,8] s> m[24,8]) * 0xFF;
XmmReg[32,8] = (XmmReg[32,8] s> m[32,8]) * 0xFF;
XmmReg[40,8] = (XmmReg[40,8] s> m[40,8]) * 0xFF;
XmmReg[48,8] = (XmmReg[48,8] s> m[48,8]) * 0xFF;
XmmReg[56,8] = (XmmReg[56,8] s> m[56,8]) * 0xFF;
XmmReg[64,8] = (XmmReg[64,8] s> m[64,8]) * 0xFF;
XmmReg[72,8] = (XmmReg[72,8] s> m[72,8]) * 0xFF;
XmmReg[80,8] = (XmmReg[80,8] s> m[80,8]) * 0xFF;
XmmReg[88,8] = (XmmReg[88,8] s> m[88,8]) * 0xFF;
XmmReg[96,8] = (XmmReg[96,8] s> m[96,8]) * 0xFF;
XmmReg[104,8] = (XmmReg[104,8] s> m[104,8]) * 0xFF;
XmmReg[112,8] = (XmmReg[112,8] s> m[112,8]) * 0xFF;
XmmReg[120,8] = (XmmReg[120,8] s> m[120,8]) * 0xFF;
}

# full set of XMM byte registers
:PCMPGTB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x64; xmmmod = 3 & XmmReg1 & XmmReg2
{
XmmReg1[0,8] = (XmmReg1[0,8] s> XmmReg2[0,8]) * 0xFF;
XmmReg1[8,8] = (XmmReg1[8,8] s> XmmReg2[8,8]) * 0xFF;
XmmReg1[16,8] = (XmmReg1[16,8] s> XmmReg2[16,8]) * 0xFF;
XmmReg1[24,8] = (XmmReg1[24,8] s> XmmReg2[24,8]) * 0xFF;
XmmReg1[32,8] = (XmmReg1[32,8] s> XmmReg2[32,8]) * 0xFF;
XmmReg1[40,8] = (XmmReg1[40,8] s> XmmReg2[40,8]) * 0xFF;
XmmReg1[48,8] = (XmmReg1[48,8] s> XmmReg2[48,8]) * 0xFF;
XmmReg1[56,8] = (XmmReg1[56,8] s> XmmReg2[56,8]) * 0xFF;
XmmReg1[64,8] = (XmmReg1[64,8] s> XmmReg2[64,8]) * 0xFF;
XmmReg1[72,8] = (XmmReg1[72,8] s> XmmReg2[72,8]) * 0xFF;
XmmReg1[80,8] = (XmmReg1[80,8] s> XmmReg2[80,8]) * 0xFF;
XmmReg1[88,8] = (XmmReg1[88,8] s> XmmReg2[88,8]) * 0xFF;
XmmReg1[96,8] = (XmmReg1[96,8] s> XmmReg2[96,8]) * 0xFF;
XmmReg1[104,8] = (XmmReg1[104,8] s> XmmReg2[104,8]) * 0xFF;
XmmReg1[112,8] = (XmmReg1[112,8] s> XmmReg2[112,8]) * 0xFF;
XmmReg1[120,8] = (XmmReg1[120,8] s> XmmReg2[120,8]) * 0xFF;
}

:PCMPGTW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x65; m128 & XmmReg ...
{
local m:16 = m128;
XmmReg[0,16] = zext(XmmReg[0,16] s> m[0,16]) * 0xFFFF;
XmmReg[16,16] = zext(XmmReg[16,16] s> m[16,16]) * 0xFFFF;
XmmReg[32,16] = zext(XmmReg[32,16] s> m[32,16]) * 0xFFFF;
XmmReg[48,16] = zext(XmmReg[48,16] s> m[48,16]) * 0xFFFF;
XmmReg[64,16] = zext(XmmReg[64,16] s> m[64,16]) * 0xFFFF;
XmmReg[80,16] = zext(XmmReg[80,16] s> m[80,16]) * 0xFFFF;
XmmReg[96,16] = zext(XmmReg[96,16] s> m[96,16]) * 0xFFFF;
XmmReg[112,16] = zext(XmmReg[112,16] s> m[112,16]) * 0xFFFF;
}

:PCMPGTW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x65; xmmmod = 3 & XmmReg1 & XmmReg2
{
XmmReg1[0,16] = zext(XmmReg1[0,16] s> XmmReg2[0,16]) * 0xFFFF;
XmmReg1[16,16] = zext(XmmReg1[16,16] s> XmmReg2[16,16]) * 0xFFFF;
XmmReg1[32,16] = zext(XmmReg1[32,16] s> XmmReg2[32,16]) * 0xFFFF;
XmmReg1[48,16] = zext(XmmReg1[48,16] s> XmmReg2[48,16]) * 0xFFFF;
XmmReg1[64,16] = zext(XmmReg1[64,16] s> XmmReg2[64,16]) * 0xFFFF;
XmmReg1[80,16] = zext(XmmReg1[80,16] s> XmmReg2[80,16]) * 0xFFFF;
XmmReg1[96,16] = zext(XmmReg1[96,16] s> XmmReg2[96,16]) * 0xFFFF;
XmmReg1[112,16] = zext(XmmReg1[112,16] s> XmmReg2[112,16]) * 0xFFFF;
}

:PCMPGTD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x66; m128 & XmmReg ...
{
local m:16 = m128;
XmmReg[0,32] = zext(XmmReg[0,32] s> m[0,32]) * 0xFFFFFFFF;
XmmReg[32,32] = zext(XmmReg[32,32] s> m[32,32]) * 0xFFFFFFFF;
XmmReg[64,32] = zext(XmmReg[64,32] s> m[64,32]) * 0xFFFFFFFF;
XmmReg[96,32] = zext(XmmReg[96,32] s> m[96,32]) * 0xFFFFFFFF;
}

:PCMPGTD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x66; xmmmod = 3 & XmmReg1 & XmmReg2
{
XmmReg1[0,32] = zext(XmmReg1[0,32] s> XmmReg2[0,32]) * 0xFFFFFFFF;
XmmReg1[32,32] = zext(XmmReg1[32,32] s> XmmReg2[32,32]) * 0xFFFFFFFF;
XmmReg1[64,32] = zext(XmmReg1[64,32] s> XmmReg2[64,32]) * 0xFFFFFFFF;
XmmReg1[96,32] = zext(XmmReg1[96,32] s> XmmReg2[96,32]) * 0xFFFFFFFF;
}

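# PEXTRW selects one 16-bit lane: the source is shifted right by 16 times the
# lane index taken from the low bits of imm8, and the low word of the result
# is zero-extended (or stored directly for the memory destination).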
:PEXTRW Reg32, mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC5; Reg32 & mmxreg2; imm8
{
temp:8 = mmxreg2 >> ( (imm8 & 0x03) * 16 );
Reg32 = zext(temp:2);
}

:PEXTRW Reg32, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC5; Reg32 & XmmReg2 & check_Reg32_dest; imm8
{
local shift:1 = (imm8 & 0x7) * 16:1;
local low:1 = shift < 64:1;
local temp:8;
conditionalAssign(temp,low,XmmReg2[0,64] >> shift, XmmReg2[64,64] >> (shift-64));
Reg32 = zext(temp:2);
build check_Reg32_dest;
}

#break PEXTRW with reg/mem dest into two constructors to handle zext in register case
:PEXTRW Rmr32, XmmReg1, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x15; (mod = 3 & Rmr32 & check_Rmr32_dest) & XmmReg1 ; imm8
{
local shift:1 = (imm8 & 0x7) * 16:1;
local low:1 = shift < 64:1;
local temp:8;
conditionalAssign(temp,low,XmmReg1[0,64] >> shift,XmmReg1[64,64] >> (shift - 64));
Rmr32 = zext(temp:2);
build check_Rmr32_dest;
}

:PEXTRW m16, XmmReg1, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x15; XmmReg1 ... & m16; imm8
{
local shift:1 = (imm8 & 0x7) * 16:1;
local low:1 = shift < 64:1;
local temp:8;
conditionalAssign(temp,low,XmmReg1[0,64] >> shift,XmmReg1[64,64] >> (shift - 64));
m16 = temp:2;
}

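# The horizontal add/subtract family below is modeled with opaque pcodeops
# rather than explicit per-lane semantics.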
define pcodeop phaddd;
:PHADDD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x02; mmxreg ... & m64 { mmxreg=phaddd(mmxreg,m64); }
:PHADDD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x02; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phaddd(mmxreg1,mmxreg2); }
:PHADDD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x02; XmmReg ... & m128 { XmmReg=phaddd(XmmReg,m128); }
:PHADDD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x02; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phaddd(XmmReg1,XmmReg2); }

define pcodeop phaddw;
:PHADDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x01; mmxreg ... & m64 { mmxreg=phaddw(mmxreg,m64); }
:PHADDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x01; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phaddw(mmxreg1,mmxreg2); }
:PHADDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x01; XmmReg ... & m128 { XmmReg=phaddw(XmmReg,m128); }
:PHADDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x01; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phaddw(XmmReg1,XmmReg2); }

define pcodeop phaddsw;
:PHADDSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x03; mmxreg ... & m64 { mmxreg=phaddsw(mmxreg,m64); }
:PHADDSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x03; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phaddsw(mmxreg1,mmxreg2); }
:PHADDSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x03; XmmReg ... & m128 { XmmReg=phaddsw(XmmReg,m128); }
:PHADDSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x03; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phaddsw(XmmReg1,XmmReg2); }

define pcodeop phsubd;
:PHSUBD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x06; mmxreg ... & m64 { mmxreg=phsubd(mmxreg,m64); }
:PHSUBD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x06; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phsubd(mmxreg1,mmxreg2); }
:PHSUBD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x06; XmmReg ... & m128 { XmmReg=phsubd(XmmReg,m128); }
:PHSUBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x06; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phsubd(XmmReg1,XmmReg2); }

define pcodeop phsubw;
:PHSUBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x05; mmxreg ... & m64 { mmxreg=phsubw(mmxreg,m64); }
:PHSUBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x05; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phsubw(mmxreg1,mmxreg2); }
:PHSUBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x05; XmmReg ... & m128 { XmmReg=phsubw(XmmReg,m128); }
:PHSUBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x05; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phsubw(XmmReg1,XmmReg2); }

define pcodeop phsubsw;
:PHSUBSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x07; mmxreg ... & m64 { mmxreg=phsubsw(mmxreg,m64); }
:PHSUBSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x07; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phsubsw(mmxreg1,mmxreg2); }
:PHSUBSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x07; XmmReg ... & m128 { XmmReg=phsubsw(XmmReg,m128); }
:PHSUBSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x07; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phsubsw(XmmReg1,XmmReg2); }

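# PINSRW merges a 16-bit value into the lane selected by imm8: the old lane is
# cleared with an inverted mask, then the new word is shifted into position and
# OR-ed in; the XMM forms split the work across the two 64-bit halves.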
:PINSRW mmxreg, Rmr32, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC4; mmxmod=3 & Rmr32 & mmxreg; imm8
{
local destIndex:1 = (imm8 & 0x7) * 16:1;
mmxreg = mmxreg & ~(0xffff:8 << destIndex);
local newVal:8 = zext(Rmr32[0,16]);
mmxreg = mmxreg | (newVal << destIndex);
}

:PINSRW mmxreg, m16, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC4; m16 & mmxreg ... ; imm8
{
local destIndex:1 = (imm8 & 0x7) * 16:1;
mmxreg = mmxreg & ~(0xffff:8 << destIndex);
local newVal:8 = zext(m16);
mmxreg = mmxreg | (newVal << destIndex);
}

:PINSRW XmmReg, Rmr32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC4; xmmmod=3 & Rmr32 & XmmReg; imm8
{
local destIndex:1 = (imm8 & 0x7) * 16:1;
local useLow:1 = destIndex < 64:1;
local newLow:8 = zext(Rmr32:2) << destIndex;
newLow = (XmmReg[0,64] & ~(0xffff:8 << destIndex)) | newLow;
local newHigh:8 = zext(Rmr32:2) << (destIndex-64:1);
newHigh = (XmmReg[64,64] & ~(0xffff:8 << (destIndex - 64:1))) | newHigh;
conditionalAssign(XmmReg[0,64],useLow,newLow,XmmReg[0,64]);
conditionalAssign(XmmReg[64,64],!useLow,newHigh,XmmReg[64,64]);
}

:PINSRW XmmReg, m16, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC4; m16 & XmmReg ...; imm8
{
local destIndex:1 = (imm8 & 0x7) * 16:1;
local useLow:1 = destIndex < 64:1;
local newLow:8 = zext(m16) << destIndex;
newLow = (XmmReg[0,64] & ~(0xffff:8 << destIndex)) | newLow;
local newHigh:8 = zext(m16) << (destIndex-64:1);
newHigh = (XmmReg[64,64] & ~(0xffff:8 << (destIndex - 64:1))) | newHigh;
conditionalAssign(XmmReg[0,64],useLow,newLow,XmmReg[0,64]);
conditionalAssign(XmmReg[64,64],!useLow,newHigh,XmmReg[64,64]);
}

define pcodeop pmaddubsw;
:PMADDUBSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x04; mmxreg ... & m64 { mmxreg=pmaddubsw(mmxreg,m64); }
:PMADDUBSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x04; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pmaddubsw(mmxreg1,mmxreg2); }
:PMADDUBSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x04; XmmReg ... & m128 { XmmReg=pmaddubsw(XmmReg,m128); }
:PMADDUBSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x04; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pmaddubsw(XmmReg1,XmmReg2); }

define pcodeop pmaddwd;
:PMADDWD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF5; mmxreg ... & m64 { mmxreg = pmaddwd(mmxreg, m64); }
:PMADDWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF5; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmaddwd(mmxreg1, mmxreg2); }
:PMADDWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF5; XmmReg ... & m128 { XmmReg = pmaddwd(XmmReg, m128); }
:PMADDWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF5; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaddwd(XmmReg1, XmmReg2); }

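# The packed min/max constructors are branchless: conditionalAssign keeps the
# source lane when the comparison holds and the destination lane otherwise
# (signed compares for PMAXSW/PMINSW, unsigned for PMAXUB/PMINUB).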
:PMAXSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEE; mmxreg1 ... & mmxreg2_m64
{
local srcCopy:8 = mmxreg2_m64;
conditionalAssign(mmxreg1[0,16],srcCopy[0,16] s> mmxreg1[0,16],srcCopy[0,16],mmxreg1[0,16]);
conditionalAssign(mmxreg1[16,16],srcCopy[16,16] s> mmxreg1[16,16],srcCopy[16,16],mmxreg1[16,16]);
conditionalAssign(mmxreg1[32,16],srcCopy[32,16] s> mmxreg1[32,16],srcCopy[32,16],mmxreg1[32,16]);
conditionalAssign(mmxreg1[48,16],srcCopy[48,16] s> mmxreg1[48,16],srcCopy[48,16],mmxreg1[48,16]);
}

:PMAXSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEE; XmmReg1 ... & XmmReg2_m128
{
local srcCopy:16 = XmmReg2_m128;
conditionalAssign(XmmReg1[0,16],srcCopy[0,16] s> XmmReg1[0,16],srcCopy[0,16],XmmReg1[0,16]);
conditionalAssign(XmmReg1[16,16],srcCopy[16,16] s> XmmReg1[16,16],srcCopy[16,16],XmmReg1[16,16]);
conditionalAssign(XmmReg1[32,16],srcCopy[32,16] s> XmmReg1[32,16],srcCopy[32,16],XmmReg1[32,16]);
conditionalAssign(XmmReg1[48,16],srcCopy[48,16] s> XmmReg1[48,16],srcCopy[48,16],XmmReg1[48,16]);
conditionalAssign(XmmReg1[64,16],srcCopy[64,16] s> XmmReg1[64,16],srcCopy[64,16],XmmReg1[64,16]);
conditionalAssign(XmmReg1[80,16],srcCopy[80,16] s> XmmReg1[80,16],srcCopy[80,16],XmmReg1[80,16]);
conditionalAssign(XmmReg1[96,16],srcCopy[96,16] s> XmmReg1[96,16],srcCopy[96,16],XmmReg1[96,16]);
conditionalAssign(XmmReg1[112,16],srcCopy[112,16] s> XmmReg1[112,16],srcCopy[112,16],XmmReg1[112,16]);
}

:PMAXUB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDE; mmxreg1 ... & mmxreg2_m64
{
local srcCopy:8 = mmxreg2_m64;
conditionalAssign(mmxreg1[0,8],srcCopy[0,8] > mmxreg1[0,8],srcCopy[0,8],mmxreg1[0,8]);
conditionalAssign(mmxreg1[8,8],srcCopy[8,8] > mmxreg1[8,8],srcCopy[8,8],mmxreg1[8,8]);
conditionalAssign(mmxreg1[16,8],srcCopy[16,8] > mmxreg1[16,8],srcCopy[16,8],mmxreg1[16,8]);
conditionalAssign(mmxreg1[24,8],srcCopy[24,8] > mmxreg1[24,8],srcCopy[24,8],mmxreg1[24,8]);
conditionalAssign(mmxreg1[32,8],srcCopy[32,8] > mmxreg1[32,8],srcCopy[32,8],mmxreg1[32,8]);
conditionalAssign(mmxreg1[40,8],srcCopy[40,8] > mmxreg1[40,8],srcCopy[40,8],mmxreg1[40,8]);
conditionalAssign(mmxreg1[48,8],srcCopy[48,8] > mmxreg1[48,8],srcCopy[48,8],mmxreg1[48,8]);
conditionalAssign(mmxreg1[56,8],srcCopy[56,8] > mmxreg1[56,8],srcCopy[56,8],mmxreg1[56,8]);
}

:PMAXUB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDE; XmmReg1 ... & XmmReg2_m128
{
local srcCopy:16 = XmmReg2_m128;
conditionalAssign(XmmReg1[0,8],srcCopy[0,8] > XmmReg1[0,8],srcCopy[0,8],XmmReg1[0,8]);
conditionalAssign(XmmReg1[8,8],srcCopy[8,8] > XmmReg1[8,8],srcCopy[8,8],XmmReg1[8,8]);
conditionalAssign(XmmReg1[16,8],srcCopy[16,8] > XmmReg1[16,8],srcCopy[16,8],XmmReg1[16,8]);
conditionalAssign(XmmReg1[24,8],srcCopy[24,8] > XmmReg1[24,8],srcCopy[24,8],XmmReg1[24,8]);
conditionalAssign(XmmReg1[32,8],srcCopy[32,8] > XmmReg1[32,8],srcCopy[32,8],XmmReg1[32,8]);
conditionalAssign(XmmReg1[40,8],srcCopy[40,8] > XmmReg1[40,8],srcCopy[40,8],XmmReg1[40,8]);
conditionalAssign(XmmReg1[48,8],srcCopy[48,8] > XmmReg1[48,8],srcCopy[48,8],XmmReg1[48,8]);
conditionalAssign(XmmReg1[56,8],srcCopy[56,8] > XmmReg1[56,8],srcCopy[56,8],XmmReg1[56,8]);
conditionalAssign(XmmReg1[64,8],srcCopy[64,8] > XmmReg1[64,8],srcCopy[64,8],XmmReg1[64,8]);
conditionalAssign(XmmReg1[72,8],srcCopy[72,8] > XmmReg1[72,8],srcCopy[72,8],XmmReg1[72,8]);
conditionalAssign(XmmReg1[80,8],srcCopy[80,8] > XmmReg1[80,8],srcCopy[80,8],XmmReg1[80,8]);
conditionalAssign(XmmReg1[88,8],srcCopy[88,8] > XmmReg1[88,8],srcCopy[88,8],XmmReg1[88,8]);
conditionalAssign(XmmReg1[96,8],srcCopy[96,8] > XmmReg1[96,8],srcCopy[96,8],XmmReg1[96,8]);
conditionalAssign(XmmReg1[104,8],srcCopy[104,8] > XmmReg1[104,8],srcCopy[104,8],XmmReg1[104,8]);
conditionalAssign(XmmReg1[112,8],srcCopy[112,8] > XmmReg1[112,8],srcCopy[112,8],XmmReg1[112,8]);
conditionalAssign(XmmReg1[120,8],srcCopy[120,8] > XmmReg1[120,8],srcCopy[120,8],XmmReg1[120,8]);
}

:PMINSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEA; mmxreg1 ... & mmxreg2_m64
{
local srcCopy:8 = mmxreg2_m64;
conditionalAssign(mmxreg1[0,16],srcCopy[0,16] s< mmxreg1[0,16],srcCopy[0,16],mmxreg1[0,16]);
conditionalAssign(mmxreg1[16,16],srcCopy[16,16] s< mmxreg1[16,16],srcCopy[16,16],mmxreg1[16,16]);
conditionalAssign(mmxreg1[32,16],srcCopy[32,16] s< mmxreg1[32,16],srcCopy[32,16],mmxreg1[32,16]);
conditionalAssign(mmxreg1[48,16],srcCopy[48,16] s< mmxreg1[48,16],srcCopy[48,16],mmxreg1[48,16]);
}

:PMINSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEA; XmmReg1 ... & XmmReg2_m128
{
local srcCopy:16 = XmmReg2_m128;
conditionalAssign(XmmReg1[0,16],srcCopy[0,16] s< XmmReg1[0,16],srcCopy[0,16],XmmReg1[0,16]);
conditionalAssign(XmmReg1[16,16],srcCopy[16,16] s< XmmReg1[16,16],srcCopy[16,16],XmmReg1[16,16]);
conditionalAssign(XmmReg1[32,16],srcCopy[32,16] s< XmmReg1[32,16],srcCopy[32,16],XmmReg1[32,16]);
conditionalAssign(XmmReg1[48,16],srcCopy[48,16] s< XmmReg1[48,16],srcCopy[48,16],XmmReg1[48,16]);
conditionalAssign(XmmReg1[64,16],srcCopy[64,16] s< XmmReg1[64,16],srcCopy[64,16],XmmReg1[64,16]);
conditionalAssign(XmmReg1[80,16],srcCopy[80,16] s< XmmReg1[80,16],srcCopy[80,16],XmmReg1[80,16]);
conditionalAssign(XmmReg1[96,16],srcCopy[96,16] s< XmmReg1[96,16],srcCopy[96,16],XmmReg1[96,16]);
conditionalAssign(XmmReg1[112,16],srcCopy[112,16] s< XmmReg1[112,16],srcCopy[112,16],XmmReg1[112,16]);
}

:PMINUB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDA; mmxreg1 ... & mmxreg2_m64
{
local srcCopy:8 = mmxreg2_m64;
conditionalAssign(mmxreg1[0,8],srcCopy[0,8] < mmxreg1[0,8],srcCopy[0,8],mmxreg1[0,8]);
conditionalAssign(mmxreg1[8,8],srcCopy[8,8] < mmxreg1[8,8],srcCopy[8,8],mmxreg1[8,8]);
conditionalAssign(mmxreg1[16,8],srcCopy[16,8] < mmxreg1[16,8],srcCopy[16,8],mmxreg1[16,8]);
conditionalAssign(mmxreg1[24,8],srcCopy[24,8] < mmxreg1[24,8],srcCopy[24,8],mmxreg1[24,8]);
conditionalAssign(mmxreg1[32,8],srcCopy[32,8] < mmxreg1[32,8],srcCopy[32,8],mmxreg1[32,8]);
conditionalAssign(mmxreg1[40,8],srcCopy[40,8] < mmxreg1[40,8],srcCopy[40,8],mmxreg1[40,8]);
conditionalAssign(mmxreg1[48,8],srcCopy[48,8] < mmxreg1[48,8],srcCopy[48,8],mmxreg1[48,8]);
conditionalAssign(mmxreg1[56,8],srcCopy[56,8] < mmxreg1[56,8],srcCopy[56,8],mmxreg1[56,8]);
}

:PMINUB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDA; XmmReg1 ... & XmmReg2_m128
{
local srcCopy:16 = XmmReg2_m128;
conditionalAssign(XmmReg1[0,8],srcCopy[0,8] < XmmReg1[0,8],srcCopy[0,8],XmmReg1[0,8]);
conditionalAssign(XmmReg1[8,8],srcCopy[8,8] < XmmReg1[8,8],srcCopy[8,8],XmmReg1[8,8]);
conditionalAssign(XmmReg1[16,8],srcCopy[16,8] < XmmReg1[16,8],srcCopy[16,8],XmmReg1[16,8]);
conditionalAssign(XmmReg1[24,8],srcCopy[24,8] < XmmReg1[24,8],srcCopy[24,8],XmmReg1[24,8]);
conditionalAssign(XmmReg1[32,8],srcCopy[32,8] < XmmReg1[32,8],srcCopy[32,8],XmmReg1[32,8]);
conditionalAssign(XmmReg1[40,8],srcCopy[40,8] < XmmReg1[40,8],srcCopy[40,8],XmmReg1[40,8]);
conditionalAssign(XmmReg1[48,8],srcCopy[48,8] < XmmReg1[48,8],srcCopy[48,8],XmmReg1[48,8]);
conditionalAssign(XmmReg1[56,8],srcCopy[56,8] < XmmReg1[56,8],srcCopy[56,8],XmmReg1[56,8]);
conditionalAssign(XmmReg1[64,8],srcCopy[64,8] < XmmReg1[64,8],srcCopy[64,8],XmmReg1[64,8]);
conditionalAssign(XmmReg1[72,8],srcCopy[72,8] < XmmReg1[72,8],srcCopy[72,8],XmmReg1[72,8]);
conditionalAssign(XmmReg1[80,8],srcCopy[80,8] < XmmReg1[80,8],srcCopy[80,8],XmmReg1[80,8]);
conditionalAssign(XmmReg1[88,8],srcCopy[88,8] < XmmReg1[88,8],srcCopy[88,8],XmmReg1[88,8]);
conditionalAssign(XmmReg1[96,8],srcCopy[96,8] < XmmReg1[96,8],srcCopy[96,8],XmmReg1[96,8]);
conditionalAssign(XmmReg1[104,8],srcCopy[104,8] < XmmReg1[104,8],srcCopy[104,8],XmmReg1[104,8]);
conditionalAssign(XmmReg1[112,8],srcCopy[112,8] < XmmReg1[112,8],srcCopy[112,8],XmmReg1[112,8]);
conditionalAssign(XmmReg1[120,8],srcCopy[120,8] < XmmReg1[120,8],srcCopy[120,8],XmmReg1[120,8]);
}

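# PMOVMSKB gathers the sign bit (bit 7) of every byte lane into a packed bit
# mask that is zero-extended into the destination register.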
#in 64-bit mode the default operand size is 64 bits
#note that gcc assembles pmovmskb eax, mm0 and pmovmskb rax, mm0 to 0f d7 c0
:PMOVMSKB Reg32, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD7; mod = 3 & Reg32 & mmxreg2 & check_Reg32_dest
{
local byte_mask:1 = 0:1;
byte_mask[0,1] = mmxreg2[7,1];
byte_mask[1,1] = mmxreg2[15,1];
byte_mask[2,1] = mmxreg2[23,1];
byte_mask[3,1] = mmxreg2[31,1];
byte_mask[4,1] = mmxreg2[39,1];
byte_mask[5,1] = mmxreg2[47,1];
byte_mask[6,1] = mmxreg2[55,1];
byte_mask[7,1] = mmxreg2[63,1];
Reg32 = zext(byte_mask);
build check_Reg32_dest;
}

#in 64-bit mode the default operand size is 64 bits
#note that gcc assembles pmovmskb eax, xmm0 and pmovmskb rax, xmm0 to 66 0f d7 c0
:PMOVMSKB Reg32, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD7; mod = 3 & Reg32 & XmmReg2 & check_Reg32_dest
{
local byte_mask:2 = 0:2;
byte_mask[0,1] = XmmReg2[7,1];
byte_mask[1,1] = XmmReg2[15,1];
byte_mask[2,1] = XmmReg2[23,1];
byte_mask[3,1] = XmmReg2[31,1];
byte_mask[4,1] = XmmReg2[39,1];
byte_mask[5,1] = XmmReg2[47,1];
byte_mask[6,1] = XmmReg2[55,1];
byte_mask[7,1] = XmmReg2[63,1];
byte_mask[8,1] = XmmReg2[71,1];
byte_mask[9,1] = XmmReg2[79,1];
byte_mask[10,1] = XmmReg2[87,1];
byte_mask[11,1] = XmmReg2[95,1];
byte_mask[12,1] = XmmReg2[103,1];
byte_mask[13,1] = XmmReg2[111,1];
byte_mask[14,1] = XmmReg2[119,1];
byte_mask[15,1] = XmmReg2[127,1];
Reg32 = zext(byte_mask);
build check_Reg32_dest;
}

define pcodeop pmulhrsw;
:PMULHRSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0B; mmxreg ... & m64 { mmxreg=pmulhrsw(mmxreg,m64); }
:PMULHRSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0B; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pmulhrsw(mmxreg1,mmxreg2); }
:PMULHRSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0B; XmmReg ... & m128 { XmmReg=pmulhrsw(XmmReg,m128); }
:PMULHRSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0B; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pmulhrsw(XmmReg1,XmmReg2); }

define pcodeop pmulhuw;
:PMULHUW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE4; mmxreg ... & m64 { mmxreg = pmulhuw(mmxreg, m64); }
:PMULHUW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE4; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmulhuw(mmxreg1, mmxreg2); }
:PMULHUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE4; XmmReg ... & m128 { XmmReg = pmulhuw(XmmReg, m128); }
:PMULHUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE4; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulhuw(XmmReg1, XmmReg2); }

define pcodeop pmulhw;
:PMULHW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE5; mmxreg ... & m64 { mmxreg = pmulhw(mmxreg, m64); }
:PMULHW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE5; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmulhw(mmxreg1, mmxreg2); }
:PMULHW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE5; XmmReg ... & m128 { XmmReg = pmulhw(XmmReg, m128); }
:PMULHW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE5; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulhw(XmmReg1, XmmReg2); }

:PMULLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD5; mmxreg ... & m64 {
local m:8 = m64;
mmxreg[0,16] = mmxreg[0,16] * m[0,16];
mmxreg[16,16] = mmxreg[16,16] * m[16,16];
mmxreg[32,16] = mmxreg[32,16] * m[32,16];
mmxreg[48,16] = mmxreg[48,16] * m[48,16];
}

:PMULLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD5; mmxmod = 3 & mmxreg1 & mmxreg2 {
mmxreg1[0,16] = mmxreg1[0,16] * mmxreg2[0,16];
mmxreg1[16,16] = mmxreg1[16,16] * mmxreg2[16,16];
mmxreg1[32,16] = mmxreg1[32,16] * mmxreg2[32,16];
mmxreg1[48,16] = mmxreg1[48,16] * mmxreg2[48,16];
}

:PMULLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD5; XmmReg ... & m128 {
local m:16 = m128;
XmmReg[0,16] = XmmReg[0,16] * m[0,16];
XmmReg[16,16] = XmmReg[16,16] * m[16,16];
XmmReg[32,16] = XmmReg[32,16] * m[32,16];
XmmReg[48,16] = XmmReg[48,16] * m[48,16];
XmmReg[64,16] = XmmReg[64,16] * m[64,16];
XmmReg[80,16] = XmmReg[80,16] * m[80,16];
XmmReg[96,16] = XmmReg[96,16] * m[96,16];
XmmReg[112,16] = XmmReg[112,16] * m[112,16];
}

:PMULLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD5; xmmmod = 3 & XmmReg1 & XmmReg2 {
XmmReg1[0,16] = XmmReg1[0,16] * XmmReg2[0,16];
XmmReg1[16,16] = XmmReg1[16,16] * XmmReg2[16,16];
XmmReg1[32,16] = XmmReg1[32,16] * XmmReg2[32,16];
XmmReg1[48,16] = XmmReg1[48,16] * XmmReg2[48,16];
XmmReg1[64,16] = XmmReg1[64,16] * XmmReg2[64,16];
XmmReg1[80,16] = XmmReg1[80,16] * XmmReg2[80,16];
XmmReg1[96,16] = XmmReg1[96,16] * XmmReg2[96,16];
XmmReg1[112,16] = XmmReg1[112,16] * XmmReg2[112,16];
}

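# PMULUDQ is a widening multiply: only the low 32 bits of each 64-bit element
# are used, and the full 64-bit product is written back.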
:PMULUDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF4; mmxreg ... & m64
{
local a:8 = zext(mmxreg[0,32]);
local b:8 = zext(m64[0,32]);
mmxreg = a * b;
}

:PMULUDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF4; mmxmod = 3 & mmxreg1 & mmxreg2
{
local a:8 = zext(mmxreg1[0,32]);
local b:8 = zext(mmxreg2[0,32]);
mmxreg1 = a * b;
}

:PMULUDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF4; XmmReg ... & m128
{
local a:8 = zext(XmmReg[0,32]);
local b:8 = zext(m128[0,32]);
XmmReg[0,64] = a * b;
local c:8 = zext(XmmReg[64,32]);
local d:8 = zext(m128[64,32]);
XmmReg[64,64] = c * d;
}

:PMULUDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF4; xmmmod = 3 & XmmReg1 & XmmReg2
{
local a:8 = zext(XmmReg1[0,32]);
local b:8 = zext(XmmReg2[0,32]);
XmmReg1[0,64] = a * b;
local c:8 = zext(XmmReg1[64,32]);
local d:8 = zext(XmmReg2[64,32]);
XmmReg1[64,64] = c * d;
}

:POR mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEB; mmxreg ... & m64 { mmxreg = mmxreg | m64; }
:POR mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 | mmxreg2; }
:POR XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEB; XmmReg ... & m128 { XmmReg = XmmReg | m128; }
:POR XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEB; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; }

define pcodeop psadbw;
:PSADBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF6; mmxreg ... & m64 { mmxreg = psadbw(mmxreg, m64); }
:PSADBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF6; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psadbw(mmxreg1, mmxreg2); }
:PSADBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF6; XmmReg ... & m128 { XmmReg = psadbw(XmmReg, m128); }
:PSADBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF6; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psadbw(XmmReg1, XmmReg2); }

# TODO: model these byte and word shuffles precisely instead of with opaque pcodeops
define pcodeop pshufb;
:PSHUFB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x00; mmxreg1 ... & mmxreg2_m64 { mmxreg1=pshufb(mmxreg1,mmxreg2_m64); }
:PSHUFB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x00; XmmReg1 ... & XmmReg2_m128 { XmmReg1=pshufb(XmmReg1,XmmReg2_m128); }

# determine the total shift required by the bit fields in a shuffle opcode
Order0: order0 is imm8 [ order0 = ( imm8 & 0x3); ] { export *[const]:1 order0; }
Order1: order1 is imm8 [ order1 = ((imm8 >> 2) & 0x3); ] { export *[const]:1 order1; }
Order2: order2 is imm8 [ order2 = ((imm8 >> 4) & 0x3); ] { export *[const]:1 order2; }
Order3: order3 is imm8 [ order3 = ((imm8 >> 6) & 0x3); ] { export *[const]:1 order3; }

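# shuffle_4 selects one of four source values without branching: exactly one
# of the (ord == n) booleans is 1, so the sum reduces to the selected value.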
macro shuffle_4(dest,ord,c0,c1,c2,c3){
dest = zext(ord == 0) * c0 + zext(ord == 1) * c1 + zext(ord == 2) * c2 + zext(ord == 3) * c3;
}

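# Each 2-bit field of imm8 picks the source dword for one destination lane;
# e.g. imm8 = 0x1B (binary 00 01 10 11) writes c3,c2,c1,c0, reversing the
# dword order.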
:PSHUFD XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x70; (XmmReg2_m128 & XmmReg1 ...); imm8 & Order0 & Order1 & Order2 & Order3
{
local c0 = XmmReg2_m128[0,32];
local c1 = XmmReg2_m128[32,32];
local c2 = XmmReg2_m128[64,32];
local c3 = XmmReg2_m128[96,32];

shuffle_4(XmmReg1[0,32],Order0,c0,c1,c2,c3);
shuffle_4(XmmReg1[32,32],Order1,c0,c1,c2,c3);
shuffle_4(XmmReg1[64,32],Order2,c0,c1,c2,c3);
shuffle_4(XmmReg1[96,32],Order3,c0,c1,c2,c3);
}

define pcodeop pshufhw;
:PSHUFHW XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x70; XmmReg2_m128 & XmmReg1 ...; imm8 { XmmReg1 = pshufhw(XmmReg1, XmmReg2_m128, imm8:8); }

define pcodeop pshuflw;
:PSHUFLW XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x70; XmmReg2_m128 & XmmReg1 ...; imm8 { XmmReg1 = pshuflw(XmmReg1, XmmReg2_m128, imm8:8); }

define pcodeop pshufw;
:PSHUFW mmxreg1, mmxreg2_m64, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x70; mmxreg2_m64 & mmxreg1 ...; imm8 { mmxreg1 = pshufw(mmxreg1, mmxreg2_m64, imm8:8); }

define pcodeop psignb;
:PSIGNB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x08; mmxreg ... & m64 { mmxreg=psignb(mmxreg,m64); }
:PSIGNB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x08; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignb(mmxreg1,mmxreg2); }
:PSIGNB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x08; XmmReg ... & m128 { XmmReg=psignb(XmmReg,m128); }
:PSIGNB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x08; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignb(XmmReg1,XmmReg2); }

define pcodeop psignw;
:PSIGNW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x09; mmxreg ... & m64 { mmxreg=psignw(mmxreg,m64); }
:PSIGNW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x09; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignw(mmxreg1,mmxreg2); }
:PSIGNW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x09; XmmReg ... & m128 { XmmReg=psignw(XmmReg,m128); }
:PSIGNW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x09; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignw(XmmReg1,XmmReg2); }

define pcodeop psignd;
:PSIGND mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0a; mmxreg ... & m64 { mmxreg=psignd(mmxreg,m64); }
:PSIGND mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0a; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignd(mmxreg1,mmxreg2); }
:PSIGND XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0a; XmmReg ... & m128 { XmmReg=psignd(XmmReg,m128); }
:PSIGND XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0a; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignd(XmmReg1,XmmReg2); }

#break into two 64-bit chunks so decompiler can follow constants
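# Shifts below 16 bytes propagate bits from the saved low half into the high
# half; shifts of 16 bytes or more clear the whole register.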
:PSLLDQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; xmmmod = 3 & reg_opcode=7 & XmmReg2; imm8
{
if (imm8:1 > 15:1) goto <zero>;
local low64copy:8 = XmmReg2[0,64];
XmmReg2[0,64] = XmmReg2[0,64] << (8:1 * imm8:1);
if (imm8:1 > 8:1) goto <greater>;
XmmReg2[64,64] = (XmmReg2[64,64] << (8:1 * imm8:1)) | (low64copy >> (8:1 * (8:1 - imm8:1)));
goto <end>;
<greater>
XmmReg2[64,64] = low64copy << (8:1 * (imm8:1 - 8:1));
goto <end>;
<zero>
XmmReg2[0,64] = 0:8;
XmmReg2[64,64] = 0:8;
<end>
}

define pcodeop psllw;
:PSLLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF1; mmxreg ... & m64 ... { mmxreg = psllw(mmxreg, m64); }
:PSLLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF1; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psllw(mmxreg1, mmxreg2); }
:PSLLW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8 { mmxreg2 = psllw(mmxreg2, imm8:8); }

:PSLLD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF2; mmxreg ... & m64 ... {
local m:8 = m64;
mmxreg[0,32] = mmxreg[0,32] << m[0,32];
mmxreg[32,32] = mmxreg[32,32] << m[32,32];
}

:PSLLD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF2; mmxmod = 3 & mmxreg1 & mmxreg2 {
mmxreg1[0,32] = mmxreg1[0,32] << mmxreg2[0,32];
mmxreg1[32,32] = mmxreg1[32,32] << mmxreg2[32,32];
}

:PSLLD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8 {
mmxreg2[0,32] = mmxreg2[0,32] << imm8;
mmxreg2[32,32] = mmxreg2[32,32] << imm8;
}

:PSLLQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF3; mmxreg ... & m64 ... { mmxreg = mmxreg << m64; }
:PSLLQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF3; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 << mmxreg2; }
:PSLLQ mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8 { mmxreg2 = mmxreg2 << imm8:8; }

:PSLLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF1; XmmReg ... & m128 ... { XmmReg = psllw(XmmReg, m128); }
:PSLLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF1; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psllw(XmmReg1, XmmReg2); }
:PSLLW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8 { XmmReg2 = psllw(XmmReg2, imm8:8); }

:PSLLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF2; XmmReg ... & m128 ... {
local m:16 = m128;
XmmReg[0,32] = XmmReg[0,32] << m[0,32];
XmmReg[32,32] = XmmReg[32,32] << m[32,32];
XmmReg[64,32] = XmmReg[64,32] << m[64,32];
XmmReg[96,32] = XmmReg[96,32] << m[96,32];
}

:PSLLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF2; xmmmod = 3 & XmmReg1 & XmmReg2 {
XmmReg1[0,32] = XmmReg1[0,32] << XmmReg2[0,32];
XmmReg1[32,32] = XmmReg1[32,32] << XmmReg2[32,32];
XmmReg1[64,32] = XmmReg1[64,32] << XmmReg2[64,32];
XmmReg1[96,32] = XmmReg1[96,32] << XmmReg2[96,32];
}

:PSLLD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8 {
XmmReg2[0,32] = XmmReg2[0,32] << imm8;
XmmReg2[32,32] = XmmReg2[32,32] << imm8;
XmmReg2[64,32] = XmmReg2[64,32] << imm8;
XmmReg2[96,32] = XmmReg2[96,32] << imm8;
}

:PSLLQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF3; XmmReg ... & m128 ... {
local m:16 = m128;
XmmReg[0,64] = XmmReg[0,64] << m[0,64];
XmmReg[64,64] = XmmReg[64,64] << m[64,64];
}

:PSLLQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF3; xmmmod = 3 & XmmReg1 & XmmReg2 {
XmmReg1[0,64] = XmmReg1[0,64] << XmmReg2[0,64];
XmmReg1[64,64] = XmmReg1[64,64] << XmmReg2[64,64];
}

:PSLLQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8 {
XmmReg2[0,64] = XmmReg2[0,64] << imm8;
XmmReg2[64,64] = XmmReg2[64,64] << imm8;
}

define pcodeop psraw;
:PSRAW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE1; mmxreg ... & m64 ... { mmxreg = psraw(mmxreg, m64); }
:PSRAW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE1; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psraw(mmxreg1, mmxreg2); }
:PSRAW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=4 & mmxreg2; imm8 { mmxreg2 = psraw(mmxreg2, imm8:8); }

:PSRAD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE2; mmxreg ... & m64
{
# a count greater than 31 just clears all the bits
mmxreg[0,32] = mmxreg[0,32] s>> m64;
mmxreg[32,32] = mmxreg[32,32] s>> m64;
}

:PSRAD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE2; mmxmod = 3 & mmxreg1 & mmxreg2
{
# a count greater than 31 just clears all the bits
mmxreg1[0,32] = mmxreg1[0,32] s>> mmxreg2;
mmxreg1[32,32] = mmxreg1[32,32] s>> mmxreg2;
}

:PSRAD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=4 & mmxreg2; imm8
{
# a count greater than 31 just clears all the bits
mmxreg2[0,32] = mmxreg2[0,32] s>> imm8;
mmxreg2[32,32] = mmxreg2[32,32] s>> imm8;
}

:PSRAW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE1; XmmReg ... & m128 ... { XmmReg = psraw(XmmReg, m128); }
:PSRAW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE1; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psraw(XmmReg1, XmmReg2); }
:PSRAW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=4 & XmmReg2; imm8 { XmmReg2 = psraw(XmmReg2, imm8:8); }

:PSRAD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE2; m128 & XmmReg ...
{
# a count greater than 31 just clears all the bits
XmmReg[0,32] = XmmReg[0,32] s>> m128;
XmmReg[32,32] = XmmReg[32,32] s>> m128;
XmmReg[64,32] = XmmReg[64,32] s>> m128;
XmmReg[96,32] = XmmReg[96,32] s>> m128;
}

:PSRAD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE2; xmmmod = 3 & XmmReg1 & XmmReg2
{
# a count greater than 31 just clears all the bits
XmmReg1[0,32] = XmmReg1[0,32] s>> XmmReg2;
XmmReg1[32,32] = XmmReg1[32,32] s>> XmmReg2;
XmmReg1[64,32] = XmmReg1[64,32] s>> XmmReg2;
XmmReg1[96,32] = XmmReg1[96,32] s>> XmmReg2;
}

:PSRAD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=4 & XmmReg2; imm8
{
# a count greater than 31 just clears all the bits
XmmReg2[0,32] = XmmReg2[0,32] s>> imm8;
XmmReg2[32,32] = XmmReg2[32,32] s>> imm8;
XmmReg2[64,32] = XmmReg2[64,32] s>> imm8;
XmmReg2[96,32] = XmmReg2[96,32] s>> imm8;
}

:PSRLDQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; xmmmod=3 & reg_opcode=3 & XmmReg2; imm8
{
# a count greater than 15 just clears all the bits
XmmReg2 = XmmReg2 >> (imm8 * 8);
}

:PSRLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD1; mmxreg ... & m64 ...
{
mmxreg[0,16] = mmxreg[0,16] >> m64;
mmxreg[16,16] = mmxreg[16,16] >> m64;
mmxreg[32,16] = mmxreg[32,16] >> m64;
mmxreg[48,16] = mmxreg[48,16] >> m64;
}

:PSRLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD1; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1[0,16] = mmxreg1[0,16] >> mmxreg2;
mmxreg1[16,16] = mmxreg1[16,16] >> mmxreg2;
mmxreg1[32,16] = mmxreg1[32,16] >> mmxreg2;
mmxreg1[48,16] = mmxreg1[48,16] >> mmxreg2;
}

:PSRLW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8
{
mmxreg2[0,16] = mmxreg2[0,16] >> imm8;
mmxreg2[16,16] = mmxreg2[16,16] >> imm8;
mmxreg2[32,16] = mmxreg2[32,16] >> imm8;
mmxreg2[48,16] = mmxreg2[48,16] >> imm8;
}

:PSRLD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD2; mmxreg ... & m64 ...
{
mmxreg[0,32] = mmxreg[0,32] >> m64;
mmxreg[32,32] = mmxreg[32,32] >> m64;
}

:PSRLD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD2; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1[0,32] = mmxreg1[0,32] >> mmxreg2;
mmxreg1[32,32] = mmxreg1[32,32] >> mmxreg2;
}

:PSRLD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8
{
mmxreg2[0,32] = mmxreg2[0,32] >> imm8;
mmxreg2[32,32] = mmxreg2[32,32] >> imm8;
}

:PSRLQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD3; mmxreg ... & m64 ...
{
mmxreg = mmxreg >> m64;
}

:PSRLQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD3; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1 = mmxreg1 >> mmxreg2;
}

:PSRLQ mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8
{
mmxreg2 = mmxreg2 >> imm8;
}

:PSRLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD1; XmmReg ... & m128 ...
{
XmmReg[0,16] = XmmReg[0,16] >> m128[0,64];
XmmReg[16,16] = XmmReg[16,16] >> m128[0,64];
XmmReg[32,16] = XmmReg[32,16] >> m128[0,64];
XmmReg[48,16] = XmmReg[48,16] >> m128[0,64];
XmmReg[64,16] = XmmReg[64,16] >> m128[0,64];
XmmReg[80,16] = XmmReg[80,16] >> m128[0,64];
XmmReg[96,16] = XmmReg[96,16] >> m128[0,64];
XmmReg[112,16] = XmmReg[112,16] >> m128[0,64];
}

:PSRLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD1; xmmmod = 3 & XmmReg1 & XmmReg2
{
#save this off in case XmmReg1 and XmmReg2 are the same register
local count:8 = XmmReg2[0,64];

XmmReg1[0,16] = XmmReg1[0,16] >> count;
XmmReg1[16,16] = XmmReg1[16,16] >> count;
XmmReg1[32,16] = XmmReg1[32,16] >> count;
XmmReg1[48,16] = XmmReg1[48,16] >> count;
XmmReg1[64,16] = XmmReg1[64,16] >> count;
XmmReg1[80,16] = XmmReg1[80,16] >> count;
XmmReg1[96,16] = XmmReg1[96,16] >> count;
XmmReg1[112,16] = XmmReg1[112,16] >> count;
}

:PSRLW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8
{
XmmReg2[0,16] = XmmReg2[0,16] >> imm8;
XmmReg2[16,16] = XmmReg2[16,16] >> imm8;
XmmReg2[32,16] = XmmReg2[32,16] >> imm8;
XmmReg2[48,16] = XmmReg2[48,16] >> imm8;
XmmReg2[64,16] = XmmReg2[64,16] >> imm8;
XmmReg2[80,16] = XmmReg2[80,16] >> imm8;
XmmReg2[96,16] = XmmReg2[96,16] >> imm8;
XmmReg2[112,16] = XmmReg2[112,16] >> imm8;
}

:PSRLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD2; XmmReg ... & m128 ...
{
XmmReg[0,32] = XmmReg[0,32] >> m128[0,64];
XmmReg[32,32] = XmmReg[32,32] >> m128[0,64];
XmmReg[64,32] = XmmReg[64,32] >> m128[0,64];
XmmReg[96,32] = XmmReg[96,32] >> m128[0,64];
}

:PSRLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD2; xmmmod = 3 & XmmReg1 & XmmReg2
{
#save this off in case XmmReg1 and XmmReg2 are the same register
local count = XmmReg2[0,64];

XmmReg1[0,32] = XmmReg1[0,32] >> count;
XmmReg1[32,32] = XmmReg1[32,32] >> count;
XmmReg1[64,32] = XmmReg1[64,32] >> count;
XmmReg1[96,32] = XmmReg1[96,32] >> count;
}

:PSRLD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8
{
XmmReg2[0,32] = XmmReg2[0,32] >> imm8;
XmmReg2[32,32] = XmmReg2[32,32] >> imm8;
XmmReg2[64,32] = XmmReg2[64,32] >> imm8;
XmmReg2[96,32] = XmmReg2[96,32] >> imm8;
}

:PSRLQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD3; XmmReg ... & m128 ...
{
XmmReg[0,64] = XmmReg[0,64] >> m128[0,64];
XmmReg[64,64] = XmmReg[64,64] >> m128[0,64];
}

:PSRLQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD3; xmmmod = 3 & XmmReg1 & XmmReg2
{
#save this off in case XmmReg1 and XmmReg2 are the same register
local count = XmmReg2[0,64];

XmmReg1[0,64] = XmmReg1[0,64] >> count;
XmmReg1[64,64] = XmmReg1[64,64] >> count;
}

:PSRLQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8
{
XmmReg2[0,64] = XmmReg2[0,64] >> imm8;
XmmReg2[64,64] = XmmReg2[64,64] >> imm8;
}

:PSUBB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF8; mmxreg ... & m64 ...
{
local m:8 = m64;
mmxreg[0,8] = mmxreg[0,8] - m[0,8];
mmxreg[8,8] = mmxreg[8,8] - m[8,8];
mmxreg[16,8] = mmxreg[16,8] - m[16,8];
mmxreg[24,8] = mmxreg[24,8] - m[24,8];
mmxreg[32,8] = mmxreg[32,8] - m[32,8];
mmxreg[40,8] = mmxreg[40,8] - m[40,8];
mmxreg[48,8] = mmxreg[48,8] - m[48,8];
mmxreg[56,8] = mmxreg[56,8] - m[56,8];
}

:PSUBB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF8; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1[0,8] = mmxreg1[0,8] - mmxreg2[0,8];
mmxreg1[8,8] = mmxreg1[8,8] - mmxreg2[8,8];
mmxreg1[16,8] = mmxreg1[16,8] - mmxreg2[16,8];
mmxreg1[24,8] = mmxreg1[24,8] - mmxreg2[24,8];
mmxreg1[32,8] = mmxreg1[32,8] - mmxreg2[32,8];
mmxreg1[40,8] = mmxreg1[40,8] - mmxreg2[40,8];
mmxreg1[48,8] = mmxreg1[48,8] - mmxreg2[48,8];
mmxreg1[56,8] = mmxreg1[56,8] - mmxreg2[56,8];
}

:PSUBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF9; mmxreg ... & m64
{
local m:8 = m64;
mmxreg[0,16] = mmxreg[0,16] - m[0,16];
mmxreg[16,16] = mmxreg[16,16] - m[16,16];
mmxreg[32,16] = mmxreg[32,16] - m[32,16];
mmxreg[48,16] = mmxreg[48,16] - m[48,16];
}

:PSUBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF9; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1[0,16] = mmxreg1[0,16] - mmxreg2[0,16];
mmxreg1[16,16] = mmxreg1[16,16] - mmxreg2[16,16];
mmxreg1[32,16] = mmxreg1[32,16] - mmxreg2[32,16];
mmxreg1[48,16] = mmxreg1[48,16] - mmxreg2[48,16];
}

:PSUBD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFA; mmxreg ... & m64 ...
{
local m:8 = m64;
mmxreg[0,32] = mmxreg[0,32] - m[0,32];
mmxreg[32,32] = mmxreg[32,32] - m[32,32];
}

:PSUBD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFA; mmxmod = 3 & mmxreg1 & mmxreg2
{
mmxreg1[0,32] = mmxreg1[0,32] - mmxreg2[0,32];
mmxreg1[32,32] = mmxreg1[32,32] - mmxreg2[32,32];
}

:PSUBQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFB; mmxreg ... & m64 ... { mmxreg = mmxreg - m64; }
:PSUBQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 - mmxreg2; }
:PSUBQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFB; XmmReg ... & m128 ...
{
local m:16 = m128;
XmmReg[0,64] = XmmReg[0,64] - m[0,64];
XmmReg[64,64] = XmmReg[64,64] - m[64,64];
}

:PSUBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFB; xmmmod = 3 & XmmReg1 & XmmReg2
{
XmmReg1[0,64] = XmmReg1[0,64] - XmmReg2[0,64];
XmmReg1[64,64] = XmmReg1[64,64] - XmmReg2[64,64];
}

:PSUBB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF8; XmmReg ... & m128 ...
{
local m:16 = m128;
XmmReg[0,8] = XmmReg[0,8] - m[0,8];
XmmReg[8,8] = XmmReg[8,8] - m[8,8];
XmmReg[16,8] = XmmReg[16,8] - m[16,8];
XmmReg[24,8] = XmmReg[24,8] - m[24,8];
XmmReg[32,8] = XmmReg[32,8] - m[32,8];
XmmReg[40,8] = XmmReg[40,8] - m[40,8];
XmmReg[48,8] = XmmReg[48,8] - m[48,8];
XmmReg[56,8] = XmmReg[56,8] - m[56,8];
XmmReg[64,8] = XmmReg[64,8] - m[64,8];
XmmReg[72,8] = XmmReg[72,8] - m[72,8];
XmmReg[80,8] = XmmReg[80,8] - m[80,8];
XmmReg[88,8] = XmmReg[88,8] - m[88,8];
XmmReg[96,8] = XmmReg[96,8] - m[96,8];
XmmReg[104,8] = XmmReg[104,8] - m[104,8];
XmmReg[112,8] = XmmReg[112,8] - m[112,8];
XmmReg[120,8] = XmmReg[120,8] - m[120,8];
}

:PSUBB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF8; xmmmod = 3 & XmmReg1 & XmmReg2
{
XmmReg1[0,8] = XmmReg1[0,8] - XmmReg2[0,8];
XmmReg1[8,8] = XmmReg1[8,8] - XmmReg2[8,8];
XmmReg1[16,8] = XmmReg1[16,8] - XmmReg2[16,8];
XmmReg1[24,8] = XmmReg1[24,8] - XmmReg2[24,8];
XmmReg1[32,8] = XmmReg1[32,8] - XmmReg2[32,8];
XmmReg1[40,8] = XmmReg1[40,8] - XmmReg2[40,8];
XmmReg1[48,8] = XmmReg1[48,8] - XmmReg2[48,8];
XmmReg1[56,8] = XmmReg1[56,8] - XmmReg2[56,8];
XmmReg1[64,8] = XmmReg1[64,8] - XmmReg2[64,8];
XmmReg1[72,8] = XmmReg1[72,8] - XmmReg2[72,8];
XmmReg1[80,8] = XmmReg1[80,8] - XmmReg2[80,8];
XmmReg1[88,8] = XmmReg1[88,8] - XmmReg2[88,8];
XmmReg1[96,8] = XmmReg1[96,8] - XmmReg2[96,8];
XmmReg1[104,8] = XmmReg1[104,8] - XmmReg2[104,8];
XmmReg1[112,8] = XmmReg1[112,8] - XmmReg2[112,8];
XmmReg1[120,8] = XmmReg1[120,8] - XmmReg2[120,8];
}

:PSUBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF9; m128 & XmmReg ...
{
local m:16 = m128;
XmmReg[0,16] = XmmReg[0,16] - m[0,16];
XmmReg[16,16] = XmmReg[16,16] - m[16,16];
XmmReg[32,16] = XmmReg[32,16] - m[32,16];
XmmReg[48,16] = XmmReg[48,16] - m[48,16];
XmmReg[64,16] = XmmReg[64,16] - m[64,16];
XmmReg[80,16] = XmmReg[80,16] - m[80,16];
XmmReg[96,16] = XmmReg[96,16] - m[96,16];
XmmReg[112,16] = XmmReg[112,16] - m[112,16];
}

:PSUBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF9; xmmmod = 3 & XmmReg1 & XmmReg2
{
XmmReg1[0,16] = XmmReg1[0,16] - XmmReg2[0,16];
XmmReg1[16,16] = XmmReg1[16,16] - XmmReg2[16,16];
XmmReg1[32,16] = XmmReg1[32,16] - XmmReg2[32,16];
XmmReg1[48,16] = XmmReg1[48,16] - XmmReg2[48,16];
XmmReg1[64,16] = XmmReg1[64,16] - XmmReg2[64,16];
XmmReg1[80,16] = XmmReg1[80,16] - XmmReg2[80,16];
XmmReg1[96,16] = XmmReg1[96,16] - XmmReg2[96,16];
XmmReg1[112,16] = XmmReg1[112,16] - XmmReg2[112,16];
}

:PSUBD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFA; XmmReg ... & m128 ...
{
local m:16 = m128;
XmmReg[0,32] = XmmReg[0,32] - m[0,32];
XmmReg[32,32] = XmmReg[32,32] - m[32,32];
XmmReg[64,32] = XmmReg[64,32] - m[64,32];
XmmReg[96,32] = XmmReg[96,32] - m[96,32];
}

:PSUBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFA; xmmmod = 3 & XmmReg1 & XmmReg2
{
XmmReg1[0,32] = XmmReg1[0,32] - XmmReg2[0,32];
XmmReg1[32,32] = XmmReg1[32,32] - XmmReg2[32,32];
XmmReg1[64,32] = XmmReg1[64,32] - XmmReg2[64,32];
XmmReg1[96,32] = XmmReg1[96,32] - XmmReg2[96,32];
}

define pcodeop psubsb;
|
|
:PSUBSB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE8; mmxreg1 ... & mmxreg2_m64 ... { mmxreg1 = psubsb(mmxreg1, mmxreg2_m64); }
|
|
|
|
define pcodeop psubsw;
|
|
:PSUBSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE9; mmxreg1 ... & mmxreg2_m64 ... { mmxreg1 = psubsw(mmxreg1, mmxreg2_m64); }
|
|
|
|
:PSUBSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE8; XmmReg1 ... & XmmReg2_m128 ... { XmmReg1 = psubsb(XmmReg1, XmmReg2_m128); }
|
|
|
|
:PSUBSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE9; XmmReg1 ... & XmmReg2_m128 ... { XmmReg1 = psubsw(XmmReg1, XmmReg2_m128); }
|
|
|
|
define pcodeop psubusb;
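# PSUBUSB/PSUBUSW subtract with unsigned saturation: lanes clamp at 0 instead
# of wrapping.  Per-lane pseudocode (Intel definition):
#   lane = (a < b) ? 0 : a - b;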
|
|
:PSUBUSB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD8; mmxreg1 ... & mmxreg2_m64 ... { mmxreg1 = psubusb(mmxreg1, mmxreg2_m64); }
|
|
|
|
define pcodeop psubusw;
|
|
:PSUBUSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD9; mmxreg1 ... & mmxreg2_m64 ... { mmxreg1 = psubusw(mmxreg1, mmxreg2_m64); }
|
|
|
|
:PSUBUSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD8; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = psubusb(XmmReg1, XmmReg2_m128); }
|
|
|
|
:PSUBUSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD9; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = psubusw(XmmReg1, XmmReg2_m128); }
|
|
|
|
:PUNPCKHBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x68; mmxreg ... & m64
|
|
{
|
|
local m:8 = m64;
|
|
mmxreg[0,8] = mmxreg[32,8];
|
|
mmxreg[8,8] = m[32,8];
|
|
mmxreg[16,8] = mmxreg[40,8];
|
|
mmxreg[24,8] = m[40,8];
|
|
mmxreg[32,8] = mmxreg[48,8];
|
|
mmxreg[40,8] = m[48,8];
|
|
mmxreg[48,8] = mmxreg[56,8];
|
|
mmxreg[56,8] = m[56,8];
|
|
}
|
|
|
|
:PUNPCKHBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x68; mmxmod = 3 & mmxreg1 & mmxreg2
|
|
{
|
|
mmxreg1[0,8] = mmxreg1[32,8];
|
|
mmxreg1[8,8] = mmxreg2[32,8];
|
|
mmxreg1[16,8] = mmxreg1[40,8];
|
|
mmxreg1[24,8] = mmxreg2[40,8];
|
|
mmxreg1[32,8] = mmxreg1[48,8];
|
|
mmxreg1[40,8] = mmxreg2[48,8];
|
|
mmxreg1[48,8] = mmxreg1[56,8];
|
|
mmxreg1[56,8] = mmxreg2[56,8];
|
|
}
|
|
|
|
:PUNPCKHWD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x69; mmxreg ... & m64
|
|
{
|
|
local m:8 = m64;
|
|
mmxreg[0,16] = mmxreg[32,16];
|
|
mmxreg[16,16] = m[32,16];
|
|
mmxreg[32,16] = mmxreg[48,16];
|
|
mmxreg[48,16] = m[48,16];
|
|
}
|
|
|
|
:PUNPCKHWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x69; mmxmod = 3 & mmxreg1 & mmxreg2
|
|
{
|
|
mmxreg1[0,16] = mmxreg1[32,16];
|
|
mmxreg1[16,16] = mmxreg2[32,16];
|
|
mmxreg1[32,16] = mmxreg1[48,16];
|
|
mmxreg1[48,16] = mmxreg2[48,16];
|
|
}
|
|
|
|
:PUNPCKHDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6A; mmxreg ... & m64
|
|
{
|
|
mmxreg[0,32] = mmxreg[32,32];
|
|
mmxreg[32,32] = m64[32,32];
|
|
}
|
|
|
|
:PUNPCKHDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6A; mmxmod = 3 & mmxreg1 & mmxreg2
|
|
{
|
|
mmxreg1[0,32] = mmxreg1[32,32];
|
|
mmxreg1[32,32] = mmxreg2[32,32];
|
|
}
|
|
|
|
:PUNPCKHBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x68; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,8] = XmmReg[64,8];
|
|
XmmReg[8,8] = m[64,8];
|
|
XmmReg[16,8] = XmmReg[72,8];
|
|
XmmReg[24,8] = m[72,8];
|
|
XmmReg[32,8] = XmmReg[80,8];
|
|
XmmReg[40,8] = m[80,8];
|
|
XmmReg[48,8] = XmmReg[88,8];
|
|
XmmReg[56,8] = m[88,8];
|
|
XmmReg[64,8] = XmmReg[96,8];
|
|
XmmReg[72,8] = m[96,8];
|
|
XmmReg[80,8] = XmmReg[104,8];
|
|
XmmReg[88,8] = m[104,8];
|
|
XmmReg[96,8] = XmmReg[112,8];
|
|
XmmReg[104,8] = m[112,8];
|
|
XmmReg[112,8] = XmmReg[120,8];
|
|
XmmReg[120,8] = m[120,8];
|
|
}
|
|
|
|
# full set of XMM byte registers
|
|
:PUNPCKHBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x68; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,8] = XmmReg1[64,8];
|
|
XmmReg1[8,8] = XmmReg2[64,8];
|
|
XmmReg1[16,8] = XmmReg1[72,8];
|
|
XmmReg1[24,8] = XmmReg2[72,8];
|
|
XmmReg1[32,8] = XmmReg1[80,8];
|
|
XmmReg1[40,8] = XmmReg2[80,8];
|
|
XmmReg1[48,8] = XmmReg1[88,8];
|
|
XmmReg1[56,8] = XmmReg2[88,8];
|
|
XmmReg1[64,8] = XmmReg1[96,8];
|
|
XmmReg1[72,8] = XmmReg2[96,8];
|
|
XmmReg1[80,8] = XmmReg1[104,8];
|
|
XmmReg1[88,8] = XmmReg2[104,8];
|
|
XmmReg1[96,8] = XmmReg1[112,8];
|
|
XmmReg1[104,8] = XmmReg2[112,8];
|
|
XmmReg1[112,8] = XmmReg1[120,8];
|
|
XmmReg1[120,8] = XmmReg2[120,8];
|
|
}
|
|
|
|
:PUNPCKHWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x69; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,16] = XmmReg[64,16];
|
|
XmmReg[16,16] = m[64,16];
|
|
XmmReg[32,16] = XmmReg[80,16];
|
|
XmmReg[48,16] = m[80,16];
|
|
XmmReg[64,16] = XmmReg[96,16];
|
|
XmmReg[80,16] = m[96,16];
|
|
XmmReg[96,16] = XmmReg[112,16];
|
|
XmmReg[112,16] = m[112,16];
|
|
}
|
|
|
|
:PUNPCKHWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x69; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,16] = XmmReg1[64,16];
|
|
XmmReg1[16,16] = XmmReg2[64,16];
|
|
XmmReg1[32,16] = XmmReg1[80,16];
|
|
XmmReg1[48,16] = XmmReg2[80,16];
|
|
XmmReg1[64,16] = XmmReg1[96,16];
|
|
XmmReg1[80,16] = XmmReg2[96,16];
|
|
XmmReg1[96,16] = XmmReg1[112,16];
|
|
XmmReg1[112,16] = XmmReg2[112,16];
|
|
}
|
|
|
|
:PUNPCKHDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6A; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,32] = XmmReg[64,32];
|
|
XmmReg[32,32] = m[64,32];
|
|
XmmReg[64,32] = XmmReg[96,32];
|
|
XmmReg[96,32] = m[96,32];
|
|
}
|
|
|
|
:PUNPCKHDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6A; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,32] = XmmReg1[64,32];
|
|
XmmReg1[32,32] = XmmReg2[64,32];
|
|
XmmReg1[64,32] = XmmReg1[96,32];
|
|
XmmReg1[96,32] = XmmReg2[96,32];
|
|
}
|
|
|
|
:PUNPCKHQDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6D; m128 & XmmReg ...
|
|
{
|
|
XmmReg[0,64] = XmmReg[64,64];
|
|
XmmReg[64,64] = m128[64,64];
|
|
}
|
|
|
|
:PUNPCKHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6D; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,64] = XmmReg1[64,64];
|
|
XmmReg1[64,64] = XmmReg2[64,64];
|
|
}
|
|
|
|
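# The PUNPCKL* bodies below assign lanes from high to low so each source lane
# is read before the interleave overwrites it; the lowest lane already holds
# its final value, hence the "superfluous" notes.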
:PUNPCKLBW mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x60; mmxreg ... & m32
|
|
{
|
|
local m:4 = m32;
|
|
mmxreg[56,8] = m[24,8];
|
|
mmxreg[48,8] = mmxreg[24,8];
|
|
mmxreg[40,8] = m[16,8];
|
|
mmxreg[32,8] = mmxreg[16,8];
|
|
mmxreg[24,8] = m[8,8];
|
|
mmxreg[16,8] = mmxreg[8,8];
|
|
mmxreg[8,8] = m[0,8];
|
|
# mmxreg[0,8] = mmxreg[0,8]; superfluous
|
|
}
|
|
|
|
:PUNPCKLBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x60; mmxmod = 3 & mmxreg1 & mmxreg2
|
|
{
|
|
mmxreg1[56,8] = mmxreg2[24,8];
|
|
mmxreg1[48,8] = mmxreg1[24,8];
|
|
mmxreg1[40,8] = mmxreg2[16,8];
|
|
mmxreg1[32,8] = mmxreg1[16,8];
|
|
mmxreg1[24,8] = mmxreg2[8,8];
|
|
mmxreg1[16,8] = mmxreg1[8,8];
|
|
mmxreg1[8,8] = mmxreg2[0,8];
|
|
# mmxreg1[0,8] = mmxreg1[0,8]; superfluous
|
|
}
|
|
|
|
:PUNPCKLWD mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x61; mmxreg ... & m32
|
|
{
|
|
local m:4 = m32;
|
|
mmxreg[48,16] = m[16,16];
|
|
mmxreg[32,16] = mmxreg[16,16];
|
|
mmxreg[16,16] = m[0,16];
|
|
# mmxreg[0,16] = mmxreg[0,16]; superfluous
|
|
}
|
|
|
|
:PUNPCKLWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x61; mmxmod = 3 & mmxreg1 & mmxreg2
|
|
{
|
|
mmxreg1[48,16] = mmxreg2[16,16];
|
|
mmxreg1[32,16] = mmxreg1[16,16];
|
|
mmxreg1[16,16] = mmxreg2[0,16];
|
|
# mmxreg1[0,16] = mmxreg1[0,16]; superfluous
|
|
}
|
|
|
|
:PUNPCKLDQ mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x62; mmxreg ... & m32
|
|
{
|
|
mmxreg[32,32] = m32;
|
|
# mmxreg[0,32] = mmxreg[0,32]; superfluous
|
|
}
|
|
|
|
:PUNPCKLDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x62; mmxmod = 3 & mmxreg1 & mmxreg2
|
|
{
|
|
mmxreg1[32,32] = mmxreg2[0,32];
|
|
# mmxreg1[0,32] = mmxreg1[0,32]; superfluous
|
|
}
|
|
|
|
:PUNPCKLBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x60; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[120,8] = m[56,8];
|
|
XmmReg[112,8] = XmmReg[56,8];
|
|
XmmReg[104,8] = m[48,8];
|
|
XmmReg[96,8] = XmmReg[48,8];
|
|
XmmReg[88,8] = m[40,8];
|
|
XmmReg[80,8] = XmmReg[40,8];
|
|
XmmReg[72,8] = m[32,8];
|
|
XmmReg[64,8] = XmmReg[32,8];
|
|
XmmReg[56,8] = m[24,8];
|
|
XmmReg[48,8] = XmmReg[24,8];
|
|
XmmReg[40,8] = m[16,8];
|
|
XmmReg[32,8] = XmmReg[16,8];
|
|
XmmReg[24,8] = m[8,8];
|
|
XmmReg[16,8] = XmmReg[8,8];
|
|
XmmReg[8,8] = m[0,8];
|
|
# XmmReg[0,8] = XmmReg[0,8]; superfluous
|
|
}
|
|
|
|
:PUNPCKLBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x60; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[120,8] = XmmReg2[56,8];
|
|
XmmReg1[112,8] = XmmReg1[56,8];
|
|
XmmReg1[104,8] = XmmReg2[48,8];
|
|
XmmReg1[96,8] = XmmReg1[48,8];
|
|
XmmReg1[88,8] = XmmReg2[40,8];
|
|
XmmReg1[80,8] = XmmReg1[40,8];
|
|
XmmReg1[72,8] = XmmReg2[32,8];
|
|
XmmReg1[64,8] = XmmReg1[32,8];
|
|
XmmReg1[56,8] = XmmReg2[24,8];
|
|
XmmReg1[48,8] = XmmReg1[24,8];
|
|
XmmReg1[40,8] = XmmReg2[16,8];
|
|
XmmReg1[32,8] = XmmReg1[16,8];
|
|
XmmReg1[24,8] = XmmReg2[8,8];
|
|
XmmReg1[16,8] = XmmReg1[8,8];
|
|
XmmReg1[8,8] = XmmReg2[0,8];
|
|
# XmmReg1[0,8] = XmmReg1[0,8]; superfluous
|
|
}
|
|
|
|
:PUNPCKLWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x61; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[112,16] = m[48,16];
|
|
XmmReg[96,16] = XmmReg[48,16];
|
|
XmmReg[80,16] = m[32,16];
|
|
XmmReg[64,16] = XmmReg[32,16];
|
|
XmmReg[48,16] = m[16,16];
|
|
XmmReg[32,16] = XmmReg[16,16];
|
|
XmmReg[16,16] = m[0,16];
|
|
# XmmReg[0,16] = XmmReg[0,16]; superfluous
|
|
}
|
|
|
|
:PUNPCKLWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x61; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[112,16] = XmmReg2[48,16];
|
|
XmmReg1[96,16] = XmmReg1[48,16];
|
|
XmmReg1[80,16] = XmmReg2[32,16];
|
|
XmmReg1[64,16] = XmmReg1[32,16];
|
|
XmmReg1[48,16] = XmmReg2[16,16];
|
|
XmmReg1[32,16] = XmmReg1[16,16];
|
|
XmmReg1[16,16] = XmmReg2[0,16];
|
|
# XmmReg1[0,16] = XmmReg1[0,16]; superfluous
|
|
}
|
|
|
|
:PUNPCKLDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x62; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[96,32] = m[32,32];
|
|
XmmReg[64,32] = XmmReg[32,32];
|
|
XmmReg[32,32] = m[0,32];
|
|
# XmmReg[0,32] = XmmReg[0,32]; superfluous
|
|
}
|
|
|
|
:PUNPCKLDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x62; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[96,32] = XmmReg2[32,32];
|
|
XmmReg1[64,32] = XmmReg1[32,32];
|
|
XmmReg1[32,32] = XmmReg2[0,32];
|
|
# XmmReg1[0,32] = XmmReg1[0,32]; superfluous
|
|
}
|
|
|
|
define pcodeop punpcklqdq;
|
|
:PUNPCKLQDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6C; m128 & XmmReg ...
|
|
{
|
|
XmmReg[64,64] = m128[0,64];
|
|
# XmmReg[0,64] = XmmReg[0,64]; superfluous
|
|
}
|
|
|
|
:PUNPCKLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6C; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[64,64] = XmmReg2[0,64];
|
|
# XmmReg1[0,64] = XmmReg1[0,64]; superfluous
|
|
}
|
|
|
|
:PXOR mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEF; mmxreg ... & m64 { mmxreg = mmxreg ^ m64; }
|
|
:PXOR mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEF; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 ^ mmxreg2; }
|
|
:PXOR XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEF; XmmReg ... & m128 { XmmReg = XmmReg ^ m128; }
|
|
:PXOR XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEF; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 ^ XmmReg2; }
|
|
|
|
define pcodeop rcpps;
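# RCPPS/RCPSS return a hardware approximation of 1.0/x per single-precision
# lane (relative error at most 1.5*2^-12 per Intel), so an opaque pcodeop is
# used rather than an exact f/ division.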
|
|
:RCPPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x53; XmmReg ... & m128 { XmmReg = rcpps(XmmReg, m128); }
|
|
:RCPPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x53; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rcpps(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop rcpss;
|
|
:RCPSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x53; XmmReg ... & m32 { XmmReg = rcpss(XmmReg, m32); }
|
|
:RCPSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x53; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rcpss(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop rsqrtps;
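# RSQRTPS/RSQRTSS likewise approximate 1.0/sqrt(x) per lane to within
# 1.5*2^-12 relative error, hence the opaque pcodeop model.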
|
|
:RSQRTPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x52; XmmReg ... & m128 { XmmReg = rsqrtps(XmmReg, m128); }
|
|
:RSQRTPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x52; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rsqrtps(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop rsqrtss;
|
|
:RSQRTSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x52; XmmReg ... & m32 { XmmReg = rsqrtss(XmmReg, m32); }
|
|
:RSQRTSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x52; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rsqrtss(XmmReg1, XmmReg2); }
|
|
|
|
:SHUFPD XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC6; XmmReg1 ... & XmmReg2_m128; imm8
|
|
{
|
|
local srcLow:8 = XmmReg2_m128[0,64];
|
|
local srcHigh:8 = XmmReg2_m128[64,64];
|
|
local destLow:8 = XmmReg1[0,64];
|
|
local destHigh:8 = XmmReg1[64,64];
|
|
local control:1 = (imm8 & 0x1) == 0:1;
|
|
conditionalAssign(XmmReg1[0,64],control,destLow,destHigh);
|
|
control = (imm8 & 0x2) == 0:1;
|
|
conditionalAssign(XmmReg1[64,64],control,srcLow,srcHigh);
|
|
}
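# Example: SHUFPD xmm1, xmm2, 0x1 yields xmm1[63:0] = old xmm1[127:64]
# (imm8 bit 0 set) and xmm1[127:64] = xmm2[63:0] (imm8 bit 1 clear).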
|
|
|
|
:SHUFPS XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC6; (XmmReg2_m128 & XmmReg1 ...); imm8 & Order0 & Order1 & Order2 & Order3
|
|
{
|
|
local xmmreg2_m128_c0 = XmmReg2_m128[0,32];
|
|
local xmmreg2_m128_c1 = XmmReg2_m128[32,32];
|
|
local xmmreg2_m128_c2 = XmmReg2_m128[64,32];
|
|
local xmmreg2_m128_c3 = XmmReg2_m128[96,32];
|
|
|
|
local xmm_c0 = XmmReg1[0,32];
|
|
local xmm_c1 = XmmReg1[32,32];
|
|
local xmm_c2 = XmmReg1[64,32];
|
|
local xmm_c3 = XmmReg1[96,32];
|
|
|
|
shuffle_4(XmmReg1[0,32],Order0,xmm_c0,xmm_c1,xmm_c2,xmm_c3);
|
|
shuffle_4(XmmReg1[32,32],Order1,xmm_c0,xmm_c1,xmm_c2,xmm_c3);
|
|
shuffle_4(XmmReg1[64,32],Order2,xmmreg2_m128_c0,xmmreg2_m128_c1,xmmreg2_m128_c2,xmmreg2_m128_c3);
|
|
shuffle_4(XmmReg1[96,32],Order3,xmmreg2_m128_c0,xmmreg2_m128_c1,xmmreg2_m128_c2,xmmreg2_m128_c3);
|
|
}
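# Each 2-bit Order field of imm8 selects one of the four dword lanes: the low
# two result lanes are drawn from XmmReg1, the high two from XmmReg2_m128.
# E.g. SHUFPS xmm1, xmm1, 0x1B reverses the four dwords of xmm1.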
|
|
|
|
define pcodeop sqrtpd;
|
|
:SQRTPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x51; XmmReg ... & m128 { XmmReg = sqrtpd(XmmReg, m128); }
|
|
:SQRTPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = sqrtpd(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop sqrtps;
|
|
:SQRTPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x51; XmmReg ... & m128 { XmmReg = sqrtps(XmmReg, m128); }
|
|
:SQRTPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = sqrtps(XmmReg1, XmmReg2); }
|
|
|
|
:SQRTSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x51; XmmReg ... & m64 { XmmReg[0,64] = sqrt(m64); }
|
|
:SQRTSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = sqrt(XmmReg2[0,64]); }
|
|
|
|
:SQRTSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x51; XmmReg ... & m32 { XmmReg[0,32] = sqrt(m32); }
|
|
:SQRTSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = sqrt(XmmReg2[0,32]); }
|
|
|
|
:SUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5C; XmmReg ... & m128
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,64] = XmmReg[0,64] f- m[0,64];
|
|
XmmReg[64,64] = XmmReg[64,64] f- m[64,64];
|
|
}
|
|
|
|
:SUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64];
|
|
XmmReg1[64,64] = XmmReg1[64,64] f- XmmReg2[64,64];
|
|
}
|
|
|
|
:SUBPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5C; XmmReg ... & m128
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,32] = XmmReg[0,32] f- m[0,32];
|
|
XmmReg[32,32] = XmmReg[32,32] f- m[32,32];
|
|
XmmReg[64,32] = XmmReg[64,32] f- m[64,32];
|
|
XmmReg[96,32] = XmmReg[96,32] f- m[96,32];
|
|
}
|
|
|
|
:SUBPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32];
|
|
XmmReg1[32,32] = XmmReg1[32,32] f- XmmReg2[32,32];
|
|
XmmReg1[64,32] = XmmReg1[64,32] f- XmmReg2[64,32];
|
|
XmmReg1[96,32] = XmmReg1[96,32] f- XmmReg2[96,32];
|
|
}
|
|
|
|
:SUBSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5C; XmmReg ... & m64 { XmmReg[0,64] = XmmReg[0,64] f- m64; }
|
|
:SUBSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64]; }
|
|
|
|
:SUBSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5C; XmmReg ... & m32 { XmmReg[0,32] = XmmReg[0,32] f- m32; }
|
|
:SUBSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32]; }
|
|
|
|
#Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS
# RESULT <- UnorderedCompare(SRC1[63-0] <> SRC2[63-0]);
# (* Set EFLAGS *)
# CASE (RESULT) OF
#     UNORDERED:    ZF,PF,CF <- 111;
#     GREATER_THAN: ZF,PF,CF <- 000;
#     LESS_THAN:    ZF,PF,CF <- 001;
#     EQUAL:        ZF,PF,CF <- 100;
# ESAC;
# OF,AF,SF <- 0;
|
|
|
|
:UCOMISD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2E; m64 & XmmReg ...
|
|
{
|
|
fucompe(XmmReg[0,64], m64);
|
|
}
|
|
|
|
:UCOMISD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2E; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
fucompe(XmmReg1[0,64], XmmReg2[0,64]);
|
|
}
|
|
|
|
#Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS
# RESULT <- UnorderedCompare(SRC1[31-0] <> SRC2[31-0]);
# (* Set EFLAGS *)
# CASE (RESULT) OF
#     UNORDERED:    ZF,PF,CF <- 111;
#     GREATER_THAN: ZF,PF,CF <- 000;
#     LESS_THAN:    ZF,PF,CF <- 001;
#     EQUAL:        ZF,PF,CF <- 100;
# ESAC;
# OF,AF,SF <- 0;
|
|
|
|
:UCOMISS XmmReg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2E; m32 & XmmReg ...
|
|
{
|
|
fucompe(XmmReg[0,32], m32);
|
|
}
|
|
|
|
:UCOMISS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2E; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
fucompe(XmmReg1[0,32], XmmReg2[0,32]);
|
|
}
|
|
|
|
:UNPCKHPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x15; m128 & XmmReg ...
|
|
{
|
|
XmmReg[0,64] = XmmReg[64,64];
|
|
XmmReg[64,64] = m128[64,64];
|
|
}
|
|
|
|
:UNPCKHPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,64] = XmmReg1[64,64];
|
|
XmmReg1[64,64] = XmmReg2[64,64];
|
|
}
|
|
|
|
:UNPCKHPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x15; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,32] = XmmReg[64,32];
|
|
XmmReg[64,32] = XmmReg[96,32];
|
|
XmmReg[32,32] = m[64,32];
|
|
XmmReg[96,32] = m[96,32];
|
|
}
|
|
|
|
:UNPCKHPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,32] = XmmReg1[64,32];
|
|
XmmReg1[32,32] = XmmReg2[64,32];
|
|
XmmReg1[64,32] = XmmReg1[96,32]; # XmmReg1 and XmmReg2 could be the same register, preserve XmmReg1[64,32] till later
|
|
XmmReg1[96,32] = XmmReg2[96,32];
|
|
}
|
|
|
|
:UNPCKLPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x14; m128 & XmmReg ...
|
|
{
|
|
# XmmReg[0,64] = XmmReg[0,64]; superfluous
|
|
XmmReg[64,64] = m128[0,64];
|
|
}
|
|
|
|
:UNPCKLPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
# XmmReg1[0,64] = XmmReg1[0,64]; superfluous
|
|
XmmReg1[64,64] = XmmReg2[0,64];
|
|
}
|
|
|
|
:UNPCKLPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x14; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
# XmmReg[0,32] = XmmReg[0,32]; superfluous
|
|
XmmReg[64,32] = XmmReg[32,32];
|
|
XmmReg[32,32] = m[0,32];
|
|
XmmReg[96,32] = m[32,32];
|
|
}
|
|
|
|
:UNPCKLPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
# XmmReg1[0,32] = XmmReg1[0,32]; superfluous
|
|
XmmReg1[64,32] = XmmReg1[32,32];
|
|
XmmReg1[96,32] = XmmReg2[32,32];
|
|
XmmReg1[32,32] = XmmReg2[0,32]; # XmmReg1 and XmmReg2 could be the same register, preserve XmmReg1[32,32] till last
|
|
}
|
|
|
|
:XORPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x57; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,64] = ( XmmReg[0,64] ^ m[0,64] );
|
|
XmmReg[64,64] = ( XmmReg[64,64] ^ m[64,64] );
|
|
}
|
|
|
|
:XORPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x57; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,64] = ( XmmReg1[0,64] ^ XmmReg2[0,64] );
|
|
XmmReg1[64,64] = ( XmmReg1[64,64] ^ XmmReg2[64,64] );
|
|
}
|
|
|
|
:XORPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x57; m128 & XmmReg ...
|
|
{
|
|
local m:16 = m128;
|
|
XmmReg[0,32] = ( XmmReg[0,32] ^ m[0,32] );
|
|
XmmReg[32,32] = ( XmmReg[32,32] ^ m[32,32] );
|
|
XmmReg[64,32] = ( XmmReg[64,32] ^ m[64,32] );
|
|
XmmReg[96,32] = ( XmmReg[96,32] ^ m[96,32] );
|
|
}
|
|
|
|
:XORPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x57; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,32] = ( XmmReg1[0,32] ^ XmmReg2[0,32] );
|
|
XmmReg1[32,32] = ( XmmReg1[32,32] ^ XmmReg2[32,32] );
|
|
XmmReg1[64,32] = ( XmmReg1[64,32] ^ XmmReg2[64,32] );
|
|
XmmReg1[96,32] = ( XmmReg1[96,32] ^ XmmReg2[96,32] );
|
|
}
|
|
|
|
####
|
|
#### VIA Padlock instructions
|
|
####
|
|
|
|
define pcodeop xstore_available;
|
|
define pcodeop xstore;
|
|
define pcodeop xcrypt_ecb;
|
|
define pcodeop xcrypt_cbc;
|
|
define pcodeop xcrypt_ctr;
|
|
define pcodeop xcrypt_cfb;
|
|
define pcodeop xcrypt_ofb;
|
|
define pcodeop montmul;
|
|
define pcodeop xsha1;
|
|
define pcodeop xsha256;
|
|
|
|
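# The Padlock ops are modeled as black-box pcodeops over their implicit
# register operands as used below: ECX carries the count, ESI/EDI the source
# and destination pointers, and EAX/EDX/EBX control or status values.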
:XSTORE is vexMode=0 & mandover=0 & byte=0x0F; byte=0xA7; byte=0xC0 {
|
|
EAX = xstore_available(EDX,EDI);
|
|
}
|
|
|
|
:XSTORE.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xC0 {
|
|
EAX = xstore(ECX,EDX,EDI);
|
|
ECX = 0;
|
|
}
|
|
|
|
:XCRYPTECB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xC8 {
|
|
xcrypt_ecb(ECX,EDX,EBX,ESI,EDI);
|
|
}
|
|
|
|
:XCRYPTCBC.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xD0 {
|
|
xcrypt_cbc(ECX,EAX,EDX,EBX,ESI,EDI);
|
|
}
|
|
|
|
:XCRYPTCTR.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xD8 {
|
|
xcrypt_ctr(ECX,EAX,EDX,EBX,ESI,EDI);
|
|
}
|
|
|
|
:XCRYPTCFB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xE0 {
|
|
xcrypt_cfb(ECX,EAX,EDX,EBX,ESI,EDI);
|
|
}
|
|
|
|
:XCRYPTOFB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xE8 {
|
|
xcrypt_ofb(ECX,EAX,EDX,EBX,ESI,EDI);
|
|
}
|
|
|
|
:MONTMUL.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xC0 {
|
|
montmul(EAX,ECX,ESI);
|
|
ECX=0;
|
|
EDX=0;
|
|
}
|
|
|
|
:XSHA1.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xC8 {
|
|
xsha1(ECX,ESI,EDI);
|
|
EAX = ECX;
|
|
}
|
|
|
|
:XSHA256.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xD0 {
|
|
xsha256(ECX,ESI,EDI);
|
|
EAX = ECX;
|
|
}
|
|
|
|
####
|
|
#### SSE4.1 instructions
|
|
####
|
|
|
|
define pcodeop mpsadbw;
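# MPSADBW forms eight 16-bit sums of absolute differences between a sliding
# 4-byte window of the destination and a 4-byte source group selected by
# imm8; the per-lane arithmetic is left opaque here.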
|
|
:MPSADBW XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x42; XmmReg ... & m128; imm8 { XmmReg = mpsadbw(XmmReg, m128, imm8:8); }
|
|
:MPSADBW XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x42; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = mpsadbw(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop phminposuw;
|
|
:PHMINPOSUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x41; XmmReg ... & m128 { XmmReg = phminposuw(m128); }
|
|
:PHMINPOSUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x41; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = phminposuw(XmmReg2); }
|
|
|
|
define pcodeop pmuldq;
|
|
:PMULDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x28; XmmReg ... & m128 { XmmReg = pmuldq(XmmReg, m128); }
|
|
:PMULDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmuldq(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmulld;
|
|
:PMULLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x40; XmmReg ... & m128 { XmmReg = pmulld(XmmReg, m128); }
|
|
:PMULLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x40; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulld(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop dpps;
|
|
:DPPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x40; XmmReg ... & m128; imm8 { XmmReg = dpps(XmmReg, m128, imm8:8); }
|
|
:DPPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x40; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = dpps(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop dppd;
|
|
:DPPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x41; XmmReg ... & m128; imm8 { XmmReg = dppd(XmmReg, m128, imm8:8); }
|
|
:DPPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x41; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = dppd(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop blendps;
|
|
:BLENDPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0C; XmmReg ... & m128; imm8 { XmmReg = blendps(XmmReg, m128, imm8:8); }
|
|
:BLENDPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0C; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = blendps(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop blendpd;
|
|
:BLENDPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0D; XmmReg ... & m128; imm8 { XmmReg = blendpd(XmmReg, m128, imm8:8); }
|
|
:BLENDPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0D; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = blendpd(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop blendvps;
|
|
:BLENDVPS XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x14; XmmReg ... & m128 { XmmReg = blendvps(XmmReg, m128, XMM0); }
|
|
:BLENDVPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = blendvps(XmmReg1, XmmReg2, XMM0); }
|
|
|
|
define pcodeop blendvpd;
|
|
:BLENDVPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x15; XmmReg ... & m128 { XmmReg = blendvpd(XmmReg, m128, XMM0); }
|
|
:BLENDVPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = blendvpd(XmmReg1, XmmReg2, XMM0); }
|
|
|
|
define pcodeop pblendvb;
|
|
:PBLENDVB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x10; XmmReg ... & m128 { XmmReg = pblendvb(XmmReg, m128, XMM0); }
|
|
:PBLENDVB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x10; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pblendvb(XmmReg1, XmmReg2, XMM0); }
|
|
|
|
define pcodeop pblendw;
|
|
:PBLENDW XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0E; XmmReg ... & m128; imm8 { XmmReg = pblendw(XmmReg, m128, imm8:8); }
|
|
:PBLENDW XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0E; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = pblendw(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
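# The PMIN*/PMAX* forms below expand every lane with the conditionalAssign
# macro, which (as used throughout this file) acts like dest = cond ? a : b,
# giving a branch-free per-lane min or max.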
:PMINSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x38; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
local srcCopy:16 = XmmReg2_m128;
|
|
conditionalAssign(XmmReg1[0,8],srcCopy[0,8] s< XmmReg1[0,8],srcCopy[0,8],XmmReg1[0,8]);
|
|
conditionalAssign(XmmReg1[8,8],srcCopy[8,8] s< XmmReg1[8,8],srcCopy[8,8],XmmReg1[8,8]);
|
|
conditionalAssign(XmmReg1[16,8],srcCopy[16,8] s< XmmReg1[16,8],srcCopy[16,8],XmmReg1[16,8]);
|
|
conditionalAssign(XmmReg1[24,8],srcCopy[24,8] s< XmmReg1[24,8],srcCopy[24,8],XmmReg1[24,8]);
|
|
conditionalAssign(XmmReg1[32,8],srcCopy[32,8] s< XmmReg1[32,8],srcCopy[32,8],XmmReg1[32,8]);
|
|
conditionalAssign(XmmReg1[40,8],srcCopy[40,8] s< XmmReg1[40,8],srcCopy[40,8],XmmReg1[40,8]);
|
|
conditionalAssign(XmmReg1[48,8],srcCopy[48,8] s< XmmReg1[48,8],srcCopy[48,8],XmmReg1[48,8]);
|
|
conditionalAssign(XmmReg1[56,8],srcCopy[56,8] s< XmmReg1[56,8],srcCopy[56,8],XmmReg1[56,8]);
|
|
conditionalAssign(XmmReg1[64,8],srcCopy[64,8] s< XmmReg1[64,8],srcCopy[64,8],XmmReg1[64,8]);
|
|
conditionalAssign(XmmReg1[72,8],srcCopy[72,8] s< XmmReg1[72,8],srcCopy[72,8],XmmReg1[72,8]);
|
|
conditionalAssign(XmmReg1[80,8],srcCopy[80,8] s< XmmReg1[80,8],srcCopy[80,8],XmmReg1[80,8]);
|
|
conditionalAssign(XmmReg1[88,8],srcCopy[88,8] s< XmmReg1[88,8],srcCopy[88,8],XmmReg1[88,8]);
|
|
conditionalAssign(XmmReg1[96,8],srcCopy[96,8] s< XmmReg1[96,8],srcCopy[96,8],XmmReg1[96,8]);
|
|
conditionalAssign(XmmReg1[104,8],srcCopy[104,8] s< XmmReg1[104,8],srcCopy[104,8],XmmReg1[104,8]);
|
|
conditionalAssign(XmmReg1[112,8],srcCopy[112,8] s< XmmReg1[112,8],srcCopy[112,8],XmmReg1[112,8]);
|
|
conditionalAssign(XmmReg1[120,8],srcCopy[120,8] s< XmmReg1[120,8],srcCopy[120,8],XmmReg1[120,8]);
|
|
}
|
|
|
|
:PMINUW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3A; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
local srcCopy:16 = XmmReg2_m128;
|
|
conditionalAssign(XmmReg1[0,16],srcCopy[0,16] < XmmReg1[0,16],srcCopy[0,16],XmmReg1[0,16]);
|
|
conditionalAssign(XmmReg1[16,16],srcCopy[16,16] < XmmReg1[16,16],srcCopy[16,16],XmmReg1[16,16]);
|
|
conditionalAssign(XmmReg1[32,16],srcCopy[32,16] < XmmReg1[32,16],srcCopy[32,16],XmmReg1[32,16]);
|
|
conditionalAssign(XmmReg1[48,16],srcCopy[48,16] < XmmReg1[48,16],srcCopy[48,16],XmmReg1[48,16]);
|
|
conditionalAssign(XmmReg1[64,16],srcCopy[64,16] < XmmReg1[64,16],srcCopy[64,16],XmmReg1[64,16]);
|
|
conditionalAssign(XmmReg1[80,16],srcCopy[80,16] < XmmReg1[80,16],srcCopy[80,16],XmmReg1[80,16]);
|
|
conditionalAssign(XmmReg1[96,16],srcCopy[96,16] < XmmReg1[96,16],srcCopy[96,16],XmmReg1[96,16]);
|
|
conditionalAssign(XmmReg1[112,16],srcCopy[112,16] < XmmReg1[112,16],srcCopy[112,16],XmmReg1[112,16]);
|
|
}
|
|
|
|
:PMINUD XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3B; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
local srcCopy:16 = XmmReg2_m128;
|
|
conditionalAssign(XmmReg1[0,32],srcCopy[0,32] < XmmReg1[0,32],srcCopy[0,32],XmmReg1[0,32]);
|
|
conditionalAssign(XmmReg1[32,32],srcCopy[32,32] < XmmReg1[32,32],srcCopy[32,32],XmmReg1[32,32]);
|
|
conditionalAssign(XmmReg1[64,32],srcCopy[64,32] < XmmReg1[64,32],srcCopy[64,32],XmmReg1[64,32]);
|
|
conditionalAssign(XmmReg1[96,32],srcCopy[96,32] < XmmReg1[96,32],srcCopy[96,32],XmmReg1[96,32]);
|
|
}
|
|
|
|
:PMINSD XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x39; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
local srcCopy:16 = XmmReg2_m128;
|
|
conditionalAssign(XmmReg1[0,32],srcCopy[0,32] s< XmmReg1[0,32],srcCopy[0,32],XmmReg1[0,32]);
|
|
conditionalAssign(XmmReg1[32,32],srcCopy[32,32] s< XmmReg1[32,32],srcCopy[32,32],XmmReg1[32,32]);
|
|
conditionalAssign(XmmReg1[64,32],srcCopy[64,32] s< XmmReg1[64,32],srcCopy[64,32],XmmReg1[64,32]);
|
|
conditionalAssign(XmmReg1[96,32],srcCopy[96,32] s< XmmReg1[96,32],srcCopy[96,32],XmmReg1[96,32]);
|
|
}
|
|
|
|
:PMAXSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3C; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
local srcCopy:16 = XmmReg2_m128;
|
|
conditionalAssign(XmmReg1[0,8],srcCopy[0,8] s> XmmReg1[0,8],srcCopy[0,8],XmmReg1[0,8]);
|
|
conditionalAssign(XmmReg1[8,8],srcCopy[8,8] s> XmmReg1[8,8],srcCopy[8,8],XmmReg1[8,8]);
|
|
conditionalAssign(XmmReg1[16,8],srcCopy[16,8] s> XmmReg1[16,8],srcCopy[16,8],XmmReg1[16,8]);
|
|
conditionalAssign(XmmReg1[24,8],srcCopy[24,8] s> XmmReg1[24,8],srcCopy[24,8],XmmReg1[24,8]);
|
|
conditionalAssign(XmmReg1[32,8],srcCopy[32,8] s> XmmReg1[32,8],srcCopy[32,8],XmmReg1[32,8]);
|
|
conditionalAssign(XmmReg1[40,8],srcCopy[40,8] s> XmmReg1[40,8],srcCopy[40,8],XmmReg1[40,8]);
|
|
conditionalAssign(XmmReg1[48,8],srcCopy[48,8] s> XmmReg1[48,8],srcCopy[48,8],XmmReg1[48,8]);
|
|
conditionalAssign(XmmReg1[56,8],srcCopy[56,8] s> XmmReg1[56,8],srcCopy[56,8],XmmReg1[56,8]);
|
|
conditionalAssign(XmmReg1[64,8],srcCopy[64,8] s> XmmReg1[64,8],srcCopy[64,8],XmmReg1[64,8]);
|
|
conditionalAssign(XmmReg1[72,8],srcCopy[72,8] s> XmmReg1[72,8],srcCopy[72,8],XmmReg1[72,8]);
|
|
conditionalAssign(XmmReg1[80,8],srcCopy[80,8] s> XmmReg1[80,8],srcCopy[80,8],XmmReg1[80,8]);
|
|
conditionalAssign(XmmReg1[88,8],srcCopy[88,8] s> XmmReg1[88,8],srcCopy[88,8],XmmReg1[88,8]);
|
|
conditionalAssign(XmmReg1[96,8],srcCopy[96,8] s> XmmReg1[96,8],srcCopy[96,8],XmmReg1[96,8]);
|
|
conditionalAssign(XmmReg1[104,8],srcCopy[104,8] s> XmmReg1[104,8],srcCopy[104,8],XmmReg1[104,8]);
|
|
conditionalAssign(XmmReg1[112,8],srcCopy[112,8] s> XmmReg1[112,8],srcCopy[112,8],XmmReg1[112,8]);
|
|
conditionalAssign(XmmReg1[120,8],srcCopy[120,8] s> XmmReg1[120,8],srcCopy[120,8],XmmReg1[120,8]);
|
|
}
|
|
|
|
:PMAXUW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3E; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
local srcCopy:16 = XmmReg2_m128;
|
|
conditionalAssign(XmmReg1[0,16],srcCopy[0,16] > XmmReg1[0,16],srcCopy[0,16],XmmReg1[0,16]);
|
|
conditionalAssign(XmmReg1[16,16],srcCopy[16,16] > XmmReg1[16,16],srcCopy[16,16],XmmReg1[16,16]);
|
|
conditionalAssign(XmmReg1[32,16],srcCopy[32,16] > XmmReg1[32,16],srcCopy[32,16],XmmReg1[32,16]);
|
|
conditionalAssign(XmmReg1[48,16],srcCopy[48,16] > XmmReg1[48,16],srcCopy[48,16],XmmReg1[48,16]);
|
|
conditionalAssign(XmmReg1[64,16],srcCopy[64,16] > XmmReg1[64,16],srcCopy[64,16],XmmReg1[64,16]);
|
|
conditionalAssign(XmmReg1[80,16],srcCopy[80,16] > XmmReg1[80,16],srcCopy[80,16],XmmReg1[80,16]);
|
|
conditionalAssign(XmmReg1[96,16],srcCopy[96,16] > XmmReg1[96,16],srcCopy[96,16],XmmReg1[96,16]);
|
|
conditionalAssign(XmmReg1[112,16],srcCopy[112,16] > XmmReg1[112,16],srcCopy[112,16],XmmReg1[112,16]);
|
|
}
|
|
|
|
:PMAXUD XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3F; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
local srcCopy:16 = XmmReg2_m128;
|
|
conditionalAssign(XmmReg1[0,32],srcCopy[0,32] > XmmReg1[0,32],srcCopy[0,32],XmmReg1[0,32]);
|
|
conditionalAssign(XmmReg1[32,32],srcCopy[32,32] > XmmReg1[32,32],srcCopy[32,32],XmmReg1[32,32]);
|
|
conditionalAssign(XmmReg1[64,32],srcCopy[64,32] > XmmReg1[64,32],srcCopy[64,32],XmmReg1[64,32]);
|
|
conditionalAssign(XmmReg1[96,32],srcCopy[96,32] > XmmReg1[96,32],srcCopy[96,32],XmmReg1[96,32]);
|
|
}
|
|
|
|
:PMAXSD XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3D; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
local srcCopy:16 = XmmReg2_m128;
|
|
conditionalAssign(XmmReg1[0,32],srcCopy[0,32] s> XmmReg1[0,32],srcCopy[0,32],XmmReg1[0,32]);
|
|
conditionalAssign(XmmReg1[32,32],srcCopy[32,32] s> XmmReg1[32,32],srcCopy[32,32],XmmReg1[32,32]);
|
|
conditionalAssign(XmmReg1[64,32],srcCopy[64,32] s> XmmReg1[64,32],srcCopy[64,32],XmmReg1[64,32]);
|
|
conditionalAssign(XmmReg1[96,32],srcCopy[96,32] s> XmmReg1[96,32],srcCopy[96,32],XmmReg1[96,32]);
|
|
}
|
|
|
|
define pcodeop roundps;
|
|
:ROUNDPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x08; XmmReg ... & m128; imm8 { XmmReg = roundps(XmmReg, m128, imm8:8); }
|
|
:ROUNDPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x08; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundps(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop roundss;
|
|
:ROUNDSS XmmReg, m32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0A; XmmReg ... & m32; imm8 { XmmReg = roundss(XmmReg, m32, imm8:8); }
|
|
:ROUNDSS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0A; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundss(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop roundpd;
|
|
:ROUNDPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x09; XmmReg ... & m128; imm8 { XmmReg = roundpd(XmmReg, m128, imm8:8); }
|
|
:ROUNDPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x09; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundpd(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop roundsd;
|
|
:ROUNDSD XmmReg, m64, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0B; XmmReg ... & m64; imm8 { XmmReg = roundsd(XmmReg, m64, imm8:8); }
|
|
:ROUNDSD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0B; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundsd(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop insertps;
|
|
:INSERTPS XmmReg, m32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x21; XmmReg ... & m32; imm8 { XmmReg = insertps(XmmReg, m32, imm8:8); }
|
|
:INSERTPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x21; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = insertps(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
:PINSRB XmmReg, rm32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x20; XmmReg ... & rm32; imm8
|
|
{
|
|
local destIndex:1 = (imm8 & 0xf) * 8:1;
|
|
local useLow:1 = destIndex < 64:1;
|
|
local newLow:8 = zext(rm32:1) << destIndex;
|
|
newLow = (XmmReg[0,64] & ~(0xff:8 << destIndex)) | newLow;
|
|
local newHigh:8 = zext(rm32:1) << (destIndex-64:1);
|
|
newHigh = (XmmReg[64,64] & ~(0xff:8 << (destIndex - 64:1))) | newHigh;
|
|
conditionalAssign(XmmReg[0,64],useLow,newLow,XmmReg[0,64]);
|
|
conditionalAssign(XmmReg[64,64],!useLow,newHigh,XmmReg[64,64]);
|
|
}
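# Worked example: PINSRB xmm1, eax, 0xA gives destIndex = 80, so useLow is
# false and AL lands in bits [16,24) of the upper qword, i.e. bits [80,88)
# of xmm1.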
|
|
|
|
:PINSRD XmmReg, rm32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x22; XmmReg ... & rm32; imm8
|
|
{
|
|
local destIndex:1 = (imm8 & 0x3) * 32:1;
|
|
local useLow:1 = destIndex < 64:1;
|
|
local newLow:8 = zext(rm32) << destIndex;
|
|
newLow = (XmmReg[0,64] & ~(0xffffffff:8 << destIndex)) | newLow;
|
|
local newHigh:8 = zext(rm32) << (destIndex-64:1);
|
|
newHigh = (XmmReg[64,64] & ~(0xffffffff:8 << (destIndex - 64:1))) | newHigh;
|
|
conditionalAssign(XmmReg[0,64],useLow,newLow,XmmReg[0,64]);
|
|
conditionalAssign(XmmReg[64,64],!useLow,newHigh,XmmReg[64,64]);
|
|
}
|
|
|
|
@ifdef IA64
|
|
:PINSRQ XmmReg, rm64, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x22; XmmReg ... & rm64; imm8
|
|
{
|
|
local useHigh:1 = imm8 & 0x1;
|
|
conditionalAssign(XmmReg[0,64],!useHigh,rm64,XmmReg[0,64]);
|
|
conditionalAssign(XmmReg[64,64],useHigh,rm64,XmmReg[64,64]);
|
|
}
|
|
@endif
|
|
|
|
define pcodeop extractps;
|
|
@ifdef IA64
|
|
:EXTRACTPS rm64, XmmReg, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x17; XmmReg ... & rm64; imm8 { rm64 = extractps(XmmReg, imm8:8); }
|
|
@endif
|
|
:EXTRACTPS rm32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x17; XmmReg ... & rm32 & check_rm32_dest ...; imm8 { rm32 = extractps(XmmReg, imm8:8); build check_rm32_dest; }
|
|
|
|
:PEXTRB Rmr32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x14; mod=3 & XmmReg & Rmr32 & check_Rmr32_dest; imm8
|
|
{
|
|
local shift:1 = (imm8 & 0xf) * 8:1;
|
|
local low:1 = shift < 64:1;
|
|
local temp:8;
|
|
conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64));
|
|
Rmr32 = zext(temp:1);
|
|
build check_Rmr32_dest;
|
|
}
|
|
|
|
:PEXTRB Mem, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x14; XmmReg ... & Mem; imm8
|
|
{
|
|
local shift:1 = (imm8 & 0xf) * 8:1;
|
|
local low:1 = shift < 64:1;
|
|
local temp:8;
|
|
conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64));
|
|
*Mem = temp:1;
|
|
}
|
|
|
|
:PEXTRD Rmr32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x16; mod=3 & XmmReg & Rmr32 & check_Rmr32_dest; imm8
|
|
{
|
|
local shift:1 = (imm8 & 0x3) * 32:1;
|
|
local low:1 = shift < 64:1;
|
|
local temp:8;
|
|
conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64));
|
|
Rmr32 = zext(temp:4);
|
|
build check_Rmr32_dest;
|
|
}
|
|
|
|
:PEXTRD Mem, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x16; XmmReg ... & Mem; imm8
|
|
{
|
|
local shift:1 = (imm8 & 0x3) * 32:1;
|
|
local low:1 = shift < 64:1;
|
|
local temp:8;
|
|
conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64));
|
|
*Mem = temp:4;
|
|
}
|
|
|
|
@ifdef IA64
|
|
:PEXTRQ Rmr64, XmmReg, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x16; mod=3 & XmmReg & Rmr64; imm8
|
|
{
|
|
local high:1 = imm8 & 0x1;
|
|
conditionalAssign(Rmr64,high,XmmReg[64,64],XmmReg[0,64]);
|
|
}
|
|
|
|
:PEXTRQ Mem, XmmReg, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x16; XmmReg ... & Mem; imm8
|
|
{
|
|
local high:1 = imm8 & 0x1;
|
|
local temp:8;
|
|
conditionalAssign(temp,high,XmmReg[64,64],XmmReg[0,64]);
|
|
*Mem = temp;
|
|
}
|
|
@endif
|
|
|
|
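# The PMOVSX*/PMOVZX* family widens packed integer lanes from the low bytes
# of the source (e.g. PMOVSXBW sign-extends eight bytes to eight words,
# PMOVZXDQ zero-extends two dwords to two qwords); all are modeled as opaque
# pcodeops.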
define pcodeop pmovsxbw;
|
|
:PMOVSXBW XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x20; XmmReg ... & m64 { XmmReg = pmovsxbw(XmmReg, m64); }
|
|
:PMOVSXBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x20; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbw(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovsxbd;
|
|
:PMOVSXBD XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x21; XmmReg ... & m32 { XmmReg = pmovsxbd(XmmReg, m32); }
|
|
:PMOVSXBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x21; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbd(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovsxbq;
|
|
:PMOVSXBQ XmmReg, m16 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x22; XmmReg ... & m16 { XmmReg = pmovsxbq(XmmReg, m16); }
|
|
:PMOVSXBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x22; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbq(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovsxwd;
|
|
:PMOVSXWD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x23; XmmReg ... & m64 { XmmReg = pmovsxwd(XmmReg, m64); }
|
|
:PMOVSXWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x23; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxwd(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovsxwq;
|
|
:PMOVSXWQ XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x24; XmmReg ... & m32 { XmmReg = pmovsxwq(XmmReg, m32); }
|
|
:PMOVSXWQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x24; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxwq(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovsxdq;
|
|
:PMOVSXDQ XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x25; XmmReg ... & m64 { XmmReg = pmovsxdq(XmmReg, m64); }
|
|
:PMOVSXDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x25; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxdq(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovzxbw;
|
|
:PMOVZXBW XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x30; XmmReg ... & m64 { XmmReg = pmovzxbw(XmmReg, m64); }
|
|
:PMOVZXBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x30; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbw(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovzxbd;
|
|
:PMOVZXBD XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x31; XmmReg ... & m32 { XmmReg = pmovzxbd(XmmReg, m32); }
|
|
:PMOVZXBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x31; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbd(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovzxbq;
|
|
:PMOVZXBQ XmmReg, m16 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x32; XmmReg ... & m16 { XmmReg = pmovzxbq(XmmReg, m16); }
|
|
:PMOVZXBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x32; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbq(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovzxwd;
|
|
:PMOVZXWD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x33; XmmReg ... & m64 { XmmReg = pmovzxwd(XmmReg, m64); }
|
|
:PMOVZXWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x33; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxwd(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovzxwq;
|
|
:PMOVZXWQ XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x34; XmmReg ... & m32 { XmmReg = pmovzxwq(XmmReg, m32); }
|
|
:PMOVZXWQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x34; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxwq(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop pmovzxdq;
|
|
:PMOVZXDQ XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x35; XmmReg ... & m64 { XmmReg = pmovzxdq(XmmReg, m64); }
|
|
:PMOVZXDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x35; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxdq(XmmReg1, XmmReg2); }
|
|
|
|
:PTEST XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x17; XmmReg ... & m128 {
|
|
local temp_m128:16 = m128;
|
|
local tmp = temp_m128 & XmmReg;
|
|
ZF = tmp == 0;
|
|
local tmp2 = temp_m128 & ~XmmReg;
|
|
CF = tmp2 == 0;
|
|
AF = 0;
|
|
OF = 0;
|
|
PF = 0;
|
|
SF = 0;
|
|
}
|
|
|
|
:PTEST XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x17; xmmmod=3 & XmmReg1 & XmmReg2 {
|
|
local tmp = XmmReg2 & XmmReg1;
|
|
ZF = tmp == 0;
|
|
local tmp2 = XmmReg2 & ~XmmReg1;
|
|
CF = tmp2 == 0;
|
|
AF = 0;
|
|
OF = 0;
|
|
PF = 0;
|
|
SF = 0;
|
|
}
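# PTEST is the SSE4.1 logical compare: ZF is set iff (dest AND src) is all
# zeroes, CF iff (NOT dest AND src) is all zeroes; e.g. PTEST xmm0, xmm0
# sets ZF exactly when xmm0 is zero.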
|
|
|
|
:PCMPEQQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; XmmReg ... & m128
|
|
{
|
|
local temp_m128:16 = m128;
|
|
XmmReg[0,64] = zext(XmmReg[0,64] == temp_m128[0,64]) * 0xffffffffffffffff:8;
|
|
XmmReg[64,64] = zext(XmmReg[64,64] == temp_m128[64,64]) * 0xffffffffffffffff:8;
|
|
}
|
|
:PCMPEQQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1[0,64] = zext(XmmReg1[0,64] == XmmReg2[0,64]) * 0xffffffffffffffff:8;
|
|
XmmReg1[64,64] = zext(XmmReg1[64,64] == XmmReg2[64,64]) * 0xffffffffffffffff:8;
|
|
}
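# The zext(a == b) * 0xffffffffffffffff idiom above materializes the usual
# SIMD compare result: an all-ones lane when the compare holds, zero
# otherwise.  PCMPGTQ below uses the same trick for signed greater-than.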
|
|
|
|
define pcodeop packusdw;
|
|
:PACKUSDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2B; XmmReg ... & m128 { XmmReg = packusdw(XmmReg, m128); }
|
|
:PACKUSDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2B; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = packusdw(XmmReg1, XmmReg2); }
|
|
|
|
define pcodeop movntdqa;
|
|
:MOVNTDQA XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2A; XmmReg ... & m128 { XmmReg = movntdqa(XmmReg, m128); }
|
|
|
|
####
|
|
#### SSE4.2 instructions
|
|
####
|
|
|
|
define pcodeop crc32;
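# The SSE4.2 CRC32 instruction accumulates using the CRC-32C (Castagnoli)
# polynomial 0x11EDC6F41 on bit-reflected data; the folding itself is left
# to the opaque pcodeop.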
|
|
:CRC32 Reg32, rm8 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF0; Reg32 ... & check_Reg32_dest ... & rm8 { Reg32 = crc32(Reg32, rm8); build check_Reg32_dest; }
|
|
:CRC32 Reg32, rm16 is vexMode=0 & opsize=0 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF1; Reg32 ... & check_Reg32_dest ... & rm16 { Reg32 = crc32(Reg32, rm16); build check_Reg32_dest; }
|
|
:CRC32 Reg32, rm32 is vexMode=0 & opsize=1 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF1; Reg32 ... & check_Reg32_dest ... & rm32 { Reg32 = crc32(Reg32, rm32); build check_Reg32_dest; }
|
|
@ifdef IA64
|
|
:CRC32 Reg32, rm8 is vexMode=0 & opsize=1 & $(PRE_F2) & $(REX) & byte=0x0F; byte=0x38; byte=0xF0; Reg32 ... & check_Reg32_dest ... & rm8 { Reg32 = crc32(Reg32, rm8); build check_Reg32_dest; }
|
|
:CRC32 Reg64, rm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & $(REX_W) & byte=0x0F; byte=0x38; byte=0xF0; Reg64 ... & rm8 { Reg64 = crc32(Reg64, rm8); }
|
|
:CRC32 Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & $(REX_W) & byte=0x0F; byte=0x38; byte=0xF1; Reg64 ... & rm64 { Reg64 = crc32(Reg64, rm64); }
|
|
@endif
|
|
|
|
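# The string-compare ops below only model their primary result (an index in
# ECX for PCMPESTRI/PCMPISTRI, a mask in XMM0 for PCMPESTRM/PCMPISTRM); the
# EFLAGS results these instructions also produce are not modeled.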
define pcodeop pcmpestri;
|
|
:PCMPESTRI XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x61; XmmReg ... & m128; imm8 { ECX = pcmpestri(XmmReg, m128, imm8:8); }
|
|
:PCMPESTRI XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x61; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { ECX = pcmpestri(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop pcmpestrm;
|
|
:PCMPESTRM XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x60; XmmReg ... & m128; imm8 { XMM0 = pcmpestrm(XmmReg, m128, imm8:8); }
|
|
:PCMPESTRM XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x60; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XMM0 = pcmpestrm(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop pcmpistri;
|
|
:PCMPISTRI XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x63; XmmReg ... & m128; imm8 { ECX = pcmpistri(XmmReg, m128, imm8:8); }
|
|
:PCMPISTRI XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x63; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { ECX = pcmpistri(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop pcmpistrm;
|
|
:PCMPISTRM XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x62; XmmReg ... & m128; imm8 { XMM0 = pcmpistrm(XmmReg, m128, imm8:8); }
|
|
:PCMPISTRM XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x62; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XMM0 = pcmpistrm(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
:PCMPGTQ XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x37; XmmReg1 ... & XmmReg2_m128
|
|
{
|
|
XmmReg1[0,64] = 0xffffffffffffffff:8 * (zext(XmmReg1[0,64] s> XmmReg2_m128[0,64]));
|
|
XmmReg1[64,64] = 0xffffffffffffffff:8 * (zext(XmmReg1[64,64] s> XmmReg2_m128[64,64]));
|
|
}
|
|
|
|
macro popcountflags(src){
|
|
OF = 0:1;
|
|
SF = 0:1;
|
|
AF = 0:1;
|
|
CF = 0:1;
|
|
PF = 0:1;
|
|
ZF = (src == 0);
|
|
}
|
|
:POPCNT Reg16, rm16 is vexMode=0 & opsize=0 & $(PRE_F3) & byte=0x0F; byte=0xB8; Reg16 ... & rm16 { popcountflags(rm16); Reg16 = popcount(rm16); }
|
|
:POPCNT Reg32, rm32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0F; byte=0xB8; Reg32 ... & check_Reg32_dest ... & rm32 { popcountflags(rm32); Reg32 = popcount(rm32); build check_Reg32_dest; }
|
|
@ifdef IA64
|
|
:POPCNT Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & $(REX_W) & byte=0x0F; byte=0xB8; Reg64 ... & rm64 { popcountflags(rm64); Reg64 = popcount(rm64); }
|
|
@endif
|
|
|
|
####
|
|
#### AESNI instructions
|
|
####
|
|
|
|
define pcodeop aesdec;
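# The AESNI ops are modeled as opaque pcodeops.  AESDEC performs one round
# of AES decryption on the 128-bit state: InvShiftRows, InvSubBytes,
# InvMixColumns, then XOR with the round key in the second operand.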
|
|
:AESDEC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xde; XmmReg1 ... & XmmReg2_m128 {
|
|
XmmReg1 = aesdec(XmmReg1, XmmReg2_m128);
|
|
}
|
|
:VAESDEC XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xde; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
|
|
XmmReg1 = aesdec(vexVVVV_XmmReg, XmmReg2_m128);
|
|
YmmReg1 = zext(XmmReg1);
|
|
}
|
|
|
|
define pcodeop aesdeclast;
|
|
:AESDECLAST XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdf; XmmReg1 ... & XmmReg2_m128 {
|
|
XmmReg1 = aesdeclast(XmmReg1, XmmReg2_m128);
|
|
}
|
|
:VAESDECLAST XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdf; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
|
|
XmmReg1 = aesdeclast(vexVVVV_XmmReg, XmmReg2_m128);
|
|
YmmReg1 = zext(XmmReg1);
|
|
}
|
|
|
|
define pcodeop aesenc;
|
|
:AESENC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdc; XmmReg1 ... & XmmReg2_m128 {
|
|
XmmReg1 = aesenc(XmmReg1, XmmReg2_m128);
|
|
}
|
|
:VAESENC XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdc; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
|
|
XmmReg1 = aesenc(vexVVVV_XmmReg, XmmReg2_m128);
|
|
YmmReg1 = zext(XmmReg1);
|
|
}
|
|
|
|
define pcodeop aesenclast;
|
|
:AESENCLAST XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdd; XmmReg1 ... & XmmReg2_m128 {
|
|
XmmReg1 = aesenclast(XmmReg1, XmmReg2_m128);
|
|
}
|
|
:VAESENCLAST XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdd; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
|
|
XmmReg1 = aesenclast(vexVVVV_XmmReg, XmmReg2_m128);
|
|
YmmReg1 = zext(XmmReg1);
|
|
}
|
|
|
|
define pcodeop aesimc;
|
|
:AESIMC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdb; XmmReg1 ... & XmmReg2_m128 {
|
|
XmmReg1 = aesimc(XmmReg2_m128);
|
|
}
|
|
:VAESIMC XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0xdb; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
|
|
XmmReg1 = aesimc(XmmReg2_m128);
|
|
YmmReg1 = zext(XmmReg1);
|
|
}
|
|
|
|
define pcodeop aeskeygenassist;
|
|
:AESKEYGENASSIST XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0xdf; XmmReg1 ... & XmmReg2_m128; imm8 {
|
|
XmmReg1 = aeskeygenassist(XmmReg2_m128, imm8:1);
|
|
}
|
|
:VAESKEYGENASSIST XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0xdf; (XmmReg1 & YmmReg1) ... & XmmReg2_m128; imm8 {
|
|
XmmReg1 = aeskeygenassist(XmmReg2_m128, imm8:1);
|
|
YmmReg1 = zext(XmmReg1);
|
|
}
|
|
|
|
|
|
|
|
####
|
|
#### Deprecated 3DNow! instructions
|
|
####
|
|
|
|
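# Each 3DNow! op treats an MMX register as two packed single-precision
# floats; all are modeled as opaque pcodeops below.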
define pcodeop PackedIntToFloatingDwordConv;
|
|
:PI2FD mmxreg, m64 is vexMode=0 & suffix3D=0x0D & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedIntToFloatingDwordConv(mmxreg, m64); }
|
|
:PI2FD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x0D & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedIntToFloatingDwordConv(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingToIntDwordConv;
|
|
:PF2ID mmxreg, m64 is vexMode=0 & suffix3D=0x1D & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingToIntDwordConv(mmxreg, m64); }
|
|
:PF2ID mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x1D & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingToIntDwordConv(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingCompareGE;
|
|
:PFCMPGE mmxreg, m64 is vexMode=0 & suffix3D=0x90 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingCompareGE(mmxreg, m64); }
|
|
:PFCMPGE mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x90 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareGE(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingCompareGT;
|
|
:PFCMPGT mmxreg, m64 is vexMode=0 & suffix3D=0xA0 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingCompareGT(mmxreg, m64); }
|
|
:PFCMPGT mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA0 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareGT(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingCompareEQ;
|
|
:PFCMPEQ mmxreg, m64 is vexMode=0 & suffix3D=0xB0 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingCompareEQ(mmxreg, m64); }
|
|
:PFCMPEQ mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB0 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareEQ(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingAccumulate;
|
|
:PFACC mmxreg, m64 is vexMode=0 & suffix3D=0xAE & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingAccumulate(mmxreg, m64); }
|
|
:PFACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xAE & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingAccumulate(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingADD;
|
|
:PFADD mmxreg, m64 is vexMode=0 & suffix3D=0x9E & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingADD(mmxreg, m64); }
|
|
:PFADD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x9E & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingADD(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingSUB;
|
|
:PFSUB mmxreg, m64 is vexMode=0 & suffix3D=0x9A & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingSUB(mmxreg, m64); }
|
|
:PFSUB mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x9A & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingSUB(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingSUBR;
|
|
:PFSUBR mmxreg, m64 is vexMode=0 & suffix3D=0xAA & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingSUBR(mmxreg, m64); }
|
|
:PFSUBR mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xAA & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingSUBR(mmxreg1, mmxreg2); }
|
|
|
|
define pcodeop PackedFloatingMIN;
|
|
:PFMIN mmxreg, m64 is vexMode=0 & suffix3D=0x94 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMIN(mmxreg, m64); }
|
|
:PFMIN mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x94 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMIN(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingMAX;

:PFMAX mmxreg, m64 is vexMode=0 & suffix3D=0xA4 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMAX(mmxreg, m64); }

:PFMAX mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA4 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMAX(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingMUL;

:PFMUL mmxreg, m64 is vexMode=0 & suffix3D=0xB4 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMUL(mmxreg, m64); }

:PFMUL mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB4 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMUL(mmxreg1, mmxreg2); }
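
# PFRCP and PFRSQRT return low-precision reciprocal and reciprocal-square-root
# approximations of the low source element; full precision is reached by
# chaining the Newton-Raphson refinement steps PFRCPIT1/PFRSQIT1 and PFRCPIT2
# defined below.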

define pcodeop FloatingReciprocalApprox;

:PFRCP mmxreg, m64 is vexMode=0 & suffix3D=0x96 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = FloatingReciprocalApprox(mmxreg, m64); }

:PFRCP mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x96 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = FloatingReciprocalApprox(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingReciprocalSQRApprox;

:PFRSQRT mmxreg, m64 is vexMode=0 & suffix3D=0x97 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalSQRApprox(mmxreg, m64); }

:PFRSQRT mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x97 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalSQRApprox(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingReciprocalIter1;

:PFRCPIT1 mmxreg, m64 is vexMode=0 & suffix3D=0xA6 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalIter1(mmxreg, m64); }

:PFRCPIT1 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA6 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalIter1(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingReciprocalSQRIter1;

:PFRSQIT1 mmxreg, m64 is vexMode=0 & suffix3D=0xA7 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalSQRIter1(mmxreg, m64); }

:PFRSQIT1 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA7 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalSQRIter1(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingReciprocalIter2;

:PFRCPIT2 mmxreg, m64 is vexMode=0 & suffix3D=0xB6 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalIter2(mmxreg, m64); }

:PFRCPIT2 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB6 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalIter2(mmxreg1, mmxreg2); }
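
# The two integer helpers below round their results: PAVGUSB computes
# (a + b + 1) >> 1 on each unsigned byte lane, while PMULHRW computes
# (a * b + 0x8000) >> 16 on each signed 16-bit lane.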

define pcodeop PackedAverageUnsignedBytes;

:PAVGUSB mmxreg, m64 is vexMode=0 & suffix3D=0xBF & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedAverageUnsignedBytes(mmxreg, m64); }

:PAVGUSB mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xBF & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedAverageUnsignedBytes(mmxreg1, mmxreg2); }

define pcodeop PackedMultiplyHighRoundedWord;

:PMULHRW mmxreg, m64 is vexMode=0 & suffix3D=0xB7 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedMultiplyHighRoundedWord(mmxreg, m64); }

:PMULHRW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB7 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedMultiplyHighRoundedWord(mmxreg1, mmxreg2); }
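
# FEMMS is AMD's faster alternative to EMMS: it clears the MMX state (empties
# the x87 tag word) but leaves the register contents undefined.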

define pcodeop FastExitMediaState;

:FEMMS is vexMode=0 & byte=0x0F; byte=0x0E { FastExitMediaState(); }
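
# The 3DNow! PREFETCH/PREFETCHW forms below are kept only as commented-out
# reference; prefetch decoding is assumed to be handled elsewhere in this
# specification.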

#define pcodeop PrefetchDataIntoCache;

#:PREFETCH m8 is vexMode=0 & byte=0x0F; byte=0x18; m8 { PrefetchDataIntoCache(m8); }

#define pcodeop PrefetchDataIntoCacheWrite;

#:PREFETCHW m8 is vexMode=0 & byte=0x0F; byte=0x0D; reg_opcode=1 ... & m8 { PrefetchDataIntoCacheWrite(m8); }

# 3DNow! extensions
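
# The constructors below cover the extended 3DNow! instructions (PF2IW, PI2FW,
# PFNACC, PFPNACC and PSWAPD) introduced alongside the AMD Athlon.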

define pcodeop PackedFloatingToIntWord;

:PF2IW mmxreg, m64 is vexMode=0 & suffix3D=0x1C & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingToIntWord(mmxreg, m64); }

:PF2IW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x1C & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingToIntWord(mmxreg1, mmxreg2); }

define pcodeop PackedIntToFloatingWord;

:PI2FW mmxreg, m64 is vexMode=0 & suffix3D=0x0C & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedIntToFloatingWord(mmxreg, m64); }

:PI2FW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x0C & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedIntToFloatingWord(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingNegAccumulate;

:PFNACC mmxreg, m64 is vexMode=0 & suffix3D=0x8A & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingNegAccumulate(mmxreg, m64); }

:PFNACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x8A & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingNegAccumulate(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingPosNegAccumulate;

:PFPNACC mmxreg, m64 is vexMode=0 & suffix3D=0x8E & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingPosNegAccumulate(mmxreg, m64); }

:PFPNACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x8E & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingPosNegAccumulate(mmxreg1, mmxreg2); }
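
# PSWAPD exchanges the two 32-bit halves of the source operand; for example, a
# source of 0x1111111122222222 yields 0x2222222211111111 in the destination.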

define pcodeop PackedSwapDWords;

:PSWAPD mmxreg, m64 is vexMode=0 & suffix3D=0xBB & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedSwapDWords(mmxreg, m64); }

:PSWAPD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xBB & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedSwapDWords(mmxreg1, mmxreg2); }
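
# MASKMOVQ stores the bytes of mmxreg1 whose corresponding mask bytes in
# mmxreg2 have their high bit set to the memory addressed by DS:(E)DI; the
# implicit store is modeled below as an opaque pcodeop side effect.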

define pcodeop MaskedMoveQWord;

:MASKMOVQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF7; mmxmod = 3 & mmxreg1 & mmxreg2 { MaskedMoveQWord(mmxreg1, mmxreg2); }

} # end with : lockprefx=0