# SLA specification file for Intel x86

@ifdef IA64
@define SIZE "8"
@define STACKPTR "RSP"
@else
@define SIZE "4"
@define STACKPTR "ESP"
@endif

define endian=little;

define space ram type=ram_space size=$(SIZE) default;
define space register type=register_space size=4;

# General purpose registers

@ifdef IA64
define register offset=0 size=8 [ RAX RCX RDX RBX RSP RBP RSI RDI ];
define register offset=0 size=4 [ EAX _ ECX _ EDX _ EBX _ ESP _ EBP _ ESI _ EDI ];
define register offset=0 size=2 [ AX _ _ _ CX _ _ _ DX _ _ _ BX _ _ _ SP _ _ _ BP _ _ _ SI _ _ _ DI ];
define register offset=0 size=1 [ AL AH _ _ _ _ _ _ CL CH _ _ _ _ _ _ DL DH _ _ _ _ _ _ BL BH _ _ _ _ _ _ SPL _ _ _ _ _ _ _ BPL _ _ _ _ _ _ _ SIL _ _ _ _ _ _ _ DIL ];

define register offset=0x80 size=8 [ R8 R9 R10 R11 R12 R13 R14 R15 ];
define register offset=0x80 size=4 [ R8D _ R9D _ R10D _ R11D _ R12D _ R13D _ R14D _ R15D _ ];
define register offset=0x80 size=2 [ R8W _ _ _ R9W _ _ _ R10W _ _ _ R11W _ _ _ R12W _ _ _ R13W _ _ _ R14W _ _ _ R15W _ _ _ ];
define register offset=0x80 size=1 [ R8B _ _ _ _ _ _ _ R9B _ _ _ _ _ _ _ R10B _ _ _ _ _ _ _ R11B _ _ _ _ _ _ _ R12B _ _ _ _ _ _ _ R13B _ _ _ _ _ _ _ R14B _ _ _ _ _ _ _ R15B _ _ _ _ _ _ _ ];
@else
define register offset=0 size=4 [ EAX ECX EDX EBX ESP EBP ESI EDI ];
define register offset=0 size=2 [ AX _ CX _ DX _ BX _ SP _ BP _ SI _ DI ];
define register offset=0 size=1 [ AL AH _ _ CL CH _ _ DL DH _ _ BL BH ];
@endif

# Segment registers
define register offset=0x100 size=2 [ ES CS SS DS FS GS ];
define register offset=0x110 size=$(SIZE) [ FS_OFFSET GS_OFFSET ];

# Flags
define register offset=0x200 size=1 [ CF F1 PF F3 AF F5 ZF SF
                                      TF IF DF OF IOPL NT F15
                                      RF VM AC VIF VIP ID ];
@ifdef IA64
define register offset=0x280 size=8 [ rflags RIP ];
define register offset=0x280 size=4 [ eflags _ EIP _ ];
define register offset=0x280 size=2 [ flags _ _ _ IP _ _ _ ];
@else
define register offset=0x280 size=4 [ eflags EIP ];
define register offset=0x280 size=2 [ flags _ IP ];
@endif

# Debug and control registers

@ifdef IA64
define register offset=0x300 size=8 [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7
                                      DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15
                                      CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7
                                      CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 ];
@else
define register offset=0x300 size=4 [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7
                                      CR0 _ CR2 CR3 CR4 ];
define register offset=0x400 size=4 [ TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 ];
@endif

# Processor State Register - currently only XFEATURE_ENABLED_MASK=XCR0 is defined
#
define register offset=0x600 size=8 [ XCR0 ];

# Memory Protection Extensions (MPX)
define register offset=0x700 size=8 [ BNDCFGS BNDCFGU BNDSTATUS ];

define register offset=0x740 size=16 [ BND0 BND1 BND2 BND3 _ _ _ _ ];
define register offset=0x740 size=8 [ BND0_LB BND0_UB BND1_LB BND1_UB BND2_LB BND2_UB BND3_LB BND3_UB _ _ _ _ _ _ _ _ ];

# Control Flow Extensions
define register offset=0x7c0 size=8 [ SSP IA32_PL2_SSP IA32_PL1_SSP IA32_PL0_SSP ];

# Floating point registers - as they are in 32-bit protected mode
define register offset=0x1000 size=10 [ ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7 ];
define register offset=0x1080 size=1 [ C0 C1 C2 C3 ];
define register offset=0x1084 size=4 [ MXCSR ];
define register offset=0x1090 size=2 [ FPUControlWord FPUStatusWord FPUTagWord
                                       FPULastInstructionOpcode ];
define register offset=0x1098 size=4 [ FPUDataPointer FPUInstructionPointer ];

#
# YMM0 - YMM7 - available in 32 bit mode
# YMM0 - YMM15 - available in 64 bit mode
#
define register offset=0x1100 size=8 [ MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 ];
define register offset=0x1100 size=4 [
  MM0_Da MM0_Db
  MM1_Da MM1_Db
  MM2_Da MM2_Db
  MM3_Da MM3_Db
  MM4_Da MM4_Db
  MM5_Da MM5_Db
  MM6_Da MM6_Db
  MM7_Da MM7_Db
];
define register offset=0x1100 size=2 [
  MM0_Wa MM0_Wb MM0_Wc MM0_Wd
  MM1_Wa MM1_Wb MM1_Wc MM1_Wd
  MM2_Wa MM2_Wb MM2_Wc MM2_Wd
  MM3_Wa MM3_Wb MM3_Wc MM3_Wd
  MM4_Wa MM4_Wb MM4_Wc MM4_Wd
  MM5_Wa MM5_Wb MM5_Wc MM5_Wd
  MM6_Wa MM6_Wb MM6_Wc MM6_Wd
  MM7_Wa MM7_Wb MM7_Wc MM7_Wd
];
define register offset=0x1100 size=1 [
  MM0_Ba MM0_Bb MM0_Bc MM0_Bd MM0_Be MM0_Bf MM0_Bg MM0_Bh
  MM1_Ba MM1_Bb MM1_Bc MM1_Bd MM1_Be MM1_Bf MM1_Bg MM1_Bh
  MM2_Ba MM2_Bb MM2_Bc MM2_Bd MM2_Be MM2_Bf MM2_Bg MM2_Bh
  MM3_Ba MM3_Bb MM3_Bc MM3_Bd MM3_Be MM3_Bf MM3_Bg MM3_Bh
  MM4_Ba MM4_Bb MM4_Bc MM4_Bd MM4_Be MM4_Bf MM4_Bg MM4_Bh
  MM5_Ba MM5_Bb MM5_Bc MM5_Bd MM5_Be MM5_Bf MM5_Bg MM5_Bh
  MM6_Ba MM6_Bb MM6_Bc MM6_Bd MM6_Be MM6_Bf MM6_Bg MM6_Bh
  MM7_Ba MM7_Bb MM7_Bc MM7_Bd MM7_Be MM7_Bf MM7_Bg MM7_Bh
];

# YMMx_H is the formal name for the high double quadword of the YMMx register, XMMx is the overlay in the XMM register set
define register offset=0x1200 size=16 [
  XMM0  YMM0_H
  XMM1  YMM1_H
  XMM2  YMM2_H
  XMM3  YMM3_H
  XMM4  YMM4_H
  XMM5  YMM5_H
  XMM6  YMM6_H
  XMM7  YMM7_H
  XMM8  YMM8_H
  XMM9  YMM9_H
  XMM10 YMM10_H
  XMM11 YMM11_H
  XMM12 YMM12_H
  XMM13 YMM13_H
  XMM14 YMM14_H
  XMM15 YMM15_H
];

define register offset=0x1200 size=8 [
  XMM0_Qa XMM0_Qb _ _
  XMM1_Qa XMM1_Qb _ _
  XMM2_Qa XMM2_Qb _ _
  XMM3_Qa XMM3_Qb _ _
  XMM4_Qa XMM4_Qb _ _
  XMM5_Qa XMM5_Qb _ _
  XMM6_Qa XMM6_Qb _ _
  XMM7_Qa XMM7_Qb _ _
  XMM8_Qa XMM8_Qb _ _
  XMM9_Qa XMM9_Qb _ _
  XMM10_Qa XMM10_Qb _ _
  XMM11_Qa XMM11_Qb _ _
  XMM12_Qa XMM12_Qb _ _
  XMM13_Qa XMM13_Qb _ _
  XMM14_Qa XMM14_Qb _ _
  XMM15_Qa XMM15_Qb _ _
];
define register offset=0x1200 size=4 [
  XMM0_Da XMM0_Db XMM0_Dc XMM0_Dd _ _ _ _
  XMM1_Da XMM1_Db XMM1_Dc XMM1_Dd _ _ _ _
  XMM2_Da XMM2_Db XMM2_Dc XMM2_Dd _ _ _ _
  XMM3_Da XMM3_Db XMM3_Dc XMM3_Dd _ _ _ _
  XMM4_Da XMM4_Db XMM4_Dc XMM4_Dd _ _ _ _
  XMM5_Da XMM5_Db XMM5_Dc XMM5_Dd _ _ _ _
  XMM6_Da XMM6_Db XMM6_Dc XMM6_Dd _ _ _ _
  XMM7_Da XMM7_Db XMM7_Dc XMM7_Dd _ _ _ _
  XMM8_Da XMM8_Db XMM8_Dc XMM8_Dd _ _ _ _
  XMM9_Da XMM9_Db XMM9_Dc XMM9_Dd _ _ _ _
  XMM10_Da XMM10_Db XMM10_Dc XMM10_Dd _ _ _ _
  XMM11_Da XMM11_Db XMM11_Dc XMM11_Dd _ _ _ _
  XMM12_Da XMM12_Db XMM12_Dc XMM12_Dd _ _ _ _
  XMM13_Da XMM13_Db XMM13_Dc XMM13_Dd _ _ _ _
  XMM14_Da XMM14_Db XMM14_Dc XMM14_Dd _ _ _ _
  XMM15_Da XMM15_Db XMM15_Dc XMM15_Dd _ _ _ _
];
define register offset=0x1200 size=2 [
  XMM0_Wa XMM0_Wb XMM0_Wc XMM0_Wd XMM0_We XMM0_Wf XMM0_Wg XMM0_Wh _ _ _ _ _ _ _ _
  XMM1_Wa XMM1_Wb XMM1_Wc XMM1_Wd XMM1_We XMM1_Wf XMM1_Wg XMM1_Wh _ _ _ _ _ _ _ _
  XMM2_Wa XMM2_Wb XMM2_Wc XMM2_Wd XMM2_We XMM2_Wf XMM2_Wg XMM2_Wh _ _ _ _ _ _ _ _
  XMM3_Wa XMM3_Wb XMM3_Wc XMM3_Wd XMM3_We XMM3_Wf XMM3_Wg XMM3_Wh _ _ _ _ _ _ _ _
  XMM4_Wa XMM4_Wb XMM4_Wc XMM4_Wd XMM4_We XMM4_Wf XMM4_Wg XMM4_Wh _ _ _ _ _ _ _ _
  XMM5_Wa XMM5_Wb XMM5_Wc XMM5_Wd XMM5_We XMM5_Wf XMM5_Wg XMM5_Wh _ _ _ _ _ _ _ _
  XMM6_Wa XMM6_Wb XMM6_Wc XMM6_Wd XMM6_We XMM6_Wf XMM6_Wg XMM6_Wh _ _ _ _ _ _ _ _
  XMM7_Wa XMM7_Wb XMM7_Wc XMM7_Wd XMM7_We XMM7_Wf XMM7_Wg XMM7_Wh _ _ _ _ _ _ _ _
  XMM8_Wa XMM8_Wb XMM8_Wc XMM8_Wd XMM8_We XMM8_Wf XMM8_Wg XMM8_Wh _ _ _ _ _ _ _ _
  XMM9_Wa XMM9_Wb XMM9_Wc XMM9_Wd XMM9_We XMM9_Wf XMM9_Wg XMM9_Wh _ _ _ _ _ _ _ _
  XMM10_Wa XMM10_Wb XMM10_Wc XMM10_Wd XMM10_We XMM10_Wf XMM10_Wg XMM10_Wh _ _ _ _ _ _ _ _
  XMM11_Wa XMM11_Wb XMM11_Wc XMM11_Wd XMM11_We XMM11_Wf XMM11_Wg XMM11_Wh _ _ _ _ _ _ _ _
  XMM12_Wa XMM12_Wb XMM12_Wc XMM12_Wd XMM12_We XMM12_Wf XMM12_Wg XMM12_Wh _ _ _ _ _ _ _ _
  XMM13_Wa XMM13_Wb XMM13_Wc XMM13_Wd XMM13_We XMM13_Wf XMM13_Wg XMM13_Wh _ _ _ _ _ _ _ _
  XMM14_Wa XMM14_Wb XMM14_Wc XMM14_Wd XMM14_We XMM14_Wf XMM14_Wg XMM14_Wh _ _ _ _ _ _ _ _
  XMM15_Wa XMM15_Wb XMM15_Wc XMM15_Wd XMM15_We XMM15_Wf XMM15_Wg XMM15_Wh _ _ _ _ _ _ _ _
];
define register offset=0x1200 size=1 [
  XMM0_Ba XMM0_Bb XMM0_Bc XMM0_Bd XMM0_Be XMM0_Bf XMM0_Bg XMM0_Bh XMM0_Bi XMM0_Bj XMM0_Bk XMM0_Bl XMM0_Bm XMM0_Bn XMM0_Bo XMM0_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM1_Ba XMM1_Bb XMM1_Bc XMM1_Bd XMM1_Be XMM1_Bf XMM1_Bg XMM1_Bh XMM1_Bi XMM1_Bj XMM1_Bk XMM1_Bl XMM1_Bm XMM1_Bn XMM1_Bo XMM1_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM2_Ba XMM2_Bb XMM2_Bc XMM2_Bd XMM2_Be XMM2_Bf XMM2_Bg XMM2_Bh XMM2_Bi XMM2_Bj XMM2_Bk XMM2_Bl XMM2_Bm XMM2_Bn XMM2_Bo XMM2_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM3_Ba XMM3_Bb XMM3_Bc XMM3_Bd XMM3_Be XMM3_Bf XMM3_Bg XMM3_Bh XMM3_Bi XMM3_Bj XMM3_Bk XMM3_Bl XMM3_Bm XMM3_Bn XMM3_Bo XMM3_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM4_Ba XMM4_Bb XMM4_Bc XMM4_Bd XMM4_Be XMM4_Bf XMM4_Bg XMM4_Bh XMM4_Bi XMM4_Bj XMM4_Bk XMM4_Bl XMM4_Bm XMM4_Bn XMM4_Bo XMM4_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM5_Ba XMM5_Bb XMM5_Bc XMM5_Bd XMM5_Be XMM5_Bf XMM5_Bg XMM5_Bh XMM5_Bi XMM5_Bj XMM5_Bk XMM5_Bl XMM5_Bm XMM5_Bn XMM5_Bo XMM5_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM6_Ba XMM6_Bb XMM6_Bc XMM6_Bd XMM6_Be XMM6_Bf XMM6_Bg XMM6_Bh XMM6_Bi XMM6_Bj XMM6_Bk XMM6_Bl XMM6_Bm XMM6_Bn XMM6_Bo XMM6_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM7_Ba XMM7_Bb XMM7_Bc XMM7_Bd XMM7_Be XMM7_Bf XMM7_Bg XMM7_Bh XMM7_Bi XMM7_Bj XMM7_Bk XMM7_Bl XMM7_Bm XMM7_Bn XMM7_Bo XMM7_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM8_Ba XMM8_Bb XMM8_Bc XMM8_Bd XMM8_Be XMM8_Bf XMM8_Bg XMM8_Bh XMM8_Bi XMM8_Bj XMM8_Bk XMM8_Bl XMM8_Bm XMM8_Bn XMM8_Bo XMM8_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM9_Ba XMM9_Bb XMM9_Bc XMM9_Bd XMM9_Be XMM9_Bf XMM9_Bg XMM9_Bh XMM9_Bi XMM9_Bj XMM9_Bk XMM9_Bl XMM9_Bm XMM9_Bn XMM9_Bo XMM9_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM10_Ba XMM10_Bb XMM10_Bc XMM10_Bd XMM10_Be XMM10_Bf XMM10_Bg XMM10_Bh XMM10_Bi XMM10_Bj XMM10_Bk XMM10_Bl XMM10_Bm XMM10_Bn XMM10_Bo XMM10_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM11_Ba XMM11_Bb XMM11_Bc XMM11_Bd XMM11_Be XMM11_Bf XMM11_Bg XMM11_Bh XMM11_Bi XMM11_Bj XMM11_Bk XMM11_Bl XMM11_Bm XMM11_Bn XMM11_Bo XMM11_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM12_Ba XMM12_Bb XMM12_Bc XMM12_Bd XMM12_Be XMM12_Bf XMM12_Bg XMM12_Bh XMM12_Bi XMM12_Bj XMM12_Bk XMM12_Bl XMM12_Bm XMM12_Bn XMM12_Bo XMM12_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM13_Ba XMM13_Bb XMM13_Bc XMM13_Bd XMM13_Be XMM13_Bf XMM13_Bg XMM13_Bh XMM13_Bi XMM13_Bj XMM13_Bk XMM13_Bl XMM13_Bm XMM13_Bn XMM13_Bo XMM13_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM14_Ba XMM14_Bb XMM14_Bc XMM14_Bd XMM14_Be XMM14_Bf XMM14_Bg XMM14_Bh XMM14_Bi XMM14_Bj XMM14_Bk XMM14_Bl XMM14_Bm XMM14_Bn XMM14_Bo XMM14_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
  XMM15_Ba XMM15_Bb XMM15_Bc XMM15_Bd XMM15_Be XMM15_Bf XMM15_Bg XMM15_Bh XMM15_Bi XMM15_Bj XMM15_Bk XMM15_Bl XMM15_Bm XMM15_Bn XMM15_Bo XMM15_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
];
define register offset=0x1200 size=32 [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];

# 0x1400 is next spot
define register offset=0x1400 size=16 [ xmmTmp1 xmmTmp2 ];
define register offset=0x1400 size=8 [
  xmmTmp1_Qa xmmTmp1_Qb
  xmmTmp2_Qa xmmTmp2_Qb
];
define register offset=0x1400 size=4 [
  xmmTmp1_Da xmmTmp1_Db xmmTmp1_Dc xmmTmp1_Dd
  xmmTmp2_Da xmmTmp2_Db xmmTmp2_Dc xmmTmp2_Dd
];

# Define context bits
define register offset=0x2000 size=4 contextreg;

#
# This context layout is important: the 32-bit version sees addrsize as just the
# low-order bit, whereas the 64-bit version sees both bits. This keeps the 32-bit
# and 64-bit languages technically binary compatible; but since the 32-bit
# language can't see that addrsize is 2 bits, addrsize won't be pulled up into
# constructors where bit 0 is always 0 (which it is), so you don't get the
# decision conflicts that choose context over table order.
#

define context contextreg
@ifdef IA64
  addrsize=(0,1)         # =0 16-bit addressing, =1 32-bit addressing, =2 64-bit addressing
@else
  addrsize=(1,1)         # =0 16-bit addressing, =1 32-bit addressing
@endif
  bit64=(0,0)            # =0 16/32-bit, =1 64-bit
  opsize=(2,3)           # =0 16-bit operands, =1 32-bit operands, =2 64-bit operands
  segover=(4,6)          # 0=default 1=cs 2=ss 3=ds 4=es 5=fs 6=gs
  highseg=(4,4)          # high bit of segover will be set for ES, FS, GS
  protectedMode=(7,7)    # 0 for real mode, 1 for protected mode

  repneprefx=(8,8)       # 0xf2 REPNE prefix
  repprefx=(9,9)         # 0xf3 REP prefix
  opprefx=(10,10)        # 0x66
  prefix_66=(10,10)      # Not really an OPSIZE override; it means there is a real (read) or implied (VEX) 0x66 byte
  prefix_f3=(9,9)        # Not really a REP override; it means there is a real (read) or implied (VEX) 0xf3 byte
  prefix_f2=(8,8)        # Not really a REPNE override; it means there is a real (read) or implied (VEX) 0xf2 byte
  mandover=(8,10)        # 0x66, 0xf2 or 0xf3 overrides (for mandatory prefixes)

  rexWprefix=(11,11)     # REX.W bit prefix (opsize=2 when REX.W is set)
  rexRprefix=(12,12)     # REX.R bit prefix; extends the ModR/M reg field
  rexXprefix=(13,13)     # REX.X bit prefix; extends the SIB index field to 4 bits
  rexBprefix=(14,14)     # REX.B bit prefix; extends r/m, SIB base, or Reg operand
  rexprefix=(15,15)      # True if the REX prefix is present - note: if present, vex mode is not supported
  # rexWRXB bits can be re-used since they are incompatible.
  vexMode=(16,16)        # 1 for a VEX instruction, 0 for normal
  vexL=(17,17)           # 0 for 128, 1 for 256
  vexVVVV=(18,21)        # value of the VEX byte for matching
  vexVVVV_r32=(18,21)    # value of the VEX byte for matching a normal 32-bit register
  vexVVVV_r64=(18,21)    # value of the VEX byte for matching a normal 64-bit register
  vexVVVV_XmmReg=(18,21) # value of the VEX byte for matching XmmReg
  vexVVVV_YmmReg=(18,21) # value of the VEX byte for matching YmmReg
  vexMMMMM=(22,26)       # need to match for preceding bytes: 1=0x0F, 2=0x0F 0x38, 3=0x0F 0x3A

  suffix3D=(17,24)       # 3DNow suffix byte (overlaps un-modified vex context region)

  instrPhase=(31,31)     # 0: initial/prefix phase, 1: primary instruction phase
;
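
# Reading aid (an illustrative note, not additional specification): in 64-bit
# mode the byte sequence 48 89 c8 (MOV RAX,RCX) carries a REX.W prefix, so the
# prefix phase leaves rexprefix=1 and rexWprefix=1, which selects opsize=2
# (64-bit operands) for the primary instruction phase.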

# These are only to be used with pre-REX (original 8086, 80386) and REX encodings. Do not use with VEX encoding.
# These designate that the opcode sequence begins with one of these "mandatory" prefix values.
# This allows the other prefixes to come before the mandatory value.
# For example: CRC32 r32, r16 -- 66 F2 0F 38 F1 C8

@define PRE_NO "mandover=0"
@define PRE_66 "prefix_66=1"
@define PRE_F3 "prefix_f3=1"
@define PRE_F2 "prefix_f2=1"

# Define special registers for debugger
@ifdef IA64
define register offset=0x2200 size=4 [ IDTR_Limit ];
define register offset=0x2200 size=12 [ IDTR ];
define register offset=0x2204 size=8 [ IDTR_Address ];

define register offset=0x2220 size=4 [ GDTR_Limit ];
define register offset=0x2220 size=12 [ GDTR ];
define register offset=0x2224 size=8 [ GDTR_Address ];

define register offset=0x2240 size=4 [ LDTR_Limit ];
define register offset=0x2240 size=14 [ LDTR ];
define register offset=0x2244 size=8 [ LDTR_Address ];
define register offset=0x2248 size=2 [ LDTR_Attributes ];

define register offset=0x2260 size=4 [ TR_Limit ];
define register offset=0x2260 size=14 [ TR ];
define register offset=0x2264 size=8 [ TR_Address ];
define register offset=0x2268 size=2 [ TR_Attributes ];
@else
define register offset=0x2200 size=6 [ IDTR ];
define register offset=0x2200 size=2 [ IDTR_Limit ];
define register offset=0x2202 size=4 [ IDTR_Address ];

define register offset=0x2210 size=6 [ GDTR ];
define register offset=0x2210 size=2 [ GDTR_Limit ];
define register offset=0x2212 size=4 [ GDTR_Address ];

define register offset=0x2220 size=6 [ LDTR ];
define register offset=0x2220 size=2 [ LDTR_Limit ];
define register offset=0x2222 size=4 [ LDTR_Address ];

define register offset=0x2230 size=6 [ TR ];
define register offset=0x2230 size=2 [ TR_Limit ];
define register offset=0x2232 size=4 [ TR_Address ];
@endif

define token opbyte (8)
  byte=(0,7)
  high4=(4,7)
  high5=(3,7)
  low5=(0,4)
  byte_4=(4,4)
  byte_0=(0,0)
;

define token modrm (8)
  mod = (6,7)
  reg_opcode = (3,5)
  reg_opcode_hb = (5,5)
  r_m = (0,2)
  row = (4,7)
  col = (0,2)
  page = (3,3)
  cond = (0,3)
  reg8 = (3,5)
  reg16 = (3,5)
  reg32 = (3,5)
  reg64 = (3,5)
  reg8_x0 = (3,5)
  reg8_x1 = (3,5)
  reg16_x = (3,5)
  reg32_x = (3,5)
  reg64_x = (3,5)
  Sreg = (3,5)
  creg = (3,5)
  creg_x = (3,5)
  debugreg = (3,5)
  debugreg_x = (3,5)
  testreg = (3,5)
  r8 = (0,2)
  r16 = (0,2)
  r32 = (0,2)
  r64 = (0,2)
  r8_x0 = (0,2)
  r8_x1 = (0,2)
  r16_x = (0,2)
  r32_x = (0,2)
  r64_x = (0,2)
  frow = (4,7)
  fpage = (3,3)
  freg = (0,2)
  rexw = (3,3)
  rexr = (2,2)
  rexx = (1,1)
  rexb = (0,0)
  mmxmod = (6,7)
  mmxreg = (3,5)
  mmxreg1 = (3,5)
  mmxreg2 = (0,2)
  xmmmod = (6,7)
  xmmreg = (3,5)
  ymmreg = (3,5)

  xmmreg1 = (3,5)
  ymmreg1 = (3,5)
  xmmreg2 = (0,2)
  ymmreg2 = (0,2)

  xmmreg_x = (3,5)
  ymmreg_x = (3,5)
  xmmreg1_x = (3,5)
  ymmreg1_x = (3,5)
  xmmreg2_x = (0,2)
  ymmreg2_x = (0,2)
  vex_pp = (0,1)
  vex_l = (2,2)
  vex_vvvv = (3,6)
  vex_r = (7,7)
  vex_x = (6,6)
  vex_b = (5,5)
  vex_w = (7,7)
  vex_mmmmm = (0,4)
  bnd1 = (3,5)
  bnd1_lb = (3,5)
  bnd1_ub = (3,5)
  bnd2 = (0,2)
  bnd2_lb = (0,2)
  bnd2_ub = (0,2)
;

define token sib (8)
  ss = (6,7)
  index = (3,5)
  index_x = (3,5)
  index64 = (3,5)
  index64_x = (3,5)
  xmm_vsib = (3,5)
  xmm_vsib_x = (3,5)
  ymm_vsib = (3,5)
  ymm_vsib_x = (3,5)
  base = (0,2)
  base_x = (0,2)
  base64 = (0,2)
  base64_x = (0,2)
;

define token I8 (8)
  imm8_7=(7,7)
  Xmm_imm8_7_4=(4,7)
  Ymm_imm8_7_4=(4,7)
  imm8_4=(4,4)
  imm8_0=(0,0)
  imm8_3_0=(0,3)
  imm8=(0,7)
  simm8=(0,7) signed
;

define token I16 (16) imm16_15=(15,15) imm16=(0,15) simm16=(0,15) signed j16=(0,15);
define token I32 (32) imm32=(0,31) simm32=(0,31) signed;
define token I64 (64) imm64=(0,63) simm64=(0,63) signed;
define token override (8) over=(0,7);

attach variables [ r32 reg32 base index ] [ EAX ECX EDX EBX ESP EBP ESI EDI ];
attach variables [ r16 reg16 ] [ AX CX DX BX SP BP SI DI ];
attach variables [ r8 reg8 ] [ AL CL DL BL AH CH DH BH ];
attach variables Sreg [ ES CS SS DS FS GS _ _ ];
attach variables freg [ ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7 ];
attach variables [ debugreg ] [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 ];
@ifdef IA64
attach variables [ r64 reg64 base64 index64 ] [ RAX RCX RDX RBX RSP RBP RSI RDI ];
attach variables [ r64_x reg64_x base64_x index64_x ] [ R8 R9 R10 R11 R12 R13 R14 R15 ];
attach variables [ r32_x reg32_x base_x index_x ] [ R8D R9D R10D R11D R12D R13D R14D R15D ];
attach variables [ r16_x reg16_x ] [ R8W R9W R10W R11W R12W R13W R14W R15W ];
attach variables [ r8_x0 reg8_x0 ] [ AL CL DL BL SPL BPL SIL DIL ];
attach variables [ r8_x1 reg8_x1 ] [ R8B R9B R10B R11B R12B R13B R14B R15B ];
attach variables [ debugreg_x ] [ DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15 ];
attach variables creg [ CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7 ];
attach variables creg_x [ CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 ];
@else
attach variables [ testreg ] [ TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 ];
attach variables creg [ CR0 _ CR2 CR3 CR4 _ _ _ ];
@endif

attach values ss [ 1 2 4 8 ];

attach variables [ mmxreg mmxreg1 mmxreg2 ] [ MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 ];

attach variables [ xmmreg xmmreg1 xmmreg2 xmm_vsib ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 ];
attach variables [ xmmreg_x xmmreg1_x xmmreg2_x xmm_vsib_x ] [ XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];

attach variables [ vexVVVV_XmmReg Xmm_imm8_7_4 ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];
attach variables [ vexVVVV_YmmReg Ymm_imm8_7_4 ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];

@ifdef IA64
attach variables [ vexVVVV_r32 ] [ EAX ECX EDX EBX ESP EBP ESI EDI R8D R9D R10D R11D R12D R13D R14D R15D ];
attach variables [ vexVVVV_r64 ] [ RAX RCX RDX RBX RSP RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15 ];
@else
attach variables [ vexVVVV_r32 ] [ EAX ECX EDX EBX ESP EBP ESI EDI _ _ _ _ _ _ _ _ ];
@endif

attach variables [ ymmreg ymmreg1 ymmreg2 ymm_vsib ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 ];
attach variables [ ymmreg_x ymmreg1_x ymmreg2_x ymm_vsib_x ] [ YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];

attach variables [ bnd1 bnd2 ] [ BND0 BND1 BND2 BND3 _ _ _ _ ];
attach variables [ bnd1_lb bnd2_lb ] [ BND0_LB BND1_LB BND2_LB BND3_LB _ _ _ _ ];
attach variables [ bnd1_ub bnd2_ub ] [ BND0_UB BND1_UB BND2_UB BND3_UB _ _ _ _ ];

define pcodeop segment;  # Define special pcodeop that calculates the RAM address
                         # given the segment selector and offset as input
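
# Reading aid (illustrative, not additional specification): in real mode this
# models the classic effective-address computation selector*0x10 + offset, so
# segment(0x1234,0x5678) denotes linear address 0x179b8; in protected mode the
# selector is resolved through a descriptor table instead.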

define pcodeop in;  # force in/out to show up in decompiler
define pcodeop out;
define pcodeop sysenter;
define pcodeop sysexit;
define pcodeop syscall;
define pcodeop sysret;
define pcodeop swapgs;
define pcodeop invlpg;
define pcodeop invlpga;
define pcodeop invpcid;
define pcodeop rdtscp;
define pcodeop mwait;
define pcodeop mwaitx;
define pcodeop monitor;
define pcodeop monitorx;
define pcodeop swi;  # for INT instruction

define pcodeop LOCK;  # for LOCK instruction

# MFL: definitions for AMD hardware assisted virtualization instructions
define pcodeop clgi;     # clear global interrupt flag (GIF)
define pcodeop stgi;     # set global interrupt flag (GIF)
define pcodeop vmload;   # Load state from VMCB; opcode 0f 01 da
define pcodeop vmmcall;  # Call VMM; opcode 0f 01 d9
define pcodeop vmrun;    # Run virtual machine; opcode 0f 01 d8
define pcodeop vmsave;   # Save state to VMCB; opcode 0f 01 db

# MFL: definitions for Intel IA hardware assisted virtualization instructions
define pcodeop invept;    # Invalidate Translations Derived from extended page tables (EPT); opcode 66 0f 38 80
define pcodeop invvpid;   # Invalidate Translations Based on virtual-processor identifier (VPID); opcode 66 0f 38 81
define pcodeop vmcall;    # Call to VM monitor by causing VM exit; opcode 0f 01 c1
define pcodeop vmclear;   # Clear virtual-machine control structure; opcode 66 0f c7 /6
define pcodeop vmfunc;    # Call virtual-machine function referenced by EAX
define pcodeop vmlaunch;  # Launch virtual machine managed by current VMCS; opcode 0f 01 c2
define pcodeop vmresume;  # Resume virtual machine managed by current VMCS; opcode 0f 01 c3
define pcodeop vmptrld;   # Load pointer to virtual-machine control structure; opcode 0f c7 /6
define pcodeop vmptrst;   # Store pointer to virtual-machine control structure; opcode 0f c7 /7
define pcodeop vmread;    # Read field from virtual-machine control structure; opcode 0f 78
define pcodeop vmwrite;   # Write field to virtual-machine control structure; opcode 0f 79
define pcodeop vmxoff;    # Leave VMX operation; opcode 0f 01 c4
define pcodeop vmxon;     # Enter VMX operation; opcode f3 0f c7 /6

@ifdef IA64
Reg8: reg8 is rexprefix=0 & reg8 { export reg8; }
Reg8: reg8_x0 is rexprefix=1 & rexRprefix=0 & reg8_x0 { export reg8_x0; }
Reg8: reg8_x1 is rexprefix=1 & rexRprefix=1 & reg8_x1 { export reg8_x1; }
Reg16: reg16 is rexRprefix=0 & reg16 { export reg16; }
Reg16: reg16_x is rexRprefix=1 & reg16_x { export reg16_x; }
Reg32: reg32 is rexRprefix=0 & reg32 { export reg32; }
Reg32: reg32_x is rexRprefix=1 & reg32_x { export reg32_x; }
Reg64: reg64 is rexRprefix=0 & reg64 { export reg64; }
Reg64: reg64_x is rexRprefix=1 & reg64_x { export reg64_x; }
Rmr8: r8 is rexprefix=0 & r8 { export r8; }
Rmr8: r8_x0 is rexprefix=1 & rexBprefix=0 & r8_x0 { export r8_x0; }
Rmr8: r8_x1 is rexprefix=1 & rexBprefix=1 & r8_x1 { export r8_x1; }
CRmr8: r8 is rexBprefix=0 & r8 { export r8; }
CRmr8: r8 is addrsize=2 & rexBprefix=0 & r8 { export r8; }
CRmr8: r8_x0 is addrsize=2 & rexprefix=1 & rexBprefix=0 & r8_x0 { export r8_x0; }
CRmr8: r8_x1 is addrsize=2 & rexprefix=1 & rexBprefix=1 & r8_x1 { export r8_x1; }
Rmr16: r16 is rexBprefix=0 & r16 { export r16; }
Rmr16: r16_x is rexBprefix=1 & r16_x { export r16_x; }
CRmr16: r16 is rexBprefix=0 & r16 { export r16; }
CRmr16: r16_x is rexBprefix=1 & r16_x { export r16_x; }
Rmr32: r32 is rexBprefix=0 & r32 { export r32; }
Rmr32: r32_x is rexBprefix=1 & r32_x { export r32_x; }
CRmr32: r32 is rexBprefix=0 & r32 & r64 { export r64; }
CRmr32: r32_x is rexBprefix=1 & r32_x & r64_x { export r64_x; }
Rmr64: r64 is rexBprefix=0 & r64 { export r64; }
Rmr64: r64_x is rexBprefix=1 & r64_x { export r64_x; }
Base: base is rexBprefix=0 & base { export base; }
Base: base_x is rexBprefix=1 & base_x { export base_x; }
Index: index is rexXprefix=0 & index { export index; }
Index: index_x is rexXprefix=1 & index_x { export index_x; }
Base64: base64 is rexBprefix=0 & base64 { export base64; }
Base64: base64_x is rexBprefix=1 & base64_x { export base64_x; }
Index64: index64 is rexXprefix=0 & index64 { export index64; }
Index64: index64_x is rexXprefix=1 & index64_x { export index64_x; }
XmmReg: xmmreg is rexRprefix=0 & xmmreg { export xmmreg; }
XmmReg: xmmreg_x is rexRprefix=1 & xmmreg_x { export xmmreg_x; }
XmmReg1: xmmreg1 is rexRprefix=0 & xmmreg1 { export xmmreg1; }
XmmReg1: xmmreg1_x is rexRprefix=1 & xmmreg1_x { export xmmreg1_x; }
XmmReg2: xmmreg2 is rexBprefix=0 & xmmreg2 { export xmmreg2; }
XmmReg2: xmmreg2_x is rexBprefix=1 & xmmreg2_x { export xmmreg2_x; }
YmmReg1: ymmreg1 is rexRprefix=0 & ymmreg1 { export ymmreg1; }
YmmReg1: ymmreg1_x is rexRprefix=1 & ymmreg1_x { export ymmreg1_x; }
YmmReg2: ymmreg2 is rexBprefix=0 & ymmreg2 { export ymmreg2; }
YmmReg2: ymmreg2_x is rexBprefix=1 & ymmreg2_x { export ymmreg2_x; }
Xmm_vsib: xmm_vsib is rexXprefix=0 & xmm_vsib { export xmm_vsib; }
Xmm_vsib: xmm_vsib_x is rexXprefix=1 & xmm_vsib_x { export xmm_vsib_x; }
Ymm_vsib: ymm_vsib is rexXprefix=0 & ymm_vsib { export ymm_vsib; }
Ymm_vsib: ymm_vsib_x is rexXprefix=1 & ymm_vsib_x { export ymm_vsib_x; }
@else
Reg8: reg8 is reg8 { export reg8; }
Reg16: reg16 is reg16 { export reg16; }
Reg32: reg32 is reg32 { export reg32; }
Rmr8: r8 is r8 { export r8; }
CRmr8: r8 is r8 { export r8; }
Rmr16: r16 is r16 { export r16; }
CRmr16: r16 is r16 { export r16; }
Rmr32: r32 is r32 { export r32; }
CRmr32: r32 is r32 { export r32; }
Base: base is base { export base; }
Index: index is index { export index; }
XmmReg: xmmreg is xmmreg { export xmmreg; }
XmmReg1: xmmreg1 is xmmreg1 { export xmmreg1; }
XmmReg2: xmmreg2 is xmmreg2 { export xmmreg2; }
YmmReg1: ymmreg1 is ymmreg1 { export ymmreg1; }
YmmReg2: ymmreg2 is ymmreg2 { export ymmreg2; }
Xmm_vsib: xmm_vsib is xmm_vsib { export xmm_vsib; }
Ymm_vsib: ymm_vsib is ymm_vsib { export ymm_vsib; }
@endif

# signed immediate value subconstructors

simm8_16: simm8 is simm8 { export *[const]:2 simm8; }
simm8_32: simm8 is simm8 { export *[const]:4 simm8; }
@ifdef IA64
simm8_64: simm8 is simm8 { export *[const]:8 simm8; }
@endif
simm16_16: simm16 is simm16 { export *[const]:2 simm16; }
simm32_32: simm32 is simm32 { export *[const]:4 simm32; }
@ifdef IA64
simm32_64: simm32 is simm32 { export *[const]:8 simm32; }
imm32_64: imm32 is imm32 { export *[const]:8 imm32; }
@endif

usimm8_16: imm8 is imm8 & imm8_7=0 { export *[const]:2 imm8; }
usimm8_16: val is imm8 & imm8_7=1 [ val = 0xff00 | imm8; ] { export *[const]:2 val; }
usimm8_32: imm8 is imm8 & imm8_7=0 { export *[const]:4 imm8; }
usimm8_32: val is imm8 & imm8_7=1 [ val = 0xffffff00 | imm8; ] { export *[const]:4 val; }
@ifdef IA64
usimm8_64: imm8 is imm8 & imm8_7=0 { export *[const]:8 imm8; }
usimm8_64: val is imm8 & imm8_7=1 [ val = 0xffffffffffffff00 | imm8; ] { export *[const]:8 val; }
@endif
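
# For example: imm8=0x90 has imm8_7=1, so usimm8_16 selects its second
# constructor and exports 0xff00 | 0x90 = 0xff90; the sign extension is done by
# table selection rather than by a sext() in the semantics.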

# unused
#usimm16_32: imm16 is imm16 & imm16_15=0 { export *[const]:4 imm16; }
#usimm16_32: val is imm16 & imm16_15=1 [ val = 0xffff0000 | imm16; ] { export *[const]:4 val; }

# 16-bit addressing modes (the offset portion)
addr16: [BX + SI] is mod=0 & r_m=0 & BX & SI { local tmp=BX+SI; export tmp; }
addr16: [BX + DI] is mod=0 & r_m=1 & BX & DI { local tmp=BX+DI; export tmp; }
addr16: [BP + SI] is mod=0 & r_m=2 & BP & SI { local tmp=BP+SI; export tmp; }
addr16: [BP + DI] is mod=0 & r_m=3 & BP & DI { local tmp=BP+DI; export tmp; }
addr16: [SI] is mod=0 & r_m=4 & SI { export SI; }
addr16: [DI] is mod=0 & r_m=5 & DI { export DI; }
addr16: [imm16] is mod=0 & r_m=6; imm16 { export *[const]:2 imm16; }
addr16: [BX] is mod=0 & r_m=7 & BX { export BX; }
addr16: [BX + SI + simm8_16] is mod=1 & r_m=0 & BX & SI; simm8_16 { local tmp=BX+SI+simm8_16; export tmp; }
addr16: [BX + DI + simm8_16] is mod=1 & r_m=1 & BX & DI; simm8_16 { local tmp=BX+DI+simm8_16; export tmp; }
addr16: [BP + SI + simm8_16] is mod=1 & r_m=2 & BP & SI; simm8_16 { local tmp=BP+SI+simm8_16; export tmp; }
addr16: [BP + DI + simm8_16] is mod=1 & r_m=3 & BP & DI; simm8_16 { local tmp=BP+DI+simm8_16; export tmp; }
addr16: [SI + simm8_16] is mod=1 & r_m=4 & SI; simm8_16 { local tmp=SI+simm8_16; export tmp; }
addr16: [DI + simm8_16] is mod=1 & r_m=5 & DI; simm8_16 { local tmp=DI+simm8_16; export tmp; }
addr16: [BP + simm8_16] is mod=1 & r_m=6 & BP; simm8_16 { local tmp=BP+simm8_16; export tmp; }
addr16: [BX + simm8_16] is mod=1 & r_m=7 & BX; simm8_16 { local tmp=BX+simm8_16; export tmp; }
addr16: [BX + SI + imm16] is mod=2 & r_m=0 & BX & SI; imm16 { local tmp=BX+SI+imm16; export tmp; }
addr16: [BX + DI + imm16] is mod=2 & r_m=1 & BX & DI; imm16 { local tmp=BX+DI+imm16; export tmp; }
addr16: [BP + SI + imm16] is mod=2 & r_m=2 & BP & SI; imm16 { local tmp=BP+SI+imm16; export tmp; }
addr16: [BP + DI + imm16] is mod=2 & r_m=3 & BP & DI; imm16 { local tmp=BP+DI+imm16; export tmp; }
addr16: [SI + imm16] is mod=2 & r_m=4 & SI; imm16 { local tmp=SI+imm16; export tmp; }
addr16: [DI + imm16] is mod=2 & r_m=5 & DI; imm16 { local tmp=DI+imm16; export tmp; }
addr16: [BP + imm16] is mod=2 & r_m=6 & BP; imm16 { local tmp=BP+imm16; export tmp; }
addr16: [BX + imm16] is mod=2 & r_m=7 & BX; imm16 { local tmp=BX+imm16; export tmp; }
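
# Worked example: ModR/M byte 0x46 is mod=1, reg_opcode=0, r_m=6, so with a
# following displacement byte 0x10 it matches the [BP + simm8_16] constructor
# above and renders as [BP + 0x10].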

# 32-bit addressing modes (the offset portion)
addr32: [Rmr32] is mod=0 & Rmr32 { export Rmr32; }
addr32: [Rmr32 + simm8_32] is mod=1 & Rmr32; simm8_32 { local tmp=Rmr32+simm8_32; export tmp; }
addr32: [Rmr32] is mod=1 & r_m!=4 & Rmr32; simm8=0 { export Rmr32; }
addr32: [Rmr32 + imm32] is mod=2 & Rmr32; imm32 { local tmp=Rmr32+imm32; export tmp; }
addr32: [Rmr32] is mod=2 & r_m!=4 & Rmr32; imm32=0 { export Rmr32; }
addr32: [imm32] is mod=0 & r_m=5; imm32 { export *[const]:4 imm32; }
addr32: [Base + Index*ss] is mod=0 & r_m=4; Index & Base & ss { local tmp=Base+Index*ss; export tmp; }
addr32: [Base] is mod=0 & r_m=4; index=4 & Base { export Base; }
addr32: [Index*ss + imm32] is mod=0 & r_m=4; Index & base=5 & ss; imm32 { local tmp=imm32+Index*ss; export tmp; }
addr32: [imm32] is mod=0 & r_m=4; index=4 & base=5; imm32 { export *[const]:4 imm32; }
addr32: [Base + Index*ss + simm8_32] is mod=1 & r_m=4; Index & Base & ss; simm8_32 { local tmp=simm8_32+Base+Index*ss; export tmp; }
addr32: [Base + simm8_32] is mod=1 & r_m=4; index=4 & Base; simm8_32 { local tmp=simm8_32+Base; export tmp; }
addr32: [Base + Index*ss] is mod=1 & r_m=4; Index & Base & ss; simm8=0 { local tmp=Base+Index*ss; export tmp; }
addr32: [Base] is mod=1 & r_m=4; index=4 & Base; simm8=0 { export Base; }
addr32: [Base + Index*ss + imm32] is mod=2 & r_m=4; Index & Base & ss; imm32 { local tmp=imm32+Base+Index*ss; export tmp; }
addr32: [Base + imm32] is mod=2 & r_m=4; index=4 & Base; imm32 { local tmp=imm32+Base; export tmp; }
addr32: [Base + Index*ss] is mod=2 & r_m=4; Index & Base & ss; imm32=0 { local tmp=Base+Index*ss; export tmp; }
addr32: [Base] is mod=2 & r_m=4; index=4 & Base; imm32=0 { export Base; }
@ifdef IA64
addr32: [riprel] is bit64=1 & mod=0 & r_m=4; index=4 & base=5; simm32 [ riprel=inst_next+simm32; ] { export *[const]:4 riprel; }

Addr32_64: [eiprel] is mod=0 & r_m=5; simm32 [ eiprel=inst_next+simm32; ] { export *[const]:8 eiprel; }
Addr32_64: [imm32] is mod=0 & r_m=4; index=4 & base=5; imm32 { export *[const]:8 imm32; }
Addr32_64: addr32 is addr32 { tmp:8 = sext(addr32); export tmp; }
@endif

# 64-bit addressing modes (the offset portion)

@ifdef IA64
addr64: [Rmr64] is mod=0 & Rmr64 { export Rmr64; }
addr64: [Rmr64 + simm8_64] is mod=1 & Rmr64; simm8_64 { local tmp=Rmr64+simm8_64; export tmp; }
addr64: [Rmr64 + simm32_64] is mod=2 & Rmr64; simm32_64 { local tmp=Rmr64+simm32_64; export tmp; }
addr64: [Rmr64] is mod=1 & r_m!=4 & Rmr64; simm8=0 { export Rmr64; }
addr64: [Rmr64] is mod=2 & r_m!=4 & Rmr64; simm32=0 { export Rmr64; }
addr64: [riprel] is mod=0 & r_m=5; simm32 [ riprel=inst_next+simm32; ] { export *[const]:8 riprel; }
addr64: [Base64 + Index64*ss] is mod=0 & r_m=4; Index64 & Base64 & ss { local tmp=Base64+Index64*ss; export tmp; }
addr64: [Base64] is mod=0 & r_m=4; rexXprefix=0 & index64=4 & Base64 { export Base64; }
addr64: [simm32_64 + Index64*ss] is mod=0 & r_m=4; Index64 & base64=5 & ss; simm32_64 { local tmp=simm32_64+Index64*ss; export tmp; }
addr64: [Index64*ss] is mod=0 & r_m=4; Index64 & base64=5 & ss; imm32=0 { local tmp=Index64*ss; export tmp; }
addr64: [imm32_64] is mod=0 & r_m=4; rexXprefix=0 & index64=4 & base64=5; imm32_64 { export *[const]:8 imm32_64; }
addr64: [Base64 + Index64*ss + simm8_64] is mod=1 & r_m=4; Index64 & Base64 & ss; simm8_64 { local tmp=simm8_64+Base64+Index64*ss; export tmp; }
addr64: [Base64 + Index64*ss] is mod=1 & r_m=4; Index64 & Base64 & ss; simm8=0 { local tmp=Base64+Index64*ss; export tmp; }
addr64: [Base64 + simm8_64] is mod=1 & r_m=4; rexXprefix=0 & index64=4 & Base64; simm8_64 { local tmp=simm8_64+Base64; export tmp; }
addr64: [Base64 + Index64*ss + simm32_64] is mod=2 & r_m=4; Index64 & Base64 & ss; simm32_64 { local tmp=simm32_64+Base64+Index64*ss; export tmp; }
addr64: [Base64 + simm32_64] is mod=2 & r_m=4; rexXprefix=0 & index64=4 & Base64; simm32_64 { local tmp=simm32_64+Base64; export tmp; }
addr64: [Base64 + Index64*ss] is mod=2 & r_m=4; Index64 & Base64 & ss; imm32=0 { local tmp=Base64+Index64*ss; export tmp; }
addr64: [Base64] is mod=2 & r_m=4; rexXprefix=0 & index64=4 & Base64; imm32=0 { export Base64; }
@endif

currentCS: CS is protectedMode=0 & CS { tmp:4 = (inst_next >> 4) & 0xf000; CS = tmp:2; export CS; }
currentCS: CS is protectedMode=1 & CS { tmp:4 = (inst_next >> 16) & 0xffff; CS = tmp:2; export CS; }

segWide: is segover=0 { export 0:$(SIZE); }
segWide: CS: is segover=1 & CS { export 0:$(SIZE); }
segWide: SS: is segover=2 & SS { export 0:$(SIZE); }
segWide: DS: is segover=3 & DS { export 0:$(SIZE); }
segWide: ES: is segover=4 & ES { export 0:$(SIZE); }
segWide: FS: is segover=5 & FS { export FS_OFFSET; }
segWide: GS: is segover=6 & GS { export GS_OFFSET; }

seg16: is segover=0 { export DS; }
seg16: currentCS: is segover=1 & currentCS { export currentCS; }
seg16: SS: is segover=2 & SS { export SS; }
seg16: DS: is segover=3 & DS { export DS; }
seg16: ES: is segover=4 & ES { export ES; }
seg16: FS: is segover=5 & FS { export FS; }
seg16: GS: is segover=6 & GS { export GS; }

Mem16: addr16 is (segover=0 & mod=0 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=0 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=1 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=1 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=1 & r_m=6) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=2 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=2 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=2 & r_m=6) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: seg16^addr16 is seg16; addr16 { tmp:$(SIZE) = segment(seg16,addr16); export tmp; }

Mem: Mem16 is addrsize=0 & Mem16 { export Mem16; }
@ifdef IA64
Mem: segWide^Addr32_64 is addrsize=1 & segWide; Addr32_64 { export Addr32_64; }
Mem: segWide^Addr32_64 is addrsize=1 & segWide & highseg=1; Addr32_64 { tmp:8 = segWide + Addr32_64; export tmp; }
Mem: segWide^addr64 is addrsize=2 & segWide; addr64 { export addr64; }
Mem: segWide^addr64 is addrsize=2 & segWide & highseg=1; addr64 { tmp:$(SIZE) = segWide + addr64; export tmp; }
@else
Mem: segWide^addr32 is addrsize=1 & segWide; addr32 { export addr32; }
Mem: segWide^addr32 is addrsize=1 & segWide & highseg=1; addr32 { tmp:$(SIZE) = segWide + addr32; export tmp; }
@endif

rel8: reloc is simm8 [ reloc=inst_next+simm8; ] { export *[ram]:$(SIZE) reloc; }
rel16: reloc is simm16 [ reloc=((inst_next >> 16) << 16) | ((inst_next + simm16) & 0xFFFF); ] { export *[ram]:$(SIZE) reloc; }
rel32: reloc is simm32 [ reloc=inst_next+simm32; ] { export *[ram]:$(SIZE) reloc; }
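
# Note that rel16 wraps the target within the current 64K block: with
# inst_next=0x2fffe and simm16=4, reloc = 0x20000 | ((0x2fffe+4) & 0xffff)
# = 0x20002, not 0x30002.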

m8: "byte ptr" Mem is Mem { export *:1 Mem; }
m16: "word ptr" Mem is Mem { export *:2 Mem; }
m32: "dword ptr" Mem is Mem { export *:4 Mem; }
m64: "qword ptr" Mem is Mem { export *:8 Mem; }
# m80: Mem is Mem { export *:10 Mem; }
m128: "xmmword ptr" Mem is Mem { export *:16 Mem; }
m256: "ymmword ptr" Mem is Mem { export *:32 Mem; }

# spec versions of the m8/m16/m32/... tables explicitly print the operand size
# spec_m8: "byte ptr "^Mem is Mem { export *:1 Mem; }
spec_m16: "word ptr "^Mem is Mem { export *:2 Mem; }
spec_m32: "dword ptr "^Mem is Mem { export *:4 Mem; }
spec_m64: "qword ptr "^Mem is Mem { export *:8 Mem; }
spec_m80: "tword ptr "^Mem is Mem { export *:10 Mem; }
# spec_m128: "16-byte ptr "^Mem is Mem { export *:16 Mem; }
# spec_m512: "64-byte ptr "^Mem is Mem { export *:64 Mem; }

##
## VSIB
##

vaddr32x: [Base + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base & ss { }
vaddr32x: [Xmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Xmm_vsib & base=5 & ss; simm32_32 { }
vaddr32x: [Base + Xmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Xmm_vsib & Base & ss; simm8_32 { }
vaddr32x: [Base + Xmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Xmm_vsib & Base & ss; simm32_32 { }

vaddr32y: [Base + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base & ss { }
vaddr32y: [Ymm_vsib*ss + simm32_32] is mod=0 & r_m=4; Ymm_vsib & base=5 & ss; simm32_32 { }
vaddr32y: [Base + Ymm_vsib*ss + simm8_32] is mod=1 & r_m=4; Ymm_vsib & Base & ss; simm8_32 { }
vaddr32y: [Base + Ymm_vsib*ss + simm32_32] is mod=2 & r_m=4; Ymm_vsib & Base & ss; simm32_32 { }

@ifdef IA64
vaddr64x: [Base64 + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base64 & ss { }
vaddr64x: [Xmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Xmm_vsib & base64=5 & ss; simm32_64 { }
vaddr64x: [Base64 + Xmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Xmm_vsib & Base64 & ss; simm8_64 { }
vaddr64x: [Base64 + Xmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Xmm_vsib & Base64 & ss; simm32_64 { }

vaddr64y: [Base64 + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base64 & ss { }
vaddr64y: [Ymm_vsib*ss + simm32_64] is mod=0 & r_m=4; Ymm_vsib & base64=5 & ss; simm32_64 { }
vaddr64y: [Base64 + Ymm_vsib*ss + simm8_64] is mod=1 & r_m=4; Ymm_vsib & Base64 & ss; simm8_64 { }
vaddr64y: [Base64 + Ymm_vsib*ss + simm32_64] is mod=2 & r_m=4; Ymm_vsib & Base64 & ss; simm32_64 { }
@endif

vMem32x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { }
vMem32x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { }

vMem32y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { }
vMem32y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { }

@ifdef IA64
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem32x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { }
vMem32x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { }

# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem32y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { }
vMem32y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { }

# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem64x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { }
vMem64x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { }

vMem64x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { }
vMem64x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { }

# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem64y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { }
vMem64y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { }

vMem64y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { }
vMem64y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { }
@endif

d_vm32x: "dword ptr "^vMem32x is vMem32x { }
d_vm32y: "dword ptr "^vMem32y is vMem32y { }

@ifdef IA64
d_vm64x: "dword ptr "^vMem64x is vMem64x { }
d_vm64y: "dword ptr "^vMem64y is vMem64y { }
@endif

q_vm32x: "qword ptr "^vMem32x is vMem32x { }
# not used: q_vm32y: "qword ptr "^vMem32y is vMem32y { }

@ifdef IA64
q_vm64x: "qword ptr "^vMem64x is vMem64x { }
q_vm64y: "qword ptr "^vMem64y is vMem64y { }
@endif

Reg32_m8: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
Reg32_m8: m8 is m8 { local tmp:4 = zext(m8); export tmp; }
Reg32_m16: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
Reg32_m16: m16 is m16 { local tmp:4 = zext(m16); export tmp; }

XmmReg2_m8: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m8: m8 is m8 { local tmp:16 = zext(m8); export tmp; }
XmmReg2_m16: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m16: m16 is m16 { local tmp:16 = zext(m16); export tmp; }
XmmReg2_m32: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m32: m32 is m32 { local tmp:16 = zext(m32); export tmp; }
XmmReg2_m64: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m64: m64 is m64 { local tmp:16 = zext(m64); export tmp; }
XmmReg2_m128: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m128: m128 is m128 { export m128; }

YmmReg2_m256: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
YmmReg2_m256: m256 is m256 { export m256; }

moffs8: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:1 tmp; }
moffs8: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:1 tmp; }
moffs8: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:1 imm32; }
@ifdef IA64
moffs8: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:1 tmp; }
moffs8: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:1 imm64; }
@endif
moffs16: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:2 tmp; }
moffs16: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:2 tmp; }
moffs16: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:2 imm32; }
@ifdef IA64
moffs16: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:2 tmp; }
moffs16: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:2 imm64; }
@endif

moffs32: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:4 tmp; }
moffs32: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:4 imm32; }
moffs32: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:4 tmp; }
@ifdef IA64
moffs32: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:4 imm64; }
moffs32: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:4 tmp; }
@endif

@ifdef IA64
moffs64: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:8 imm64; }
moffs64: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:8 tmp; }
@endif
# TODO: segment register offset in 64bit might not be right

# String memory access
dseSI1: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 1-2*zext(DF); export *:1 tmp; }
dseSI1: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 1-2*zext(DF); export *:1 tmp; }
dseSI2: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 2-4*zext(DF); export *:2 tmp; }
dseSI2: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 2-4*zext(DF); export *:2 tmp; }
dseSI4: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 4-8*zext(DF); export *:4 tmp; }
dseSI4: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 4-8*zext(DF); export *:4 tmp; }
eseDI1: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 1-2*zext(DF); export *:1 tmp; }
eseDI1: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI = EDI + 1-2*zext(DF); export *:1 tmp; }
eseDI2: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 2-4*zext(DF); export *:2 tmp; }
eseDI2: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI = EDI + 2-4*zext(DF); export *:2 tmp; }
eseDI4: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 4-8*zext(DF); export *:4 tmp; }
eseDI4: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI = EDI + 4-8*zext(DF); export *:4 tmp; }

@ifdef IA64
# quadword string functions
dseSI8: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 8-16*zext(DF); export *:8 tmp; }
dseSI8: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 8-16*zext(DF); export *:8 tmp; }
eseDI8: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 8-16*zext(DF); export *:8 tmp; }
eseDI8: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI = EDI + 8-16*zext(DF); export *:8 tmp; }

dseSI1: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 1-2*zext(DF); export *:1 tmp; }
dseSI2: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 2-4*zext(DF); export *:2 tmp; }
dseSI4: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 4-8*zext(DF); export *:4 tmp; }
dseSI8: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 8-16*zext(DF); export *:8 tmp; }
eseDI1: RDI is addrsize=2 & RDI { local tmp = RDI; RDI = RDI + 1-2*zext(DF); export *:1 tmp; }
eseDI2: RDI is addrsize=2 & RDI { local tmp = RDI; RDI = RDI + 2-4*zext(DF); export *:2 tmp; }
eseDI4: RDI is addrsize=2 & RDI { local tmp = RDI; RDI = RDI + 4-8*zext(DF); export *:4 tmp; }
eseDI8: RDI is addrsize=2 & RDI { local tmp = RDI; RDI = RDI + 8-16*zext(DF); export *:8 tmp; }
@endif

rm8: Rmr8 is mod=3 & Rmr8 { export Rmr8; }
rm8: "byte ptr" Mem is Mem { export *:1 Mem; }

spec_rm8: Rmr8 is mod=3 & Rmr8 { export Rmr8; }
spec_rm8: "byte ptr "^Mem is Mem { export *:1 Mem; }

rm16: Rmr16 is mod=3 & Rmr16 { export Rmr16; }
rm16: "word ptr" Mem is Mem { export *:2 Mem; }

spec_rm16: Rmr16 is mod=3 & Rmr16 { export Rmr16; }
spec_rm16: "word ptr "^Mem is Mem { export *:2 Mem; }

rm32: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
rm32: "dword ptr" Mem is Mem { export *:4 Mem; }

spec_rm32: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
spec_rm32: "dword ptr "^Mem is Mem { export *:4 Mem; }

@ifdef IA64
rm64: Rmr64 is mod=3 & Rmr64 { export Rmr64; }
rm64: "qword ptr" Mem is Mem { export *:8 Mem; }

spec_rm64: Rmr64 is mod=3 & Rmr64 { export Rmr64; }
spec_rm64: "qword ptr "^Mem is Mem { export *:8 Mem; }
@endif

n1: "1" is epsilon { tmp:1 = 1; export tmp; }

@ifdef IA64
# Handle zero extension in 64-bit mode for 32-bit destination registers
check_Reg32_dest: is bit64=1 & rexRprefix=0 & reg32 & reg64 { reg64 = zext(reg32); }
check_Reg32_dest: is bit64=1 & rexRprefix=1 & reg32_x & reg64_x { reg64_x = zext(reg32_x); }
check_Rmr32_dest: is bit64=1 & rexBprefix=0 & r32 & r64 { r64 = zext(r32); }
check_Rmr32_dest: is bit64=1 & rexBprefix=1 & r32_x & r64_x { r64_x = zext(r32_x); }
check_rm32_dest: is bit64=1 & mod=3 & check_Rmr32_dest { build check_Rmr32_dest; }
check_EAX_dest: is bit64=1 { RAX = zext(EAX); }
check_EDX_dest: is bit64=1 { RDX = zext(EDX); }
check_vexVVVV_r32_dest: is bit64=1 & vexVVVV_r64 & vexVVVV_r32 { vexVVVV_r64 = zext(vexVVVV_r32); }
@endif
check_Reg32_dest: is epsilon { }
check_Rmr32_dest: is epsilon { }
check_rm32_dest: is epsilon { }
check_EAX_dest: is epsilon { }
check_EDX_dest: is epsilon { }
check_vexVVVV_r32_dest: is epsilon { }
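
# These tables model the x86-64 rule that a 32-bit register write zero-extends
# into the full 64-bit register (e.g. MOV EAX,1 also clears the upper half of
# RAX): instruction semantics build the matching check_* table after writing a
# 32-bit destination, and the epsilon forms make that a no-op elsewhere.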

ptr1616: reloc is protectedMode=0 & imm16; j16 [ reloc = j16*0x10 + imm16; ] { CS = j16; export *[ram]:4 reloc; }
ptr1616: reloc is protectedMode=1 & imm16; j16 [ reloc = j16*0x10000 + imm16; ] { CS = j16; export *[ram]:4 reloc; }
ptr1632: j16":"imm32 is imm32; j16 { CS = j16; export *:4 imm32; }

# conditions

cc: "O"  is cond=0  { export OF; }
cc: "NO" is cond=1  { local tmp = !OF; export tmp; }
cc: "C"  is cond=2  { export CF; }
cc: "NC" is cond=3  { local tmp = !CF; export tmp; }
cc: "Z"  is cond=4  { export ZF; }
cc: "NZ" is cond=5  { local tmp = !ZF; export tmp; }
cc: "BE" is cond=6  { local tmp = CF || ZF; export tmp; }
cc: "A"  is cond=7  { local tmp = !(CF || ZF); export tmp; }
cc: "S"  is cond=8  { export SF; }
cc: "NS" is cond=9  { local tmp = !SF; export tmp; }
cc: "P"  is cond=10 { export PF; }
cc: "NP" is cond=11 { local tmp = !PF; export tmp; }
cc: "L"  is cond=12 { local tmp = OF != SF; export tmp; }
cc: "GE" is cond=13 { local tmp = OF == SF; export tmp; }
cc: "LE" is cond=14 { local tmp = ZF || (OF != SF); export tmp; }
cc: "G"  is cond=15 { local tmp = !ZF && (OF == SF); export tmp; }

# repeat prefixes
rep: ".REP" is repprefx=1 & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
rep: ".REP" is repprefx=1 & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
@ifdef IA64
rep: ".REP" is repprefx=1 & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
@endif
rep: is repprefx=0 { }

reptail: is repprefx=1 { goto inst_start; }
reptail: is repprefx=0 { }

repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
@ifdef IA64
repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
@endif
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
@ifdef IA64
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
@endif
repe: is repprefx=0 & repneprefx=0 { }

repetail: is repprefx=1 & repneprefx=0 { if (ZF) goto inst_start; }
repetail: is repneprefx=1 & repprefx=0 { if (!ZF) goto inst_start; }
repetail: is repprefx=0 & repneprefx=0 { }
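
# A REP-prefixed string instruction is thus modeled as a loop in three parts:
# rep/repe tests and decrements the count register at the top, the instruction
# body runs once, and reptail/repetail branches back to inst_start (conditioned
# on ZF for REPE/REPNE).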

# Some macros

macro ptr2(r,x) {
  r = zext(x);
}

macro ptr4(r,x) {
@ifdef IA64
  r = zext(x);
@else
  r = x;
@endif
}

macro ptr8(r,x) {
@ifdef IA64
  r = x;
@else
  r = x:$(SIZE);
@endif
}
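
# Naming convention for the push/pop macros below (inferred from their bodies,
# not stated elsewhere in this file): pushAB/popAB, where A is the stack
# address size in bytes (2 = SP with SS segmentation, 4 = ESP, 8 = RSP) and B
# is the size of the value transferred.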

macro push22(x) {
  mysave:2 = x;
  SP = SP - 2;
  tmp:$(SIZE) = segment(SS,SP);
  *:2 tmp = mysave;
}

macro push24(x) {
  mysave:4 = x;
  SP = SP - 4;
  tmp:$(SIZE) = segment(SS,SP);
  *:4 tmp = mysave;
}

macro push28(x) {
  mysave:8 = x;
  SP = SP - 8;
  tmp:$(SIZE) = segment(SS,SP);
  *:8 tmp = mysave;
}

macro push42(x) {
  mysave:2 = x;
  $(STACKPTR) = $(STACKPTR) - 2;
  *:2 $(STACKPTR) = mysave;
}

macro push44(x) {
  mysave:4 = x;
  $(STACKPTR) = $(STACKPTR) - 4;
  *:4 $(STACKPTR) = mysave;
}

macro pushseg44(x) {
  mysave:2 = x;
  $(STACKPTR) = $(STACKPTR) - 4;
  *:2 $(STACKPTR) = mysave;
}

macro push48(x) {
  mysave:8 = x;
  $(STACKPTR) = $(STACKPTR) - 8;
  *:8 $(STACKPTR) = mysave;
}

@ifdef IA64
macro push82(x) {
  mysave:2 = x;
  $(STACKPTR) = $(STACKPTR) - 2;
  *:2 $(STACKPTR) = mysave;
}

macro push84(x) {
  mysave:4 = x;
  $(STACKPTR) = $(STACKPTR) - 4;
  *:4 $(STACKPTR) = mysave;
}

macro push88(x) {
  mysave:8 = x;
  $(STACKPTR) = $(STACKPTR) - 8;
  *:8 $(STACKPTR) = mysave;
}

macro pushseg88(x) {
  mysave:2 = x;
  $(STACKPTR) = $(STACKPTR) - 8;
  *:2 $(STACKPTR) = mysave;
}
@endif

macro pop22(x) {
  tmp:$(SIZE) = segment(SS,SP);
  x = *:2 tmp;
  SP = SP + 2;
}

macro pop24(x) {
  tmp:$(SIZE) = segment(SS,SP);
  x = *:4 tmp;
  SP = SP + 4;
}

macro pop28(x) {
  tmp:$(SIZE) = segment(SS,SP);
  x = *:8 tmp;
  SP = SP + 8;
}

macro pop42(x) {
  x = *:2 $(STACKPTR);
  ESP = ESP + 2;
}

macro pop44(x) {
  x = *:4 $(STACKPTR);
  ESP = ESP + 4;
}

macro popseg44(x) {
  x = *:2 $(STACKPTR);
  ESP = ESP + 4;
}

macro pop48(x) {
  x = *:8 $(STACKPTR);
  ESP = ESP + 8;
}

@ifdef IA64
macro pop82(x) {
  x = *:2 $(STACKPTR);
  RSP = RSP + 2;
}

macro pop84(x) {
  x = *:4 $(STACKPTR);
  RSP = RSP + 4;
}

macro pop88(x) {
  x = *:8 $(STACKPTR);
  RSP = RSP + 8;
}

macro popseg88(x) {
  x = *:2 $(STACKPTR);
  RSP = RSP + 8;
}
@endif

macro unpackflags(tmp) {
  NT = (tmp & 0x4000) != 0;
# IOPL = (tmp & 0x1000) != 0;
  OF = (tmp & 0x0800) != 0;
  DF = (tmp & 0x0400) != 0;
  IF = (tmp & 0x0200) != 0;
  TF = (tmp & 0x0100) != 0;
  SF = (tmp & 0x0080) != 0;
  ZF = (tmp & 0x0040) != 0;
  AF = (tmp & 0x0010) != 0;
  PF = (tmp & 0x0004) != 0;
  CF = (tmp & 0x0001) != 0;
}

macro unpackeflags(tmp) {
  ID = (tmp & 0x00200000) != 0;
  AC = (tmp & 0x00040000) != 0;
# RF = (tmp & 0x00010000) != 0;
  VIP = 0;
  VIF = 0;
}

macro packflags(tmp) {
  tmp = (0x4000 * zext(NT&1))
#     | (0x1000 * zext(IOPL&1))
      | (0x0800 * zext(OF&1))
      | (0x0400 * zext(DF&1)) | (0x0200 * zext(IF&1)) | (0x0100 * zext(TF&1))
      | (0x0080 * zext(SF&1)) | (0x0040 * zext(ZF&1)) | (0x0010 * zext(AF&1))
      | (0x0004 * zext(PF&1)) | (0x0001 * zext(CF&1));
}

macro packeflags(tmp) {
  tmp = tmp | (0x00200000 * zext(ID&1)) | (0x00100000 * zext(VIP&1))
            | (0x00080000 * zext(VIF&1)) | (0x00040000 * zext(AC&1));
}

macro addflags(op1,op2) {
  CF = carry(op1,op2);
  OF = scarry(op1,op2);
}

#
# full-adder carry and overflow calculations
#
macro addCarryFlags ( op1, op2 ) {
|
|
local CFcopy = zext(CF);
|
|
CF = carry( op1, op2 );
|
|
OF = scarry( op1, op2 );
|
|
local result = op1 + op2;
|
|
CF = CF || carry( result, CFcopy );
|
|
OF = OF ^^ scarry( result, CFcopy );
|
|
op1 = result + CFcopy;
|
|
# AF not implemented
|
|
}
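
# Worked example (illustrative): ADC with op1=0xff, op2=0x00 and CF=1.
# carry(0xff,0x00)=0, but carry(result=0xff, CFcopy=1)=1, so the final
# CF=1 and op1 wraps to 0x00 -- the expected 8-bit ADC result.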

macro subCarryFlags ( op1, op2 ) {
    local CFcopy = zext(CF);
    CF = op1 < op2;
    OF = sborrow( op1, op2 );
    local result = op1 - op2;
    CF = CF || (result < CFcopy);
    OF = OF ^^ sborrow( result, CFcopy );
    op1 = result - CFcopy;
    # AF not implemented
}
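
# Worked example (illustrative): SBB with op1=0x00, op2=0x00 and CF=1.
# op1 < op2 is false, but result(0x00) < CFcopy(1), so CF=1 and
# op1 = 0x00 - 1 = 0xff -- the expected 8-bit SBB result.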

macro resultflags(result) {
    SF = result s< 0;
    ZF = result == 0;
    PF = ((popcount(result & 0xff) & 1:1) == 0);
    # AF not implemented
}
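
# Note: PF is computed over the low byte only, for every operand size;
# e.g. a 16-bit result of 0x0100 has zero set bits in its low byte (an
# even count), so PF=1.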

macro shiftresultflags(result,count) {
    local notzero = (count != 0);

    local newSF = (result s< 0);
    SF = (!notzero & SF) | (notzero & newSF);

    local newZF = (result == 0);
    ZF = (!notzero & ZF) | (notzero & newZF);

    local newPF = ((popcount(result & 0xff) & 1:1) == 0);
    PF = (!notzero & PF) | (notzero & newPF);
    # AF not implemented
}
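
# The (!notzero & F) | (notzero & newF) muxes keep the old flag value when
# the shift count is zero, since x86 shifts by a count of zero leave the
# flags unchanged; the rotate and shift flag macros below use the same
# pattern for CF and OF.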

macro subflags(op1,op2) {
    CF = op1 < op2;
    OF = sborrow(op1,op2);
}

macro negflags(op1) {
    CF = (op1 != 0);
    OF = sborrow(0,op1);
}

macro logicalflags() {
    CF = 0;
    OF = 0;
}

macro imultflags(low,total) {
    CF = sext(low) != total;
    OF = CF;
}

macro multflags(highhalf) {
    CF = highhalf != 0;
    OF = CF;
}

macro rolflags(result,count) {
    local notzero = (count != 0);
    local newCF = ((result & 1) != 0);
    CF = (!notzero & CF) | (notzero & newCF);

    local one = (count == 1);
    local newOF = CF ^ (result s< 0);
    OF = (!one & OF) | (one & newOF);
}

macro rorflags(result,count) {
    local notzero = (count != 0);
    local newCF = (result s< 0);
    CF = (!notzero & CF) | (notzero & newCF);

    local one = (count == 1);
    local newOF = (result s< 0) ^ ((result << 1) s< 0);
    OF = (!one & OF) | (one & newOF);
}

macro shlflags(op1,result,count) { # works for shld also
    local notzero = (count != 0);
    local newCF = ( (op1 << (count - 1)) s< 0 );
    CF = (!notzero & CF) | (notzero & newCF);

    local one = (count == 1);
    local newOF = CF ^ (result s< 0);
    OF = (!one & OF) | (one & newOF);
}

macro sarflags(op1,result,count) {
    local notzero = (count != 0);
    local newCF = ( ( (op1 s>> (count - 1)) & 1 ) != 0 );
    CF = (!notzero & CF) | (notzero & newCF);

    local one = (count == 1);
    OF = (!one & OF);
}

macro shrflags(op1,result,count) {
    local notzero = (count != 0);
    local newCF = ( ( (op1 >> (count - 1)) & 1 ) != 0 );
    CF = (!notzero & CF) | (notzero & newCF);

    local one = (count == 1);
    local newOF = (op1 s< 0);
    OF = (!one & OF) | (one & newOF);
}

macro shrdflags(op1,result,count) {
    local notzero = (count != 0);
    local newCF = ( ( (op1 >> (count - 1)) & 1 ) != 0 );
    CF = (!notzero & CF) | (notzero & newCF);

    local one = (count == 1);
    local newOF = ((op1 s< 0) ^ (result s< 0));
    OF = (!one & OF) | (one & newOF);
}

macro fdec() {
    local tmp = ST7;
    ST7 = ST6;
    ST6 = ST5;
    ST5 = ST4;
    ST4 = ST3;
    ST3 = ST2;
    ST2 = ST1;
    ST1 = ST0;
    ST0 = tmp;
}

macro finc() {
    local tmp = ST0;
    ST0 = ST1;
    ST1 = ST2;
    ST2 = ST3;
    ST3 = ST4;
    ST4 = ST5;
    ST5 = ST6;
    ST6 = ST7;
    ST7 = tmp;
}

macro fpop() {
    ST0 = ST1;
    ST1 = ST2;
    ST2 = ST3;
    ST3 = ST4;
    ST4 = ST5;
    ST5 = ST6;
    ST6 = ST7;
}

macro fpushv(val) {
    ST7 = ST6;
    ST6 = ST5;
    ST5 = ST4;
    ST4 = ST3;
    ST3 = ST2;
    ST2 = ST1;
    ST1 = ST0;
    ST0 = val;
}

macro fpopv(val) {
    val = ST0;
    ST0 = ST1;
    ST1 = ST2;
    ST2 = ST3;
    ST3 = ST4;
    ST4 = ST5;
    ST5 = ST6;
    ST6 = ST7;
}
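
# These macros model the x87 register stack by rotating values through
# ST0-ST7 directly, rather than maintaining the TOP field of the status
# word, so ST0 always names the top of the stack.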

macro fcom(val) {
    C1 = 0;

    C2 = nan(ST0) || nan(val);
    C0 = C2 | ( ST0 f< val );
    C3 = C2 | ( ST0 f== val );

    FPUStatusWord = (zext(C0)<<8) | (zext(C1)<<9) | (zext(C2)<<10) | (zext(C3)<<14);
}

macro fcomi(val) {
    PF = nan(ST0) || nan(val);
    ZF = PF | ( ST0 f== val );
    CF = PF | ( ST0 f< val );

    OF = 0;
    AF = 0;
    SF = 0;

    FPUStatusWord = FPUStatusWord & 0xfdff; # Clear C1
    C1 = 0;
}

# floating point NaN comparison into EFLAGS
macro fucompe(val1, val2) {
    PF = nan(val1) || nan(val2);
    ZF = PF | ( val1 f== val2 );
    CF = PF | ( val1 f< val2 );

    OF = 0;
    AF = 0;
    SF = 0;
}

# The base level constructors
# The prefixes

:^instruction is instrPhase=0 & over=0x2e; instruction [ segover=1; ] {} # CS override
:^instruction is instrPhase=0 & over=0x36; instruction [ segover=2; ] {} # SS override
:^instruction is instrPhase=0 & over=0x3e; instruction [ segover=3; ] {} # DS override
:^instruction is instrPhase=0 & over=0x26; instruction [ segover=4; ] {} # ES override
:^instruction is instrPhase=0 & over=0x64; instruction [ segover=5; ] {} # FS override
:^instruction is instrPhase=0 & over=0x65; instruction [ segover=6; ] {} # GS override
:^instruction is instrPhase=0 & over=0x66; instruction [ opsize=opsize $xor 1; mandover = mandover $xor 1; ] {} # Operand size override
:^instruction is instrPhase=0 & over=0x67; instruction [ addrsize=addrsize $xor 1; ] {} # Address size override
:^instruction is instrPhase=0 & over=0xf2; instruction [ repneprefx=1; ] {}
:^instruction is instrPhase=0 & over=0xf3; instruction [ repprefx=1; ] {}
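
# Illustrative decode walk-through (not a constructor): in 32-bit mode the
# byte sequence 66 01 C8 first matches the operand-size override above,
# flipping opsize, and then re-enters the instruction table to decode
# 01 C8 as ADD AX,CX; each prefix constructor consumes one byte and
# recurses on instruction.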

@ifdef IA64

#
# REX opcode extension prefixes
#

# REX prefix present
# Specification is "REX"
@define REX "rexprefix=1 & rexWprefix=0"

# Specification is "REX.W"
@define REX_W "rexprefix=1 & rexWprefix=1"
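
# For reference, a REX prefix is a single byte 0100WRXB (0x40-0x4f): W
# selects 64-bit operand size, and R, X and B extend the ModRM reg field,
# the SIB index, and the ModRM rm / SIB base respectively.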

# TODO: the following two constructors may be unreachable, since the 0x66 and
# 0x67 prefixes must come before a REX prefix
:^instruction is instrPhase=0 & over=0x66 & opsize=2; instruction [ opsize=0; mandover=mandover $xor 1; ] {} # Operand size override
:^instruction is instrPhase=0 & over=0x67 & addrsize=2; instruction [ addrsize=1; ] {} # Address size override

:^instruction is instrPhase=0 & addrsize=2 & row=0x4 & rexw=0 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=1; rexWprefix=0; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {}
:^instruction is instrPhase=0 & addrsize=2 & row=0x4 & rexw=1 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=2; rexWprefix=1; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {}
:^instruction is instrPhase=0 & addrsize=2 & opsize=0 & row=0x4 & rexw=0 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=0; rexWprefix=0; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {}
:^instruction is instrPhase=0 & addrsize=2 & opsize=0 & row=0x4 & rexw=1 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=2; rexWprefix=1; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {}
@endif

#
# VEX definitions: One from each group must be present in the decoding; following the specification from the manual.
#

# VEX encoding for type of VEX data flow.
# Specification is "VEX.", "VEX.NDS", "VEX.NDD", or "VEX.DDS". If only "VEX." is present, then "VEX_NONE" must be used.
@define VEX_NONE "vexMode=1 & vexVVVV=0"
@define VEX_NDS "vexMode=1"
@define VEX_NDD "vexMode=1"
@define VEX_DDS "vexMode=1"

# Specification is "LIG", "LZ", "128", or "256".
@define VEX_LIG "vexL"
@define VEX_LZ "vexL=0"
@define VEX_L128 "vexL=0"
@define VEX_L256 "vexL=1"

# These are only to be used with VEX or EVEX decoding, where only one "mandatory" prefix is encoded in the VEX or EVEX.
# If no prefix is specified, then VEX_PRE_NONE must be used.
# No other "physical" prefixes are allowed.
# Specification is "(empty)", "66", "F3", or "F2". If none of these are present (empty), then "VEX_PRE_NONE" must be used.
@define VEX_PRE_NONE "mandover=0"
@define VEX_PRE_66 "mandover=1"
@define VEX_PRE_F3 "mandover=2"
@define VEX_PRE_F2 "mandover=4"

# Specification is "0F", "0F38", or "0F3A".
@define VEX_0F "vexMMMMM=1"
@define VEX_0F38 "vexMMMMM=2"
@define VEX_0F3A "vexMMMMM=3"

# Specification is "WIG", "W0", or "W1".
@define VEX_WIG "rexWprefix"
@define VEX_W0 "rexWprefix=0"
@define VEX_W1 "rexWprefix=1"
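
# For reference, the 3-byte VEX prefix is C4 [~R ~X ~B mmmmm] [W ~vvvv L pp]
# and the 2-byte form is C5 [~R ~vvvv L pp]; R, X, B and vvvv are stored
# inverted (hence the ~ in the constructors below), and pp encodes the
# implied mandatory prefix (0=none, 1=66, 2=F3, 3=F2).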

@ifdef IA64

# 64-bit 3-byte VEX
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=0; instruction
        [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=1; instruction
        [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_66=1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=2; instruction
        [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f3=1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=3; instruction
        [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f2=1; ] {}

# 64-bit 2-byte VEX
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=0; instruction
        [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=1; instruction
        [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_66=1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=2; instruction
        [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f3=1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=3; instruction
        [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f2=1; ] {}

@else

# 32-bit 3-byte VEX
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=0; instruction
        [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=1; instruction
        [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_66=1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=2; instruction
        [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f3=1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=3; instruction
        [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f2=1; ] {}

# 32-bit 2-byte VEX
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_vvvv & vex_l & vex_pp=0; instruction
        [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_vvvv & vex_l & vex_pp=1; instruction
        [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_66=1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_vvvv & vex_l & vex_pp=2; instruction
        [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f3=1; ] {}
:^instruction is instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_vvvv & vex_l & vex_pp=3; instruction
        [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f2=1; ] {}

@endif

# Many of the multimedia instructions have a "mandatory" prefix, either 0x66, 0xf2 or 0xf3
# where the prefix really becomes part of the encoding. We collect the three possible prefixes of this
# sort in the mandover context variable so we can pattern all three at once

# 3DNow pre-parse to isolate suffix byte into context (suffix3D)
#  - general format: 0x0f 0x0f <modR/M> [sib] [displacement] <suffix3D-byte>
#  - must determine number of bytes consumed by addressing modes
# TODO: determine supported prefixes? (e.g., 0x26)

Suffix3D: imm8 is imm8 [ suffix3D=imm8; ] { }

:^instruction is instrPhase=0 & (byte=0x0f; byte=0x0f; XmmReg ... & m64; Suffix3D) ... & instruction ... [ instrPhase=1; ] { }
:^instruction is instrPhase=0 & (byte=0x0f; byte=0x0f; mmxmod=3; Suffix3D) ... & instruction ... [ instrPhase=1; ] { }
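
# Example (illustrative): the 3DNow instruction PFADD mm0,mm1 is encoded
# 0F 0F C1 9E -- the suffix byte 0x9E follows the ModRM (and any SIB or
# displacement) and is captured into suffix3D by the pre-parse above.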

# Instructions in alphabetical order

:AAA is vexMode=0 & bit64=0 & byte=0x37 { local car = ((AL & 0xf) > 9) | AF; AL = (AL+6*car)&0xf; AH=AH+car; CF=car; AF=car; }
:AAD imm8 is vexMode=0 & bit64=0 & byte=0xd5; imm8 { AL = AL + imm8*AH; AH=0; resultflags(AX); }
:AAM imm8 is vexMode=0 & bit64=0 & byte=0xd4; imm8 { AH = AL/imm8; AL = AL % imm8; resultflags(AX); }
:AAS is vexMode=0 & bit64=0 & byte=0x3f { local car = ((AL & 0xf) > 9) | AF; AL = (AL-6*car)&0xf; AH=AH-car; CF=car; AF=car; }
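
# Worked example (illustrative): AAM with the default immediate, encoded
# D4 0A, with AL=0x35 (53) leaves AH=5 and AL=3; other radices can be
# encoded in the immediate byte.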

:ADC AL,imm8 is vexMode=0 & byte=0x14; AL & imm8 { addCarryFlags( AL, imm8:1 ); resultflags( AL ); }
:ADC AX,imm16 is vexMode=0 & opsize=0 & byte=0x15; AX & imm16 { addCarryFlags( AX, imm16:2 ); resultflags( AX ); }
:ADC EAX,imm32 is vexMode=0 & opsize=1 & byte=0x15; EAX & check_EAX_dest & imm32 { addCarryFlags( EAX, imm32:4 ); build check_EAX_dest; resultflags( EAX ); }
@ifdef IA64
:ADC RAX,simm32 is vexMode=0 & opsize=2 & byte=0x15; RAX & simm32 { addCarryFlags( RAX, simm32 ); resultflags( RAX ); }
@endif
:ADC spec_rm8,imm8 is vexMode=0 & (byte=0x80 | byte=0x82); spec_rm8 & reg_opcode=2 ... ; imm8 { addCarryFlags( spec_rm8, imm8:1 ); resultflags( spec_rm8 ); }
:ADC spec_rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; spec_rm16 & reg_opcode=2 ...; imm16 { addCarryFlags( spec_rm16, imm16:2 ); resultflags( spec_rm16 ); }
:ADC spec_rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; spec_rm32 & check_rm32_dest ... & reg_opcode=2 ...; imm32 { addCarryFlags( spec_rm32, imm32:4 ); build check_rm32_dest; resultflags( spec_rm32 ); }
@ifdef IA64
:ADC spec_rm64,simm32 is vexMode=0 & opsize=2 & byte=0x81; spec_rm64 & reg_opcode=2 ...; simm32 { addCarryFlags( spec_rm64, simm32 ); resultflags( spec_rm64 ); }
@endif
:ADC spec_rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; spec_rm16 & reg_opcode=2 ...; simm8_16 { addCarryFlags( spec_rm16, simm8_16 ); resultflags( spec_rm16 ); }
:ADC spec_rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; spec_rm32 & check_rm32_dest ... & reg_opcode=2 ...; simm8_32 { addCarryFlags( spec_rm32, simm8_32 ); build check_rm32_dest; resultflags( spec_rm32 ); }
@ifdef IA64
:ADC spec_rm64,simm8_64 is vexMode=0 & opsize=2 & byte=0x83; spec_rm64 & reg_opcode=2 ...; simm8_64 { addCarryFlags( spec_rm64, simm8_64 ); resultflags( spec_rm64 ); }
@endif
:ADC rm8,Reg8 is vexMode=0 & byte=0x10; rm8 & Reg8 ... { addCarryFlags( rm8, Reg8 ); resultflags( rm8 ); }
:ADC rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x11; rm16 & Reg16 ... { addCarryFlags( rm16, Reg16 ); resultflags( rm16 ); }
:ADC rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x11; rm32 & check_rm32_dest ... & Reg32 ... { addCarryFlags( rm32, Reg32 ); build check_rm32_dest; resultflags( rm32 ); }
@ifdef IA64
:ADC rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x11; rm64 & Reg64 ... { addCarryFlags( rm64, Reg64 ); resultflags( rm64 ); }
@endif
:ADC Reg8,rm8 is vexMode=0 & byte=0x12; rm8 & Reg8 ... { addCarryFlags( Reg8, rm8 ); resultflags( Reg8 ); }
:ADC Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x13; rm16 & Reg16 ... { addCarryFlags( Reg16, rm16 ); resultflags( Reg16 ); }
:ADC Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x13; rm32 & Reg32 ... & check_Reg32_dest ... { addCarryFlags( Reg32, rm32 ); build check_Reg32_dest; resultflags( Reg32 ); }
@ifdef IA64
:ADC Reg64,rm64 is vexMode=0 & opsize=2 & byte=0x13; rm64 & Reg64 ... { addCarryFlags( Reg64, rm64 ); resultflags( Reg64 ); }
@endif

:ADD AL,imm8 is vexMode=0 & byte=0x4; AL & imm8 { addflags( AL,imm8 ); AL = AL + imm8; resultflags( AL); }
:ADD AX,imm16 is vexMode=0 & opsize=0 & byte=0x5; AX & imm16 { addflags( AX,imm16); AX = AX + imm16; resultflags( AX); }
:ADD EAX,imm32 is vexMode=0 & opsize=1 & byte=0x5; EAX & check_EAX_dest & imm32 { addflags( EAX,imm32); EAX = EAX + imm32; build check_EAX_dest; resultflags( EAX); }
@ifdef IA64
:ADD RAX,simm32 is vexMode=0 & opsize=2 & byte=0x5; RAX & simm32 { addflags( RAX,simm32); RAX = RAX + simm32; resultflags( RAX); }
@endif
:ADD spec_rm8,imm8 is vexMode=0 & (byte=0x80 | byte=0x82); spec_rm8 & reg_opcode=0 ...; imm8 { addflags( spec_rm8,imm8 ); spec_rm8 = spec_rm8 + imm8; resultflags( spec_rm8); }
:ADD spec_rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; spec_rm16 & reg_opcode=0 ...; imm16 { addflags( spec_rm16,imm16); spec_rm16 = spec_rm16 + imm16; resultflags( spec_rm16); }
:ADD spec_rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; spec_rm32 & check_rm32_dest ... & reg_opcode=0 ...; imm32 { addflags( spec_rm32,imm32); spec_rm32 = spec_rm32 + imm32; build check_rm32_dest; resultflags( spec_rm32); }
@ifdef IA64
:ADD spec_rm64,simm32 is vexMode=0 & opsize=2 & byte=0x81; spec_rm64 & reg_opcode=0 ...; simm32 { addflags( spec_rm64,simm32); spec_rm64 = spec_rm64 + simm32; resultflags( spec_rm64); }
@endif
:ADD spec_rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; spec_rm16 & reg_opcode=0 ...; simm8_16 { addflags( spec_rm16,simm8_16); spec_rm16 = spec_rm16 + simm8_16; resultflags( spec_rm16); }
:ADD spec_rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; spec_rm32 & check_rm32_dest ... & reg_opcode=0 ...; simm8_32 { addflags( spec_rm32,simm8_32); spec_rm32 = spec_rm32 + simm8_32; build check_rm32_dest; resultflags( spec_rm32); }
@ifdef IA64
:ADD spec_rm64,simm8_64 is vexMode=0 & opsize=2 & byte=0x83; spec_rm64 & reg_opcode=0 ...; simm8_64 { addflags( spec_rm64,simm8_64); spec_rm64 = spec_rm64 + simm8_64; resultflags( spec_rm64); }
@endif
:ADD rm8,Reg8 is vexMode=0 & byte=0x00; rm8 & Reg8 ... { addflags( rm8,Reg8 ); rm8 = rm8 + Reg8; resultflags( rm8); }
:ADD rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x1; rm16 & Reg16 ... { addflags( rm16,Reg16); rm16 = rm16 + Reg16; resultflags( rm16); }
:ADD rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x1; rm32 & check_rm32_dest ... & Reg32 ... { addflags( rm32,Reg32); rm32 = rm32 + Reg32; build check_rm32_dest; resultflags( rm32); }
@ifdef IA64
:ADD rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x1; rm64 & Reg64 ... { addflags( rm64,Reg64); rm64 = rm64 + Reg64; resultflags( rm64); }
@endif
:ADD Reg8,rm8 is vexMode=0 & byte=0x2; rm8 & Reg8 ... { addflags( Reg8,rm8 ); Reg8 = Reg8 + rm8; resultflags( Reg8); }
:ADD Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x3; rm16 & Reg16 ... { addflags(Reg16,rm16 ); Reg16 = Reg16 + rm16; resultflags(Reg16); }
:ADD Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x3; rm32 & Reg32 ... & check_Reg32_dest ... { addflags(Reg32,rm32 ); Reg32 = Reg32 + rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:ADD Reg64,rm64 is vexMode=0 & opsize=2 & byte=0x3; rm64 & Reg64 ... { addflags(Reg64,rm64 ); Reg64 = Reg64 + rm64; resultflags(Reg64); }
@endif

:AND AL,imm8 is vexMode=0 & byte=0x24; AL & imm8 { logicalflags(); AL = AL & imm8; resultflags( AL); }
:AND AX,imm16 is vexMode=0 & opsize=0 & byte=0x25; AX & imm16 { logicalflags(); AX = AX & imm16; resultflags( AX); }
:AND EAX,imm32 is vexMode=0 & opsize=1 & byte=0x25; EAX & check_EAX_dest & imm32 { logicalflags(); EAX = EAX & imm32; build check_EAX_dest; resultflags( EAX); }
@ifdef IA64
:AND RAX,simm32 is vexMode=0 & opsize=2 & byte=0x25; RAX & simm32 { logicalflags(); RAX = RAX & simm32; resultflags( RAX); }
@endif
:AND rm8,imm8 is vexMode=0 & (byte=0x80 | byte=0x82); rm8 & reg_opcode=4 ...; imm8 { logicalflags(); rm8 = rm8 & imm8; resultflags( rm8); }
:AND rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; rm16 & reg_opcode=4 ...; imm16 { logicalflags(); rm16 = rm16 & imm16; resultflags( rm16); }
:AND rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; rm32 & check_rm32_dest ... & reg_opcode=4 ...; imm32 { logicalflags(); rm32 = rm32 & imm32; build check_rm32_dest; resultflags( rm32); }
@ifdef IA64
:AND rm64,simm32 is vexMode=0 & opsize=2 & byte=0x81; rm64 & reg_opcode=4 ...; simm32 { logicalflags(); rm64 = rm64 & simm32; resultflags( rm64); }
@endif
:AND rm16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; rm16 & reg_opcode=4 ...; usimm8_16 { logicalflags(); rm16 = rm16 & usimm8_16; resultflags( rm16); }
:AND rm32,usimm8_32 is vexMode=0 & opsize=1 & byte=0x83; rm32 & check_rm32_dest ... & reg_opcode=4 ...; usimm8_32 { logicalflags(); rm32 = rm32 & usimm8_32; build check_rm32_dest; resultflags( rm32); }
@ifdef IA64
:AND rm64,usimm8_64 is vexMode=0 & opsize=2 & byte=0x83; rm64 & reg_opcode=4 ...; usimm8_64 { logicalflags(); rm64 = rm64 & usimm8_64; resultflags( rm64); }
@endif
:AND rm8,Reg8 is vexMode=0 & byte=0x20; rm8 & Reg8 ... { logicalflags(); rm8 = rm8 & Reg8; resultflags( rm8); }
:AND rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x21; rm16 & Reg16 ... { logicalflags(); rm16 = rm16 & Reg16; resultflags( rm16); }
:AND rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x21; rm32 & check_rm32_dest ... & Reg32 ... { logicalflags(); rm32 = rm32 & Reg32; build check_rm32_dest; resultflags( rm32); }
@ifdef IA64
:AND rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x21; rm64 & Reg64 ... { logicalflags(); rm64 = rm64 & Reg64; resultflags( rm64); }
@endif
:AND Reg8,rm8 is vexMode=0 & byte=0x22; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 & rm8; resultflags( Reg8); }
:AND Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x23; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 & rm16; resultflags(Reg16); }
:AND Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x23; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 & rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:AND Reg64,rm64 is vexMode=0 & opsize=2 & byte=0x23; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 & rm64; resultflags(Reg64); }
@endif

:ARPL rm16,Reg16 is vexMode=0 & bit64=0 & byte=0x63; rm16 & Reg16 ... { local rpldest=rm16&3; local rplsrc=Reg16&3; local rpldiff=rplsrc-rpldest;
                                                                        ZF = rpldiff s> 0; rm16 = rm16 + (zext(ZF) * rpldiff); }

:BOUND Reg16,m16 is vexMode=0 & bit64=0 & opsize=0 & byte=0x62; m16 & Reg16 ... { }
:BOUND Reg32,m32 is vexMode=0 & bit64=0 & opsize=1 & byte=0x62; m32 & Reg32 ... { }

#:BSF Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbc; rm16 & Reg16 ... { ZF = rm16 == 0;
# choose = 0xffff * (zext((0xff & rm16) == 0));
# mask = (0xf00 & choose) | (0xf | ~choose);
# pos = 8 & choose;
# choose = 0xffff * (zext((mask & rm16) == 0));
# mask1 = (mask << 2) & (mask << 4);
# mask2 = (mask >> 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (4 & choose);
# choose = 0xffff * (zext((mask & rm16) == 0));
# mask1 = (mask << 1) & (mask << 2);
# mask2 = (mask >> 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (2 & choose);
# choose = zext((mask & rm16) == 0);
# Reg16 = pos + choose; }

:BSF Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbc; rm16 & Reg16 ...
{
    bitIndex:2 = 0;

    ZF = ( rm16 == 0 );

    if ( ZF == 1 ) goto <done>;

    <start>
    if ( ((rm16 >> bitIndex) & 0x0001) != 0 ) goto <done>;
    bitIndex = bitIndex + 1;
    goto <start>;

    <done>
    Reg16 = bitIndex;
}

#:BSF Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbc; rm32 & Reg32 ... & check_Reg32_dest ... { ZF = rm32 == 0;
# choose = 0xffffffff * (zext((0xffff & rm32) == 0));
# mask = (0xff0000 & choose) | (0xff | ~choose);
# pos = 16 & choose;
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask << 4) & (mask << 8);
# mask2 = (mask >> 4) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (8 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask << 2) & (mask << 4);
# mask2 = (mask >> 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (4 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask << 1) & (mask << 2);
# mask2 = (mask >> 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (2 & choose);
# choose = zext((mask & rm32) == 0);
# Reg32 = pos + choose;
# build check_Reg32_dest; }

:BSF Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbc; rm32 & Reg32 ... & check_Reg32_dest ...
{
    bitIndex:4 = 0;

    ZF = ( rm32 == 0 );

    if ( ZF == 1 ) goto <done>;

    <start>
    if ( ((rm32 >> bitIndex) & 0x00000001) != 0 ) goto <done>;
    bitIndex = bitIndex + 1;
    goto <start>;

    <done>
    Reg32 = bitIndex;
    build check_Reg32_dest;
}
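
# Worked example (illustrative): BSF with a source of 0x00000008 exits the
# loop at bitIndex=3 with ZF=0; for a zero source the loop is skipped,
# ZF=1, and this model writes 0 (architecturally the destination is
# undefined in that case).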

@ifdef IA64
#:BSF Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbc; rm64 & Reg64 ... { ZF = rm64 == 0;
## TODO: NEED TO EXTEND THIS TO 64bit op
# choose = 0xffffffff * (zext((0xffff & rm64) == 0));
# mask = (0xff0000 & choose) | (0xff | ~choose);
# pos = 16 & choose;
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask << 4) & (mask << 8);
# mask2 = (mask >> 4) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (8 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask << 2) & (mask << 4);
# mask2 = (mask >> 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (4 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask << 1) & (mask << 2);
# mask2 = (mask >> 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos + (2 & choose);
# choose = zext((mask & rm64) == 0);
# Reg64 = pos + choose; }

:BSF Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbc; rm64 & Reg64 ...
{
    bitIndex:8 = 0;

    ZF = ( rm64 == 0 );

    if ( ZF == 1 ) goto <done>;

    <start>
    if ( ((rm64 >> bitIndex) & 0x0000000000000001) != 0 ) goto <done>;
    bitIndex = bitIndex + 1;
    goto <start>;

    <done>
    Reg64 = bitIndex;
}
@endif

#:BSR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbd; rm16 & Reg16 ... { ZF = rm16 == 0;
# choose = 0xffff * (zext((0xff00 & rm16) == 0));
# mask = (0xf000 & ~choose) | (0xf0 | choose);
# pos = 16 - (8 & choose);
# choose = 0xffff * (zext((mask & rm16) == 0));
# mask1 = (mask >> 2) & (mask >> 4);
# mask2 = (mask << 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (4 & choose);
# choose = 0xffff * (zext((mask & rm16) == 0));
# mask1 = (mask >> 1) & (mask >> 2);
# mask2 = (mask << 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (2 & choose);
# choose = zext((mask & rm16) == 0);
# Reg16 = pos - choose; }

:BSR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbd; rm16 & Reg16 ...
{
    bitIndex:2 = 15;

    ZF = ( rm16 == 0 );

    if ( ZF == 1 ) goto <done>;

    <start>
    if ( (rm16 >> bitIndex) != 0 ) goto <done>;
    bitIndex = bitIndex - 1;
    goto <start>;

    <done>
    Reg16 = bitIndex;
}

#:BSR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbd; rm32 & Reg32 ... & check_Reg32_dest ... { ZF = rm32 == 0;
# choose = 0xffffffff * (zext((0xffff0000 & rm32) == 0));
# mask = (0xff000000 & ~choose) | (0xff00 | choose);
# pos = 32 - (16 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask >> 4) & (mask >> 8);
# mask2 = (mask << 4) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (8 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask >> 2) & (mask >> 4);
# mask2 = (mask << 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (4 & choose);
# choose = 0xffffffff * (zext((mask & rm32) == 0));
# mask1 = (mask >> 1) & (mask >> 2);
# mask2 = (mask << 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (2 & choose);
# choose = zext((mask & rm32) == 0);
# Reg32 = pos - choose;
# build check_Reg32_dest; }

:BSR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbd; rm32 & Reg32 ... & check_Reg32_dest ...
{
    bitIndex:4 = 31;

    ZF = ( rm32 == 0 );

    if ( ZF == 1 ) goto <done>;

    <start>
    if ( (rm32 >> bitIndex) != 0 ) goto <done>;
    bitIndex = bitIndex - 1;
    goto <start>;

    <done>
    Reg32 = bitIndex;
    build check_Reg32_dest;
}

@ifdef IA64
#:BSR Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbd; rm64 & Reg64 ... { ZF = rm64 == 0;
## TODO: NEED TO EXTEND THIS TO 64bit op
# choose = 0xffffffff * (zext((0xffff0000 & rm64) == 0));
# mask = (0xff000000 & ~choose) | (0xff00 | choose);
# pos = 32 - (16 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask >> 4) & (mask >> 8);
# mask2 = (mask << 4) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (8 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask >> 2) & (mask >> 4);
# mask2 = (mask << 2) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (4 & choose);
# choose = 0xffffffff * (zext((mask & rm64) == 0));
# mask1 = (mask >> 1) & (mask >> 2);
# mask2 = (mask << 1) & mask;
# mask = (mask1 & choose) | (mask2 | ~choose);
# pos = pos - (2 & choose);
# choose = zext((mask & rm64) == 0);
# Reg64 = pos - choose; }

:BSR Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbd; rm64 & Reg64 ...
{
    bitIndex:8 = 63;

    ZF = ( rm64 == 0 );

    if ( ZF == 1 ) goto <done>;

    <start>
    if ( (rm64 >> bitIndex) != 0 ) goto <done>;
    bitIndex = bitIndex - 1;
    goto <start>;

    <done>
    Reg64 = bitIndex;
}
@endif

:BSWAP Rmr32 is vexMode=0 & byte=0xf; row=12 & page=1 & Rmr32 & check_Rmr32_dest
{
    local tmp = (Rmr32 & 0xff000000) >> 24;
    tmp = tmp | ((Rmr32 & 0x00ff0000) >> 8);
    tmp = tmp | ((Rmr32 & 0x0000ff00) << 8);
    Rmr32 = tmp | ((Rmr32 & 0x000000ff) << 24);
    build check_Rmr32_dest;
}
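
# Worked example (illustrative): BSWAP EAX with EAX=0x12345678 leaves
# EAX=0x78563412, reversing the byte order for endian conversion.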

@ifdef IA64
:BSWAP Rmr64 is vexMode=0 & opsize=2 & byte=0xf; row=12 & page=1 & Rmr64
{
    local tmp = (Rmr64 & 0xff00000000000000) >> 56;
    tmp = tmp | ((Rmr64 & 0x00ff000000000000) >> 40);
    tmp = tmp | ((Rmr64 & 0x0000ff0000000000) >> 24);
    tmp = tmp | ((Rmr64 & 0x000000ff00000000) >> 8);
    tmp = tmp | ((Rmr64 & 0x00000000ff000000) << 8);
    tmp = tmp | ((Rmr64 & 0x0000000000ff0000) << 24);
    tmp = tmp | ((Rmr64 & 0x000000000000ff00) << 40);
    Rmr64 = tmp | ((Rmr64 & 0x00000000000000ff) << 56);
}
@endif

:BT Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xa3; mod=3 & Rmr16 & Reg16 { CF = ((Rmr16 >> (Reg16 & 0xf)) & 1) != 0; }
:BT Mem,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xa3; Mem & Reg16 ... { local ptr = Mem + (sext(Reg16) s>> 3);
                                                                               CF = ((*:1 ptr >> (Reg16 & 0x7)) & 1) != 0; }
:BT Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xa3; mod=3 & Rmr32 & Reg32 { CF = ((Rmr32 >> (Reg32 & 0x1f)) & 1) != 0; }
:BT Mem,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xa3; Mem & Reg32 ... {
@ifdef IA64
    local ptr = Mem + (sext(Reg32) s>> 3);
@else
    local ptr = Mem + (Reg32 s>> 3);
@endif
    CF = ((*:1 ptr >> (Reg32 & 0x7)) & 1) != 0;
}
@ifdef IA64
:BT Rmr64,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xa3; mod=3 & Rmr64 & Reg64 { CF = ((Rmr64 >> (Reg64 & 0x3f)) & 1) != 0; }
:BT Mem,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xa3; Mem & Reg64 ... { local ptr = Mem + (Reg64 s>> 3);
                                                                               CF = ((*:1 ptr >> (Reg64 & 0x7)) & 1) != 0; }
@endif
:BT rm16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; (rm16 & reg_opcode=4 ...); imm8 { CF = ((rm16 >> (imm8 & 0x0f)) & 1) != 0; }
:BT rm32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; (rm32 & reg_opcode=4 ...); imm8 { CF = ((rm32 >> (imm8 & 0x1f)) & 1) != 0; }
@ifdef IA64
:BT rm64,imm8 is vexMode=0 & opsize=2 & byte=0xf; byte=0xba; (rm64 & reg_opcode=4 ...); imm8 { CF = ((rm64 >> (imm8 & 0x3f)) & 1) != 0; }
@endif
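
# Note on the memory forms above: with a register bit index, BT addresses a
# bit string -- the byte tested lives at Mem + (index s>> 3), and the index
# is signed, so it may reach below the operand address.  The immediate
# forms instead mask the bit offset to the operand width.  The same
# addressing applies to BTC, BTR and BTS below.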

:BTC Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbb; mod=3 & Rmr16 & Reg16 { local bit=Reg16&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16^(1<<bit); CF=(val!=0); }
:BTC Mem,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbb; Mem & Reg16 ... { local ptr = Mem + (sext(Reg16) s>> 3); local bit=Reg16&7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr ^ (1<<bit); CF=(val!=0); }
:BTC Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbb; mod=3 & Rmr32 & Reg32 & check_Rmr32_dest { local bit=Reg32&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32^(1<<bit); build check_Rmr32_dest; }
:BTC Mem,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbb; Mem & Reg32 ... {
@ifdef IA64
    local ptr = Mem + (sext(Reg32) s>> 3);
@else
    local ptr = Mem + (Reg32 s>> 3);
@endif
    local bit=Reg32&7;
    local val = (*:1 ptr >> bit) & 1;
    *:1 ptr = *:1 ptr ^ (1<<bit);
    CF = (val != 0);
}
@ifdef IA64
:BTC Rmr64,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbb; mod=3 & Rmr64 & Reg64 { local bit=Reg64&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64^(1<<bit); CF=(val!=0); }
:BTC Mem,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbb; Mem & Reg64 ... { local ptr = Mem + (Reg64 s>> 3); local bit=Reg64&7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr ^ (1<<bit); CF = (val != 0); }
@endif
:BTC rm16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; (rm16 & reg_opcode=7 ...); imm8 { local bit=imm8&0xf; local val=(rm16>>bit)&1; rm16=rm16^(1<<bit); CF=(val!=0); }
:BTC rm32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; (rm32 & check_rm32_dest ... & reg_opcode=7 ...); imm8 { local bit=imm8&0x1f; local val=(rm32>>bit)&1; CF=(val!=0); rm32=rm32^(1<<bit); build check_rm32_dest; }
@ifdef IA64
:BTC rm64,imm8 is vexMode=0 & opsize=2 & byte=0xf; byte=0xba; (rm64 & reg_opcode=7 ...); imm8 { local bit=imm8&0x3f; local val=(rm64>>bit)&1; rm64=rm64^(1<<bit); CF=(val!=0); }
@endif

:BTR Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb3; mod=3 & Rmr16 & Reg16 { local bit=Reg16&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16 & ~(1<<bit); CF=(val!=0); }
:BTR Mem,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb3; Mem & Reg16 ... { local ptr = Mem + (sext(Reg16) s>> 3); local bit=Reg16&7; local val=(*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr & ~(1<<bit); CF = (val!=0); }
:BTR Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb3; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { local bit=Reg32&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32 & ~(1<<bit); build check_Rmr32_dest; }
:BTR Mem,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb3; Mem & Reg32 ... {
@ifdef IA64
    local ptr = Mem + (sext(Reg32) s>> 3);
@else
    local ptr = Mem + (Reg32 s>> 3);
@endif
    local bit = Reg32 & 7;
    local val = (*:1 ptr >> bit) & 1;
    *:1 ptr = *:1 ptr & ~(1<<bit);
    CF = (val!=0);
}
@ifdef IA64
:BTR Rmr64,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xb3; mod=3 & Rmr64 & Reg64 { local bit=Reg64&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64 & ~(1<<bit); CF=(val!=0); }
:BTR Mem,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xb3; Mem & Reg64 ... { local ptr = Mem + (Reg64 s>> 3); local bit = Reg64 & 7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr & ~(1<<bit); CF = (val!=0); }
@endif
:BTR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; (rm16 & reg_opcode=6 ...); imm8 { local bit=imm8&0xf; local val=(rm16>>bit)&1; rm16=rm16 & ~(1<<bit); CF=(val!=0); }
:BTR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; (rm32 & reg_opcode=6 ... & check_rm32_dest ...); imm8 { local bit=imm8&0x1f; local val=(rm32>>bit)&1; CF=(val!=0); rm32=rm32 & ~(1<<bit); build check_rm32_dest; }
@ifdef IA64
:BTR rm64,imm8 is vexMode=0 & opsize=2 & byte=0xf; byte=0xba; (rm64 & reg_opcode=6 ...); imm8 { local bit=imm8&0x3f; local val=(rm64>>bit)&1; rm64=rm64 & ~(1<<bit); CF=(val!=0); }
@endif

:BTS Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xab; mod=3 & Rmr16 & Reg16 { local bit=Reg16&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16 | (1<<bit); CF=(val!=0); }
:BTS Mem,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xab; Mem & Reg16 ... { local ptr = Mem + (sext(Reg16) s>> 3); local bit = Reg16&7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr | (1<<bit); CF = (val != 0); }
:BTS Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xab; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { local bit=Reg32&0x1f; local val=(Rmr32>>bit)&1; CF=(val!=0); Rmr32=Rmr32 | (1<<bit); build check_Rmr32_dest; }
:BTS Mem,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xab; Mem & Reg32 ... {
@ifdef IA64
    local ptr = Mem + (sext(Reg32) s>>3);
@else
    local ptr = Mem + (Reg32 s>>3);
@endif
    local bit = Reg32 & 7;
    local val = (*:1 ptr >> bit) & 1;
    *:1 ptr = *:1 ptr | (1<<bit);
    CF = (val != 0);
}
@ifdef IA64
:BTS Rmr64,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xab; mod=3 & Rmr64 & Reg64 { local bit=Reg64&0x3f; local val=(Rmr64>>bit)&1; Rmr64=Rmr64 | (1<<bit); CF=(val!=0); }
:BTS Mem,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xab; Mem & Reg64 ... { local ptr = Mem + (Reg64 s>>3); local bit = Reg64 & 7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr | (1<<bit); CF = (val != 0); }
@endif
:BTS rm16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; (rm16 & reg_opcode=5 ...); imm8 { local bit=imm8&0xf; local val=(rm16>>bit)&1; rm16=rm16 | (1<<bit); CF=(val!=0); }
:BTS rm32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; (rm32 & reg_opcode=5 ... & check_rm32_dest ...); imm8 { local bit=imm8&0x1f; local val=(rm32>>bit)&1; CF=(val!=0); rm32=rm32 | (1<<bit); build check_rm32_dest; }
@ifdef IA64
:BTS rm64,imm8 is vexMode=0 & opsize=2 & byte=0xf; byte=0xba; (rm64 & reg_opcode=5 ...); imm8 { local bit=imm8&0x3f; local val=(rm64>>bit)&1; rm64=rm64 | (1<<bit); CF=(val!=0); }
@endif

:CALL rel16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xe8; rel16 { push22(&:2 inst_next); call rel16; }
:CALL rel16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xe8; rel16 { push42(&:2 inst_next); call rel16; }
@ifdef IA64
# 64-bit addressing mode does not support (N.S.) rel16
#:CALL rel16 is vexMode=0 & addrsize=2 & opsize=0 & byte=0xe8; rel16 { push82(&:2 inst_next); call rel16; }
@endif

# When is a CALL really a jump?  When its target is the instruction immediately
# following it (displacement zero).  That is usually a get-PC idiom rather than
# a true call (though not always), so model it as a push plus branch.
:CALL rel16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xe8; simm16=0 & rel16 { push22(&:2 inst_next); goto rel16; }
:CALL rel16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xe8; simm16=0 & rel16 { push42(&:2 inst_next); goto rel16; }
@ifdef IA64
# 64-bit addressing mode does not support (N.S.) rel16
#:CALL rel16 is vexMode=0 & addrsize=2 & opsize=0 & byte=0xe8; simm16=0 & rel16 { push82(&:2 inst_next); goto rel16; }
@endif

:CALL rel32 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xe8; rel32 { push24(&:4 inst_next); call rel32; }
:CALL rel32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xe8; rel32 { push44(&:4 inst_next); call rel32; }
@ifdef IA64
:CALL rel32 is vexMode=0 & addrsize=2 & (opsize=1 | opsize=2) & byte=0xe8; rel32 { push88(&:8 inst_next); call rel32; }
@endif

# As above: a CALL with a zero displacement simply falls through, so treat it
# as a jump rather than a true call.
:CALL rel32 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xe8; simm32=0 & rel32 { push24(&:4 inst_next); goto rel32; }
:CALL rel32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xe8; simm32=0 & rel32 { push44(&:4 inst_next); goto rel32; }
@ifdef IA64
:CALL rel32 is vexMode=0 & addrsize=2 & (opsize=1 | opsize=2) & byte=0xe8; simm32=0 & rel32 { push88(&:8 inst_next); goto rel32; }
@endif
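
# Example of the zero-displacement pattern matched above (illustrative):
#   E8 00 00 00 00   CALL $+5
#   58               POP  EAX
# a common position-independent way to read the instruction pointer;
# modeling it as a push plus branch keeps it from looking like a real
# function call.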

:CALL rm16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xff & currentCS; rm16 & reg_opcode=2 ... { push22(&:2 inst_next); tmp:4 = segment(currentCS,rm16); call [tmp]; }
:CALL rm16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=2 ... { push42(&:2 inst_next); call [rm16]; }
@ifdef IA64
:CALL rm16 is vexMode=0 & addrsize=2 & opsize=0 & byte=0xff; rm16 & reg_opcode=2 ... { push82(&:2 inst_next); tmp:8 = inst_next + zext(rm16); call [tmp]; }
@endif
:CALL rm32 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xff; rm32 & reg_opcode=2 ... { push24(&:4 inst_next); call [rm32]; }
:CALL rm32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; rm32 & reg_opcode=2 ... { push44(&:4 inst_next); call [rm32]; }
@ifdef IA64
:CALL rm64 is vexMode=0 & addrsize=2 & opsize=1 & byte=0xff; rm64 & reg_opcode=2 ... { push88(&:8 inst_next); call [rm64]; }
:CALL rm64 is vexMode=0 & addrsize=2 & opsize=2 & byte=0xff; rm64 & reg_opcode=2 ... { push88(&:8 inst_next); call [rm64]; }
@endif

:CALLF ptr1616 is vexMode=0 & addrsize=0 & opsize=0 & byte=0x9a; ptr1616 { push22(CS); build ptr1616; push22(&:2 inst_next); call ptr1616; }
:CALLF ptr1616 is vexMode=0 & addrsize=1 & opsize=0 & byte=0x9a; ptr1616 { push42(CS); build ptr1616; push42(&:2 inst_next); call ptr1616; }
:CALLF ptr1632 is vexMode=0 & addrsize=0 & opsize=1 & byte=0x9a; ptr1632 { push22(CS); build ptr1632; push24(&:4 inst_next); call ptr1632; }
:CALLF ptr1632 is vexMode=0 & addrsize=1 & opsize=1 & byte=0x9a; ptr1632 { push42(CS); build ptr1632; push44(&:4 inst_next); call ptr1632; }
:CALLF addr16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xff; addr16 & reg_opcode=3 ... { push22(CS); push22(&:2 inst_next); ptr:$(SIZE) = segment(DS,addr16); addrptr:$(SIZE) = segment(*:2 (ptr+2),*:2 ptr); call [addrptr]; }
:CALLF addr32 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; addr32 & reg_opcode=3 ... { push42(CS); push42(&:2 inst_next); call [addr32]; }
@ifdef IA64
:CALLF addr32 is vexMode=0 & addrsize=2 & opsize=0 & byte=0xff; addr32 & reg_opcode=3 ... { push82(CS); push82(&:2 inst_next); call [addr32]; }
@endif
:CALLF addr16 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xff; addr16 & reg_opcode=3 ... { push22(CS); push24(&:4 inst_next); call [addr16]; }
:CALLF addr32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; addr32 & reg_opcode=3 ... { push42(CS); push44(&:4 inst_next); call [addr32]; }
@ifdef IA64
:CALLF addr32 is vexMode=0 & addrsize=2 & opsize=1 & byte=0xff; addr32 & reg_opcode=3 ... { push82(CS); push84(&:4 inst_next); call [addr32]; }
@endif

:CBW is vexMode=0 & opsize=0 & byte=0x98 { AX = sext(AL); }
:CWDE is vexMode=0 & opsize=1 & byte=0x98 & check_EAX_dest { EAX = sext(AX); build check_EAX_dest; }
@ifdef IA64
:CDQE is vexMode=0 & opsize=2 & byte=0x98 { RAX = sext(EAX); }
@endif

:CWD is vexMode=0 & opsize=0 & byte=0x99 { tmp:4 = sext(AX); DX = tmp(2); }
:CDQ is vexMode=0 & opsize=1 & byte=0x99 & check_EDX_dest { tmp:8 = sext(EAX); EDX = tmp(4); build check_EDX_dest; }
@ifdef IA64
:CQO is vexMode=0 & opsize=2 & byte=0x99 { tmp:16 = sext(RAX); RDX = tmp(8); }
@endif

define pcodeop clflush;
:CLFLUSH m8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=7 ) ... & m8 {
    clflush(m8);
}

:CLAC is vexMode=0 & byte=0x0F; byte=0x01; byte=0xCA { AC = 0; }

:CLC is vexMode=0 & byte=0xf8 { CF = 0; }
:CLD is vexMode=0 & byte=0xfc { DF = 0; }

# CLGI (AMD): clear the global interrupt flag (GIF); while GIF is zero, all
# external interrupts are disabled.
:CLGI is vexMode=0 & byte=0x0f; byte=0x01; byte=0xDD { clgi(); }
:CLI is vexMode=0 & byte=0xfa { IF = 0; }
:CLTS is vexMode=0 & byte=0x0f; byte=0x06 { }

define pcodeop clzero;

:CLZERO is vexMode=0 & opsize=0 & byte=0x0F; byte=0x01; byte=0xFC { clzero(AX); }
:CLZERO is vexMode=0 & opsize=1 & byte=0x0F; byte=0x01; byte=0xFC { clzero(EAX); }
@ifdef IA64
:CLZERO is vexMode=0 & opsize=2 & byte=0x0F; byte=0x01; byte=0xFC { clzero(RAX); }
@endif

:CMC is vexMode=0 & byte=0xf5 { CF = CF==0; }

:CMOV^cc Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; row=4 & cc; rm16 & Reg16 ... { if (!cc) goto inst_next; Reg16 = rm16; }
:CMOV^cc Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; row=4 & cc; rm32 & Reg32 ... & check_Reg32_dest ... { build check_Reg32_dest; if (!cc) goto inst_next; Reg32 = rm32; }
@ifdef IA64
:CMOV^cc Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; row=4 & cc; rm64 & Reg64 ... { if (!cc) goto inst_next; Reg64 = rm64; }
@endif

:CMP AL,imm8 is vexMode=0 & byte=0x3c; AL & imm8 { subflags( AL,imm8 ); local tmp = AL - imm8; resultflags(tmp); }
:CMP AX,imm16 is vexMode=0 & opsize=0 & byte=0x3d; AX & imm16 { subflags( AX,imm16); local tmp = AX - imm16; resultflags(tmp); }
:CMP EAX,imm32 is vexMode=0 & opsize=1 & byte=0x3d; EAX & imm32 { subflags( EAX,imm32); local tmp = EAX - imm32; resultflags(tmp); }
@ifdef IA64
:CMP RAX,simm32 is vexMode=0 & opsize=2 & byte=0x3d; RAX & simm32 { subflags( RAX,simm32); local tmp = RAX - simm32; resultflags(tmp); }
@endif
:CMP spec_rm8,imm8 is vexMode=0 & (byte=0x80 | byte=0x82); spec_rm8 & reg_opcode=7 ...; imm8 { subflags( spec_rm8,imm8 ); local tmp = spec_rm8 - imm8; resultflags(tmp); }
:CMP spec_rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; spec_rm16 & reg_opcode=7 ...; imm16 { subflags( spec_rm16,imm16); local tmp = spec_rm16 - imm16; resultflags(tmp); }
:CMP spec_rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; spec_rm32 & reg_opcode=7 ...; imm32 { subflags( spec_rm32,imm32); local tmp = spec_rm32 - imm32; resultflags(tmp); }
@ifdef IA64
:CMP spec_rm64,simm32 is vexMode=0 & opsize=2 & byte=0x81; spec_rm64 & reg_opcode=7 ...; simm32 { subflags( spec_rm64,simm32); local tmp = spec_rm64 - simm32; resultflags(tmp); }
@endif
:CMP spec_rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; spec_rm16 & reg_opcode=7 ...; simm8_16 { subflags( spec_rm16,simm8_16); local tmp = spec_rm16 - simm8_16; resultflags(tmp); }
:CMP spec_rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; spec_rm32 & reg_opcode=7 ...; simm8_32 { subflags( spec_rm32,simm8_32); local tmp = spec_rm32 - simm8_32; resultflags(tmp); }
@ifdef IA64
:CMP spec_rm64,simm8_64 is vexMode=0 & opsize=2 & byte=0x83; spec_rm64 & reg_opcode=7 ...; simm8_64 { subflags( spec_rm64,simm8_64); local tmp = spec_rm64 - simm8_64; resultflags(tmp); }
@endif
:CMP rm8,Reg8 is vexMode=0 & byte=0x38; rm8 & Reg8 ... { subflags( rm8,Reg8 ); local tmp = rm8 - Reg8; resultflags(tmp); }
:CMP rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x39; rm16 & Reg16 ... { subflags( rm16,Reg16); local tmp = rm16 - Reg16; resultflags(tmp); }
:CMP rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x39; rm32 & Reg32 ... { subflags( rm32, Reg32 ); local tmp = rm32 - Reg32; resultflags(tmp); }
@ifdef IA64
:CMP rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x39; rm64 & Reg64 ... { subflags( rm64,Reg64); local tmp = rm64 - Reg64; resultflags(tmp); }
@endif
:CMP Reg8,rm8 is vexMode=0 & byte=0x3a; rm8 & Reg8 ... { subflags( Reg8,rm8 ); local tmp = Reg8 - rm8; resultflags(tmp); }
:CMP Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x3b; rm16 & Reg16 ... { subflags(Reg16,rm16 ); local tmp = Reg16 - rm16; resultflags(tmp); }
:CMP Reg32,Rmr32 is vexMode=0 & opsize=1 & byte=0x3b; Reg32 & mod=3 & Rmr32 { subflags(Reg32,Rmr32 ); local tmp = Reg32 - Rmr32; resultflags(tmp); }
:CMP Reg32,m32 is vexMode=0 & opsize=1 & byte=0x3b; Reg32 ... & m32 { subflags(Reg32,m32 ); local tmp = Reg32 - m32; resultflags(tmp); }
@ifdef IA64
:CMP Reg64,rm64 is vexMode=0 & opsize=2 & byte=0x3b; rm64 & Reg64 ... { subflags(Reg64,rm64 ); local tmp = Reg64 - rm64; resultflags(tmp); }
@endif

:CMPSB^repe^repetail eseDI1,dseSI1 is vexMode=0 & repe & repetail & byte=0xa6 & dseSI1 & eseDI1 { build repe; build eseDI1; build dseSI1; subflags(dseSI1,eseDI1); local diff=dseSI1-eseDI1; resultflags(diff); build repetail; }
:CMPSW^repe^repetail eseDI2,dseSI2 is vexMode=0 & repe & repetail & opsize=0 & byte=0xa7 & dseSI2 & eseDI2 { build repe; build eseDI2; build dseSI2; subflags(dseSI2,eseDI2); local diff=dseSI2-eseDI2; resultflags(diff); build repetail; }
:CMPSD^repe^repetail eseDI4,dseSI4 is vexMode=0 & repe & repetail & opsize=1 & byte=0xa7 & dseSI4 & eseDI4 { build repe; build eseDI4; build dseSI4; subflags(dseSI4,eseDI4); local diff=dseSI4-eseDI4; resultflags(diff); build repetail; }
@ifdef IA64
:CMPSQ^repe^repetail eseDI8,dseSI8 is vexMode=0 & repe & repetail & opsize=2 & byte=0xa7 & dseSI8 & eseDI8 { build repe; build eseDI8; build dseSI8; subflags(dseSI8,eseDI8); local diff=dseSI8-eseDI8; resultflags(diff); build repetail; }
@endif

# 0F A6 appears to be the CMPXCHG encoding used on early i486 steppings; it is
# decoded here but given no semantics.
:CMPXCHG rm8,Reg8 is vexMode=0 & byte=0xf; byte=0xa6; rm8 & Reg8 ... { }
:CMPXCHG rm8,Reg8 is vexMode=0 & byte=0xf; byte=0xb0; rm8 & Reg8 ... { subflags(AL,rm8); local tmp=AL-rm8; resultflags(tmp);
                                                                       local diff = rm8^Reg8; rm8 = rm8 ^ (ZF*diff);
                                                                       diff = AL ^ rm8; AL = AL ^ ((ZF==0)*diff); }
:CMPXCHG rm16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb1; rm16 & Reg16 ... { subflags(AX,rm16); local tmp=AX-rm16; resultflags(tmp);
                                                                                      local diff = rm16^Reg16; rm16 = rm16 ^ (zext(ZF) * diff);
                                                                                      diff = AX ^ rm16; AX = AX ^ (zext(ZF==0) * diff); }
:CMPXCHG rm32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb1; rm32 & Reg32 ... & check_EAX_dest ... & check_rm32_dest ...
{
    # this instruction writes to either EAX or rm32;
    # in 64-bit mode, a 32-bit register that is written to
    # (and only the register that is written to)
    # must be zero-extended to 64 bits
    subflags(EAX,rm32);
    local tmp=EAX-rm32;
    resultflags(tmp);
    if (ZF==1) goto <equal>;
    EAX = rm32;
    build check_EAX_dest;
    goto inst_next;
    <equal>
    rm32 = Reg32;
    build check_rm32_dest;
}
@ifdef IA64
:CMPXCHG rm64,Reg64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xb1; rm64 & Reg64 ... { subflags(RAX,rm64); local tmp=RAX-rm64; resultflags(tmp);
                                                                                      local diff = rm64^Reg64; rm64 = rm64 ^ (zext(ZF) * diff);
                                                                                      diff = RAX ^ rm64; RAX = RAX ^ (zext(ZF==0) * diff); }
@endif

:CMPXCHG8B m64 is vexMode=0 & byte=0xf; byte=0xc7; ( mod != 0b11 & reg_opcode=1 ) ... & m64 {
    ZF = ((zext(EDX) << 32) | zext(EAX)) == m64;

    if (ZF == 1) goto <equal>;
    EDX = m64(4);
    EAX = m64:4;
    goto <done>;

    <equal>
    m64 = (zext(ECX) << 32) | zext(EBX);
    <done>
}
# This "bad_CMPXCHG8B" instruction encoding was not meant to be part of the x86 language.
|
|
# It was allowed by a toolchain (at Intel) and was encoded into at least one library.
|
|
# GCC does not recognize it. It does not make any semantic sense.
|
|
|
|
define pcodeop bad_CMPXCHG8B;
|
|
:bad_CMPXCHG8B r32 is vexMode=0 & byte=0xf; byte=0xc7; ( mod = 0b11 & reg_opcode=0b001 ) & r32 {
|
|
r32 = bad_CMPXCHG8B(r32);
|
|
}
|
|
|
|

@ifdef IA64
:CMPXCHG16B m128 is vexMode=0 & opsize=2 & byte=0xf; byte=0xc7; ( mod != 0b11 & reg_opcode=1 ) ... & ( m128 ) {
    ZF = ((zext(RDX) << 64) | zext(RAX)) == m128;

    if (ZF == 1) goto <equal>;
    RDX = m128(8);
    RAX = m128:8;
    goto <done>;

    <equal>
    m128 = ((zext(RCX) << 64) | zext(RBX));
    <done>
}
@endif
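
# Note: architecturally CMPXCHG16B requires m128 to be 16-byte aligned; an
# unaligned operand raises #GP on real hardware (not modeled here).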

define pcodeop cpuid;
define pcodeop cpuid_basic_info;
define pcodeop cpuid_Version_info;
define pcodeop cpuid_cache_tlb_info;
define pcodeop cpuid_serial_info;
define pcodeop cpuid_Deterministic_Cache_Parameters_info;
define pcodeop cpuid_MONITOR_MWAIT_Features_info;
define pcodeop cpuid_Thermal_Power_Management_info;
define pcodeop cpuid_Extended_Feature_Enumeration_info;
define pcodeop cpuid_Direct_Cache_Access_info;
define pcodeop cpuid_Architectural_Performance_Monitoring_info;
define pcodeop cpuid_Extended_Topology_info;
define pcodeop cpuid_Processor_Extended_States_info;
define pcodeop cpuid_Quality_of_Service_info;
define pcodeop cpuid_brand_part1_info;
define pcodeop cpuid_brand_part2_info;
define pcodeop cpuid_brand_part3_info;

# CPUID is difficult to model precisely.  Each pcodeop above stands in for one
# leaf of the real lookup and returns a pointer to a 16-byte result block
# (EAX, EBX, EDX, ECX in that order), so the side effects of the call are
# visible even though the actual values are not computed.
:CPUID is vexMode=0 & byte=0xf; byte=0xa2 {
tmpptr:$(SIZE) = 0;
if (EAX == 0) goto <basic_info>;
if (EAX == 1) goto <Version_info>;
if (EAX == 2) goto <cache_tlb_info>;
if (EAX == 3) goto <serial_info>;
if (EAX == 0x4) goto <Deterministic_Cache_Parameters_info>;
if (EAX == 0x5) goto <MONITOR_MWAIT_Features_info>;
if (EAX == 0x6) goto <Thermal_Power_Management_info>;
if (EAX == 0x7) goto <Extended_Feature_Enumeration_info>;
if (EAX == 0x9) goto <Direct_Cache_Access_info>;
if (EAX == 0xa) goto <Architectural_Performance_Monitoring_info>;
if (EAX == 0xb) goto <Extended_Topology_info>;
if (EAX == 0xd) goto <Processor_Extended_States_info>;
if (EAX == 0xf) goto <Quality_of_Service_info>;
if (EAX == 0x80000002) goto <brand_part1_info>;
if (EAX == 0x80000003) goto <brand_part2_info>;
if (EAX == 0x80000004) goto <brand_part3_info>;
tmpptr = cpuid(EAX);
goto <finish>;
<basic_info>
tmpptr = cpuid_basic_info(EAX);
goto <finish>;
<Version_info>
tmpptr = cpuid_Version_info(EAX);
goto <finish>;
<cache_tlb_info>
tmpptr = cpuid_cache_tlb_info(EAX);
goto <finish>;
<serial_info>
tmpptr = cpuid_serial_info(EAX);
goto <finish>;
<Deterministic_Cache_Parameters_info>
tmpptr = cpuid_Deterministic_Cache_Parameters_info(EAX);
goto <finish>;
<MONITOR_MWAIT_Features_info>
tmpptr = cpuid_MONITOR_MWAIT_Features_info(EAX);
goto <finish>;
<Thermal_Power_Management_info>
tmpptr = cpuid_Thermal_Power_Management_info(EAX);
goto <finish>;
<Extended_Feature_Enumeration_info>
tmpptr = cpuid_Extended_Feature_Enumeration_info(EAX);
goto <finish>;
<Direct_Cache_Access_info>
tmpptr = cpuid_Direct_Cache_Access_info(EAX);
goto <finish>;
<Architectural_Performance_Monitoring_info>
tmpptr = cpuid_Architectural_Performance_Monitoring_info(EAX);
goto <finish>;
<Extended_Topology_info>
tmpptr = cpuid_Extended_Topology_info(EAX);
goto <finish>;
<Processor_Extended_States_info>
tmpptr = cpuid_Processor_Extended_States_info(EAX);
goto <finish>;
<Quality_of_Service_info>
tmpptr = cpuid_Quality_of_Service_info(EAX);
goto <finish>;
<brand_part1_info>
tmpptr = cpuid_brand_part1_info(EAX);
goto <finish>;
<brand_part2_info>
tmpptr = cpuid_brand_part2_info(EAX);
goto <finish>;
<brand_part3_info>
tmpptr = cpuid_brand_part3_info(EAX);
goto <finish>;
<finish>
@ifdef IA64
RAX = zext(*:4 (tmpptr));
RBX = zext(*:4 (tmpptr + 4));
RDX = zext(*:4 (tmpptr + 8));
RCX = zext(*:4 (tmpptr + 12));
@else
EAX = *tmpptr;
EBX = *(tmpptr + 4);
EDX = *(tmpptr + 8);
ECX = *(tmpptr + 12);
@endif
}
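
# Illustrative note on the convention above (not normative): each recognized
# leaf maps EAX to a dedicated pcodeop returning a pointer to a 16-byte result
# block laid out as {EAX, EBX, EDX, ECX}.  So after "MOV EAX,0 / CPUID" the
# decompiled output shows something like:
#   tmpptr = cpuid_basic_info(0);
#   EAX = *(tmpptr); EBX = *(tmpptr+4); EDX = *(tmpptr+8); ECX = *(tmpptr+12);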

:DAA is vexMode=0 & bit64=0 & byte=0x27 { local car = ((AL & 0xf) > 9) | AF;
AL = AL + 6 * car;
CF = CF | car * carry(AL,6);
AF = car;
car = ((AL & 0xf0) > 0x90) | CF;
AL = AL + 0x60 * car;
CF = car; }
:DAS is vexMode=0 & bit64=0 & byte=0x2f { local car = ((AL & 0xf) > 9) | AF;
AL = AL - 6 * car;
CF = CF | car * (AL < 6);
AF = car;
car = (AL > 0x9f) | CF;
AL = AL - 0x60 * car;
CF = car; }
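
# Worked example for the DAA adjustment above (illustrative): after
# "MOV AL,0x19 / ADD AL,0x28" we have AL=0x41 with AF=1 (carry out of bit 3).
# DAA then adds 6 because AF is set, giving AL=0x47 -- the correct BCD sum
# 19 + 28 = 47.  DAS mirrors this with subtraction and borrow.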

:DEC spec_rm8 is vexMode=0 & byte=0xfe; spec_rm8 & reg_opcode=1 ... { OF = sborrow(spec_rm8,1); spec_rm8 = spec_rm8 - 1; resultflags( spec_rm8); }
:DEC spec_rm16 is vexMode=0 & opsize=0 & byte=0xff; spec_rm16 & reg_opcode=1 ... { OF = sborrow(spec_rm16,1); spec_rm16 = spec_rm16 - 1; resultflags(spec_rm16); }
:DEC spec_rm32 is vexMode=0 & opsize=1 & byte=0xff; spec_rm32 & check_rm32_dest ... & reg_opcode=1 ... { OF = sborrow(spec_rm32,1); spec_rm32 = spec_rm32 - 1; build check_rm32_dest; resultflags(spec_rm32); }
@ifdef IA64
:DEC spec_rm64 is vexMode=0 & opsize=2 & byte=0xff; spec_rm64 & reg_opcode=1 ... { OF = sborrow(spec_rm64,1); spec_rm64 = spec_rm64 - 1; resultflags(spec_rm64); }
@endif

@ifndef IA64
:DEC Rmr16 is vexMode=0 & opsize=0 & row=4 & page=1 & Rmr16 { OF = sborrow(Rmr16,1); Rmr16 = Rmr16 - 1; resultflags( Rmr16); }
:DEC Rmr32 is vexMode=0 & opsize=1 & row=4 & page=1 & Rmr32 & check_Rmr32_dest { OF = sborrow(Rmr32,1); Rmr32 = Rmr32 - 1; build check_Rmr32_dest; resultflags( Rmr32); }
@endif

:DIV rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=6 ... { rm8ext:2 = zext(rm8);
local quotient = AX / rm8ext; # DE exception if quotient doesn't fit in AL
local rem = AX % rm8ext;
AL = quotient:1;
AH = rem:1; }
:DIV rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=6 ... { rm16ext:4 = zext(rm16);
tmp:4 = (zext(DX) << 16) | zext(AX); # DE exception if quotient doesn't fit in AX
local quotient = tmp / rm16ext;
AX = quotient:2;
local rem = tmp % rm16ext;
DX = rem:2; }
:DIV rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EDX_dest ... & check_EAX_dest ... & reg_opcode=6 ... { rm32ext:8 = zext(rm32);
tmp:8 = (zext(EDX) << 32) | zext(EAX); # DE exception if quotient doesn't fit in EAX
local quotient = tmp / rm32ext;
EAX = quotient:4;
build check_EAX_dest;
local rem = tmp % rm32ext;
EDX = rem:4;
build check_EDX_dest; }
@ifdef IA64
:DIV rm64 is vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=6 ... { rm64ext:16 = zext(rm64);
tmp:16 = (zext(RDX) << 64) | zext(RAX); # DE exception if quotient doesn't fit in RAX
local quotient = tmp / rm64ext;
RAX = quotient:8;
local rem = tmp % rm64ext;
RDX = rem:8; }
@endif
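
# Worked example (illustrative): with EDX:EAX = 0x00000001:00000005 and a
# divisor of 2, DIV computes 0x100000005 / 2 = 0x80000002 remainder 1, so
# EAX=0x80000002 and EDX=1.  If the quotient cannot fit in EAX the processor
# raises #DE; that trap is not modeled here.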

enterFrames: low5 is low5 { tmp:1 = low5; export tmp; }

@ifdef IA64
:ENTER imm16,enterFrames is vexMode=0 & addrsize=2 & byte=0xc8; imm16; enterFrames & low5=0x00 {
push88(RBP);
RBP = RSP;
RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=2 & byte=0xc8; imm16; enterFrames & low5=0x01 {
push88(RBP);
frameTemp:8 = RSP;

push88(frameTemp);
RBP = frameTemp;
RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=2 & opsize=2 & byte=0xc8; imm16; enterFrames {
push88(RBP);
frameTemp:8 = RSP;

RSPt:$(SIZE) = RSP;
RBPt:$(SIZE) = RBP;
ii:1 = enterFrames - 1;
<loop>
RBPt = RBPt - 8;
RSPt = RSPt - 8;
*:8 RSPt = *:8 RBPt;
ii = ii - 1;
if (ii s> 0) goto <loop>;

tmp_offset:8 = 8 * zext(enterFrames - 1);
RSP = RSP - tmp_offset;
RBP = RBP - tmp_offset;

push88(frameTemp);
RBP = frameTemp;
RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=2 & opsize=1 & byte=0xc8; imm16; enterFrames {
push88(RBP);
frameTemp:8 = RSP;

RSPt:$(SIZE) = RSP;
RBPt:$(SIZE) = RBP;
ii:1 = enterFrames - 1;
<loop>
RBPt = RBPt - 4;
RSPt = RSPt - 4;
*:4 RSPt = *:4 RBPt;
ii = ii - 1;
if (ii s> 0) goto <loop>;

tmp_offset:8 = 4 * zext(enterFrames - 1);
RSP = RSP - tmp_offset;
RBP = RBP - tmp_offset;

push88(frameTemp);
RBP = frameTemp;
RSP = RSP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=2 & opsize=0 & byte=0xc8; imm16; enterFrames {
push88(RBP);
frameTemp:8 = RSP;

RSPt:$(SIZE) = RSP;
RBPt:$(SIZE) = RBP;
ii:1 = enterFrames - 1;
<loop>
RBPt = RBPt - 2;
RSPt = RSPt - 2;
*:2 RSPt = *:2 RBPt;
ii = ii - 1;
if (ii s> 0) goto <loop>;

tmp_offset:8 = 2 * zext(enterFrames - 1);
RSP = RSP - tmp_offset;
RBP = RBP - tmp_offset;

push88(frameTemp);
RBP = frameTemp;
RSP = RSP - imm16;
}
@endif

:ENTER imm16,enterFrames is vexMode=0 & addrsize=1 & byte=0xc8; imm16; enterFrames & low5=0x00 {
push44(EBP);
EBP = ESP;
ESP = ESP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=1 & byte=0xc8; imm16; enterFrames & low5=0x01 {
push44(EBP);
frameTemp:4 = ESP;

push44(frameTemp);
EBP = frameTemp;
ESP = ESP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=1 & opsize=1 & byte=0xc8; imm16; enterFrames {
push44(EBP);
frameTemp:4 = ESP;

@ifdef IA64
ESPt:$(SIZE) = zext(ESP);
EBPt:$(SIZE) = zext(EBP);
@else
ESPt:$(SIZE) = ESP;
EBPt:$(SIZE) = EBP;
@endif
ii:1 = enterFrames - 1;
<loop>
EBPt = EBPt - 4;
ESPt = ESPt - 4;
*:4 ESPt = *:4 EBPt;
ii = ii - 1;
if (ii s> 0) goto <loop>;

tmp_offset:4 = 4 * zext(enterFrames - 1);
ESP = ESP - tmp_offset;
EBP = EBP - tmp_offset;

push44(frameTemp);
EBP = frameTemp;
ESP = ESP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=1 & opsize=0 & byte=0xc8; imm16; enterFrames {
push44(EBP);
frameTemp:4 = ESP;

@ifdef IA64
ESPt:$(SIZE) = zext(ESP);
EBPt:$(SIZE) = zext(EBP);
@else
ESPt:$(SIZE) = ESP;
EBPt:$(SIZE) = EBP;
@endif
ii:1 = enterFrames - 1;
<loop>
EBPt = EBPt - 2;
ESPt = ESPt - 2;
*:2 ESPt = *:2 EBPt;
ii = ii - 1;
if (ii s> 0) goto <loop>;

tmp_offset:4 = 2 * zext(enterFrames - 1);
ESP = ESP - tmp_offset;
EBP = EBP - tmp_offset;

push44(frameTemp);
EBP = frameTemp;
ESP = ESP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=0 & byte=0xc8; imm16; enterFrames & low5=0x00 {
push22(BP);
BP = SP;
SP = SP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & addrsize=0 & byte=0xc8; imm16; enterFrames & low5=0x01 {
push22(BP);
frameTemp:2 = SP;

push22(frameTemp);
BP = frameTemp;
SP = SP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & seg16 & addrsize=0 & opsize=1 & byte=0xc8; imm16; enterFrames {
push24(zext(BP));
frameTemp:2 = SP;

SPt:2 = SP;
BPt:2 = BP;
ii:1 = enterFrames - 1;
<loop>

BPt = BPt - 4;
tmp2:$(SIZE) = segment(seg16,BPt);
SPt = SPt - 4;
tmp:$(SIZE) = segment(SS,SPt);
*:4 tmp = *:4 tmp2;
ii = ii - 1;
if (ii s> 0) goto <loop>;

tmp_offset:2 = 4 * zext(enterFrames - 1);
SP = SP - tmp_offset;
BP = BP - tmp_offset;

push24(zext(frameTemp));
BP = frameTemp;
SP = SP - imm16;
}

:ENTER imm16,enterFrames is vexMode=0 & seg16 & addrsize=0 & opsize=0 & byte=0xc8; imm16; enterFrames {
push22(BP);
frameTemp:2 = SP;

SPt:2 = SP;
BPt:2 = BP;
ii:1 = enterFrames - 1;
<loop>
BPt = BPt - 2;
tmp2:$(SIZE) = segment(seg16,BPt);
SPt = SPt - 2;
tmp:$(SIZE) = segment(SS,SPt);
*:2 tmp = *:2 tmp2;
ii = ii - 1;
if (ii s> 0) goto <loop>;

tmp_offset:2 = 2 * zext(enterFrames - 1);
SP = SP - tmp_offset;
BP = BP - tmp_offset;

push22(frameTemp);
BP = frameTemp;
SP = SP - imm16;
}
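
# Stack sketch for the ENTER constructors above (illustrative, 32-bit case):
# "ENTER 0x10,2" first pushes EBP and saves the new frame base in frameTemp,
# then copies nestingLevel-1 saved frame pointers down from the enclosing
# frames, pushes frameTemp itself, sets EBP = frameTemp, and finally reserves
# 0x10 bytes of locals by subtracting imm16 from ESP.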

# Informs the 80287 coprocessor of the switch to protected mode; treated as a NOP by the 80387 and later.
# We used to model this with a pseudo-op, but since this legacy instruction is now explicitly
# treated as a NOP, we treat it as a NOP here as well.
:FSETPM is vexMode=0 & byte=0xdb; byte=0xe4 { } # 80287 set protected mode
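
# HLT is modeled as a branch back to inst_start: execution spins in place
# rather than falling through, approximating the halted state until an
# interrupt is delivered.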
:HLT is vexMode=0 & byte=0xf4 { goto inst_start; }

:IDIV rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=7 ... { rm8ext:2 = sext(rm8);
local quotient = AX s/ rm8ext; # DE exception if quotient doesn't fit in AL
local rem = AX s% rm8ext;
AL = quotient:1;
AH = rem:1; }
:IDIV rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=7 ... { rm16ext:4 = sext(rm16);
tmp:4 = (zext(DX) << 16) | zext(AX); # DE exception if quotient doesn't fit in AX
local quotient = tmp s/ rm16ext;
AX = quotient:2;
local rem = tmp s% rm16ext;
DX = rem:2; }
:IDIV rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=7 ... { rm32ext:8 = sext(rm32);
tmp:8 = (zext(EDX) << 32) | zext(EAX); # DE exception if quotient doesn't fit in EAX
local quotient = tmp s/ rm32ext;
EAX = quotient:4;
build check_EAX_dest;
local rem = tmp s% rm32ext;
EDX = rem:4;
build check_EDX_dest; }
@ifdef IA64
:IDIV rm64 is vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=7 ... { rm64ext:16 = sext(rm64);
tmp:16 = (zext(RDX) << 64) | zext(RAX); # DE exception if quotient doesn't fit in RAX
local quotient = tmp s/ rm64ext;
RAX = quotient:8;
local rem = tmp s% rm64ext;
RDX = rem:8; }
@endif

:IMUL rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=5 ... { AX = sext(AL) * sext(rm8); imultflags(AL,AX); }
:IMUL rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=5 ... { tmp:4 = sext(AX) * sext(rm16);
DX = tmp(2); AX = tmp(0); imultflags(AX,tmp); }
:IMUL rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=5 ... { tmp:8 = sext(EAX) * sext(rm32);
EDX = tmp(4); build check_EDX_dest; EAX = tmp(0); build check_EAX_dest; imultflags(EAX,tmp); }
@ifdef IA64
# We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct
:IMUL rm64 is vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=5 ... { tmp:16 = sext(RAX) * sext(rm64);
RAX = RAX * rm64; RDX = tmp(8); imultflags(RAX,tmp); }
@endif
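
# Example of the double-multiply idiom above (illustrative): for RAX = -1
# (0xFFFFFFFFFFFFFFFF) and rm64 = 2, tmp:16 holds the full 128-bit product
# -2, which imultflags() needs, while "RAX = RAX * rm64" recomputes just the
# low 64 bits (0xFFFFFFFFFFFFFFFE) so emulators without 128-bit precision
# still produce the correct RAX.
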
:IMUL Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xaf; rm16 & Reg16 ... { tmp:4 = sext(Reg16) * sext(rm16);
Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp);}
:IMUL Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xaf; rm32 & Reg32 ... & check_Reg32_dest ... { tmp:8 = sext(Reg32) * sext(rm32);
Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; }
@ifdef IA64
# We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct
:IMUL Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xaf; rm64 & Reg64 ... { tmp:16 = sext(Reg64) * sext(rm64);
Reg64 = Reg64 * rm64; high:8 = tmp(8); imultflags(Reg64,tmp);}
@endif
:IMUL Reg16,rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x6b; (rm16 & Reg16 ...) ; simm8_16 { tmp:4 = sext(rm16) * sext(simm8_16);
Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp);}
:IMUL Reg32,rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x6b; (rm32 & Reg32 ... & check_Reg32_dest ... ) ; simm8_32 { tmp:8 = sext(rm32) * sext(simm8_32);
Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; }
@ifdef IA64
# We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct
:IMUL Reg64,rm64,simm8_64 is vexMode=0 & opsize=2 & byte=0x6b; (rm64 & Reg64 ...) ; simm8_64 { tmp:16 = sext(rm64) * sext(simm8_64);
Reg64 = rm64 * simm8_64; high:8 = tmp(8); imultflags(Reg64,tmp);}
@endif
:IMUL Reg16,rm16,simm16_16 is vexMode=0 & opsize=0 & byte=0x69; (rm16 & Reg16 ...) ; simm16_16 { tmp:4 = sext(rm16) * sext(simm16_16);
Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp);}
:IMUL Reg32,rm32,simm32_32 is vexMode=0 & opsize=1 & byte=0x69; (rm32 & Reg32 ... & check_Reg32_dest ...) ; simm32_32 { tmp:8 = sext(rm32) * sext(simm32_32);
Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; }
@ifdef IA64
:IMUL Reg64,rm64,simm32_32 is vexMode=0 & opsize=2 & byte=0x69; (rm64 & Reg64 ...) ; simm32_32 { tmp:16 = sext(rm64) * sext(simm32_32);
Reg64 = rm64 * sext(simm32_32); high:8 = tmp(8); imultflags(Reg64,tmp);}
@endif

# these appear in intelman2.pdf, but do they really exist?
#:IMUL Reg16,simm8_16 is vexMode=0 & opsize=0 & byte=0x6b; Reg16; simm8_16
#:IMUL Reg32,simm8_32 is vexMode=0 & opsize=1 & byte=0x6b; Reg32; simm8_32
#:IMUL Reg16,simm16 is vexMode=0 & opsize=0 & byte=0x69; Reg16; simm16
#:IMUL Reg32,simm32 is vexMode=0 & opsize=1 & byte=0x69; Reg32; simm32

:IN AL, imm8 is vexMode=0 & AL & (byte=0xe4; imm8) { tmp:1 = imm8; AL = in(tmp); }
:IN AX, imm8 is vexMode=0 & opsize=0 & AX & (byte=0xe5; imm8) { tmp:1 = imm8; AX = in(tmp); }
:IN EAX, imm8 is vexMode=0 & opsize=1 & EAX & check_EAX_dest & (byte=0xe5; imm8) { tmp:1 = imm8; EAX = in(tmp); build check_EAX_dest; }
@ifdef IA64
:IN RAX, imm8 is vexMode=0 & opsize=2 & RAX & (byte=0xe5; imm8) { tmp:1 = imm8; RAX = in(tmp); }
@endif
:IN AL, DX is vexMode=0 & AL & DX & (byte=0xec) { AL = in(DX); }
:IN AX, DX is vexMode=0 & opsize=0 & AX & DX & (byte=0xed) { AX = in(DX); }
:IN EAX, DX is vexMode=0 & opsize=1 & EAX & check_EAX_dest & DX & (byte=0xed) { EAX = in(DX); build check_EAX_dest; }
@ifdef IA64
:IN RAX, DX is vexMode=0 & opsize=2 & RAX & DX & (byte=0xed) { RAX = in(DX); }
@endif

:INC spec_rm8 is vexMode=0 & byte=0xfe; spec_rm8 & reg_opcode=0 ... { OF = scarry(spec_rm8,1); spec_rm8 = spec_rm8 + 1; resultflags( spec_rm8); }
:INC spec_rm16 is vexMode=0 & opsize=0 & byte=0xff; spec_rm16 & reg_opcode=0 ... { OF = scarry(spec_rm16,1); spec_rm16 = spec_rm16 + 1; resultflags(spec_rm16); }
:INC spec_rm32 is vexMode=0 & opsize=1 & byte=0xff; spec_rm32 & check_rm32_dest ... & reg_opcode=0 ... { OF = scarry(spec_rm32,1); spec_rm32 = spec_rm32 + 1; build check_rm32_dest; resultflags(spec_rm32); }
@ifdef IA64
:INC spec_rm64 is vexMode=0 & opsize=2 & byte=0xff; spec_rm64 & reg_opcode=0 ... { OF = scarry(spec_rm64,1); spec_rm64 = spec_rm64 + 1; resultflags(spec_rm64); }
@endif

@ifndef IA64
:INC Rmr16 is vexMode=0 & opsize=0 & row = 4 & page = 0 & Rmr16 { OF = scarry(Rmr16,1); Rmr16 = Rmr16 + 1; resultflags( Rmr16); }
:INC Rmr32 is vexMode=0 & opsize=1 & row = 4 & page = 0 & Rmr32 { OF = scarry(Rmr32,1); Rmr32 = Rmr32 + 1; resultflags( Rmr32); }
@endif

:INSB^rep^reptail eseDI1,DX is vexMode=0 & rep & reptail & byte=0x6c & eseDI1 & DX { eseDI1 = in(DX); }
:INSW^rep^reptail eseDI2,DX is vexMode=0 & rep & reptail & opsize=0 & byte=0x6d & eseDI2 & DX { eseDI2 = in(DX); }
:INSD^rep^reptail eseDI4,DX is vexMode=0 & rep & reptail & opsize=1 & byte=0x6d & eseDI4 & DX { eseDI4 = in(DX); }
:INSD^rep^reptail eseDI4,DX is vexMode=0 & rep & reptail & opsize=2 & byte=0x6d & eseDI4 & DX { eseDI4 = in(DX); }

:INT1 is vexMode=0 & byte=0xf1 { tmp:1 = 0x1; intloc:$(SIZE) = swi(tmp); call [intloc]; return [0:1]; }
:INT3 is vexMode=0 & byte=0xcc { tmp:1 = 0x3; intloc:$(SIZE) = swi(tmp); call [intloc]; return [0:1]; }
:INT imm8 is vexMode=0 & byte=0xcd; imm8 { tmp:1 = imm8; intloc:$(SIZE) = swi(tmp); call [intloc]; }
:INTO is vexMode=0 & byte=0xce & bit64=0
{
tmp:1 = 0x4;
intloc:$(SIZE) = swi(tmp);

if (OF != 1) goto <no_overflow>;
call [intloc];
<no_overflow>
}

:INVD is vexMode=0 & byte=0xf; byte=0x8 {}
:INVLPG Mem is vexMode=0 & byte=0xf; byte=0x1; ( reg_opcode=7 ) ... & Mem { invlpg(Mem); }

:INVLPGA is vexMode=0 & addrsize=0 & byte=0xf; byte=0x1; byte=0xDF { invlpga(AX,ECX); }
:INVLPGA is vexMode=0 & addrsize=1 & byte=0xf; byte=0x1; byte=0xDF { invlpga(EAX,ECX); }
@ifdef IA64
:INVLPGA is vexMode=0 & addrsize=2 & byte=0xf; byte=0x1; byte=0xDF { invlpga(RAX,ECX); }
@endif

:INVPCID r32, m128 is vexMode=0 & addrsize=1 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x82; r32 ... & m128 { invpcid(r32, m128); }
@ifdef IA64
:INVPCID r64, m128 is vexMode=0 & addrsize=2 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x82; r64 ... & m128 { invpcid(r64, m128); }
@endif

:IRET is vexMode=0 & addrsize=0 & opsize=0 & byte=0xcf { pop22(IP); EIP=zext(IP); pop22(CS); pop22(flags); return [EIP]; }
:IRET is vexMode=0 & addrsize=1 & opsize=0 & byte=0xcf { pop42(IP); EIP=zext(IP); pop42(CS); pop42(flags); return [EIP]; }
@ifdef IA64
:IRET is vexMode=0 & addrsize=2 & opsize=0 & byte=0xcf { pop82(IP); RIP=zext(IP); pop82(CS); pop82(flags); return [RIP]; }
@endif
:IRETD is vexMode=0 & addrsize=0 & opsize=1 & byte=0xcf { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); pop24(tmp); flags=tmp(0); return [EIP]; }
:IRETD is vexMode=0 & addrsize=1 & opsize=1 & byte=0xcf { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); pop44(eflags); return [EIP]; }
@ifdef IA64
:IRETD is vexMode=0 & addrsize=2 & opsize=1 & byte=0xcf { pop84(RIP); tmp:8=0; pop84(tmp); CS=tmp(0); pop84(eflags); return [RIP]; }
:IRETQ is vexMode=0 & addrsize=2 & opsize=2 & byte=0xcf { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); pop88(eflags); return [RIP]; }
@endif

:J^cc rel8 is vexMode=0 & row=7 & cc; rel8 { if (cc) goto rel8; }
:J^cc rel16 is vexMode=0 & bit64=0 & opsize=0 & byte=0xf; row=8 & cc; rel16 { if (cc) goto rel16; }
:J^cc rel32 is vexMode=0 & opsize=1 & byte=0xf; row=8 & cc; rel32 { if (cc) goto rel32; }
# The following is picked up by the line above; rel32 works for both 32- and 64-bit.
#@ifdef IA64
#:J^cc rel32 is vexMode=0 & addrsize=2 & byte=0xf; row=8 & cc; rel32 { if (cc) goto rel32; }
#@endif

:JCXZ rel8 is vexMode=0 & opsize=0 & byte=0xe3; rel8 { if (CX==0) goto rel8; }
:JECXZ rel8 is vexMode=0 & opsize=1 & byte=0xe3; rel8 { if (ECX==0) goto rel8; }
@ifdef IA64
:JRCXZ rel8 is vexMode=0 & opsize=2 & byte=0xe3; rel8 { if (RCX==0) goto rel8; }
@endif

:JMP rel8 is vexMode=0 & byte=0xeb; rel8 { goto rel8; }
:JMP rel16 is vexMode=0 & opsize=0 & byte=0xe9; rel16 { goto rel16; }
:JMP rel32 is vexMode=0 & opsize=1 & byte=0xe9; rel32 { goto rel32; }
:JMP rel32 is vexMode=0 & opsize=2 & byte=0xe9; rel32 { goto rel32; }
:JMP rm16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xff & currentCS; rm16 & reg_opcode=4 ... { target:4 = segment(currentCS,rm16); goto [target]; }
:JMP rm16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=4 ... { goto [rm16]; }
:JMP rm32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; rm32 & reg_opcode=4 ... { goto [rm32]; }
@ifdef IA64
:JMP rm64 is vexMode=0 & addrsize=2 & byte=0xff; rm64 & reg_opcode=4 ... { goto [rm64]; }
@endif

:JMPF ptr1616 is vexMode=0 & opsize=0 & byte=0xea; ptr1616 { goto ptr1616; }
:JMPF ptr1632 is vexMode=0 & opsize=1 & byte=0xea; ptr1632 { goto ptr1632; }
:JMPF Mem is vexMode=0 & opsize=0 & byte=0xff; Mem & reg_opcode=5 ... { target:$(SIZE) = zext(*:2 Mem); goto [target]; }
:JMPF Mem is vexMode=0 & opsize=1 & byte=0xff; Mem & reg_opcode=5 ... {
@ifdef IA64
target:$(SIZE) = zext(*:4 Mem);
@else
target:$(SIZE) = *:4 Mem;
@endif
goto [target];
}
@ifdef IA64
:JMPF Mem is vexMode=0 & opsize=2 & byte=0xff; Mem & reg_opcode=5 ... { target:$(SIZE) = *:8 Mem; goto [target]; }
@endif

# Initially disallowed in 64-bit mode, but later reintroduced
:LAHF is vexMode=0 & byte=0x9f { AH=(SF<<7)|(ZF<<6)|(AF<<4)|(PF<<2)|2|CF; }
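# Bit layout loaded into AH by LAHF (bit 1 is always set, bits 3 and 5 clear):
#   AH = SF:ZF:0:AF:0:PF:1:CF   (bit 7 .. bit 0)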

:LAR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x2; rm16 & Reg16 ... { Reg16 = rm16 & 0xff00; ZF=1; }
:LAR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x2; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32 & 0xffff00; build check_Reg32_dest; ZF=1; }
@ifdef IA64
:LAR Reg64,rm32 is vexMode=0 & opsize=2 & byte=0xf; byte=0x2; rm32 & Reg64 ... { Reg64 = zext( rm32 & 0xffff00 ); ZF=1; }
@endif

:LDMXCSR m32 is vexMode=0 & byte=0xf; byte=0xae; ( mod != 0b11 & reg_opcode=2 ) ... & m32 { MXCSR = m32; }

@ifndef IA64
# 16 & 32-bit only
:LDS Reg16,Mem is vexMode=0 & opsize=0 & byte=0xC5; Mem & Reg16 ... { tmp:4 = *Mem; DS = tmp(2); Reg16 = tmp(0); }
:LDS Reg32,Mem is vexMode=0 & opsize=1 & byte=0xC5 & bit64=0; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; DS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
@endif

:LSS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB2; Mem & Reg16 ... { tmp:4 = *Mem; SS = tmp(2); Reg16 = tmp(0); }
:LSS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB2; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; SS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
@ifdef IA64
:LSS Reg64,Mem is vexMode=0 & opsize=2 & byte=0x0F; byte=0xB2; Mem & Reg64 ... { tmp:10 = *Mem; SS = tmp(8); Reg64 = tmp(0); }
@endif

@ifndef IA64
# 16 & 32-bit only
:LES Reg16,Mem is vexMode=0 & opsize=0 & byte=0xC4; Mem & Reg16 ... { tmp:4 = *Mem; ES = tmp(2); Reg16 = tmp(0); }
:LES Reg32,Mem is vexMode=0 & opsize=1 & byte=0xC4 & bit64=0; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; ES = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
@endif

:LFS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB4; Mem & Reg16 ... { tmp:4 = *Mem; FS = tmp(2); Reg16 = tmp(0); }
:LFS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB4; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; FS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
@ifdef IA64
:LFS Reg64,Mem is vexMode=0 & opsize=2 & byte=0x0F; byte=0xB4; Mem & Reg64 ... { tmp:10 = *Mem; FS = tmp(8); Reg64 = tmp(0); }
@endif
:LGS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB5; Mem & Reg16 ... { tmp:4 = *Mem; GS = tmp(2); Reg16 = tmp(0); }
:LGS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB5; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; GS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
@ifdef IA64
:LGS Reg64,Mem is vexMode=0 & opsize=2 & byte=0x0F; byte=0xB5; Mem & Reg64 ... { tmp:10 = *Mem; GS = tmp(8); Reg64 = tmp(0); }
@endif

:LEA Reg16,addr16 is vexMode=0 & opsize=0 & addrsize=0 & byte=0x8D; addr16 & Reg16 ... { Reg16 = addr16; }
:LEA Reg16,addr32 is vexMode=0 & opsize=0 & addrsize=1 & byte=0x8D; addr32 & Reg16 ... { Reg16 = addr32(0); }
:LEA Reg32,addr16 is vexMode=0 & opsize=1 & addrsize=0 & byte=0x8D; addr16 & Reg32 ... { Reg32 = zext(addr16); }
:LEA Reg32,addr32 is vexMode=0 & opsize=1 & addrsize=1 & byte=0x8D; addr32 & Reg32 ... & check_Reg32_dest ... {
Reg32 = addr32;
build check_Reg32_dest;
}

@ifdef IA64
:LEA Reg16,addr64 is vexMode=0 & opsize=0 & addrsize=2 & byte=0x8D; addr64 & Reg16 ... { Reg16 = addr64(0); }
:LEA Reg32,addr64 is vexMode=0 & opsize=1 & addrsize=2 & byte=0x8D; addr64 & Reg32 ... & check_Reg32_dest ... {
Reg32 = addr64(0);
build check_Reg32_dest;
}
:LEA Reg64,addr32 is vexMode=0 & opsize=2 & addrsize=1 & byte=0x8D; addr32 & Reg64 ... { Reg64 = zext(addr32); }
:LEA Reg64,addr64 is vexMode=0 & opsize=2 & addrsize=2 & byte=0x8D; addr64 & Reg64 ... { Reg64 = addr64; }
@endif

:LEAVE is vexMode=0 & addrsize=0 & byte=0xc9 { SP = BP; tmp:$(SIZE) = segment(SS,SP); BP = *tmp; SP = SP + 2; }
:LEAVE is vexMode=0 & addrsize=1 & byte=0xc9 { ESP = EBP; EBP = *$(STACKPTR); ESP=ESP+4; }
@ifdef IA64
:LEAVE is vexMode=0 & addrsize=2 & byte=0xc9 { RSP = RBP; RBP = *RSP; RSP=RSP+8; }
@endif

define pcodeop GlobalDescriptorTableRegister;
:LGDT m16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m16
{
GlobalDescriptorTableRegister(m16);
}

:LGDT m32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m32
{
GlobalDescriptorTableRegister(m32);
}

@ifdef IA64
:LGDT m64 is vexMode=0 & opsize=2 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m64
{
GlobalDescriptorTableRegister(m64);
}
@endif

define pcodeop InterruptDescriptorTableRegister;
:LIDT m16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m16
{
InterruptDescriptorTableRegister(m16);
}

:LIDT m32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m32
{
InterruptDescriptorTableRegister(m32);
}
@ifdef IA64
:LIDT m64 is vexMode=0 & opsize=2 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m64
{
InterruptDescriptorTableRegister(m64);
}
@endif

define pcodeop LocalDescriptorTableRegister;
:LLDT rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=2 ...
{
LocalDescriptorTableRegister(rm16);
}

@ifdef IA64
:LMSW rm16 is vexMode=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=6 ...
{
CR0 = (CR0 & 0xFFFFFFFFFFFFFFF0) | zext(rm16 & 0x000F);
}
@else
:LMSW rm16 is vexMode=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=6 ...
{
CR0 = (CR0 & 0xFFFFFFF0) | zext(rm16 & 0x000F);
}
@endif

:LOCK is vexMode=0 & byte=0xf0 { LOCK(); }

:LODSB^rep^reptail dseSI1 is vexMode=0 & rep & reptail & byte=0xAC & dseSI1 { build rep; build dseSI1; AL=dseSI1; build reptail; }
:LODSW^rep^reptail dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xAD & dseSI2 { build rep; build dseSI2; AX=dseSI2; build reptail; }
:LODSD^rep^reptail dseSI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xAD & dseSI4 { build rep; build dseSI4; EAX=dseSI4; build reptail; }
@ifdef IA64
:LODSQ^rep^reptail dseSI8 is vexMode=0 & rep & reptail & opsize=2 & byte=0xAD & dseSI8 { build rep; build dseSI8; RAX=dseSI8; build reptail; }
@endif

:LOOP rel8 is vexMode=0 & addrsize=0 & byte=0xE2; rel8 { CX = CX -1; if (CX!=0) goto rel8; }
:LOOP rel8 is vexMode=0 & addrsize=1 & byte=0xE2; rel8 { ECX = ECX -1; if (ECX!=0) goto rel8; }
@ifdef IA64
:LOOP rel8 is vexMode=0 & addrsize=2 & byte=0xE2; rel8 { RCX = RCX -1; if (RCX!=0) goto rel8; }
@endif

:LOOPZ rel8 is vexMode=0 & addrsize=0 & byte=0xE1; rel8 { CX = CX -1; if (CX!=0 && ZF!=0) goto rel8; }
:LOOPZ rel8 is vexMode=0 & addrsize=1 & byte=0xE1; rel8 { ECX = ECX -1; if (ECX!=0 && ZF!=0) goto rel8; }
@ifdef IA64
:LOOPZ rel8 is vexMode=0 & addrsize=2 & byte=0xE1; rel8 { RCX = RCX -1; if (RCX!=0 && ZF!=0) goto rel8; }
@endif

:LOOPNZ rel8 is vexMode=0 & addrsize=0 & byte=0xE0; rel8 { CX = CX -1; if (CX!=0 && ZF==0) goto rel8; }
:LOOPNZ rel8 is vexMode=0 & addrsize=1 & byte=0xE0; rel8 { ECX = ECX -1; if (ECX!=0 && ZF==0) goto rel8; }
@ifdef IA64
:LOOPNZ rel8 is vexMode=0 & addrsize=2 & byte=0xE0; rel8 { RCX = RCX -1; if (RCX!=0 && ZF==0) goto rel8; }
@endif

define pcodeop SegmentLimit;
:LSL Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x3; rm16 & Reg16 ...
{
tmp:3 = SegmentLimit(rm16);
Reg16 = tmp:2;
ZF = tmp[16,1];
}

:LSL Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x3; rm32 & Reg32 ...
{
tmp:3 = SegmentLimit(rm32);
Reg32 = zext(tmp:2);
ZF = tmp[16,1];
}

@ifdef IA64
:LSL Reg64,rm32 is vexMode=0 & opsize=2 & byte=0xf; byte=0x3; rm32 & Reg64 ...
{
tmp:3 = SegmentLimit(rm32);
Reg64 = zext(tmp:2);
ZF = tmp[16,1];
}
@endif
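
# In the LSL constructors above, the 3-byte temporary packs the 16-bit segment
# limit in bits 0-15 and a validity flag in bit 16, which is peeled off into
# ZF with the bit-range syntax tmp[16,1].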

define pcodeop TaskRegister;
:LTR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=3 ... { TaskRegister(rm16); }

:MOV rm8,Reg8 is vexMode=0 & byte=0x88; rm8 & Reg8 ... { rm8=Reg8; }
:MOV rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x89; rm16 & Reg16 ... { rm16=Reg16; }
#:MOV rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x89; rm32 & Reg32 ... { rm32=Reg32; }
:MOV rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x89; rm32 & check_rm32_dest ... & Reg32 ... { rm32=Reg32; build check_rm32_dest; }
@ifdef IA64
:MOV rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x89; rm64 & Reg64 ... { rm64=Reg64; }
@endif
:MOV Reg8,rm8 is vexMode=0 & byte=0x8a; rm8 & Reg8 ... { Reg8 = rm8; }
:MOV Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x8b; rm16 & Reg16 ... { Reg16 = rm16; }
#:MOV Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x8b; rm32 & Reg32 ... { Reg32 = rm32; }
:MOV Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x8b; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32; build check_Reg32_dest; }
@ifdef IA64
:MOV Reg64,rm64 is vexMode=0 & opsize=2 & byte=0x8b; rm64 & Reg64 ... { Reg64 = rm64; }
@endif
:MOV rm16,Sreg is vexMode=0 & byte=0x8c; rm16 & Sreg ... { rm16 = Sreg; }
:MOV Sreg,rm16 is vexMode=0 & byte=0x8e; rm16 & Sreg ... { Sreg=rm16; }
:MOV AL,moffs8 is vexMode=0 & byte=0xa0; AL & moffs8 { AL=moffs8; }
:MOV AX,moffs16 is vexMode=0 & opsize=0 & byte=0xa1; AX & moffs16 { AX=moffs16; }
:MOV EAX,moffs32 is vexMode=0 & opsize=1 & byte=0xa1; EAX & check_EAX_dest & moffs32 { EAX=moffs32; build check_EAX_dest; }
@ifdef IA64
:MOV RAX,moffs64 is vexMode=0 & opsize=2 & byte=0xa1; RAX & moffs64 { RAX=moffs64; }
@endif
:MOV moffs8,AL is vexMode=0 & byte=0xa2; AL & moffs8 { moffs8=AL; }
:MOV moffs16,AX is vexMode=0 & opsize=0 & byte=0xa3; AX & moffs16 { moffs16=AX; }
:MOV moffs32,EAX is vexMode=0 & opsize=1 & byte=0xa3; EAX & moffs32 { moffs32=EAX; }
@ifdef IA64
:MOV moffs64,RAX is vexMode=0 & opsize=2 & byte=0xa3; RAX & moffs64 { moffs64=RAX; }
@endif
:MOV CRmr8,imm8 is vexMode=0 & row=11 & page=0 & CRmr8; imm8 { CRmr8 = imm8; }
:MOV CRmr16,imm16 is vexMode=0 & opsize=0 & row=11 & page=1 & CRmr16; imm16 { CRmr16 = imm16; }
:MOV CRmr32,imm32 is vexMode=0 & opsize=1 & row=11 & page=1 & CRmr32; imm32 { CRmr32 = imm32; }
@ifdef IA64
:MOV Rmr64,imm64 is vexMode=0 & opsize=2 & row=11 & page=1 & Rmr64; imm64 { Rmr64 = imm64; }
@endif
:MOV spec_rm8,imm8 is vexMode=0 & byte=0xc6; (spec_rm8 & reg_opcode=0 ...); imm8 { spec_rm8 = imm8; }
:MOV CRmr8,imm8 is vexMode=0 & byte=0xc6; (CRmr8 & mod=3 & reg_opcode=0); imm8 { CRmr8 = imm8; }
:MOV spec_rm16,imm16 is vexMode=0 & opsize=0 & byte=0xc7; (spec_rm16 & reg_opcode=0 ...); imm16 { spec_rm16 = imm16; }
:MOV CRmr16,imm16 is vexMode=0 & opsize=0 & byte=0xc7; (CRmr16 & mod=3 & reg_opcode=0); imm16 { CRmr16 = imm16; }
:MOV spec_rm32,imm32 is vexMode=0 & opsize=1 & byte=0xc7; (spec_rm32 & check_rm32_dest ... & reg_opcode=0 ...); imm32 { spec_rm32 = imm32; build check_rm32_dest; }
:MOV CRmr32,imm32 is vexMode=0 & opsize=1 & byte=0xc7; (CRmr32 & mod=3 & reg_opcode=0); imm32 { CRmr32 = imm32; }
@ifdef IA64
:MOV spec_rm64,simm32 is vexMode=0 & opsize=2 & byte=0xc7; (spec_rm64 & reg_opcode=0 ...); simm32 { spec_rm64 = simm32; }
@endif
:MOV creg, Rmr32 is vexMode=0 & byte=0xf; byte=0x22; Rmr32 & creg {
@ifdef IA64
creg=zext(Rmr32);
@else
creg=Rmr32;
@endif
}
@ifdef IA64
:MOV creg_x, Rmr32 is vexMode=0 & rexRprefix=1 & byte=0xf; byte=0x22; Rmr32 & creg_x { creg_x=zext(Rmr32); }
:MOV creg, Rmr64 is vexMode=0 & bit64=1 & byte=0xf; byte=0x22; Rmr64 & creg { creg=Rmr64; }
:MOV creg_x, Rmr64 is vexMode=0 & bit64=1 & rexRprefix=1 & byte=0xf; byte=0x22; Rmr64 & creg_x { creg_x=Rmr64; }
@endif
:MOV Rmr32, creg is vexMode=0 & byte=0xf; byte=0x20; Rmr32 & creg {
@ifdef IA64
Rmr32 = creg:4;
@else
Rmr32 = creg;
@endif
}
:MOV Rmr32, creg_x is vexMode=0 & rexRprefix=1 & byte=0xf; byte=0x20; Rmr32 & creg_x { Rmr32 = creg_x:4; }
@ifdef IA64
:MOV Rmr64, creg is vexMode=0 & bit64=1 & byte=0xf; byte=0x20; Rmr64 & creg { Rmr64 = creg; }
:MOV Rmr64, creg_x is vexMode=0 & bit64=1 & rexRprefix=1 & byte=0xf; byte=0x20; Rmr64 & creg_x { Rmr64 = creg_x; }
@endif
:MOV Rmr32, debugreg is vexMode=0 & byte=0xf; byte=0x21; Rmr32 & debugreg {
@ifdef IA64
Rmr32 = debugreg:4;
@else
Rmr32 = debugreg;
@endif
}
:MOV Rmr32, debugreg_x is vexMode=0 & rexRprefix=1 & byte=0xf; byte=0x21; Rmr32 & debugreg_x { Rmr32 = debugreg_x:4; }
@ifdef IA64
:MOV Rmr64, debugreg is vexMode=0 & bit64=1 & byte=0xf; byte=0x21; Rmr64 & debugreg { Rmr64 = debugreg; }
:MOV Rmr64, debugreg_x is vexMode=0 & bit64=1 & rexRprefix=1 & byte=0xf; byte=0x21; Rmr64 & debugreg_x { Rmr64 = debugreg_x; }
@endif
:MOV debugreg, Rmr32 is vexMode=0 & byte=0xf; byte=0x23; Rmr32 & debugreg {
@ifdef IA64
debugreg = zext(Rmr32);
@else
debugreg = Rmr32;
@endif
}
@ifdef IA64
:MOV debugreg_x, Rmr32 is vexMode=0 & rexRprefix=1 & byte=0xf; byte=0x23; Rmr32 & debugreg_x & mod=3 { debugreg_x = zext(Rmr32); }
:MOV debugreg, Rmr64 is vexMode=0 & bit64=1 & byte=0xf; byte=0x23; Rmr64 & debugreg & mod=3 { debugreg = Rmr64; }
:MOV debugreg_x, Rmr64 is vexMode=0 & bit64=1 & rexRprefix=1 & byte=0xf; byte=0x23; Rmr64 & debugreg_x & mod=3 { debugreg_x = Rmr64; }
@endif

@ifndef IA64
# These are obsolete instructions after the 486 generation.
# They were erroneously placed in the IA64 build.
# They were removed to facilitate instruction patching to generate "MOV EAX, 0x2" correctly.
#:MOV r32, testreg is vexMode=0 & byte=0xf; byte=0x24; r32 & testreg & mod=3 { r32 = testreg; }
#:MOV testreg, r32 is vexMode=0 & byte=0xf; byte=0x26; r32 & testreg & mod=3 { testreg = r32; }
@endif

define pcodeop swap_bytes;
:MOVBE Reg16, m16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x38; byte=0xf0; Reg16 ... & m16 { Reg16 = swap_bytes( m16 ); }
:MOVBE Reg32, m32 is vexMode=0 & opsize=1 & mandover=0 & byte=0xf; byte=0x38; byte=0xf0; Reg32 ... & m32 { Reg32 = swap_bytes( m32 ); }
:MOVBE m16, Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x38; byte=0xf1; Reg16 ... & m16 { m16 = swap_bytes( Reg16 ); }
:MOVBE m32, Reg32 is vexMode=0 & opsize=1 & mandover=0 & byte=0xf; byte=0x38; byte=0xf1; Reg32 ... & m32 { m32 = swap_bytes( Reg32 ); }
@ifdef IA64
:MOVBE Reg64, m64 is vexMode=0 & opsize=2 & mandover=0 & byte=0xf; byte=0x38; byte=0xf0; Reg64 ... & m64 { Reg64 = swap_bytes( m64 ); }
:MOVBE m64, Reg64 is vexMode=0 & opsize=2 & mandover=0 & byte=0xf; byte=0x38; byte=0xf1; Reg64 ... & m64 { m64 = swap_bytes( Reg64 ); }
@endif

:MOVNTI Mem,Reg32 is vexMode=0 & opsize = 1; byte=0xf; byte=0xc3; Mem & Reg32 ... { *Mem = Reg32; }
@ifdef IA64
:MOVNTI Mem,Reg64 is vexMode=0 & opsize = 2; byte=0xf; byte=0xc3; Mem & Reg64 ... { *Mem = Reg64; }
@endif

:MOVSB^rep^reptail eseDI1,dseSI1 is vexMode=0 & rep & reptail & byte=0xa4 & eseDI1 & dseSI1 { build rep; build eseDI1; build dseSI1; eseDI1 = dseSI1; build reptail; }
:MOVSW^rep^reptail eseDI2,dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xa5 & eseDI2 & dseSI2 { build rep; build eseDI2; build dseSI2; eseDI2 = dseSI2; build reptail; }
:MOVSD^rep^reptail eseDI4,dseSI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xa5 & eseDI4 & dseSI4 { build rep; build eseDI4; build dseSI4; eseDI4 = dseSI4; build reptail; }
@ifdef IA64
:MOVSQ^rep^reptail eseDI8,dseSI8 is vexMode=0 & rep & reptail & opsize=2 & byte=0xa5 & eseDI8 & dseSI8 { build rep; build eseDI8; build dseSI8; eseDI8 = dseSI8; build reptail; }
@endif

:MOVSX Reg16,spec_rm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbe; spec_rm8 & Reg16 ... { Reg16 = sext(spec_rm8); }
:MOVSX Reg32,spec_rm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbe; spec_rm8 & Reg32 ... & check_Reg32_dest ... { Reg32 = sext(spec_rm8); build check_Reg32_dest; }
@ifdef IA64
:MOVSX Reg64,spec_rm8 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbe; spec_rm8 & Reg64 ... { Reg64 = sext(spec_rm8); }
@endif
:MOVSX Reg32,spec_rm16 is vexMode=0 & byte=0xf; byte=0xbf; spec_rm16 & Reg32 ... & check_Reg32_dest ... { Reg32 = sext(spec_rm16); build check_Reg32_dest; }
@ifdef IA64
:MOVSX Reg64,spec_rm16 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbf; spec_rm16 & Reg64 ... { Reg64 = sext(spec_rm16); }
@endif

:MOVSXD Reg32,rm32 is vexMode=0 & bit64=1 & opsize=1 & byte=0x63; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32; build check_Reg32_dest; }
@ifdef IA64
:MOVSXD Reg64,rm32 is vexMode=0 & bit64=1 & opsize=2 & byte=0x63; rm32 & Reg64 ... { Reg64 = sext(rm32); }
@endif

:MOVZX Reg16,spec_rm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb6; spec_rm8 & Reg16 ... { Reg16 = zext(spec_rm8); }
:MOVZX Reg32,spec_rm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb6; spec_rm8 & Reg32 ... & check_Reg32_dest ... { Reg32 = zext(spec_rm8); build check_Reg32_dest; }
@ifdef IA64
:MOVZX Reg64,spec_rm8 is vexMode=0 & opsize=2 & byte=0xf; byte=0xb6; spec_rm8 & Reg64 ... { Reg64 = zext(spec_rm8); }
@endif

:MOVZX Reg32,spec_rm16 is vexMode=0 & byte=0xf; byte=0xb7; spec_rm16 & Reg32 ... & check_Reg32_dest ... { Reg32 = zext(spec_rm16); build check_Reg32_dest; }
@ifdef IA64
:MOVZX Reg64,spec_rm16 is vexMode=0 & opsize=2 & byte=0xf; byte=0xb7; spec_rm16 & Reg64 ... { Reg64 = zext(spec_rm16); }
@endif

:MUL rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=4 ... { AX=zext(AL)*zext(rm8); multflags(AH); }
:MUL rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=4 ... { tmp:4=zext(AX)*zext(rm16); DX=tmp(2); AX=tmp(0); multflags(DX); }
:MUL rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=4 ... { tmp:8=zext(EAX)*zext(rm32); EDX=tmp(4); build check_EDX_dest; multflags(EDX); EAX=tmp(0); build check_EAX_dest; }
@ifdef IA64
:MUL rm64 is vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=4 ... { tmp:16=zext(RAX)*zext(rm64); RDX=tmp(8); RAX=tmp(0); multflags(RDX); }
@endif
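
# Worked example (illustrative): MUL with EAX=0x80000000 and rm32=2 yields the
# 64-bit product 0x1_00000000, so EDX=1 and EAX=0.  Per the x86 MUL flag rule,
# the multflags() macro is passed the high half so CF/OF reflect whether it is
# nonzero (here CF=OF=1).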

:MWAIT is vexMode=0 & byte=0x0f; byte=0x01; byte=0xC9 { mwait(); }
:MWAITX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xFB { mwaitx(); }
:MONITOR is vexMode=0 & byte=0x0f; byte=0x01; byte=0xC8 { monitor(); }
:MONITORX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xFA { monitorx(); }

:NEG rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=3 ... { negflags(rm8); rm8 = -rm8; resultflags(rm8 ); }
:NEG rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=3 ... { negflags(rm16); rm16 = -rm16; resultflags(rm16); }
:NEG rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_rm32_dest ... & reg_opcode=3 ... { negflags(rm32); rm32 = -rm32; resultflags(rm32); build check_rm32_dest;}
@ifdef IA64
:NEG rm64 is vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=3 ... { negflags(rm64); rm64 = -rm64; resultflags(rm64); }
@endif

:NOP is vexMode=0 & opsize=0 & byte=0x90 { }
:NOP is vexMode=0 & opsize=1 & byte=0x90 { }
:NOP rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; high5=3; rm16 ... { }
:NOP rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; high5=3; rm32 ... { }
:NOP^"/reserved" rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; byte=0x18; rm16 & reg_opcode_hb=1 ... { }
:NOP^"/reserved" rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; byte=0x18; rm32 & reg_opcode_hb=1 ... { }
:NOP rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; byte=0x1f; rm16 & reg_opcode=0 ... { }
:NOP rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; byte=0x1f; rm32 & reg_opcode=0 ... { }

:NOT rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=2 ... { rm8 = ~rm8; }
:NOT rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=2 ... { rm16 = ~rm16; }
:NOT rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_rm32_dest ... & reg_opcode=2 ... { rm32 = ~rm32; build check_rm32_dest;}
@ifdef IA64
:NOT rm64 is vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=2 ... { rm64 = ~rm64; }
@endif

:OR AL,imm8 is vexMode=0 & byte=0x0c; AL & imm8 { logicalflags(); AL = AL | imm8; resultflags( AL); }
:OR AX,imm16 is vexMode=0 & opsize=0 & byte=0xd; AX & imm16 { logicalflags(); AX = AX | imm16; resultflags( AX); }
:OR EAX,imm32 is vexMode=0 & opsize=1 & byte=0xd; EAX & check_EAX_dest & imm32 { logicalflags(); EAX = EAX | imm32; build check_EAX_dest; resultflags( EAX); }
@ifdef IA64
:OR RAX,simm32 is vexMode=0 & opsize=2 & byte=0xd; RAX & simm32 { logicalflags(); RAX = RAX | simm32; resultflags( RAX); }
@endif
:OR spec_rm8,imm8 is vexMode=0 & (byte=0x80 | byte=0x82); spec_rm8 & reg_opcode=1 ...; imm8 { logicalflags(); spec_rm8 = spec_rm8 | imm8; resultflags( spec_rm8); }
:OR spec_rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; spec_rm16 & reg_opcode=1 ...; imm16 { logicalflags(); spec_rm16 = spec_rm16 | imm16; resultflags( spec_rm16); }
:OR spec_rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; spec_rm32 & check_rm32_dest ... & reg_opcode=1 ...; imm32 { logicalflags(); spec_rm32 = spec_rm32 | imm32; build check_rm32_dest; resultflags( spec_rm32); }
@ifdef IA64
:OR spec_rm64,simm32 is vexMode=0 & opsize=2 & byte=0x81; spec_rm64 & reg_opcode=1 ...; simm32 { logicalflags(); tmp:8 = spec_rm64; spec_rm64 = tmp | simm32; resultflags( spec_rm64); }
@endif
:OR spec_rm16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; spec_rm16 & reg_opcode=1 ...; usimm8_16 { logicalflags(); spec_rm16 = spec_rm16 | usimm8_16; resultflags( spec_rm16); }
:OR spec_rm32,usimm8_32 is vexMode=0 & opsize=1 & byte=0x83; spec_rm32 & check_rm32_dest ... & reg_opcode=1 ...; usimm8_32 { logicalflags(); spec_rm32 = spec_rm32 | usimm8_32; build check_rm32_dest; resultflags( spec_rm32); }
@ifdef IA64
:OR spec_rm64,usimm8_64 is vexMode=0 & opsize=2 & byte=0x83; spec_rm64 & reg_opcode=1 ...; usimm8_64 { logicalflags(); spec_rm64 = spec_rm64 | usimm8_64; resultflags( spec_rm64); }
@endif
:OR rm8,Reg8 is vexMode=0 & byte=0x8; rm8 & Reg8 ... { logicalflags(); rm8 = rm8 | Reg8; resultflags( rm8); }
:OR rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x9; rm16 & Reg16 ... { logicalflags(); rm16 = rm16 | Reg16; resultflags( rm16); }
:OR rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x9; rm32 & check_rm32_dest ... & Reg32 ... { logicalflags(); rm32 = rm32 | Reg32; build check_rm32_dest; resultflags( rm32); }
@ifdef IA64
:OR rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x9; rm64 & Reg64 ... { logicalflags(); rm64 = rm64 | Reg64; resultflags( rm64); }
@endif
:OR Reg8,rm8 is vexMode=0 & byte=0xa; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 | rm8; resultflags( Reg8); }
:OR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xb; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 | rm16; resultflags(Reg16); }
:OR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xb; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 | rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:OR Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xb; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 | rm64; resultflags(Reg64); }
@endif

:OUT imm8,AL is vexMode=0 & byte=0xe6; imm8 & AL { tmp:1 = imm8; out(tmp,AL); }
:OUT imm8,AX is vexMode=0 & opsize=0 & byte=0xe7; imm8 & AX { tmp:1 = imm8; out(tmp,AX); }
:OUT imm8,EAX is vexMode=0 & byte=0xe7; imm8 & EAX { tmp:1 = imm8; out(tmp,EAX); }
:OUT DX,AL is vexMode=0 & byte=0xee & DX & AL { out(DX,AL); }
:OUT DX,AX is vexMode=0 & opsize=0 & byte=0xef & DX & AX { out(DX,AX); }
:OUT DX,EAX is vexMode=0 & byte=0xef & DX & EAX { out(DX,EAX); }

:OUTSB^rep^reptail DX,dseSI1 is vexMode=0 & rep & reptail & byte=0x6e & DX & dseSI1 { out(dseSI1,DX); }
:OUTSW^rep^reptail DX,dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0x6f & DX & dseSI2 { out(dseSI2,DX); }
:OUTSD^rep^reptail DX,dseSI4 is vexMode=0 & rep & reptail & byte=0x6f & DX & dseSI4 { out(dseSI4,DX); }

:PAUSE is vexMode=0 & opsize=0 & $(PRE_F3) & byte=0x90 { }
:PAUSE is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x90 { }

:POP rm16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { pop22(rm16); }
:POP rm16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { pop42(rm16); }
:POP rm32 is vexMode=0 & addrsize=0 & opsize=1 & byte=0x8f; rm32 & reg_opcode=0 ... { pop24(rm32); }
:POP rm32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0x8f; rm32 & reg_opcode=0 ... { pop44(rm32); }
@ifdef IA64
:POP rm16 is vexMode=0 & addrsize=2 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { pop82(rm16); }
:POP rm64 is vexMode=0 & addrsize=2 & byte=0x8f; rm64 & reg_opcode=0 ... { pop88(rm64); }
@endif

:POP Rmr16 is vexMode=0 & addrsize=0 & opsize=0 & row=5 & page=1 & Rmr16 { pop22(Rmr16); }
:POP Rmr16 is vexMode=0 & addrsize=1 & opsize=0 & row=5 & page=1 & Rmr16 { pop42(Rmr16); }
:POP Rmr32 is vexMode=0 & addrsize=0 & opsize=1 & row=5 & page=1 & Rmr32 { pop24(Rmr32); }
:POP Rmr32 is vexMode=0 & addrsize=1 & opsize=1 & row=5 & page=1 & Rmr32 { pop44(Rmr32); }
@ifdef IA64
:POP Rmr16 is vexMode=0 & addrsize=2 & opsize=0 & row=5 & page=1 & Rmr16 { pop82(Rmr16); }
:POP Rmr64 is vexMode=0 & addrsize=2 & row=5 & page=1 & Rmr64 { pop88(Rmr64); }
@endif

:POP DS is vexMode=0 & addrsize=0 & byte=0x1f & DS { pop22(DS); }
:POP DS is vexMode=0 & addrsize=1 & byte=0x1f & DS { popseg44(DS); }
:POP ES is vexMode=0 & addrsize=0 & byte=0x7 & ES { pop22(ES); }
:POP ES is vexMode=0 & addrsize=1 & byte=0x7 & ES { popseg44(ES); }
:POP SS is vexMode=0 & addrsize=0 & byte=0x17 & SS { pop22(SS); }
:POP SS is vexMode=0 & addrsize=1 & byte=0x17 & SS { popseg44(SS); }
:POP FS is vexMode=0 & addrsize=0 & byte=0xf; byte=0xa1 & FS { pop22(FS); }
:POP FS is vexMode=0 & addrsize=1 & byte=0xf; byte=0xa1 & FS { popseg44(FS); }
@ifdef IA64
:POP FS is vexMode=0 & addrsize=2 & byte=0xf; byte=0xa1 & FS { popseg88(FS); }
@endif
:POP GS is vexMode=0 & addrsize=0 & byte=0xf; byte=0xa9 & GS { pop22(GS); }
:POP GS is vexMode=0 & addrsize=1 & byte=0xf; byte=0xa9 & GS { popseg44(GS); }
@ifdef IA64
:POP GS is vexMode=0 & addrsize=2 & byte=0xf; byte=0xa9 & GS { popseg88(GS); }
@endif

:POPA is vexMode=0 & addrsize=0 & opsize=0 & byte=0x61 { pop22(DI); pop22(SI); pop22(BP); tmp:2=0; pop22(tmp); pop22(BX); pop22(DX); pop22(CX); pop22(AX); }
:POPA is vexMode=0 & addrsize=1 & opsize=0 & byte=0x61 { pop42(DI); pop42(SI); pop42(BP); tmp:2=0; pop42(tmp); pop42(BX); pop42(DX); pop42(CX); pop42(AX); }
:POPAD is vexMode=0 & addrsize=0 & opsize=1 & byte=0x61 { pop24(EDI); pop24(ESI); pop24(EBP); tmp:4=0; pop24(tmp); pop24(EBX); pop24(EDX); pop24(ECX); pop24(EAX); }
:POPAD is vexMode=0 & addrsize=1 & opsize=1 & byte=0x61 { pop44(EDI); pop44(ESI); pop44(EBP); tmp:4=0; pop44(tmp); pop44(EBX); pop44(EDX); pop44(ECX); pop44(EAX); }
:POPF is vexMode=0 & addrsize=0 & opsize=0 & byte=0x9d { pop22(flags); unpackflags(flags); }
:POPF is vexMode=0 & addrsize=1 & opsize=0 & byte=0x9d { pop42(flags); unpackflags(flags); }
:POPFD is vexMode=0 & addrsize=0 & opsize=1 & byte=0x9d { pop24(eflags); unpackflags(eflags); unpackeflags(eflags); }
:POPFD is vexMode=0 & addrsize=1 & opsize=1 & byte=0x9d { pop44(eflags); unpackflags(eflags); unpackeflags(eflags); }
@ifdef IA64
:POPF is vexMode=0 & addrsize=2 & opsize=0 & byte=0x9d { pop82(flags); unpackflags(flags); }
:POPFD is vexMode=0 & addrsize=2 & opsize=1 & byte=0x9d { pop84(eflags); unpackflags(eflags); unpackeflags(eflags); }
:POPFQ is vexMode=0 & addrsize=2 & opsize=2 & byte=0x9d { pop88(rflags); unpackflags(rflags); unpackeflags(rflags); }
@endif

:PREFETCH m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=0 ... { }
:PREFETCH m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode ... { } # rest aliased to /0
:PREFETCHW m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=1 ... { }
:PREFETCHWT1 m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=2 ... { }

:PREFETCHT0 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=1 ) ... & m8 { }
:PREFETCHT1 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=2 ) ... & m8 { }
:PREFETCHT2 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=3 ) ... & m8 { }
:PREFETCHNTA m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=0 ) ... & m8 { }

define pcodeop ptwrite;

:PTWRITE rm32 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0xae; rm32 & reg_opcode=4 ... { ptwrite(rm32); }

:PUSH rm16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push22(rm16); }
:PUSH rm16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push42(rm16); }
:PUSH rm32 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xff; rm32 & reg_opcode=6 ... { push24(rm32); }
:PUSH rm32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; rm32 & reg_opcode=6 ... { push44(rm32); }
@ifdef IA64
:PUSH rm16 is vexMode=0 & addrsize=2 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push82(rm16); }
:PUSH rm64 is vexMode=0 & addrsize=2 & byte=0xff; rm64 & reg_opcode=6 ... { push88(rm64); }
@endif
:PUSH Rmr16 is vexMode=0 & addrsize=0 & opsize=0 & row=5 & page=0 & Rmr16 { push22(Rmr16); }
:PUSH Rmr16 is vexMode=0 & addrsize=1 & opsize=0 & row=5 & page=0 & Rmr16 { push42(Rmr16); }
:PUSH Rmr32 is vexMode=0 & addrsize=0 & opsize=1 & row=5 & page=0 & Rmr32 { push24(Rmr32); }
:PUSH Rmr32 is vexMode=0 & addrsize=1 & opsize=1 & row=5 & page=0 & Rmr32 { push44(Rmr32); }
@ifdef IA64
:PUSH Rmr16 is vexMode=0 & addrsize=2 & opsize=0 & row=5 & page=0 & Rmr16 { push82(Rmr16); }
:PUSH Rmr64 is vexMode=0 & addrsize=2 & row=5 & page=0 & Rmr64 { push88(Rmr64); }
@endif
:PUSH simm8_16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push22(tmp); }
:PUSH simm8_16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push42(tmp); }
@ifdef IA64
:PUSH simm8_16 is vexMode=0 & addrsize=2 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push82(tmp); }
@endif
:PUSH simm8_32 is vexMode=0 & addrsize=0 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push24(tmp); }
:PUSH simm8_32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push44(tmp); }
@ifdef IA64
:PUSH simm8_64 is vexMode=0 & addrsize=2 & opsize=1 & byte=0x6a; simm8_64 { tmp:8=simm8_64; push88(tmp); }
:PUSH simm8_64 is vexMode=0 & addrsize=2 & opsize=2 & byte=0x6a; simm8_64 { tmp:8=simm8_64; push88(tmp); }
@endif
:PUSH simm16_16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push22(tmp); }
:PUSH simm16_16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push42(tmp); }
@ifdef IA64
:PUSH simm16_16 is vexMode=0 & addrsize=2 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push82(tmp); }
@endif
:PUSH imm32 is vexMode=0 & addrsize=0 & opsize=1 & byte=0x68; imm32 { tmp:4=imm32; push24(tmp); }
:PUSH imm32 is vexMode=0 & addrsize=1 & opsize=1 & byte=0x68; imm32 { tmp:4=imm32; push44(tmp); }
@ifdef IA64
:PUSH simm32 is vexMode=0 & addrsize=2 & opsize=1 & byte=0x68; simm32 { tmp:8=simm32; push88(tmp); }
:PUSH simm32 is vexMode=0 & addrsize=2 & opsize=2 & byte=0x68; simm32 { tmp:8=simm32; push88(tmp); }
@endif

:PUSH CS is vexMode=0 & addrsize=0 & byte=0xe & CS { push22(CS); }
:PUSH CS is vexMode=0 & addrsize=1 & byte=0xe & CS { pushseg44(CS); }
:PUSH SS is vexMode=0 & addrsize=0 & byte=0x16 & SS { push22(SS); }
:PUSH SS is vexMode=0 & addrsize=1 & byte=0x16 & SS { pushseg44(SS); }
:PUSH DS is vexMode=0 & addrsize=0 & byte=0x1e & DS { push22(DS); }
:PUSH DS is vexMode=0 & addrsize=1 & byte=0x1e & DS { pushseg44(DS); }
:PUSH ES is vexMode=0 & addrsize=0 & byte=0x6 & ES { push22(ES); }
:PUSH ES is vexMode=0 & addrsize=1 & byte=0x6 & ES { pushseg44(ES); }
:PUSH FS is vexMode=0 & addrsize=0 & byte=0xf; byte=0xa0 & FS { push22(FS); }
:PUSH FS is vexMode=0 & addrsize=1 & byte=0xf; byte=0xa0 & FS { pushseg44(FS); }
@ifdef IA64
:PUSH FS is vexMode=0 & addrsize=2 & byte=0xf; byte=0xa0 & FS { pushseg88(FS); }
@endif
:PUSH GS is vexMode=0 & addrsize=0 & byte=0xf; byte=0xa8 & GS { push22(GS); }
:PUSH GS is vexMode=0 & addrsize=1 & byte=0xf; byte=0xa8 & GS { pushseg44(GS); }
@ifdef IA64
:PUSH GS is vexMode=0 & addrsize=2 & byte=0xf; byte=0xa8 & GS { pushseg88(GS); }
@endif

:PUSHA is vexMode=0 & addrsize=0 & opsize=0 & byte=0x60 { local tmp=SP; push22(AX); push22(CX); push22(DX); push22(BX); push22(tmp); push22(BP); push22(SI); push22(DI); }
:PUSHA is vexMode=0 & addrsize=1 & opsize=0 & byte=0x60 { local tmp=SP; push42(AX); push42(CX); push42(DX); push42(BX); push42(tmp); push42(BP); push42(SI); push42(DI); }
:PUSHAD is vexMode=0 & addrsize=0 & opsize=1 & byte=0x60 { local tmp=ESP; push24(EAX); push24(ECX); push24(EDX); push24(EBX); push24(tmp); push24(EBP); push24(ESI); push24(EDI); }
:PUSHAD is vexMode=0 & addrsize=1 & opsize=1 & byte=0x60 { local tmp=ESP; push44(EAX); push44(ECX); push44(EDX); push44(EBX); push44(tmp); push44(EBP); push44(ESI); push44(EDI); }
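# Note: PUSHA/PUSHAD push the *original* stack pointer value, captured in tmp
# before the first push, as required by the IA-32 specification.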

:PUSHF is vexMode=0 & addrsize=0 & opsize=0 & byte=0x9c { packflags(flags); push22(flags); }
:PUSHF is vexMode=0 & addrsize=1 & opsize=0 & byte=0x9c { packflags(flags); push42(flags); }
:PUSHFD is vexMode=0 & addrsize=0 & opsize=1 & byte=0x9c { packflags(eflags); packeflags(eflags); push24(eflags); }
:PUSHFD is vexMode=0 & addrsize=1 & opsize=1 & byte=0x9c { packflags(eflags); packeflags(eflags); push44(eflags); }
@ifdef IA64
:PUSHF is vexMode=0 & addrsize=2 & opsize=0 & byte=0x9c { packflags(flags); push82(flags); }
:PUSHFQ is vexMode=0 & addrsize=2 & byte=0x9c { packflags(rflags); packeflags(rflags); push88(rflags); }
@endif

:RCL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=2 ... { local tmpCF = CF; CF = rm8 s< 0; rm8 = (rm8 << 1) | tmpCF; OF = CF ^ (rm8 s< 0); }
:RCL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=2 ... { local cnt=(CL&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp<<cnt)|(tmp>>(9-cnt));rm8=tmp(0); CF=(tmp&0x100)!=0; }
:RCL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=2 ... ; imm8 { local cnt=(imm8&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp<<cnt)|(tmp>>(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; }
:RCL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=2 ... { local tmpCF = CF; CF = rm16 s< 0; rm16 = (rm16 << 1) | zext(tmpCF); OF = CF ^ (rm16 s< 0);}
:RCL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=2 ... {local cnt=(CL&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp<<cnt)|(tmp>>(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; }
:RCL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=2 ... ; imm8 { local cnt=(imm8&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp<<cnt)|(tmp>>(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; }
:RCL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=2 ... { local tmpCF=CF; CF=rm32 s< 0; rm32=(rm32<<1)|zext(tmpCF); OF=CF^(rm32 s< 0); build check_rm32_dest; }
:RCL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=2 ... { local cnt=CL&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp<<cnt)|(tmp>>(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; }
:RCL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=2 ... ; imm8 { local cnt=imm8&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp<<cnt)|(tmp>>(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; }
@ifdef IA64
:RCL rm64,n1 is vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=2 ... { local tmpCF=CF; CF=rm64 s< 0; rm64=(rm64<<1)|zext(tmpCF); OF=CF^(rm64 s< 0);}
|
|
:RCL rm64,CL is vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=2 ... { local cnt=CL&0x3f; tmp:16=(zext(CF)<<64)|zext(rm64); tmp=(tmp<<cnt)|(tmp>>(65-cnt)); rm64=tmp(0); CF=(tmp&0x10000000000000000)!=0; }
|
|
:RCL rm64,imm8 is vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=2 ... ; imm8 { local cnt=imm8&0x3f; tmp:16=(zext(CF)<<64)|zext(rm64); tmp=(tmp<<cnt)|(tmp>>(65-cnt)); rm64=tmp(0); CF=(tmp&0x10000000000000000)!=0; }
|
|
@endif
|
|
|
|
:RCR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm8 s< 0; CF=(rm8&1)!=0; rm8=(rm8>>1)|(tmpCF<<7); OF=OF^(rm8 s< 0); }
|
|
:RCR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=3 ... { local cnt=(CL&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp>>cnt)|(tmp<<(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; }
|
|
:RCR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=3 ... ; imm8 { local cnt=(imm8&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp>>cnt)|(tmp<<(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; }
|
|
:RCR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm16 s< 0; CF=(rm16&1)!=0; rm16=(rm16>>1)|(zext(tmpCF)<<15); OF=OF^(rm16 s< 0); }
|
|
:RCR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=3 ... { local cnt=(CL&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp>>cnt)|(tmp<<(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; }
|
|
:RCR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=3 ... ; imm8 { local cnt=(imm8&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp>>cnt)|(tmp<<(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; }
|
|
:RCR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=3 ... { local tmpCF=CF; OF=rm32 s< 0; CF=(rm32&1)!=0; rm32=(rm32>>1)|(zext(tmpCF)<<31); OF=OF^(rm32 s< 0); build check_rm32_dest; }
|
|
:RCR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=3 ... { local cnt=CL&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp>>cnt)|(tmp<<(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; }
|
|
:RCR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=3 ... ; imm8 { local cnt=imm8&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp>>cnt)|(tmp<<(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; }
|
|
@ifdef IA64
|
|
:RCR rm64,n1 is vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm64 s< 0; CF=(rm64&1)!=0; rm64=(rm64>>1)|(zext(tmpCF)<<63); OF=OF^(rm64 s< 0); }
|
|
:RCR rm64,CL is vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=3 ... { local cnt=CL&0x3f; tmp:16=(zext(CF)<<64)|zext(rm64); tmp=(tmp>>cnt)|(tmp<<(65-cnt)); rm64=tmp(0); CF=(tmp&0x10000000000000000)!=0; }
|
|
:RCR rm64,imm8 is vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=3 ... ; imm8 { local cnt=imm8&0x3f; tmp:16=(zext(CF)<<64)|zext(rm64); tmp=(tmp>>cnt)|(tmp<<(65-cnt)); rm64=tmp(0); CF=(tmp&0x10000000000000000)!=0; }
|
|
@endif
|
|
|
|
@ifdef IA64
|
|
define pcodeop readfsbase;
|
|
:RDFSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=0 & r32 { r32 = readfsbase(); }
|
|
:RDFSBASE r64 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=0 & r64 { r64 = readfsbase(); }
|
|
|
|
define pcodeop readgsbase;
|
|
:RDGSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=1 & r32 { r32 = readgsbase(); }
|
|
:RDGSBASE r64 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=1 & r64 { r64 = readgsbase(); }
|
|
@endif
|
|
|
|
define pcodeop rdmsr;
|
|
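# rdmsr yields the 64-bit MSR value; the tmp(4)/tmp(0) truncations take the
# byte ranges at offsets 4 and 0, i.e. the high and low dwords, matching the
# architectural EDX:EAX result convention.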
:RDMSR is vexMode=0 & byte=0xf; byte=0x32 { tmp:8 = rdmsr(ECX); EDX = tmp(4); EAX = tmp(0); }
|
|
|
|
define pcodeop readPID;
|
|
:RDPID r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xc7; reg_opcode=7 & r32 { r32 = readPID(); }
|
|
@ifdef IA64
|
|
:RDPID r64 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xc7; reg_opcode=7 & r64 { r64 = readPID(); }
|
|
@endif
|
|
|
|
define pcodeop rdpkru_u32;
|
|
:RDPKRU is vexMode=0 & byte=0x0f; byte=0x01; byte=0xee { EAX = rdpkru_u32(); }
|
|
|
|
define pcodeop rdpmc;
|
|
:RDPMC is vexMode=0 & byte=0xf; byte=0x33 { tmp:8 = rdpmc(ECX); EDX = tmp(4); EAX = tmp(0); }
|
|
|
|
define pcodeop rdtsc;
|
|
:RDTSC is vexMode=0 & byte=0xf; byte=0x31 { tmp:8 = rdtsc(); EDX = tmp(4); EAX = tmp(0); }
|
|
|
|
:RET is vexMode=0 & addrsize=0 & opsize=0 & byte=0xc3 { pop22(IP); EIP=segment(CS,IP); return [EIP]; }
|
|
:RET is vexMode=0 & addrsize=1 & opsize=0 & byte=0xc3 { pop42(IP); EIP=zext(IP); return [EIP]; }
|
|
:RET is vexMode=0 & addrsize=0 & opsize=1 & byte=0xc3 { pop24(EIP); return [EIP]; }
|
|
:RET is vexMode=0 & addrsize=1 & opsize=1 & byte=0xc3 { pop44(EIP); return [EIP]; }
|
|
@ifdef IA64
|
|
:RET is vexMode=0 & addrsize=2 & byte=0xc3 { pop88(RIP); return [RIP]; }
|
|
@endif
|
|
|
|
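# Far returns (0xcb, 0xca) pop the offset first and CS second.  The pop
# macros are named pop<stack-width><operand-width>; in 64-bit mode
# (addrsize=2) the stack pointer is RSP, so only the pop8x forms apply.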
:RET is vexMode=0 & addrsize=0 & opsize=0 & byte=0xcb { pop22(IP); pop22(CS); EIP = segment(CS,IP); return [EIP]; }
|
|
:RET is vexMode=0 & addrsize=1 & opsize=0 & byte=0xcb { pop42(IP); EIP=zext(IP); pop42(CS); return [EIP]; }
|
|
@ifdef IA64
|
|
:RET is vexMode=0 & addrsize=2 & opsize=0 & byte=0xcb { pop82(IP); RIP=zext(IP); pop82(CS); return [RIP]; }
|
|
@endif
|
|
:RET is vexMode=0 & addrsize=0 & opsize=1 & byte=0xcb { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); return [EIP]; }
|
|
:RET is vexMode=0 & addrsize=1 & opsize=1 & byte=0xcb { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); return [EIP]; }
|
|
@ifdef IA64
|
|
:RET is vexMode=0 & addrsize=2 & opsize=1 & byte=0xcb { pop84(EIP); RIP=zext(EIP); tmp:4=0; pop84(tmp); CS=tmp(0); return [RIP]; }
|
|
:RET is vexMode=0 & addrsize=2 & opsize=2 & byte=0xcb { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); return [RIP]; }
|
|
@endif
|
|
|
|
:RET imm16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xc2; imm16 { pop22(IP); EIP=segment(CS,IP); SP=SP+imm16; return [EIP]; }
|
|
:RET imm16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xc2; imm16 { pop42(IP); EIP=zext(IP); ESP=ESP+imm16; return [EIP]; }
|
|
:RET imm16 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xc2; imm16 { pop24(EIP); SP=SP+imm16; return [EIP]; }
|
|
:RET imm16 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xc2; imm16 { pop44(EIP); ESP=ESP+imm16; return [EIP]; }
|
|
@ifdef IA64
|
|
:RET imm16 is vexMode=0 & addrsize=2 & byte=0xc2; imm16 { pop88(RIP); RSP=RSP+imm16; return [RIP]; }
|
|
@endif
|
|
|
|
:RET imm16 is vexMode=0 & addrsize=0 & opsize=0 & byte=0xca; imm16 { pop22(IP); pop22(CS); EIP=segment(CS,IP); SP=SP+imm16; return [EIP]; }
|
|
:RET imm16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xca; imm16 { pop42(IP); EIP=zext(IP); pop42(CS); ESP=ESP+imm16; return [EIP]; }
|
|
@ifdef IA64
|
|
:RET imm16 is vexMode=0 & addrsize=2 & opsize=0 & byte=0xca; imm16 { pop82(IP); RIP=zext(IP); pop82(CS); RSP=RSP+imm16; return [RIP]; }
|
|
@endif
|
|
|
|
:RET imm16 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xca; imm16 { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); SP=SP+imm16; return [EIP]; }
|
|
:RET imm16 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xca; imm16 { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); ESP=ESP+imm16; return [EIP]; }
|
|
@ifdef IA64
|
|
:RET imm16 is vexMode=0 & addrsize=2 & opsize=1 & byte=0xca; imm16 { pop84(EIP); tmp:4=0; pop84(tmp); RIP=zext(EIP); CS=tmp(0); RSP=RSP+imm16; return [RIP]; }
|
|
:RET imm16 is vexMode=0 & addrsize=2 & opsize=2 & byte=0xca; imm16 { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); RSP=RSP+imm16; return [RIP]; }
|
|
@endif
|
|
|
|
:ROL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=0 ... { CF = rm8 s< 0; rm8 = (rm8 << 1) | CF; OF = CF ^ (rm8 s< 0); }
|
|
:ROL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=0 ... { local cnt = CL & 0x7; local count_and_mask = CL & 0x1f;rm8 = (rm8 << cnt) | (rm8 >> (8 - cnt)); rolflags(rm8, count_and_mask);}
|
|
:ROL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x7; rm8 = (rm8 << cnt) | (rm8 >> (8 - cnt)); rolflags(rm8,imm8 & 0x1f:1);}
|
|
:ROL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=0 ... { CF = rm16 s< 0; rm16 = (rm16 << 1) | zext(CF); OF = CF ^ (rm16 s< 0); }
|
|
:ROL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=0 ... { local cnt = CL & 0xf; local count_and_mask = CL & 0x1f;rm16 = (rm16 << cnt) | (rm16 >> (16 - cnt)); rolflags(rm16,count_and_mask);}
|
|
:ROL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0xf; rm16 = (rm16 << cnt) | (rm16 >> (16 - cnt)); rolflags(rm16,imm8 & 0x1f:1);}
|
|
:ROL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=0 ... { CF = rm32 s< 0; rm32 = (rm32 << 1) | zext(CF); OF = CF ^ (rm32 s< 0); build check_rm32_dest; }
|
|
:ROL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=0 ... { local cnt = CL & 0x1f; rm32 = (rm32 << cnt) | (rm32 >> (32 - cnt)); rolflags(rm32,cnt); build check_rm32_dest; }
|
|
:ROL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x1f; rm32 = (rm32 << cnt) | (rm32 >> (32 - cnt)); rolflags(rm32,cnt); build check_rm32_dest; }
|
|
@ifdef IA64
|
|
:ROL rm64,n1 is vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=0 ... { CF = rm64 s< 0; rm64 = (rm64 << 1) | zext(CF); OF = CF ^ (rm64 s< 0); }
|
|
:ROL rm64,CL is vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=0 ... { local cnt = CL & 0x3f; rm64 = (rm64 << cnt) | (rm64 >> (64 - cnt)); rolflags(rm64,cnt);}
|
|
:ROL rm64,imm8 is vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x3f; rm64 = (rm64 << cnt) | (rm64 >> (64 - cnt)); rolflags(rm64,cnt);}
|
|
@endif
|
|
|
|
:ROR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=1 ... { CF = rm8 & 1; rm8 = (rm8 >> 1) | (CF << 7); OF = ((rm8 & 0x40) != 0) ^ (rm8 s< 0); }
|
|
:ROR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=1 ... { local cnt = CL & 0x7; local count_and_mask = CL & 0x1f;rm8 = (rm8 >> cnt) | (rm8 << (8 - cnt)); rorflags(rm8,count_and_mask);}
|
|
:ROR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0x7; rm8 = (rm8 >> cnt) | (rm8 << (8 - cnt)); rorflags(rm8,imm8 & 0x1f:1);}
|
|
:ROR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=1 ... { CF=(rm16 & 1)!=0; rm16=(rm16>>1)|(zext(CF)<<15); OF=((rm16 & 0x4000) != 0) ^ (rm16 s< 0); }
|
|
:ROR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=1 ... { local cnt = CL & 0xf; local count_and_mask = CL & 0x1f; rm16 = (rm16 >> cnt) | (rm16 << (16 - cnt)); rorflags(rm16,count_and_mask);}
|
|
:ROR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0xf; rm16 = (rm16 >> cnt) | (rm16 << (16 - cnt)); rorflags(rm16,imm8 & 0x1f:1);}
|
|
:ROR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=1 ... { CF=(rm32&1)!=0; rm32=(rm32>>1)|(zext(CF)<<31); OF=((rm32&0x40000000)!=0) ^ (rm32 s< 0); build check_rm32_dest; }
|
|
:ROR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=1 ... { local cnt = CL & 0x1f; rm32 = (rm32 >> cnt) | (rm32 << (32 - cnt)); rorflags(rm32,cnt); build check_rm32_dest; }
|
|
:ROR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0x1f; rm32 = (rm32 >> cnt) | (rm32 << (32 - cnt)); rorflags(rm32,cnt); build check_rm32_dest; }
|
|
@ifdef IA64
|
|
:ROR rm64,n1 is vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=1 ... { CF=(rm64&1)!=0; rm64=(rm64>>1)|(zext(CF)<<63); OF=((rm64&0x4000000000000000)!=0) ^ (rm64 s< 0); }
|
|
:ROR rm64,CL is vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=1 ... { local cnt = CL & 0x3f; rm64 = (rm64 >> cnt) | (rm64 << (64 - cnt)); rorflags(rm64,cnt);}
|
|
:ROR rm64,imm8 is vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0x3f; rm64 = (rm64 >> cnt) | (rm64 << (64 - cnt)); rorflags(rm64,cnt);}
|
|
@endif
|
|
|
|
define pcodeop smm_restore_state;
|
|
:RSM is vexMode=0 & byte=0xf; byte=0xaa { tmp:$(SIZE) = smm_restore_state(); return [tmp]; }
|
|
|
|
# Initially disallowed in 64bit mode, but later reintroduced
|
|
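# SAHF loads SF, ZF, AF, PF and CF from AH bits 7, 6, 4, 2 and 0; the fixed
# EFLAGS bits 5, 3 and 1 are simply ignored.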
:SAHF is vexMode=0 & byte=0x9e { SF = (AH & 0x80) != 0;
|
|
ZF = (AH & 0x40) != 0;
|
|
AF = (AH & 0x10) != 0;
|
|
PF = (AH & 0x04) != 0;
|
|
CF = (AH & 0x01) != 0; }
|
|
|
|
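# SALC is undocumented: AL = 0xFF if CF is set, else 0x00; the multiply
# below expresses that branch-free.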
:SALC is vexMode=0 & bit64=0 & byte=0xd6 { AL = CF * 0xff; }
|
|
|
|
:SAR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=7 ... { CF = rm8 & 1; OF = 0; rm8 = rm8 s>> 1; resultflags(rm8); }
|
|
:SAR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=7 ... { local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 s>> count;
|
|
sarflags(tmp, rm8,count); shiftresultflags(rm8,count); }
|
|
:SAR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 s>> count;
|
|
sarflags(tmp, rm8,count); shiftresultflags(rm8,count); }
|
|
:SAR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=7 ... { CF = (rm16 & 1) != 0; OF = 0; rm16 = rm16 s>> 1; resultflags(rm16); }
|
|
:SAR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=7 ... { local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 s>> count;
|
|
sarflags(tmp, rm16,count); shiftresultflags(rm16,count); }
|
|
:SAR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 s>> count;
|
|
sarflags(tmp, rm16,count); shiftresultflags(rm16,count); }
|
|
:SAR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=7 ... { CF = (rm32 & 1) != 0; OF = 0; rm32 = rm32 s>> 1; build check_rm32_dest; resultflags(rm32); }
|
|
:SAR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=7 ... { local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 s>> count; build check_rm32_dest;
|
|
sarflags(tmp, rm32,count); shiftresultflags(rm32,count); }
|
|
:SAR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 s>> count; build check_rm32_dest;
|
|
sarflags(tmp, rm32,count); shiftresultflags(rm32,count); }
|
|
@ifdef IA64
|
|
:SAR rm64,n1 is vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=7 ... { CF = (rm64 & 1) != 0; OF = 0; rm64 = rm64 s>> 1; resultflags(rm64); }
|
|
:SAR rm64,CL is vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=7 ... { local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 s>> count;
|
|
sarflags(tmp, rm64,count); shiftresultflags(rm64,count); }
|
|
:SAR rm64,imm8 is vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 s>> count;
|
|
sarflags(tmp, rm64,count); shiftresultflags(rm64,count); }
|
|
@endif
|
|
|
|
:SBB AL,imm8 is vexMode=0 & byte=0x1c; AL & imm8 { subCarryFlags( AL, imm8 ); resultflags(AL); }
|
|
:SBB AX,imm16 is vexMode=0 & opsize=0 & byte=0x1d; AX & imm16 { subCarryFlags( AX, imm16 ); resultflags(AX); }
|
|
:SBB EAX,imm32 is vexMode=0 & opsize=1 & byte=0x1d; EAX & check_EAX_dest & imm32 { subCarryFlags( EAX, imm32 ); build check_EAX_dest; resultflags(EAX); }
|
|
@ifdef IA64
|
|
:SBB RAX,imm32 is vexMode=0 & opsize=2 & byte=0x1d; RAX & imm32 { subCarryFlags( RAX, imm32 ); resultflags(RAX); }
|
|
@endif
|
|
:SBB rm8,imm8 is vexMode=0 & (byte=0x80 | byte=0x82); rm8 & reg_opcode=3 ...; imm8 { subCarryFlags( rm8, imm8 ); resultflags(rm8); }
|
|
:SBB rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; rm16 & reg_opcode=3 ...; imm16 { subCarryFlags( rm16, imm16 ); resultflags(rm16); }
|
|
:SBB rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; rm32 & check_rm32_dest ... & reg_opcode=3 ...; imm32 { subCarryFlags( rm32, imm32 ); build check_rm32_dest; resultflags(rm32); }
|
|
@ifdef IA64
|
|
:SBB rm64,imm32 is vexMode=0 & opsize=2 & byte=0x81; rm64 & reg_opcode=3 ...; imm32 { subCarryFlags( rm64, imm32 ); resultflags(rm64); }
|
|
@endif
|
|
|
|
:SBB rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; rm16 & reg_opcode=3 ...; simm8_16 { subCarryFlags( rm16, simm8_16 ); resultflags(rm16); }
|
|
:SBB rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; rm32 & check_rm32_dest ... & reg_opcode=3 ...; simm8_32 { subCarryFlags( rm32, simm8_32 ); build check_rm32_dest; resultflags(rm32); }
|
|
@ifdef IA64
|
|
:SBB rm64,simm8_64 is vexMode=0 & opsize=2 & byte=0x83; rm64 & reg_opcode=3 ...; simm8_64 { subCarryFlags( rm64, simm8_64 ); resultflags(rm64); }
|
|
@endif
|
|
|
|
:SBB rm8,Reg8 is vexMode=0 & byte=0x18; rm8 & Reg8 ... { subCarryFlags( rm8, Reg8 ); resultflags(rm8); }
|
|
:SBB rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x19; rm16 & Reg16 ... { subCarryFlags( rm16, Reg16 ); resultflags(rm16); }
|
|
:SBB rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x19; rm32 & check_rm32_dest ... & Reg32 ... { subCarryFlags( rm32, Reg32 ); build check_rm32_dest; resultflags(rm32); }
|
|
@ifdef IA64
|
|
:SBB rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x19; rm64 & Reg64 ... { subCarryFlags( rm64, Reg64 ); resultflags(rm64); }
|
|
@endif
|
|
|
|
:SBB Reg8,rm8 is vexMode=0 & byte=0x1a; rm8 & Reg8 ... { subCarryFlags( Reg8, rm8 ); resultflags(Reg8); }
|
|
:SBB Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x1b; rm16 & Reg16 ... { subCarryFlags( Reg16, rm16 ); resultflags(Reg16); }
|
|
:SBB Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x1b; rm32 & Reg32 ... & check_Reg32_dest ... { subCarryFlags( Reg32, rm32 ); build check_Reg32_dest; resultflags(Reg32); }
|
|
@ifdef IA64
|
|
:SBB Reg64,rm64 is vexMode=0 & opsize=2 & byte=0x1b; rm64 & Reg64 ... { subCarryFlags( Reg64, rm64 ); resultflags(Reg64); }
|
|
@endif
|
|
|
|
:SCASB^repe^repetail eseDI1 is vexMode=0 & repe & repetail & byte=0xae & eseDI1 { build repe; build eseDI1; subflags(AL,eseDI1); local diff=AL-eseDI1; resultflags(diff); build repetail; }
|
|
:SCASW^repe^repetail eseDI2 is vexMode=0 & repe & repetail & opsize=0 & byte=0xaf & eseDI2 { build repe; build eseDI2; subflags(AX,eseDI2); local diff=AX-eseDI2; resultflags(diff); build repetail; }
|
|
:SCASD^repe^repetail eseDI4 is vexMode=0 & repe & repetail & opsize=1 & byte=0xaf & eseDI4 { build repe; build eseDI4; subflags(EAX,eseDI4); local diff=EAX-eseDI4; resultflags(diff); build repetail; }
|
|
@ifdef IA64
|
|
:SCASQ^repe^repetail eseDI8 is vexMode=0 & repe & repetail & opsize=2 & byte=0xaf & eseDI8 { build repe; build eseDI8; subflags(RAX,eseDI8); local diff=RAX-eseDI8; resultflags(diff); build repetail; }
|
|
@endif
|
|
|
|
:SET^cc rm8 is vexMode=0 & byte=0xf; row=9 & cc; rm8 { rm8 = cc; }
|
|
|
|
# manual is not consistent on operands
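# (it stores a pseudo-descriptor, a 16-bit limit followed by a 32-bit base,
# or a 64-bit base in long mode, so the true memory operand is 6 or 10 bytes;
# the constructors below simplify it to a single pcodeop store)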
|
|
:SGDT m16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m16
|
|
{
|
|
m16 = GlobalDescriptorTableRegister();
|
|
}
|
|
|
|
:SGDT m32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m32
|
|
{
|
|
m32 = GlobalDescriptorTableRegister();
|
|
}
|
|
|
|
@ifdef IA64
|
|
:SGDT m64 is vexMode=0 & opsize=2 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m64
|
|
{
|
|
m64 = GlobalDescriptorTableRegister();
|
|
}
|
|
@endif
|
|
|
|
|
|
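# /4 is the documented SHL/SAL encoding and /6 is an undocumented alias,
# hence the (reg_opcode=4|reg_opcode=6) patterns below.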
:SHL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 &(reg_opcode=4|reg_opcode=6) ... { CF = rm8 s< 0; rm8 = rm8 << 1; OF = CF ^ (rm8 s< 0); resultflags(rm8); }
|
|
:SHL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 << count;
|
|
shlflags(tmp, rm8,count); shiftresultflags(rm8,count); }
|
|
:SHL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 << count;
|
|
shlflags(tmp, rm8,count); shiftresultflags(rm8,count); }
|
|
:SHL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & (reg_opcode=4|reg_opcode=6) ... { CF = rm16 s< 0; rm16 = rm16 << 1; OF = CF ^ (rm16 s< 0); resultflags(rm16); }
|
|
:SHL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 << count;
|
|
shlflags(tmp, rm16,count); shiftresultflags(rm16,count); }
|
|
:SHL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 << count;
|
|
shlflags(tmp, rm16,count); shiftresultflags(rm16,count); }
|
|
:SHL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... { CF = rm32 s< 0; rm32 = rm32 << 1; OF = CF ^ (rm32 s< 0); build check_rm32_dest; resultflags(rm32); }
|
|
:SHL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 << count; build check_rm32_dest;
|
|
shlflags(tmp, rm32,count); shiftresultflags(rm32,count); }
|
|
:SHL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 << count; build check_rm32_dest;
|
|
shlflags(tmp, rm32,count); shiftresultflags(rm32,count); }
|
|
@ifdef IA64
|
|
:SHL rm64,n1 is vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & (reg_opcode=4|reg_opcode=6) ... { CF = rm64 s< 0; rm64 = rm64 << 1; OF = CF ^ (rm64 s< 0); resultflags(rm64); }
|
|
:SHL rm64,CL is vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 << count;
|
|
shlflags(tmp, rm64,count); shiftresultflags(rm64,count); }
|
|
:SHL rm64,imm8 is vexMode=0 & opsize=2 & byte=0xC1; rm64 & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 << count;
|
|
shlflags(tmp, rm64,count); shiftresultflags(rm64,count); }
|
|
@endif
|
|
|
|
:SHLD rm16,Reg16,imm8 is vexMode=0 & opsize=0; byte=0x0F; byte=0xA4; rm16 & Reg16 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16;
|
|
rm16 = (rm16 << count) | (Reg16 >> (16 - count));
|
|
shlflags(tmp,rm16,count); shiftresultflags(rm16,count);}
|
|
:SHLD rm16,Reg16,CL is vexMode=0 & opsize=0; byte=0x0F; byte=0xA5; CL & rm16 & Reg16 ... { local count = CL & 0x1f; local tmp = rm16;
|
|
rm16 = (rm16 << count) | (Reg16 >> (16 - count));
|
|
shlflags(tmp,rm16,count); shiftresultflags(rm16,count); }
|
|
:SHLD rm32,Reg32,imm8 is vexMode=0 & opsize=1; byte=0x0F; byte=0xA4; rm32 & check_rm32_dest ... & Reg32 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32;
|
|
rm32 = (rm32 << count) | (Reg32 >> (32 - count)); build check_rm32_dest;
|
|
shlflags(tmp,rm32,count); shiftresultflags(rm32,count); }
|
|
:SHLD rm32,Reg32,CL is vexMode=0 & opsize=1; byte=0x0F; byte=0xA5; CL & rm32 & check_rm32_dest ... & Reg32 ... { local count = CL & 0x1f; local tmp = rm32;
|
|
rm32 = (rm32 << count) | (Reg32 >> (32 - count)); build check_rm32_dest;
|
|
shlflags(tmp,rm32,count); shiftresultflags(rm32,count); }
|
|
@ifdef IA64
|
|
:SHLD rm64,Reg64,imm8 is vexMode=0 & opsize=2; byte=0x0F; byte=0xA4; rm64 & Reg64 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64;
|
|
rm64 = (rm64 << count) | (Reg64 >> (64 - count));
|
|
shlflags(tmp,rm64,count); shiftresultflags(rm64,count); }
|
|
:SHLD rm64,Reg64,CL is vexMode=0 & opsize=2; byte=0x0F; byte=0xA5; CL & rm64 & Reg64 ... { local count = CL & 0x3f; local tmp = rm64;
|
|
rm64 = (rm64 << count) | (Reg64 >> (64 - count));
|
|
shlflags(tmp,rm64,count); shiftresultflags(rm64,count); }
|
|
@endif
|
|
|
|
:SHRD rm16,Reg16,imm8 is vexMode=0 & opsize=0; byte=0x0F; byte=0xAC; rm16 & Reg16 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16;
|
|
rm16 = (rm16 >> count) | (Reg16 << (16 - count));
|
|
shrdflags(tmp,rm16,count); shiftresultflags(rm16,count); }
|
|
:SHRD rm16,Reg16,CL is vexMode=0 & opsize=0; byte=0x0F; byte=0xAD; CL & rm16 & Reg16 ... { local count = CL & 0x1f; local tmp = rm16;
|
|
rm16 = (rm16 >> count) | (Reg16 << (16 - count));
|
|
shrdflags(tmp,rm16,count); shiftresultflags(rm16,count); }
|
|
:SHRD rm32,Reg32,imm8 is vexMode=0 & opsize=1; byte=0x0F; byte=0xAC; rm32 & check_rm32_dest ... & Reg32 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32;
|
|
rm32 = (rm32 >> count) | (Reg32 << (32 - count)); build check_rm32_dest;
|
|
shrdflags(tmp,rm32,count); shiftresultflags(rm32,count); }
|
|
:SHRD rm32,Reg32,CL is vexMode=0 & opsize=1; byte=0x0F; byte=0xAD; CL & rm32 & check_rm32_dest ... & Reg32 ... { local count = CL & 0x1f; local tmp = rm32;
|
|
rm32 = (rm32 >> count) | (Reg32 << (32 - count)); build check_rm32_dest;
|
|
shrdflags(tmp,rm32,count); shiftresultflags(rm32,count); }
|
|
@ifdef IA64
|
|
:SHRD rm64,Reg64,imm8 is vexMode=0 & opsize=2; byte=0x0F; byte=0xAC; rm64 & Reg64 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64;
|
|
rm64 = (rm64 >> count) | (Reg64 << (64 - count));
|
|
shrdflags(tmp,rm64,count); shiftresultflags(rm64,count); }
|
|
:SHRD rm64,Reg64,CL is vexMode=0 & opsize=2; byte=0x0F; byte=0xAD; CL & rm64 & Reg64 ... { local count = CL & 0x3f; local tmp = rm64;
|
|
rm64 = (rm64 >> count) | (Reg64 << (64 - count));
|
|
shrdflags(tmp,rm64,count); shiftresultflags(rm64,count); }
|
|
@endif
|
|
|
|
:SHR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=5 ... { CF = rm8 & 1; OF = 0; rm8 = rm8 >> 1; resultflags(rm8); }
|
|
:SHR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=5 ... { local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 >> count;
|
|
shrflags(tmp, rm8,count); shiftresultflags(rm8,count); }
|
|
:SHR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=5 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 >> count;
|
|
shrflags(tmp, rm8,count); shiftresultflags(rm8,count); }
|
|
:SHR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=5 ... { CF = (rm16 & 1) != 0; OF = 0; rm16 = rm16 >> 1; resultflags(rm16); }
|
|
:SHR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=5 ... { local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 >> count;
|
|
shrflags(tmp, rm16,count); shiftresultflags(rm16,count); }
|
|
:SHR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=5 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 >> count;
|
|
shrflags(tmp, rm16,count); shiftresultflags(rm16,count); }
|
|
:SHR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=5 ... { CF = (rm32 & 1) != 0; OF = 0; rm32 = rm32 >> 1; build check_rm32_dest; resultflags(rm32); }
|
|
:SHR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=5 ... { local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 >> count; build check_rm32_dest;
|
|
shrflags(tmp, rm32,count); shiftresultflags(rm32,count); }
|
|
:SHR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=5 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 >> count; build check_rm32_dest;
|
|
shrflags(tmp, rm32,count); shiftresultflags(rm32,count); }
|
|
@ifdef IA64
|
|
:SHR rm64,n1 is vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=5 ... { CF = (rm64 & 1) != 0; OF = 0; rm64 = rm64 >> 1; resultflags(rm64); }
|
|
:SHR rm64,CL is vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=5 ... { local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 >> count;
|
|
shrflags(tmp, rm64,count); shiftresultflags(rm64,count); }
|
|
:SHR rm64,imm8 is vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=5 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 >> count;
|
|
shrflags(tmp, rm64,count); shiftresultflags(rm64,count); }
|
|
@endif
|
|
|
|
:SIDT m16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m16
|
|
{
|
|
m16 = InterruptDescriptorTableRegister();
|
|
}
|
|
|
|
:SIDT m32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m32
|
|
{
|
|
m32 = InterruptDescriptorTableRegister();
|
|
}
|
|
@ifdef IA64
|
|
:SIDT m64 is vexMode=0 & opsize=2 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m64
|
|
{
|
|
m64 = InterruptDescriptorTableRegister();
|
|
}
|
|
@endif
|
|
|
|
define pcodeop skinit;
|
|
:SKINIT EAX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xde & EAX { skinit(EAX); }
|
|
|
|
:SLDT rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=0 ...
|
|
{
|
|
rm16 = LocalDescriptorTableRegister();
|
|
}
|
|
:SLDT rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x0; rm32 & reg_opcode=0 ...
|
|
{
|
|
rm32 = LocalDescriptorTableRegister();
|
|
}
|
|
@ifdef IA64
|
|
:SLDT rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0x0; rm64 & reg_opcode=0 ...
|
|
{
|
|
rm64 = LocalDescriptorTableRegister();
|
|
}
|
|
@endif
|
|
|
|
:SMSW rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=4 ... { rm16 = CR0:2; }
|
|
:SMSW rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x01; rm32 & reg_opcode=4 ... { rm32 = zext(CR0:2); }
|
|
@ifdef IA64
|
|
:SMSW rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0x01; rm64 & reg_opcode=4 ... { rm64 = CR0; }
|
|
@endif
|
|
|
|
:STAC is vexMode=0 & byte=0x0f; byte=0x01; byte=0xcb { AC = 1; }
|
|
:STC is vexMode=0 & byte=0xf9 { CF = 1; }
|
|
:STD is vexMode=0 & byte=0xfd { DF = 1; }
|
|
# MFL: AMD instruction
|
|
# TODO: define the action.
|
|
# STGI: set global interrupt flag (GIF); while GIF is zero, all external interrupts are disabled.
|
|
:STGI is vexMode=0 & byte=0x0f; byte=0x01; byte=0xdc { stgi(); }
|
|
:STI is vexMode=0 & byte=0xfb { IF = 1; }
|
|
|
|
:STMXCSR m32 is vexMode=0 & byte=0xf; byte=0xae; ( mod != 0b11 & reg_opcode=3 ) ... & m32 { m32 = MXCSR; }
|
|
|
|
:STOSB^rep^reptail eseDI1 is vexMode=0 & rep & reptail & byte=0xaa & eseDI1 { build rep; build eseDI1; eseDI1=AL; build reptail; }
|
|
:STOSW^rep^reptail eseDI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xab & eseDI2 { build rep; build eseDI2; eseDI2=AX; build reptail; }
|
|
:STOSD^rep^reptail eseDI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xab & eseDI4 { build rep; build eseDI4; eseDI4=EAX; build reptail; }
|
|
@ifdef IA64
|
|
:STOSQ^rep^reptail eseDI8 is vexMode=0 & rep & reptail & opsize=2 & byte=0xab & eseDI8 { build rep; build eseDI8; eseDI8=RAX; build reptail; }
|
|
@endif
|
|
|
|
:STR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=1 ... { rm16 = TaskRegister(); }
|
|
|
|
:SUB AL,imm8 is vexMode=0 & byte=0x2c; AL & imm8 { subflags( AL,imm8 ); AL = AL - imm8; resultflags( AL); }
|
|
:SUB AX,imm16 is vexMode=0 & opsize=0 & byte=0x2d; AX & imm16 { subflags( AX,imm16); AX = AX - imm16; resultflags( AX); }
|
|
:SUB EAX,imm32 is vexMode=0 & opsize=1 & byte=0x2d; EAX & check_EAX_dest & imm32 { subflags( EAX,imm32); EAX = EAX - imm32; build check_EAX_dest; resultflags( EAX); }
|
|
@ifdef IA64
|
|
:SUB RAX,simm32 is vexMode=0 & opsize=2 & byte=0x2d; RAX & simm32 { subflags( RAX,simm32); RAX = RAX - simm32; resultflags( RAX); }
|
|
@endif
|
|
:SUB spec_rm8,imm8 is vexMode=0 & (byte=0x80 | byte=0x82); spec_rm8 & reg_opcode=5 ...; imm8 { subflags( spec_rm8,imm8 ); spec_rm8 = spec_rm8 - imm8; resultflags( spec_rm8); }
|
|
:SUB spec_rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; spec_rm16 & reg_opcode=5 ...; imm16 { subflags( spec_rm16,imm16); spec_rm16 = spec_rm16 - imm16; resultflags( spec_rm16); }
|
|
:SUB spec_rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; spec_rm32 & check_rm32_dest ... & reg_opcode=5 ...; imm32 { subflags( spec_rm32,imm32); spec_rm32 = spec_rm32 - imm32; build check_rm32_dest; resultflags( spec_rm32); }
|
|
@ifdef IA64
|
|
:SUB spec_rm64,simm32 is vexMode=0 & opsize=2 & byte=0x81; spec_rm64 & reg_opcode=5 ...; simm32 { subflags( spec_rm64,simm32); spec_rm64 = spec_rm64 - simm32; resultflags( spec_rm64); }
|
|
@endif
|
|
:SUB spec_rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; spec_rm16 & reg_opcode=5 ...; simm8_16 { subflags( spec_rm16,simm8_16); spec_rm16 = spec_rm16 - simm8_16; resultflags( spec_rm16); }
|
|
:SUB spec_rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; spec_rm32 & check_rm32_dest ... & reg_opcode=5 ...; simm8_32 { subflags( spec_rm32,simm8_32); spec_rm32 = spec_rm32 - simm8_32; build check_rm32_dest; resultflags( spec_rm32); }
|
|
@ifdef IA64
|
|
:SUB spec_rm64,simm8_64 is vexMode=0 & opsize=2 & byte=0x83; spec_rm64 & reg_opcode=5 ...; simm8_64 { subflags( spec_rm64,simm8_64); spec_rm64 = spec_rm64 - simm8_64; resultflags( spec_rm64); }
|
|
@endif
|
|
:SUB rm8,Reg8 is vexMode=0 & byte=0x28; rm8 & Reg8 ... { subflags( rm8,Reg8 ); rm8 = rm8 - Reg8; resultflags( rm8); }
|
|
:SUB rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x29; rm16 & Reg16 ... { subflags( rm16,Reg16); rm16 = rm16 - Reg16; resultflags( rm16); }
|
|
:SUB rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x29; rm32 & check_rm32_dest ... & Reg32 ... { subflags( rm32,Reg32); rm32 = rm32 - Reg32; build check_rm32_dest; resultflags( rm32); }
|
|
@ifdef IA64
|
|
:SUB rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x29; rm64 & Reg64 ... { subflags( rm64,Reg64); rm64 = rm64 - Reg64; resultflags( rm64); }
|
|
@endif
|
|
:SUB Reg8,rm8 is vexMode=0 & byte=0x2a; rm8 & Reg8 ... { subflags( Reg8,rm8 ); Reg8 = Reg8 - rm8; resultflags( Reg8); }
|
|
:SUB Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x2b; rm16 & Reg16 ... { subflags(Reg16,rm16 ); Reg16 = Reg16 - rm16; resultflags(Reg16); }
|
|
:SUB Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x2b; rm32 & Reg32 ... & check_Reg32_dest ... { subflags(Reg32,rm32 ); Reg32 = Reg32 - rm32; build check_Reg32_dest; resultflags(Reg32); }
|
|
@ifdef IA64
|
|
:SUB Reg64,rm64 is vexMode=0 & opsize=2 & byte=0x2b; rm64 & Reg64 ... { subflags(Reg64,rm64 ); Reg64 = Reg64 - rm64; resultflags(Reg64); }
|
|
@endif
|
|
|
|
:SYSENTER is vexMode=0 & byte=0x0f; byte=0x34 { sysenter(); }
|
|
:SYSEXIT is vexMode=0 & byte=0x0f; byte=0x35 { sysexit();
|
|
@ifdef IA64
|
|
RIP=RCX; return [RIP];
|
|
@endif
|
|
}
|
|
|
|
:SYSCALL is vexMode=0 & byte=0x0f; byte=0x05 { syscall(); }
|
|
|
|
# returning to 32bit mode loads ECX
|
|
# returning to 64bit mode loads RCX
|
|
:SYSRET is vexMode=0 & byte=0x0f; byte=0x07 { sysret();
|
|
@ifdef IA64
|
|
RIP=RCX; return [RIP];
|
|
@endif
|
|
}
|
|
|
|
:SWAPGS is vexMode=0 & bit64=1 & byte=0x0f; byte=0x01; byte=0xf8 { swapgs(); }
|
|
|
|
:RDTSCP is vexMode=0 & bit64=1 & byte=0x0f; byte=0x01; byte=0xf9 { rdtscp(); }
|
|
|
|
:TEST AL,imm8 is vexMode=0 & byte=0xA8; AL & imm8 { logicalflags(); local tmp = AL & imm8; resultflags(tmp); }
|
|
:TEST AX,imm16 is vexMode=0 & opsize=0; byte=0xA9; AX & imm16 { logicalflags(); local tmp = AX & imm16; resultflags(tmp); }
|
|
:TEST EAX,imm32 is vexMode=0 & opsize=1; byte=0xA9; EAX & imm32 { logicalflags(); local tmp = EAX & imm32; resultflags(tmp); }
|
|
@ifdef IA64
|
|
:TEST RAX,simm32 is vexMode=0 & opsize=2; byte=0xA9; RAX & simm32 { logicalflags(); local tmp = RAX & simm32; resultflags(tmp); }
|
|
@endif
|
|
:TEST spec_rm8,imm8 is vexMode=0 & byte=0xF6; spec_rm8 & (reg_opcode=0 | reg_opcode=1) ... ; imm8 { logicalflags(); local tmp = spec_rm8 & imm8; resultflags(tmp); }
|
|
:TEST spec_rm16,imm16 is vexMode=0 & opsize=0; byte=0xF7; spec_rm16 & (reg_opcode=0 | reg_opcode=1) ... ; imm16 { logicalflags(); local tmp = spec_rm16 & imm16; resultflags(tmp); }
|
|
:TEST spec_rm32,imm32 is vexMode=0 & opsize=1; byte=0xF7; spec_rm32 & (reg_opcode=0 | reg_opcode=1) ... ; imm32 { logicalflags(); local tmp = spec_rm32 & imm32; resultflags(tmp); }
|
|
@ifdef IA64
|
|
:TEST spec_rm64,simm32 is vexMode=0 & opsize=2; byte=0xF7; spec_rm64 & (reg_opcode=0 | reg_opcode=1) ... ; simm32 { logicalflags(); local tmp = spec_rm64 & simm32; resultflags(tmp); }
|
|
@endif
|
|
:TEST rm8,Reg8 is vexMode=0 & byte=0x84; rm8 & Reg8 ... { logicalflags(); local tmp = rm8 & Reg8; resultflags(tmp); }
|
|
:TEST rm16,Reg16 is vexMode=0 & opsize=0; byte=0x85; rm16 & Reg16 ... { logicalflags(); local tmp = rm16 & Reg16; resultflags(tmp); }
|
|
:TEST rm32,Reg32 is vexMode=0 & opsize=1; byte=0x85; rm32 & Reg32 ... { logicalflags(); local tmp = rm32 & Reg32; resultflags(tmp); }
|
|
@ifdef IA64
|
|
:TEST rm64,Reg64 is vexMode=0 & opsize=2; byte=0x85; rm64 & Reg64 ... { logicalflags(); local tmp = rm64 & Reg64; resultflags(tmp); }
|
|
@endif
|
|
|
|
define pcodeop invalidInstructionException;
|
|
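# The UD* constructors raise the exception and then 'goto inst_start', so
# decompilation and emulation never fall through an invalid opcode.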
:UD0 Reg32, rm32 is vexMode=0 & byte=0x0f; byte=0xff; rm32 & Reg32 ... { invalidInstructionException(); goto inst_start; }
|
|
:UD1 Reg32, rm32 is vexMode=0 & byte=0x0f; byte=0xb9; rm32 & Reg32 ... { invalidInstructionException(); goto inst_start; }
|
|
:UD2 is vexMode=0 & byte=0xf; byte=0xb { invalidInstructionException(); goto inst_start; }
|
|
|
|
:VERR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=4 ... { }
|
|
:VERW rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=5 ... { }
|
|
|
|
# MFL added VMX opcodes
|
|
#
|
|
# AMD hardware assisted virtualization opcodes
|
|
:VMLOAD EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xda & EAX { vmload(EAX); }
|
|
@ifdef IA64
|
|
:VMLOAD RAX is vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xda & RAX { vmload(RAX); }
|
|
@endif
|
|
:VMMCALL is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd9 { vmmcall(); }
|
|
# Limiting the effective address size to 32 and 64 bit. Surely we're not expecting a 16-bit VM address, are we?
|
|
:VMRUN EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xd8 & EAX { vmrun(EAX); }
|
|
@ifdef IA64
|
|
:VMRUN RAX is vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xd8 & RAX { vmrun(RAX); }
|
|
@endif
|
|
# Limiting the effective address size to 32 and 64 bit. Surely we're not expecting a 16-bit VM address, are we?
|
|
:VMSAVE EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xdb & EAX { vmsave(EAX); }
|
|
@ifdef IA64
|
|
:VMSAVE RAX is vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xdb & RAX { vmsave(RAX); }
|
|
@endif
|
|
#
|
|
|
|
#
|
|
# Intel hardware assisted virtualization opcodes
|
|
@ifdef IA64
|
|
:INVEPT Reg64, m128 is vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x80; Reg64 ... & m128 { invept(Reg64, m128); }
|
|
@endif
|
|
:INVEPT Reg32, m128 is vexMode=0 & bit64=0 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x80; Reg32 ... & m128 { invept(Reg32, m128); }
|
|
@ifdef IA64
|
|
:INVVPID Reg64, m128 is vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x81; Reg64 ... & m128 { invvpid(Reg64, m128); }
|
|
@endif
|
|
:INVVPID Reg32, m128 is vexMode=0 & bit64=0 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x81; Reg32 ... & m128 { invvpid(Reg32, m128); }
|
|
:VMCALL is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc1 { vmcall(); }
|
|
@ifdef IA64
|
|
:VMCLEAR m64 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... & m64 { vmclear(m64); }
|
|
@endif
|
|
#TODO: invokes a VM function specified in EAX
|
|
:VMFUNC EAX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd4 & EAX { vmfunc(EAX); }
|
|
#TODO: this launches the VM managed by the current VMCS. How is the VMCS expressed for the emulator? For Ghidra analysis?
|
|
:VMLAUNCH is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc2 { vmlaunch(); }
|
|
#TODO: this resumes the VM managed by the current VMCS. How is the VMCS expressed for the emulator? For Ghidra analysis?
|
|
:VMRESUME is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc3 { vmresume(); }
|
|
#TODO: this loads the VMCS pointer from the m64 memory address and makes the VMCS pointer valid; how to express
|
|
# this for analysis and emulation?
|
|
:VMPTRLD m64 is vexMode=0 & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... & m64 { vmptrld(m64); }
|
|
#TODO: stores the current VMCS pointer into the specified 64-bit memory address; how to express this for analysis and emulation?
|
|
#TODO: note that the Intel manual does not specify m64 (as it does for VMPTRLD), yet it does state that "the operand
|
|
# of this instruction is always 64-bits and is always in memory". Is it an error that the "Instruction" entry in the
|
|
# box giving the definition does not specify m64?
|
|
:VMPTRST m64 is vexMode=0 & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=7 ) ... & m64 { vmptrst(m64); }
|
|
:VMREAD rm32, Reg32 is vexMode=0 & opsize=1 & byte=0x0f; byte=0x78; rm32 & check_rm32_dest ... & Reg32 ... { rm32 = vmread(Reg32); build check_rm32_dest; }
|
|
@ifdef IA64
|
|
:VMREAD rm64, Reg64 is vexMode=0 & opsize=2 & byte=0x0f; byte=0x78; rm64 & Reg64 ... { rm64 = vmread(Reg64); }
|
|
@endif
|
|
:VMWRITE Reg32, rm32 is vexMode=0 & opsize=1 & byte=0x0f; byte=0x79; rm32 & Reg32 ... & check_Reg32_dest ... { vmwrite(rm32,Reg32); build check_Reg32_dest; }
|
|
@ifdef IA64
|
|
:VMWRITE Reg64, rm64 is vexMode=0 & opsize=2 & byte=0x0f; byte=0x79; rm64 & Reg64 ... { vmwrite(rm64,Reg64); }
|
|
@endif
|
|
:VMXOFF is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc4 { vmxoff(); }
|
|
# NB: this opcode is incorrect in the 2005 edition of the Intel manual. Opcode below is taken from the 2008 version.
|
|
:VMXON m64 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... & m64 { vmxon(m64); }
|
|
|
|
#END of changes for VMX opcodes
|
|
|
|
:WAIT is vexMode=0 & byte=0x9b { }
|
|
:WBINVD is vexMode=0 & byte=0xf; byte=0x9 { }
|
|
|
|
@ifdef IA64
|
|
define pcodeop writefsbase;
|
|
:WRFSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=2 & r32 { tmp:8 = zext(r32); writefsbase(tmp); }
|
|
:WRFSBASE r64 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=2 & r64 { writefsbase(r64); }
|
|
|
|
define pcodeop writegsbase;
|
|
:WRGSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=3 & r32 { tmp:8 = zext(r32); writegsbase(tmp); }
|
|
:WRGSBASE r64 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=3 & r64 { writegsbase(r64); }
|
|
@endif
|
|
|
|
define pcodeop wrpkru;
|
|
:WRPKRU is vexMode=0 & byte=0x0F; byte=0x01; byte=0xEF { wrpkru(EAX); }
|
|
|
|
define pcodeop wrmsr;
|
|
:WRMSR is vexMode=0 & byte=0xf; byte=0x30 { tmp:8 = (zext(EDX) << 32) | zext(EAX); wrmsr(ECX,tmp); }
|
|
|
|
:XADD rm8,Reg8 is vexMode=0 & byte=0x0F; byte=0xC0; rm8 & Reg8 ... { addflags( rm8,Reg8 ); local tmp = rm8 + Reg8; Reg8 = rm8; rm8 = tmp; resultflags(tmp); }
|
|
:XADD rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x0F; byte=0xC1; rm16 & Reg16 ... { addflags(rm16,Reg16); local tmp = rm16 + Reg16; Reg16 = rm16; rm16 = tmp; resultflags(tmp); }
|
|
:XADD rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x0F; byte=0xC1; rm32 & check_rm32_dest ... & Reg32 ... & check_Reg32_dest ... { addflags(rm32,Reg32); local tmp = rm32 + Reg32; Reg32 = rm32; rm32 = tmp; build check_rm32_dest; build check_Reg32_dest; resultflags(tmp); }
|
|
@ifdef IA64
|
|
:XADD rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x0F; byte=0xC1; rm64 & Reg64 ... { addflags(rm64,Reg64); local tmp = rm64 + Reg64; Reg64 = rm64; rm64 = tmp; resultflags(tmp); }
|
|
@endif
|
|
|
|
define pcodeop xabort;
|
|
|
|
:XABORT imm8 is vexMode=0 & byte=0xc6; byte=0xf8; imm8 { tmp:1 = imm8; xabort(tmp); }
|
|
|
|
define pcodeop xbegin;
|
|
define pcodeop xend;
|
|
|
|
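# XBEGIN's operand is the abort fallback address; '&:$(SIZE) rel16' hands it
# to the xbegin pcodeop as a pointer-sized constant instead of branching.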
:XBEGIN rel16 is vexMode=0 & opsize=0 & byte=0xc7; byte=0xf8; rel16 { xbegin(&:$(SIZE) rel16); }
|
|
:XBEGIN rel32 is vexMode=0 & (opsize=1 | opsize=2) & byte=0xc7; byte=0xf8; rel32 { xbegin(&:$(SIZE) rel32); }
|
|
|
|
:XEND is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd5 { xend(); }
|
|
|
|
:XCHG AX,Rmr16 is vexMode=0 & opsize=0 & row = 9 & page = 0 & AX & Rmr16 { local tmp = AX; AX = Rmr16; Rmr16 = tmp; }
|
|
:XCHG EAX,Rmr32 is vexMode=0 & opsize=1 & row = 9 & page = 0 & EAX & check_EAX_dest & Rmr32 & check_Rmr32_dest { local tmp = EAX; EAX = Rmr32; build check_EAX_dest; Rmr32 = tmp; build check_Rmr32_dest; }
|
|
@ifdef IA64
|
|
:XCHG RAX,Rmr64 is vexMode=0 & opsize=2 & row = 9 & page = 0 & RAX & Rmr64 { local tmp = RAX; RAX = Rmr64; Rmr64 = tmp; }
|
|
@endif
|
|
|
|
:XCHG rm8,Reg8 is vexMode=0 & byte=0x86; rm8 & Reg8 ... { local tmp = rm8; rm8 = Reg8; Reg8 = tmp; }
|
|
:XCHG rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x87; rm16 & Reg16 ... { local tmp = rm16; rm16 = Reg16; Reg16 = tmp; }
|
|
:XCHG rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x87; rm32 & check_rm32_dest ... & Reg32 ... & check_Reg32_dest ... { local tmp = rm32; rm32 = Reg32; build check_rm32_dest; Reg32 = tmp; build check_Reg32_dest;}
|
|
@ifdef IA64
|
|
:XCHG rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x87; rm64 & Reg64 ... { local tmp = rm64; rm64 = Reg64; Reg64 = tmp; }
|
|
@endif
|
|
|
|
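# XLAT: AL = byte at (E/R)BX + zext(AL); the ptr2/ptr4/ptr8 macros (defined
# earlier in this file) form the effective address, presumably folding in
# the selected segment base.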
:XLAT seg16^BX is vexMode=0 & addrsize=0 & seg16 & byte=0xd7; BX { tmp:$(SIZE)= 0; ptr2(tmp,BX+zext(AL)); AL = *tmp; }
|
|
:XLAT segWide^EBX is vexMode=0 & addrsize=1 & segWide & byte=0xd7; EBX { tmp:$(SIZE)= 0; ptr4(tmp,EBX+zext(AL)); AL = *tmp; }
|
|
@ifdef IA64
|
|
:XLAT segWide^RBX is vexMode=0 & addrsize=2 & segWide & byte=0xd7; RBX { tmp:$(SIZE)= 0; ptr8(tmp,RBX+zext(AL)); AL = *tmp; }
|
|
@endif
|
|
|
|
:XOR AL,imm8 is vexMode=0 & byte=0x34; AL & imm8 { logicalflags(); AL = AL ^ imm8; resultflags( AL); }
|
|
:XOR AX,imm16 is vexMode=0 & opsize=0 & byte=0x35; AX & imm16 { logicalflags(); AX = AX ^ imm16; resultflags( AX); }
|
|
:XOR EAX,imm32 is vexMode=0 & opsize=1 & byte=0x35; EAX & imm32 & check_EAX_dest { logicalflags(); EAX = EAX ^ imm32; build check_EAX_dest; resultflags( EAX);}
|
|
@ifdef IA64
|
|
:XOR RAX,simm32 is vexMode=0 & opsize=2 & byte=0x35; RAX & simm32 { logicalflags(); RAX = RAX ^ simm32; resultflags( RAX); }
|
|
@endif
|
|
:XOR spec_rm8,imm8 is vexMode=0 & (byte=0x80 | byte=0x82); spec_rm8 & reg_opcode=6 ...; imm8 { logicalflags(); spec_rm8 = spec_rm8 ^ imm8; resultflags( spec_rm8); }
|
|
:XOR spec_rm16,imm16 is vexMode=0 & opsize=0 & byte=0x81; spec_rm16 & reg_opcode=6 ...; imm16 { logicalflags(); spec_rm16 = spec_rm16 ^ imm16; resultflags( spec_rm16); }
|
|
:XOR spec_rm32,imm32 is vexMode=0 & opsize=1 & byte=0x81; spec_rm32 & check_rm32_dest ... & reg_opcode=6 ...; imm32 { logicalflags(); spec_rm32 = spec_rm32 ^ imm32; build check_rm32_dest; resultflags( spec_rm32); }
|
|
@ifdef IA64
|
|
:XOR spec_rm64,simm32 is vexMode=0 & opsize=2 & byte=0x81; spec_rm64 & reg_opcode=6 ...; simm32 { logicalflags(); spec_rm64 = spec_rm64 ^ simm32; resultflags( spec_rm64); }
|
|
@endif
|
|
:XOR spec_rm16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; spec_rm16 & reg_opcode=6 ...; usimm8_16 { logicalflags(); spec_rm16 = spec_rm16 ^ usimm8_16; resultflags( spec_rm16); }
|
|
:XOR spec_rm32,usimm8_32 is vexMode=0 & opsize=1 & byte=0x83; spec_rm32 & check_rm32_dest ... & reg_opcode=6 ...; usimm8_32 { logicalflags(); spec_rm32 = spec_rm32 ^ usimm8_32; build check_rm32_dest; resultflags( spec_rm32); }
|
|
@ifdef IA64
|
|
:XOR spec_rm64,usimm8_64 is vexMode=0 & opsize=2 & byte=0x83; spec_rm64 & reg_opcode=6 ...; usimm8_64 { logicalflags(); spec_rm64 = spec_rm64 ^ usimm8_64; resultflags( spec_rm64); }
|
|
@endif
|
|
:XOR rm8,Reg8 is vexMode=0 & byte=0x30; rm8 & Reg8 ... { logicalflags(); rm8 = rm8 ^ Reg8; resultflags( rm8); }
|
|
:XOR rm16,Reg16 is vexMode=0 & opsize=0 & byte=0x31; rm16 & Reg16 ... { logicalflags(); rm16 = rm16 ^ Reg16; resultflags( rm16); }
|
|
:XOR rm32,Reg32 is vexMode=0 & opsize=1 & byte=0x31; rm32 & check_rm32_dest ... & Reg32 ... { logicalflags(); rm32 = rm32 ^ Reg32; build check_rm32_dest; resultflags( rm32); }
|
|
@ifdef IA64
|
|
:XOR rm64,Reg64 is vexMode=0 & opsize=2 & byte=0x31; rm64 & Reg64 ... { logicalflags(); rm64 = rm64 ^ Reg64; resultflags( rm64); }
|
|
@endif
|
|
:XOR Reg8,rm8 is vexMode=0 & byte=0x32; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 ^ rm8; resultflags( Reg8); }
|
|
:XOR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x33; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 ^ rm16; resultflags(Reg16); }
|
|
:XOR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x33; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 ^ rm32; build check_Reg32_dest; resultflags(Reg32); }
|
|
@ifdef IA64
|
|
:XOR Reg64,rm64 is vexMode=0 & opsize=2 & byte=0x33; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 ^ rm64; resultflags(Reg64); }
|
|
@endif
|
|
|
|
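# XGETBV/XSETBV move XCR0 through the EDX:EAX pair, high dword in EDX and
# low dword in EAX, mirroring the RDMSR/WRMSR convention.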
:XGETBV is vexMode=0 & byte=0x0F; byte=0x01; byte=0xD0 { local tmp = XCR0 >> 32; EDX = tmp:4; EAX = XCR0:4; }
|
|
:XSETBV is vexMode=0 & byte=0x0F; byte=0x01; byte=0xD1 { XCR0 = (zext(EDX) << 32) | zext(EAX); }
|
|
|
|
define pcodeop xsave;
|
|
define pcodeop xsave64;
|
|
define pcodeop xsavec;
|
|
define pcodeop xsavec64;
|
|
define pcodeop xsaveopt;
|
|
define pcodeop xsaveopt64;
|
|
define pcodeop xsaves;
|
|
define pcodeop xsaves64;
|
|
define pcodeop xrstor;
|
|
define pcodeop xrstor64;
|
|
define pcodeop xrstors;
|
|
define pcodeop xrstors64;
|
|
|
|
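# The XSAVE-family pcodeops take the memory operand plus a constant 512,
# presumably the size of the legacy FXSAVE region at the start of the XSAVE
# area; the real extent also depends on XCR0 and the XSAVE header.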
:XRSTOR Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xrstor(Mem,tmp); }
|
|
@ifdef IA64
|
|
:XRSTOR64 Mem is vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xrstor64(Mem,tmp); }
|
|
@endif
|
|
|
|
:XRSTORS Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=3 ) ... & Mem { tmp:4 = 512; xrstors(Mem,tmp); }
|
|
@ifdef IA64
|
|
:XRSTORS64 Mem is vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=3 ) ... & Mem { tmp:4 = 512; xrstors64(Mem,tmp); }
|
|
@endif
|
|
|
|
:XSAVE Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsave(Mem,tmp); }
|
|
@ifdef IA64
|
|
:XSAVE64 Mem is vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsave64(Mem,tmp); }
|
|
@endif
|
|
|
|
:XSAVEC Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsavec(Mem,tmp); }
|
|
@ifdef IA64
|
|
:XSAVEC64 Mem is vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsavec64(Mem,tmp); }
|
|
@endif
|
|
|
|
:XSAVEOPT Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=6 ) ... & Mem { tmp:4 = 512; xsaveopt(Mem,tmp); }
|
|
@ifdef IA64
|
|
:XSAVEOPT64 Mem is vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=6 ) ... & Mem { tmp:4 = 512; xsaveopt64(Mem,tmp); }
|
|
@endif
|
|
|
|
:XSAVES Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xsaves(Mem,tmp); }
|
|
@ifdef IA64
|
|
:XSAVES64 Mem is vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xsaves64(Mem,tmp); }
|
|
@endif
|
|
|
|
define pcodeop xtest;
|
|
:XTEST is vexMode=0 & byte=0x0F; byte=0x01; byte=0xD6 { ZF = xtest(); }
|
|
|
|
:LFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=5 & r_m=0 { }
|
|
:MFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=6 & r_m=0 { }
|
|
:SFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=7 & r_m=0 { }
|
|
|
|
#
|
|
# floating point instructions
|
|
#
|
|
|
|
define pcodeop f2xm1;
|
|
:F2XM1 is vexMode=0 & byte=0xD9; byte=0xF0 { ST0 = f2xm1(ST0); } # compute 2^x-1
|
|
|
|
:FABS is vexMode=0 & byte=0xD9; byte=0xE1 { ST0 = abs(ST0); }
|
|
|
|
:FADD spec_m32 is vexMode=0 & byte=0xD8; reg_opcode=0 ... & spec_m32 { ST0 = ST0 f+ float2float(spec_m32); }
|
|
:FADD spec_m64 is vexMode=0 & byte=0xDC; reg_opcode=0 ... & spec_m64 { ST0 = ST0 f+ float2float(spec_m64); }
|
|
:FADD ST0, freg is vexMode=0 & byte=0xD8; frow=12 & fpage=0 & freg & ST0 { ST0 = ST0 f+ freg; }
|
|
:FADD freg, ST0 is vexMode=0 & byte=0xDC; frow=12 & fpage=0 & freg & ST0 { freg = freg f+ ST0; }
|
|
:FADDP is vexMode=0 & byte=0xDE; byte=0xC1 { ST1 = ST0 f+ ST1; fpop(); }
|
|
:FADDP freg, ST0 is vexMode=0 & byte=0xDE; frow=12 & fpage=0 & freg & ST0 { freg = ST0 f+ freg; fpop(); }
|
|
:FIADD spec_m32 is vexMode=0 & byte=0xDA; reg_opcode=0 ... & spec_m32 { ST0 = ST0 f+ int2float(spec_m32); }
|
|
:FIADD spec_m16 is vexMode=0 & byte=0xDE; reg_opcode=0 ... & spec_m16 { ST0 = ST0 f+ int2float(spec_m16); }
|
|
|
|
:FBLD spec_m80 is vexMode=0 & byte=0xDF; reg_opcode=4 ... & spec_m80 { fpushv(spec_m80); } # load packed BCD; decimal conversion not modeled
|
|
|
|
:FBSTP spec_m80 is vexMode=0 & byte=0xDF; reg_opcode=6 ... & spec_m80 { spec_m80 = ST0; fpop(); }
|
|
|
|
:FCHS is vexMode=0 & byte=0xD9; byte=0xE0 { ST0 = f- ST0; }
|
|
|
|
:FCLEX is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE2 { FPUStatusWord[0,8] = 0; FPUStatusWord[15,1] = 0; }
|
|
:FNCLEX is vexMode=0 & byte=0xDB; byte=0xE2 { FPUStatusWord[0,8] = 0; FPUStatusWord[15,1] = 0; }
|
|
|
|
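# The FCMOVcc bodies test the negated condition and skip to inst_next, so
# the assignment runs only when the condition holds, e.g. FCMOVB moves
# when CF=1 and FCMOVBE when CF=1 or ZF=1.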
:FCMOVB ST0, freg is vexMode=0 & byte=0xDA; frow=12 & fpage=0 & freg & ST0 { if ( !CF ) goto inst_next; ST0 = freg; }
|
|
:FCMOVE ST0, freg is vexMode=0 & byte=0xDA; frow=12 & fpage=1 & freg & ST0 { if ( !ZF ) goto inst_next; ST0 = freg; }
|
|
:FCMOVBE ST0, freg is vexMode=0 & byte=0xDA; frow=13 & fpage=0 & freg & ST0 { if ( !CF & !ZF ) goto inst_next; ST0 = freg; }
|
|
:FCMOVU ST0, freg is vexMode=0 & byte=0xDA; frow=13 & fpage=1 & freg & ST0 { if ( !PF ) goto inst_next; ST0 = freg; }
|
|
:FCMOVNB ST0, freg is vexMode=0 & byte=0xDB; frow=12 & fpage=0 & freg & ST0 { if ( CF ) goto inst_next; ST0 = freg; }
|
|
:FCMOVNE ST0, freg is vexMode=0 & byte=0xDB; frow=12 & fpage=1 & freg & ST0 { if ( ZF ) goto inst_next; ST0 = freg; }
|
|
:FCMOVNBE ST0, freg is vexMode=0 & byte=0xDB; frow=13 & fpage=0 & freg & ST0 { if ( CF | ZF ) goto inst_next; ST0 = freg; }
|
|
:FCMOVNU ST0, freg is vexMode=0 & byte=0xDB; frow=13 & fpage=1 & freg & ST0 { if ( PF ) goto inst_next; ST0 = freg; }
|
|
|
|
:FCOM spec_m32 is vexMode=0 & byte=0xD8; reg_opcode=2 ... & spec_m32 { local tmp=float2float(spec_m32); fcom(tmp); }
|
|
:FCOM spec_m64 is vexMode=0 & byte=0xDC; reg_opcode=2 ... & spec_m64 { local tmp=float2float(spec_m64); fcom(tmp); }
|
|
:FCOM freg is vexMode=0 & byte=0xD8; frow=13 & fpage=0 & freg { fcom(freg); }
|
|
:FCOM is vexMode=0 & byte=0xD8; byte=0xD1 { fcom(ST1); }
|
|
:FCOMP spec_m32 is vexMode=0 & byte=0xD8; reg_opcode=3 ... & spec_m32 { local tmp=float2float(spec_m32); fcom(tmp); fpop(); }
|
|
:FCOMP spec_m64 is vexMode=0 & byte=0xDC; reg_opcode=3 ... & spec_m64 { local tmp=float2float(spec_m64); fcom(tmp); fpop(); }
|
|
:FCOMP freg is vexMode=0 & byte=0xD8; frow=13 & fpage=1 & freg { fcom(freg); fpop(); }
|
|
:FCOMP is vexMode=0 & byte=0xD8; byte=0xD9 { fcom(ST1); fpop(); }
|
|
:FCOMPP is vexMode=0 & byte=0xDE; byte=0xD9 { fcom(ST1); fpop(); fpop(); }
|
|
|
|
:FCOMI ST0, freg is vexMode=0 & byte=0xDB; frow=15 & fpage=0 & freg & ST0 { fcomi(freg); }
|
|
:FCOMIP ST0, freg is vexMode=0 & byte=0xDF; frow=15 & fpage=0 & freg & ST0 { fcomi(freg); fpop(); }
|
|
:FUCOMI ST0, freg is vexMode=0 & byte=0xDB; frow=14 & fpage=1 & freg & ST0 { fcomi(freg); }
|
|
:FUCOMIP ST0, freg is vexMode=0 & byte=0xDF; frow=14 & fpage=1 & freg & ST0 { fcomi(freg); fpop(); }
|
|
|
|
define pcodeop fcos;
|
|
:FCOS is vexMode=0 & byte=0xD9; byte=0xFF { ST0 = fcos(ST0); }
|
|
|
|
:FDECSTP is vexMode=0 & byte=0xD9; byte=0xF6 { fdec(); FPUStatusWord = FPUStatusWord & 0xfdff; C1 = 0; } #Clear C1
|
|
|
|
:FDIV spec_m32 is vexMode=0 & byte=0xD8; reg_opcode=6 ... & spec_m32 { ST0 = ST0 f/ float2float(spec_m32); }
|
|
:FDIV spec_m64 is vexMode=0 & byte=0xDC; reg_opcode=6 ... & spec_m64 { ST0 = ST0 f/ float2float(spec_m64); }
|
|
:FDIV ST0,freg is vexMode=0 & byte=0xD8; frow=15 & fpage=0 & freg & ST0 { ST0 = ST0 f/ freg; }
|
|
:FDIV freg,ST0 is vexMode=0 & byte=0xDC; frow=15 & fpage=1 & freg & ST0 { freg = freg f/ ST0; }
|
|
:FDIVP freg,ST0 is vexMode=0 & byte=0xDE; frow=15 & fpage=1 & freg & ST0 { freg = freg f/ ST0; fpop(); }
|
|
:FDIVP is vexMode=0 & byte=0xDE; byte=0xF9 { ST1 = ST1 f/ ST0; fpop(); }
|
|
:FIDIV spec_m32 is vexMode=0 & byte=0xDA; reg_opcode=6 ... & spec_m32 { ST0 = ST0 f/ int2float(spec_m32); }
|
|
:FIDIV spec_m16 is vexMode=0 & byte=0xDE; reg_opcode=6 ... & spec_m16 { ST0 = ST0 f/ int2float(spec_m16); }
|
|
|
|
:FDIVR spec_m32 is vexMode=0 & byte=0xD8; reg_opcode=7 ... & spec_m32 { ST0 = float2float(spec_m32) f/ ST0; }
|
|
:FDIVR spec_m64 is vexMode=0 & byte=0xDC; reg_opcode=7 ... & spec_m64 { ST0 = float2float(spec_m64) f/ ST0; }
|
|
:FDIVR ST0,freg is vexMode=0 & byte=0xD8; frow=15 & fpage=1 & freg & ST0 { ST0 = freg f/ ST0; }
|
|
:FDIVR freg,ST0 is vexMode=0 & byte=0xDC; frow=15 & fpage=0 & freg & ST0 { freg = ST0 f/ freg; }
|
|
:FDIVRP freg,ST0 is vexMode=0 & byte=0xDE; frow=15 & fpage=0 & freg & ST0 { freg = ST0 f/ freg; fpop(); }
|
|
:FDIVRP is vexMode=0 & byte=0xDE; byte=0xF1 { ST1 = ST0 f/ ST1; fpop(); }
|
|
:FIDIVR spec_m32 is vexMode=0 & byte=0xDA; reg_opcode=7 ... & spec_m32 { ST0 = int2float(spec_m32) f/ ST0; }
|
|
:FIDIVR spec_m16 is vexMode=0 & byte=0xDE; reg_opcode=7 ... & spec_m16 { ST0 = int2float(spec_m16) f/ ST0; }

:FFREE freg is vexMode=0 & byte=0xDD; frow=12 & fpage=0 & freg { } # should tag freg as empty; the tag word update is not modeled

:FICOM spec_m16 is vexMode=0 & byte=0xDE; reg_opcode=2 ... & spec_m16 { local tmp = int2float(spec_m16); fcom(tmp); }
:FICOM spec_m32 is vexMode=0 & byte=0xDA; reg_opcode=2 ... & spec_m32 { local tmp = int2float(spec_m32); fcom(tmp); }
:FICOMP spec_m16 is vexMode=0 & byte=0xDE; (mod != 0b11 & reg_opcode=3) ... & spec_m16 { local tmp = int2float(spec_m16); fcom(tmp); fpop(); }
:FICOMP spec_m32 is vexMode=0 & byte=0xDA; reg_opcode=3 ... & spec_m32 { local tmp = int2float(spec_m32); fcom(tmp); fpop(); }

:FILD spec_m16 is vexMode=0 & byte=0xDF; reg_opcode=0 ... & spec_m16 { fdec(); ST0 = int2float(spec_m16); }
:FILD spec_m32 is vexMode=0 & byte=0xDB; reg_opcode=0 ... & spec_m32 { fdec(); ST0 = int2float(spec_m32); }
:FILD spec_m64 is vexMode=0 & byte=0xDF; reg_opcode=5 ... & spec_m64 { fdec(); ST0 = int2float(spec_m64); }

:FINCSTP is vexMode=0 & byte=0xD9; byte=0xF7 { finc(); }

:FINIT is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE3
{
  FPUControlWord = 0x037f;
  FPUStatusWord = 0x0000;
  FPUTagWord = 0xffff;
  FPUDataPointer = 0x00000000;
  FPUInstructionPointer = 0x00000000;
  FPULastInstructionOpcode = 0x0000;
  C0 = 0;
  C1 = 0;
  C2 = 0;
  C3 = 0;
}
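
# Note (per the Intel SDM): control word 0x037f = all six exception masks set
# (IM DM ZM OM UM PM), precision control = 11b (64-bit extended), rounding
# control = 00b (round to nearest even).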

:FNINIT is vexMode=0 & byte=0xDB; byte=0xE3 { } # FINIT without the preceding WAIT (9B); the state reset is not modeled here

:FIST spec_m16 is vexMode=0 & byte=0xDF; (mod != 0b11 & reg_opcode=2) ... & spec_m16 { tmp:10 = round(ST0); spec_m16 = trunc(tmp); }
:FIST spec_m32 is vexMode=0 & byte=0xDB; (mod != 0b11 & reg_opcode=2) ... & spec_m32 { tmp:10 = round(ST0); spec_m32 = trunc(tmp); }
:FISTP spec_m16 is vexMode=0 & byte=0xDF; reg_opcode=3 ... & spec_m16 { tmp:10 = round(ST0); fpop(); spec_m16 = trunc(tmp); }
:FISTP spec_m32 is vexMode=0 & byte=0xDB; reg_opcode=3 ... & spec_m32 { tmp:10 = round(ST0); fpop(); spec_m32 = trunc(tmp); }
:FISTP spec_m64 is vexMode=0 & byte=0xDF; reg_opcode=7 ... & spec_m64 { tmp:10 = round(ST0); fpop(); spec_m64 = trunc(tmp); }

:FISTTP spec_m16 is vexMode=0 & byte=0xDF; reg_opcode=1 ... & spec_m16 { spec_m16 = trunc(ST0); fpop(); }
:FISTTP spec_m32 is vexMode=0 & byte=0xDB; reg_opcode=1 ... & spec_m32 { spec_m32 = trunc(ST0); fpop(); }
:FISTTP spec_m64 is vexMode=0 & byte=0xDD; reg_opcode=1 ... & spec_m64 { spec_m64 = trunc(ST0); fpop(); }
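
# FISTTP (SSE3) always converts with truncation, independent of the rounding
# control in FPUControlWord, which is why it uses trunc() with no round().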

:FLD spec_m32 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=0) ... & spec_m32 { fdec(); ST0 = float2float(spec_m32); }
:FLD spec_m64 is vexMode=0 & byte=0xDD; reg_opcode=0 ... & spec_m64 { fdec(); ST0 = float2float(spec_m64); }
:FLD spec_m80 is vexMode=0 & byte=0xDB; reg_opcode=5 ... & spec_m80 { fpushv(spec_m80); }

# Be careful that you don't clobber freg during fpushv, need a tmp to hold the value
:FLD freg is vexMode=0 & byte=0xD9; frow=12 & fpage=0 & freg { tmp:10 = freg; fpushv(tmp); }

:FLD1 is vexMode=0 & byte=0xD9; byte=0xE8 { one:4 = 1; tmp:10 = int2float(one); fpushv(tmp); }
:FLDL2T is vexMode=0 & byte=0xD9; byte=0xE9 { src:8 = 0x400a934f0979a371; tmp:10 = float2float(src); fpushv(tmp); }
:FLDL2E is vexMode=0 & byte=0xD9; byte=0xEA { src:8 = 0x3ff71547652b82fe; tmp:10 = float2float(src); fpushv(tmp); }
:FLDPI is vexMode=0 & byte=0xD9; byte=0xEB { src:8 = 0x400921fb54442d18; tmp:10 = float2float(src); fpushv(tmp); }
:FLDLG2 is vexMode=0 & byte=0xD9; byte=0xEC { src:8 = 0x3fd34413509f79ff; tmp:10 = float2float(src); fpushv(tmp); }
:FLDLN2 is vexMode=0 & byte=0xD9; byte=0xED { src:8 = 0x3fe62e42fefa39ef; tmp:10 = float2float(src); fpushv(tmp); }
:FLDZ is vexMode=0 & byte=0xD9; byte=0xEE { zero:4 = 0; tmp:10 = int2float(zero); fpushv(tmp); }
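
# The hex patterns above are IEEE-754 doubles, widened to 80 bits on push:
#   FLDL2T = log2(10) ~ 3.3219,  FLDL2E = log2(e) ~ 1.4427,  FLDPI = pi,
#   FLDLG2 = log10(2) ~ 0.3010,  FLDLN2 = ln(2) ~ 0.6931.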

:FLDCW m16 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=5) ... & m16 { FPUControlWord = m16; }

define pcodeop fldenv;

:FLDENV Mem is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=4) ... & Mem
{
  FPUControlWord = *:2 (Mem);
  FPUStatusWord = *:2 (Mem + 4);
  FPUTagWord = *:2 (Mem + 8);
  FPUDataPointer = *:4 (Mem + 20);
  FPUInstructionPointer = *:4 (Mem + 12);
  FPULastInstructionOpcode = *:2 (Mem + 18);
}
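
# 32-bit protected-mode FPU environment layout assumed above (28 bytes):
#   +0 FCW, +4 FSW, +8 FTW, +12 FIP, +16 FCS (opcode in bits 16-26),
#   +20 FDP, +24 FDS.  The opcode is read here as the word at +18.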

:FMUL spec_m32 is vexMode=0 & byte=0xD8; reg_opcode=1 ... & spec_m32 { ST0 = ST0 f* float2float(spec_m32); }
:FMUL spec_m64 is vexMode=0 & byte=0xDC; reg_opcode=1 ... & spec_m64 { ST0 = ST0 f* float2float(spec_m64); }
:FMUL freg is vexMode=0 & byte=0xD8; frow=12 & fpage=1 & freg { ST0 = ST0 f* freg; }
:FMUL freg is vexMode=0 & byte=0xDC; frow=12 & fpage=1 & freg { freg = freg f* ST0; }
:FMULP freg is vexMode=0 & byte=0xDE; frow=12 & fpage=1 & freg { freg = ST0 f* freg; fpop(); }
:FMULP is vexMode=0 & byte=0xDE; byte=0xC9 { ST1 = ST0 f* ST1; fpop(); }
:FIMUL spec_m32 is vexMode=0 & byte=0xDA; reg_opcode=1 ... & spec_m32 { ST0 = ST0 f* int2float(spec_m32); }
:FIMUL spec_m16 is vexMode=0 & byte=0xDE; reg_opcode=1 ... & spec_m16 { ST0 = ST0 f* int2float(spec_m16); }

:FNOP is vexMode=0 & byte=0xD9; byte=0xD0 { }

define pcodeop fpatan;

:FPATAN is vexMode=0 & byte=0xD9; byte=0xF3 { ST1 = fpatan(ST1, ST0); fpop(); }

# partial remainder: the quotient is chopped toward zero (FPREM) or rounded
# to nearest (FPREM1) before the multiply-subtract
:FPREM is vexMode=0 & byte=0xD9; byte=0xF8 { local quot:10 = ST0 f/ ST1; local iq:10 = trunc(quot); local fq:10 = int2float(iq); ST0 = ST0 f- (fq f* ST1); }

:FPREM1 is vexMode=0 & byte=0xD9; byte=0xF5 { local fq:10 = round(ST0 f/ ST1); ST0 = ST0 f- (fq f* ST1); }

define pcodeop fptan;

:FPTAN is vexMode=0 & byte=0xD9; byte=0xF2 { ST0 = fptan(ST0); one:4 = 1; tmp:10 = int2float(one); fpushv(tmp); }

:FRNDINT is vexMode=0 & byte=0xD9; byte=0xFC { local tmp = round(ST0); ST0 = tmp; }

:FRSTOR Mem is vexMode=0 & byte=0xDD; reg_opcode=4 ... & Mem
{
  FPUControlWord = *:2 (Mem);
  FPUStatusWord = *:2 (Mem + 4);
  FPUTagWord = *:2 (Mem + 8);
  FPUDataPointer = *:4 (Mem + 20);
  FPUInstructionPointer = *:4 (Mem + 12);
  FPULastInstructionOpcode = *:2 (Mem + 18);

  ST0 = *:10 (Mem + 28);
  ST1 = *:10 (Mem + 38);
  ST2 = *:10 (Mem + 48);
  ST3 = *:10 (Mem + 58);
  ST4 = *:10 (Mem + 68);
  ST5 = *:10 (Mem + 78);
  ST6 = *:10 (Mem + 88);
  ST7 = *:10 (Mem + 98);
}
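
# FSAVE/FRSTOR image: the 28-byte environment above followed by ST0..ST7 as
# 10-byte values at offset 28 + 10*i, 108 bytes in all.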

:FSAVE Mem is vexMode=0 & byte=0x9B; byte=0xDD; reg_opcode=6 ... & Mem
{
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;

  *:10 (Mem + 28) = ST0;
  *:10 (Mem + 38) = ST1;
  *:10 (Mem + 48) = ST2;
  *:10 (Mem + 58) = ST3;
  *:10 (Mem + 68) = ST4;
  *:10 (Mem + 78) = ST5;
  *:10 (Mem + 88) = ST6;
  *:10 (Mem + 98) = ST7;

  FPUControlWord = 0x037f;
  FPUStatusWord = 0x0000;
  FPUTagWord = 0xffff;
  FPUDataPointer = 0x00000000;
  FPUInstructionPointer = 0x00000000;
  FPULastInstructionOpcode = 0x0000;
}

:FNSAVE Mem is vexMode=0 & byte=0xDD; reg_opcode=6 ... & Mem
{
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;

  *:10 (Mem + 28) = ST0;
  *:10 (Mem + 38) = ST1;
  *:10 (Mem + 48) = ST2;
  *:10 (Mem + 58) = ST3;
  *:10 (Mem + 68) = ST4;
  *:10 (Mem + 78) = ST5;
  *:10 (Mem + 88) = ST6;
  *:10 (Mem + 98) = ST7;

  FPUControlWord = 0x037f;
  FPUStatusWord = 0x0000;
  FPUTagWord = 0xffff;
  FPUDataPointer = 0x00000000;
  FPUInstructionPointer = 0x00000000;
  FPULastInstructionOpcode = 0x0000;
}

define pcodeop fscale;

:FSCALE is vexMode=0 & byte=0xD9; byte=0xFD { ST0 = fscale(ST0, ST1); }

define pcodeop fsin;

:FSIN is vexMode=0 & byte=0xD9; byte=0xFE { ST0 = fsin(ST0); }
:FSINCOS is vexMode=0 & byte=0xD9; byte=0xFB { tmp:10 = fcos(ST0); ST0 = fsin(ST0); fpushv(tmp); }
:FSQRT is vexMode=0 & byte=0xD9; byte=0xFA { ST0 = sqrt(ST0); }

:FST spec_m32 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=2) ... & spec_m32 { spec_m32 = float2float(ST0); }
:FST spec_m64 is vexMode=0 & byte=0xDD; reg_opcode=2 ... & spec_m64 { spec_m64 = float2float(ST0); }
:FST freg is vexMode=0 & byte=0xDD; frow=13 & fpage=0 & freg { freg = ST0; }
:FSTP spec_m32 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=3) ... & spec_m32 { spec_m32 = float2float(ST0); fpop(); }
:FSTP spec_m64 is vexMode=0 & byte=0xDD; reg_opcode=3 ... & spec_m64 { spec_m64 = float2float(ST0); fpop(); }
:FSTP spec_m80 is vexMode=0 & byte=0xDB; reg_opcode=7 ... & spec_m80 { fpopv(spec_m80); }
:FSTP freg is vexMode=0 & byte=0xDD; frow=13 & fpage=1 & freg { fpopv(freg); }

:FSTCW m16 is vexMode=0 & byte=0x9B; byte=0xD9; (mod != 0b11 & reg_opcode=7) ... & m16 { m16 = FPUControlWord; }
:FNSTCW m16 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=7) ... & m16 { m16 = FPUControlWord; }

:FSTENV Mem is vexMode=0 & byte=0x9B; byte=0xD9; (mod != 0b11 & reg_opcode=6) ... & Mem
{
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;
}

:FNSTENV Mem is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=6) ... & Mem
{
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;
}

:FSTSW m16 is vexMode=0 & byte=0x9B; byte=0xDD; reg_opcode=7 ... & m16 { m16 = FPUStatusWord; }
:FSTSW AX is vexMode=0 & byte=0x9B; byte=0xDF; byte=0xE0 & AX { AX = FPUStatusWord; }
:FNSTSW m16 is vexMode=0 & byte=0xDD; reg_opcode=7 ... & m16 { m16 = FPUStatusWord; }
:FNSTSW AX is vexMode=0 & byte=0xDF; byte=0xE0 & AX { AX = FPUStatusWord; }
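
# Classic idiom: FNSTSW AX / SAHF copies C0->CF, C2->PF and C3->ZF (status
# word bits 8-15 land in AH), so x87 compares can drive the integer Jcc's.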

:FSUB spec_m32 is vexMode=0 & byte=0xD8; reg_opcode=4 ... & spec_m32 { ST0 = ST0 f- float2float(spec_m32); }
:FSUB spec_m64 is vexMode=0 & byte=0xDC; reg_opcode=4 ... & spec_m64 { ST0 = ST0 f- float2float(spec_m64); }
:FSUB ST0,freg is vexMode=0 & byte=0xD8; frow=14 & fpage=0 & freg & ST0 { ST0 = ST0 f- freg; }
:FSUB freg,ST0 is vexMode=0 & byte=0xDC; frow=14 & fpage=1 & freg & ST0 { freg = freg f- ST0; }
:FSUBP is vexMode=0 & byte=0xDE; byte=0xE9 { ST1 = ST1 f- ST0; fpop(); }
:FSUBP freg,ST0 is vexMode=0 & byte=0xDE; frow=14 & fpage=1 & freg & ST0 { freg = freg f- ST0; fpop(); }
:FISUB spec_m32 is vexMode=0 & byte=0xDA; (mod != 0b11 & reg_opcode=4) ... & spec_m32 { ST0 = ST0 f- int2float(spec_m32); }
:FISUB spec_m16 is vexMode=0 & byte=0xDE; reg_opcode=4 ... & spec_m16 { ST0 = ST0 f- int2float(spec_m16); }

:FSUBR spec_m32 is vexMode=0 & byte=0xD8; reg_opcode=5 ... & spec_m32 { ST0 = float2float(spec_m32) f- ST0; }
:FSUBR spec_m64 is vexMode=0 & byte=0xDC; reg_opcode=5 ... & spec_m64 { ST0 = float2float(spec_m64) f- ST0; }
:FSUBR ST0,freg is vexMode=0 & byte=0xD8; frow=14 & fpage=1 & freg & ST0 { ST0 = freg f- ST0; }
:FSUBR freg,ST0 is vexMode=0 & byte=0xDC; frow=14 & fpage=0 & freg & ST0 { freg = ST0 f- freg; }
:FSUBRP is vexMode=0 & byte=0xDE; byte=0xE1 { ST1 = ST0 f- ST1; fpop(); }
:FSUBRP freg,ST0 is vexMode=0 & byte=0xDE; frow=14 & fpage=0 & freg & ST0 { freg = ST0 f- freg; fpop(); }
:FISUBR spec_m32 is vexMode=0 & byte=0xDA; reg_opcode=5 ... & spec_m32 { ST0 = int2float(spec_m32) f- ST0; }
:FISUBR spec_m16 is vexMode=0 & byte=0xDE; reg_opcode=5 ... & spec_m16 { ST0 = int2float(spec_m16) f- ST0; }

:FTST is vexMode=0 & byte=0xD9; byte=0xE4 { zero:4 = 0; tmp:10 = int2float(zero); fcom(tmp); }

:FUCOM freg is vexMode=0 & byte=0xDD; frow=14 & fpage=0 & freg { fcom(freg); }
:FUCOM is vexMode=0 & byte=0xDD; byte=0xE1 { fcom(ST1); }
:FUCOMP freg is vexMode=0 & byte=0xDD; frow=14 & fpage=1 & freg { fcom(freg); fpop(); }
:FUCOMP is vexMode=0 & byte=0xDD; byte=0xE9 { fcom(ST1); fpop(); }
:FUCOMPP is vexMode=0 & byte=0xDA; byte=0xE9 { fcom(ST1); fpop(); fpop(); }

:FXAM is vexMode=0 & byte=0xD9; byte=0xE5
{
  # this is not an exact implementation, but gets the sign and zero tests right
  izero:4 = 0;
  fzero:10 = int2float(izero);

  # no test available here for infinity or an empty register
  C0 = nan(ST0);

  # sign of ST0
  C1 = ( ST0 f< fzero );

  # assume normal if not zero
  C2 = ( ST0 f!= fzero );

  # equal to zero
  C3 = ( ST0 f== fzero );

  FPUStatusWord = (zext(C0)<<8) | (zext(C1)<<9) | (zext(C2)<<10) | (zext(C3)<<14);
}
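
# For reference, hardware FXAM encodes the operand class in C3/C2/C0:
#   000 unsupported, 001 NaN, 010 normal, 011 infinity, 100 zero,
#   101 empty, 110 denormal; C1 always gets the sign bit.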

:FXCH freg is vexMode=0 & byte=0xD9; frow=12 & fpage=1 & freg { local tmp = ST0; ST0 = freg; freg = tmp; }
:FXCH is vexMode=0 & byte=0xD9; byte=0xC9 { local tmp = ST0; ST0 = ST1; ST1 = tmp; }

# this saves the FPU state into 512 bytes of memory similar to the 32-bit mode
:FXSAVE Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=0 ) ... & Mem
{
  # not saved with the same spacing as the actual processor
  *:2 (Mem) = FPUControlWord;
  *:2 (Mem + 4) = FPUStatusWord;
  *:2 (Mem + 8) = FPUTagWord;
  *:4 (Mem + 12) = FPUInstructionPointer;
  *:2 (Mem + 18) = FPULastInstructionOpcode;
  *:4 (Mem + 20) = FPUDataPointer;
  *:4 (Mem + 24) = MXCSR;

  # save the FPU ST registers to the ST/MM area of the structure,
  # regardless of the state of the last FPU/MMX execution
  *:10 (Mem + 32) = ST0;
  *:10 (Mem + 48) = ST1;
  *:10 (Mem + 64) = ST2;
  *:10 (Mem + 80) = ST3;
  *:10 (Mem + 96) = ST4;
  *:10 (Mem + 112) = ST5;
  *:10 (Mem + 128) = ST6;
  *:10 (Mem + 144) = ST7;

  *:16 (Mem + 160) = XMM0;
  *:16 (Mem + 176) = XMM1;
  *:16 (Mem + 192) = XMM2;
  *:16 (Mem + 208) = XMM3;
  *:16 (Mem + 224) = XMM4;
  *:16 (Mem + 240) = XMM5;
  *:16 (Mem + 256) = XMM6;
  *:16 (Mem + 272) = XMM7;

  # save the MMX registers to the reserved area of the structure,
  # regardless of the state of the last FPU/MMX execution
  *:10 (Mem + 288) = MM0;
  *:10 (Mem + 304) = MM1;
  *:10 (Mem + 320) = MM2;
  *:10 (Mem + 336) = MM3;
  *:10 (Mem + 352) = MM4;
  *:10 (Mem + 368) = MM5;
  *:10 (Mem + 384) = MM6;
  *:10 (Mem + 400) = MM7;
}
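
# For reference, the real FXSAVE image differs: FCW at +0, FSW at +2,
# abridged tag at +4, FOP at +6, FIP at +8, FDP at +16, MXCSR at +24,
# MXCSR_MASK at +28, ST/MM sharing slots at +32+16*i, XMM at +160+16*i.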

:FXRSTOR Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=1 ) ... & Mem
{
  # not read with the same spacing as the actual processor
  FPUControlWord = *:2 (Mem);
  FPUStatusWord = *:2 (Mem + 4);
  FPUTagWord = *:2 (Mem + 8);
  FPUInstructionPointer = *:4 (Mem + 12);
  FPULastInstructionOpcode = *:2 (Mem + 18);
  FPUDataPointer = *:4 (Mem + 20);
  MXCSR = *:4 (Mem + 24);

  # restore the FPU ST registers from the ST/MM area of the structure,
  # regardless of the state of the last FPU/MMX execution
  ST0 = *:10 (Mem + 32);
  ST1 = *:10 (Mem + 48);
  ST2 = *:10 (Mem + 64);
  ST3 = *:10 (Mem + 80);
  ST4 = *:10 (Mem + 96);
  ST5 = *:10 (Mem + 112);
  ST6 = *:10 (Mem + 128);
  ST7 = *:10 (Mem + 144);

  XMM0 = *:16 (Mem + 160);
  XMM1 = *:16 (Mem + 176);
  XMM2 = *:16 (Mem + 192);
  XMM3 = *:16 (Mem + 208);
  XMM4 = *:16 (Mem + 224);
  XMM5 = *:16 (Mem + 240);
  XMM6 = *:16 (Mem + 256);
  XMM7 = *:16 (Mem + 272);

  # restore the MMX registers from the reserved area of the structure,
  # regardless of the state of the last FPU/MMX execution
  MM0 = *:10 (Mem + 288);
  MM1 = *:10 (Mem + 304);
  MM2 = *:10 (Mem + 320);
  MM3 = *:10 (Mem + 336);
  MM4 = *:10 (Mem + 352);
  MM5 = *:10 (Mem + 368);
  MM6 = *:10 (Mem + 384);
  MM7 = *:10 (Mem + 400);
}

# approximation: the significand/exponent split is not actually computed
:FXTRACT is vexMode=0 & byte=0xD9; byte=0xF4 { significand:10 = ST0; exponent:10 = ST0; ST0 = exponent; fpushv(significand); }

# approximation: the log2() term is not computed, ST0 (or ST0+1) is used directly
:FYL2X is vexMode=0 & byte=0xD9; byte=0xF1 { local log2st0 = ST0; ST1 = ST1 f* log2st0; fpop(); }
:FYL2XP1 is vexMode=0 & byte=0xD9; byte=0xF9 { one:4 = 1; tmp:10 = int2float(one); log2st0:10 = ST0 f+ tmp; ST1 = ST1 f* log2st0; fpop(); }


#
# MMX and SSE/SSE2 instructions
#

:ADDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x58; XmmReg ... & m128
{
  XmmReg[0,64] = XmmReg[0,64] f+ m128[0,64];
  XmmReg[64,64] = XmmReg[64,64] f+ m128[64,64];
}

:ADDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] f+ XmmReg2[64,64];
}

:ADDPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x58; m128 & XmmReg ...
{
  local m:16 = m128; # Guarantee value is in a fixed location
  XmmReg[0,32] = XmmReg[0,32] f+ m[0,32];
  XmmReg[32,32] = XmmReg[32,32] f+ m[32,32];
  XmmReg[64,32] = XmmReg[64,32] f+ m[64,32];
  XmmReg[96,32] = XmmReg[96,32] f+ m[96,32];
}

:ADDPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] f+ XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] f+ XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] f+ XmmReg2[96,32];
}

:ADDSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x58; m64 & XmmReg ...
{
  XmmReg[0,64] = XmmReg[0,64] f+ m64;
}

:ADDSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg2[0,64];
}

:ADDSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x58; m32 & XmmReg ...
{
  XmmReg[0,32] = XmmReg[0,32] f+ m32;
}

:ADDSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg2[0,32];
}

:ADDSUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD0; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f- m[0,64];
  XmmReg[64,64] = XmmReg[64,64] f+ m[64,64];
}

:ADDSUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD0; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] f+ XmmReg2[64,64];
}

:ADDSUBPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD0; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f- m[0,32];
  XmmReg[32,32] = XmmReg[32,32] f+ m[32,32];
  XmmReg[64,32] = XmmReg[64,32] f- m[64,32];
  XmmReg[96,32] = XmmReg[96,32] f+ m[96,32];
}

:ADDSUBPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD0; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] f+ XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] f- XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] f+ XmmReg2[96,32];
}

# special FLOATING POINT bitwise AND
:ANDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x54; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] & m[0,64];
  XmmReg[64,64] = XmmReg[64,64] & m[64,64];
}

# special FLOATING POINT bitwise AND
:ANDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x54; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] & XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] & XmmReg2[64,64];
}

# special FLOATING POINT bitwise AND
:ANDPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x54; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] & m[0,32];
  XmmReg[32,32] = XmmReg[32,32] & m[32,32];
  XmmReg[64,32] = XmmReg[64,32] & m[64,32];
  XmmReg[96,32] = XmmReg[96,32] & m[96,32];
}

# special FLOATING POINT bitwise AND
:ANDPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x54; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] & XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] & XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] & XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] & XmmReg2[96,32];
}

# special FLOATING POINT bitwise AND NOT
:ANDNPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x55; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = ~XmmReg[0,64] & m[0,64];
  XmmReg[64,64] = ~XmmReg[64,64] & m[64,64];
}

:ANDNPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x55; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = ~XmmReg1[0,64] & XmmReg2[0,64];
  XmmReg1[64,64] = ~XmmReg1[64,64] & XmmReg2[64,64];
}

# special FLOATING POINT bitwise AND NOT
:ANDNPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x55; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = ~XmmReg[0,32] & m[0,32];
  XmmReg[32,32] = ~XmmReg[32,32] & m[32,32];
  XmmReg[64,32] = ~XmmReg[64,32] & m[64,32];
  XmmReg[96,32] = ~XmmReg[96,32] & m[96,32];
}

:ANDNPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x55; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = ~XmmReg1[0,32] & XmmReg2[0,32];
  XmmReg1[32,32] = ~XmmReg1[32,32] & XmmReg2[32,32];
  XmmReg1[64,32] = ~XmmReg1[64,32] & XmmReg2[64,32];
  XmmReg1[96,32] = ~XmmReg1[96,32] & XmmReg2[96,32];
}

# predicate mnemonics for "CMP...PD" opcode
XmmCondPD: "EQ" is imm8=0 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f== xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( xmmTmp1_Qb f== xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "LT" is imm8=1 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f< xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( xmmTmp1_Qb f< xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "LE" is imm8=2 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f<= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( xmmTmp1_Qb f<= xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "UNORD" is imm8=3 {
  xmmTmp1_Qa = zext( nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( nan(xmmTmp1_Qb) || nan(xmmTmp2_Qb) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "NEQ" is imm8=4 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f!= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( xmmTmp1_Qb f!= xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "NLT" is imm8=5 {
  xmmTmp1_Qa = zext( !(xmmTmp1_Qa f< xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( !(xmmTmp1_Qb f< xmmTmp2_Qb) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "NLE" is imm8=6 {
  xmmTmp1_Qa = zext( !(xmmTmp1_Qa f<= xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( !(xmmTmp1_Qb f<= xmmTmp2_Qb) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondPD: "ORD" is imm8=7 {
  xmmTmp1_Qa = zext( !(nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa)) ) * 0xFFFFFFFFFFFFFFFF;
  xmmTmp1_Qb = zext( !(nan(xmmTmp1_Qb) || nan(xmmTmp2_Qb)) ) * 0xFFFFFFFFFFFFFFFF;
}

define pcodeop cmppd;

XmmCondPD: is imm8 {
  xmmTmp1_Qa = cmppd(xmmTmp1_Qa, xmmTmp2_Qa, imm8:1);
  xmmTmp1_Qb = cmppd(xmmTmp1_Qb, xmmTmp2_Qb, imm8:1);
}

# immediate operand for "CMP...PD" opcode
# note: normally blank, "imm8" emits for all out of range cases
CMPPD_OPERAND: is imm8<8 { }
CMPPD_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondPD^"PD" XmmReg,m128^CMPPD_OPERAND is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC2; (m128 & XmmReg ...); XmmCondPD & CMPPD_OPERAND
{
  local m:16 = m128;
  xmmTmp1_Qa = XmmReg[0,64];
  xmmTmp1_Qb = XmmReg[64,64];

  xmmTmp2_Qa = m[0,64];
  xmmTmp2_Qb = m[64,64];

  build XmmCondPD;

  XmmReg[0,64] = xmmTmp1_Qa;
  XmmReg[64,64] = xmmTmp1_Qb;
}

:CMP^XmmCondPD^"PD" XmmReg1,XmmReg2^CMPPD_OPERAND is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondPD & CMPPD_OPERAND
{
  xmmTmp1_Qa = XmmReg1[0,64];
  xmmTmp1_Qb = XmmReg1[64,64];

  xmmTmp2_Qa = XmmReg2[0,64];
  xmmTmp2_Qb = XmmReg2[64,64];

  build XmmCondPD;

  XmmReg1[0,64] = xmmTmp1_Qa;
  XmmReg1[64,64] = xmmTmp1_Qb;
}

# predicate mnemonics for "CMP...PS" opcode
XmmCondPS: "EQ" is imm8=0 {
  xmmTmp1_Da = zext( xmmTmp1_Da f== xmmTmp2_Da ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( xmmTmp1_Db f== xmmTmp2_Db ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( xmmTmp1_Dc f== xmmTmp2_Dc ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( xmmTmp1_Dd f== xmmTmp2_Dd ) * 0xFFFFFFFF;
}

XmmCondPS: "LT" is imm8=1 {
  xmmTmp1_Da = zext( xmmTmp1_Da f< xmmTmp2_Da ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( xmmTmp1_Db f< xmmTmp2_Db ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( xmmTmp1_Dc f< xmmTmp2_Dc ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( xmmTmp1_Dd f< xmmTmp2_Dd ) * 0xFFFFFFFF;
}

XmmCondPS: "LE" is imm8=2 {
  xmmTmp1_Da = zext( xmmTmp1_Da f<= xmmTmp2_Da ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( xmmTmp1_Db f<= xmmTmp2_Db ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( xmmTmp1_Dc f<= xmmTmp2_Dc ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( xmmTmp1_Dd f<= xmmTmp2_Dd ) * 0xFFFFFFFF;
}

XmmCondPS: "UNORD" is imm8=3 {
  xmmTmp1_Da = zext( nan(xmmTmp1_Da) || nan(xmmTmp2_Da) ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( nan(xmmTmp1_Db) || nan(xmmTmp2_Db) ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( nan(xmmTmp1_Dc) || nan(xmmTmp2_Dc) ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( nan(xmmTmp1_Dd) || nan(xmmTmp2_Dd) ) * 0xFFFFFFFF;
}

XmmCondPS: "NEQ" is imm8=4 {
  xmmTmp1_Da = zext( xmmTmp1_Da f!= xmmTmp2_Da ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( xmmTmp1_Db f!= xmmTmp2_Db ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( xmmTmp1_Dc f!= xmmTmp2_Dc ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( xmmTmp1_Dd f!= xmmTmp2_Dd ) * 0xFFFFFFFF;
}

XmmCondPS: "NLT" is imm8=5 {
  xmmTmp1_Da = zext( !(xmmTmp1_Da f< xmmTmp2_Da) ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( !(xmmTmp1_Db f< xmmTmp2_Db) ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( !(xmmTmp1_Dc f< xmmTmp2_Dc) ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( !(xmmTmp1_Dd f< xmmTmp2_Dd) ) * 0xFFFFFFFF;
}

XmmCondPS: "NLE" is imm8=6 {
  xmmTmp1_Da = zext( !(xmmTmp1_Da f<= xmmTmp2_Da) ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( !(xmmTmp1_Db f<= xmmTmp2_Db) ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( !(xmmTmp1_Dc f<= xmmTmp2_Dc) ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( !(xmmTmp1_Dd f<= xmmTmp2_Dd) ) * 0xFFFFFFFF;
}

XmmCondPS: "ORD" is imm8=7 {
  xmmTmp1_Da = zext( !(nan(xmmTmp1_Da) || nan(xmmTmp2_Da)) ) * 0xFFFFFFFF;
  xmmTmp1_Db = zext( !(nan(xmmTmp1_Db) || nan(xmmTmp2_Db)) ) * 0xFFFFFFFF;
  xmmTmp1_Dc = zext( !(nan(xmmTmp1_Dc) || nan(xmmTmp2_Dc)) ) * 0xFFFFFFFF;
  xmmTmp1_Dd = zext( !(nan(xmmTmp1_Dd) || nan(xmmTmp2_Dd)) ) * 0xFFFFFFFF;
}

define pcodeop cmpps;

XmmCondPS: is imm8 {
  xmmTmp1_Da = cmpps(xmmTmp1_Da, xmmTmp2_Da, imm8:1);
  xmmTmp1_Db = cmpps(xmmTmp1_Db, xmmTmp2_Db, imm8:1);
  xmmTmp1_Dc = cmpps(xmmTmp1_Dc, xmmTmp2_Dc, imm8:1);
  xmmTmp1_Dd = cmpps(xmmTmp1_Dd, xmmTmp2_Dd, imm8:1);
}

# immediate operand for "CMP...PS" opcode
# note: normally blank, "imm8" emits for all out of range cases
CMPPS_OPERAND: is imm8<8 { }
CMPPS_OPERAND: ", "^imm8 is imm8 { }
:CMP^XmmCondPS^"PS" XmmReg,m128^CMPPS_OPERAND is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC2; (m128 & XmmReg ...); XmmCondPS & CMPPS_OPERAND
{
  local m:16 = m128;
  xmmTmp1_Da = XmmReg[0,32];
  xmmTmp1_Db = XmmReg[32,32];
  xmmTmp1_Dc = XmmReg[64,32];
  xmmTmp1_Dd = XmmReg[96,32];

  xmmTmp2_Da = m[0,32];
  xmmTmp2_Db = m[32,32];
  xmmTmp2_Dc = m[64,32];
  xmmTmp2_Dd = m[96,32];

  build XmmCondPS;

  XmmReg[0,32] = xmmTmp1_Da;
  XmmReg[32,32] = xmmTmp1_Db;
  XmmReg[64,32] = xmmTmp1_Dc;
  XmmReg[96,32] = xmmTmp1_Dd;
}

:CMP^XmmCondPS^"PS" XmmReg1,XmmReg2^CMPPS_OPERAND is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondPS & CMPPS_OPERAND
{
  xmmTmp1_Da = XmmReg1[0,32];
  xmmTmp1_Db = XmmReg1[32,32];
  xmmTmp1_Dc = XmmReg1[64,32];
  xmmTmp1_Dd = XmmReg1[96,32];

  xmmTmp2_Da = XmmReg2[0,32];
  xmmTmp2_Db = XmmReg2[32,32];
  xmmTmp2_Dc = XmmReg2[64,32];
  xmmTmp2_Dd = XmmReg2[96,32];

  build XmmCondPS;

  XmmReg1[0,32] = xmmTmp1_Da;
  XmmReg1[32,32] = xmmTmp1_Db;
  XmmReg1[64,32] = xmmTmp1_Dc;
  XmmReg1[96,32] = xmmTmp1_Dd;
}

# predicate mnemonics for "CMP...SD" opcode
XmmCondSD: "EQ" is imm8=0 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f== xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "LT" is imm8=1 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f< xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "LE" is imm8=2 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f<= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "UNORD" is imm8=3 {
  xmmTmp1_Qa = zext( nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "NEQ" is imm8=4 {
  xmmTmp1_Qa = zext( xmmTmp1_Qa f!= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "NLT" is imm8=5 {
  xmmTmp1_Qa = zext( !(xmmTmp1_Qa f< xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "NLE" is imm8=6 {
  xmmTmp1_Qa = zext( !(xmmTmp1_Qa f<= xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF;
}

XmmCondSD: "ORD" is imm8=7 {
  xmmTmp1_Qa = zext( !(nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa)) ) * 0xFFFFFFFFFFFFFFFF;
}

define pcodeop cmpsd;

XmmCondSD: is imm8 {
  xmmTmp1_Qa = cmpsd(xmmTmp1_Qa, xmmTmp2_Qa, imm8:1);
}

# immediate operand for "CMP...SD" opcode
# note: normally blank, "imm8" emits for all out of range cases
CMPSD_OPERAND: is imm8<8 { }
CMPSD_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondSD^"SD" XmmReg, m64^CMPSD_OPERAND is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xC2; (m64 & XmmReg ...); XmmCondSD & CMPSD_OPERAND
{
  xmmTmp1_Qa = XmmReg[0,64];
  xmmTmp2_Qa = m64;
  build XmmCondSD;
  XmmReg[0,64] = xmmTmp1_Qa;
}

:CMP^XmmCondSD^"SD" XmmReg1, XmmReg2^CMPSD_OPERAND is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondSD & CMPSD_OPERAND
{
  xmmTmp1_Qa = XmmReg1[0,64];
  xmmTmp2_Qa = XmmReg2[0,64];
  build XmmCondSD;
  XmmReg1[0,64] = xmmTmp1_Qa;
}

# predicate mnemonics for "CMP...SS" opcode
XmmCondSS: "EQ" is imm8=0 {
  xmmTmp1_Da = zext( xmmTmp1_Da f== xmmTmp2_Da ) * 0xFFFFFFFF;
}

XmmCondSS: "LT" is imm8=1 {
  xmmTmp1_Da = zext( xmmTmp1_Da f< xmmTmp2_Da ) * 0xFFFFFFFF;
}

XmmCondSS: "LE" is imm8=2 {
  xmmTmp1_Da = zext( xmmTmp1_Da f<= xmmTmp2_Da ) * 0xFFFFFFFF;
}

XmmCondSS: "UNORD" is imm8=3 {
  xmmTmp1_Da = zext( nan(xmmTmp1_Da) || nan(xmmTmp2_Da) ) * 0xFFFFFFFF;
}

XmmCondSS: "NEQ" is imm8=4 {
  xmmTmp1_Da = zext( xmmTmp1_Da f!= xmmTmp2_Da ) * 0xFFFFFFFF;
}

XmmCondSS: "NLT" is imm8=5 {
  xmmTmp1_Da = zext( !(xmmTmp1_Da f< xmmTmp2_Da) ) * 0xFFFFFFFF;
}

XmmCondSS: "NLE" is imm8=6 {
  xmmTmp1_Da = zext( !(xmmTmp1_Da f<= xmmTmp2_Da) ) * 0xFFFFFFFF;
}

XmmCondSS: "ORD" is imm8=7 {
  xmmTmp1_Da = zext( !(nan(xmmTmp1_Da) || nan(xmmTmp2_Da)) ) * 0xFFFFFFFF;
}

define pcodeop cmpss;

XmmCondSS: is imm8 {
  xmmTmp1_Da = cmpss(xmmTmp1_Da, xmmTmp2_Da, imm8:1);
}

# immediate operand for "CMP...SS" opcode
# note: normally blank, "imm8" emits for all out of range cases
CMPSS_OPERAND: is imm8<8 { }
CMPSS_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondSS^"SS" XmmReg, m32^CMPSS_OPERAND is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xC2; (m32 & XmmReg ...); XmmCondSS & CMPSS_OPERAND
{
  xmmTmp1_Da = XmmReg[0,32];
  xmmTmp2_Da = m32;
  build XmmCondSS;
  XmmReg[0,32] = xmmTmp1_Da;
}

:CMP^XmmCondSS^"SS" XmmReg1, XmmReg2^CMPSS_OPERAND is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondSS & CMPSS_OPERAND
{
  xmmTmp1_Da = XmmReg1[0,32];
  xmmTmp2_Da = XmmReg2[0,32];
  build XmmCondSS;
  XmmReg1[0,32] = xmmTmp1_Da;
}

:COMISD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2F; m64 & XmmReg ...
{
  fucompe(XmmReg[0,64], m64);
}

:COMISD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2F; xmmmod=3 & XmmReg1 & XmmReg2
{
  fucompe(XmmReg1[0,64], XmmReg2[0,64]);
}

:COMISS XmmReg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2F; m32 & XmmReg ...
{
  fucompe(XmmReg[0,32], m32);
}

:COMISS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2F; xmmmod=3 & XmmReg1 & XmmReg2
{
  fucompe(XmmReg1[0,32], XmmReg2[0,32]);
}

:CVTDQ2PD XmmReg, m64 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xE6; m64 & XmmReg ...
{
  local m:8 = m64;
  XmmReg[0,64] = int2float( m[0,32] );
  XmmReg[64,64] = int2float( m[32,32] );
}

:CVTDQ2PD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = int2float( XmmReg2[0,32] );
  XmmReg1[64,64] = int2float( XmmReg2[32,32] );
}

:CVTDQ2PS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5B; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = int2float( m[0,32] );
  XmmReg[32,32] = int2float( m[32,32] );
  XmmReg[64,32] = int2float( m[64,32] );
  XmmReg[96,32] = int2float( m[96,32] );
}

:CVTDQ2PS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = int2float( XmmReg2[0,32] );
  XmmReg1[32,32] = int2float( XmmReg2[32,32] );
  XmmReg1[64,32] = int2float( XmmReg2[64,32] );
  XmmReg1[96,32] = int2float( XmmReg2[96,32] );
}

# CVTPD2DQ/CVTPD2PI round per MXCSR before converting (round-to-nearest is
# assumed here); the truncating forms are CVTTPD2DQ/CVTTPD2PI.  Writing an
# MMX register also marks the x87 tag word valid.
:CVTPD2DQ XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xE6; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = trunc( round(m[0,64]) );
  XmmReg[32,32] = trunc( round(m[64,64]) );
  XmmReg[64,32] = 0;
  XmmReg[96,32] = 0;
}

:CVTPD2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = trunc( round(XmmReg2[0,64]) );
  XmmReg1[32,32] = trunc( round(XmmReg2[64,64]) );
  XmmReg1[64,32] = 0;
  XmmReg1[96,32] = 0;
}

:CVTPD2PI mmxreg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2D; mmxreg ... & m128
{
  local m:16 = m128;
  mmxreg[0,32] = trunc( round(m[0,64]) );
  mmxreg[32,32] = trunc( round(m[64,64]) );
  FPUTagWord = 0x0000;
}

:CVTPD2PI mmxreg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2D; xmmmod=3 & mmxreg1 & XmmReg2
{
  mmxreg1[0,32] = trunc( round(XmmReg2[0,64]) );
  mmxreg1[32,32] = trunc( round(XmmReg2[64,64]) );
  FPUTagWord = 0x0000;
}

:CVTPD2PS XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5A; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = float2float( m[0,64] );
  XmmReg[32,32] = float2float( m[64,64] );
  XmmReg[64,32] = 0;
  XmmReg[96,32] = 0;
}

:CVTPD2PS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = float2float( XmmReg2[0,64] );
  XmmReg1[32,32] = float2float( XmmReg2[64,64] );
  XmmReg1[64,32] = 0;
  XmmReg1[96,32] = 0;
}

:CVTPI2PD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2A; m64 & XmmReg ...
{
  local m:8 = m64;
  XmmReg[0,64] = int2float(m[0,32]);
  XmmReg[64,64] = int2float(m[32,32]);
}

:CVTPI2PD XmmReg1, mmxreg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2A; xmmmod=3 & XmmReg1 & mmxreg2
{
  XmmReg1[0,64] = int2float(mmxreg2[0,32]);
  XmmReg1[64,64] = int2float(mmxreg2[32,32]);
}

:CVTPI2PS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2A; m64 & XmmReg ...
{
  local m:8 = m64;
  XmmReg[0,32] = int2float(m[0,32]);
  XmmReg[32,32] = int2float(m[32,32]);
}

:CVTPI2PS XmmReg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2A; xmmmod=3 & XmmReg1 & mmxreg2
{
  XmmReg1[0,32] = int2float(mmxreg2[0,32]);
  XmmReg1[32,32] = int2float(mmxreg2[32,32]);
}

# rounds per MXCSR (round-to-nearest assumed); CVTTPS2DQ is the truncating form
:CVTPS2DQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5B; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = trunc( round(m[0,32]) );
  XmmReg[32,32] = trunc( round(m[32,32]) );
  XmmReg[64,32] = trunc( round(m[64,32]) );
  XmmReg[96,32] = trunc( round(m[96,32]) );
}

:CVTPS2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = trunc( round(XmmReg2[0,32]) );
  XmmReg1[32,32] = trunc( round(XmmReg2[32,32]) );
  XmmReg1[64,32] = trunc( round(XmmReg2[64,32]) );
  XmmReg1[96,32] = trunc( round(XmmReg2[96,32]) );
}

:CVTPS2PD XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5A; m64 & XmmReg ...
{
  local m:8 = m64;
  XmmReg[0,64] = float2float( m[0,32] );
  XmmReg[64,64] = float2float( m[32,32] );
}

:CVTPS2PD XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = float2float( XmmReg2[0,32] );
  XmmReg1[64,64] = float2float( XmmReg2[32,32] );
}

:CVTPS2PI mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2D; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = trunc(round(m[0,32]));
  mmxreg[32,32] = trunc(round(m[32,32]));
  FPUTagWord = 0x0000;
}

:CVTPS2PI mmxreg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2D; xmmmod=3 & mmxreg1 & XmmReg2
{
  mmxreg1[0,32] = trunc(round(XmmReg2[0,32]));
  mmxreg1[32,32] = trunc(round(XmmReg2[32,32]));
  FPUTagWord = 0x0000;
}

:CVTSD2SI Reg32, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2D; Reg32 ... & m64
{
  Reg32 = trunc(round(m64));
}

:CVTSD2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg32 & XmmReg2
{
  Reg32 = trunc(round(XmmReg2[0,64]));
}

@ifdef IA64
:CVTSD2SI Reg64, m64 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2D; Reg64 ... & m64
{
  Reg64 = trunc(round(m64));
}

:CVTSD2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg64 & XmmReg2
{
  Reg64 = trunc(round(XmmReg2[0,64]));
}
@endif
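
# Convention for the CVT* conversions here: round() models the MXCSR/FCW
# rounding step (round-to-nearest assumed) and trunc() does the float-to-int
# conversion, so the CVTxx forms use trunc(round(x)) while the truncating
# CVTTxx forms use trunc(x) alone.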

:CVTSD2SS XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5A; m64 & XmmReg ...
{
  XmmReg[0,32] = float2float(m64);
}

:CVTSD2SS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = float2float(XmmReg2[0,64]);
}

:CVTSI2SD XmmReg, rm32 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2A; rm32 & XmmReg ...
{
  XmmReg[0,64] = int2float(rm32);
}

@ifdef IA64
:CVTSI2SD XmmReg, rm64 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2A; rm64 & XmmReg ...
{
  XmmReg[0,64] = int2float(rm64);
}
@endif

:CVTSI2SS XmmReg, rm32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2A; rm32 & XmmReg ...
{
  XmmReg[0,32] = int2float(rm32);
}

@ifdef IA64
:CVTSI2SS XmmReg, rm64 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2A; rm64 & XmmReg ...
{
  XmmReg[0,32] = int2float(rm64);
}
@endif

:CVTSS2SD XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5A; m32 & XmmReg ...
{
  XmmReg[0,64] = float2float(m32);
}

:CVTSS2SD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = float2float(XmmReg2[0,32]);
}

:CVTSS2SI Reg32, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2D; Reg32 ... & m32
{
  Reg32 = trunc(round(m32));
}

:CVTSS2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg32 & XmmReg2
{
  Reg32 = trunc(round(XmmReg2[0,32]));
}

@ifdef IA64
:CVTSS2SI Reg64, m32 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2D; Reg64 ... & m32
{
  Reg64 = trunc(round(m32));
}

:CVTSS2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg64 & XmmReg2
{
  Reg64 = trunc(round(XmmReg2[0,32]));
}
@endif

:CVTTPD2PI mmxreg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2C; mmxreg ... & m128
{
  local m:16 = m128;
  mmxreg[0,32] = trunc(m[0,64]);
  mmxreg[32,32] = trunc(m[64,64]);
  FPUTagWord = 0x0000;
}

:CVTTPD2PI mmxreg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2C; xmmmod=3 & mmxreg1 & XmmReg2
{
  mmxreg1[0,32] = trunc(XmmReg2[0,64]);
  mmxreg1[32,32] = trunc(XmmReg2[64,64]);
  FPUTagWord = 0x0000;
}

:CVTTPD2DQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE6; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = trunc(m[0,64]);
  XmmReg[32,32] = trunc(m[64,64]);
  XmmReg[64,32] = 0;
  XmmReg[96,32] = 0;
}

:CVTTPD2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = trunc(XmmReg2[0,64]);
  XmmReg1[32,32] = trunc(XmmReg2[64,64]);
  XmmReg1[64,32] = 0;
  XmmReg1[96,32] = 0;
}

:CVTTPS2DQ XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5B; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = trunc(m[0,32]);
  XmmReg[32,32] = trunc(m[32,32]);
  XmmReg[64,32] = trunc(m[64,32]);
  XmmReg[96,32] = trunc(m[96,32]);
}

:CVTTPS2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = trunc(XmmReg2[0,32]);
  XmmReg1[32,32] = trunc(XmmReg2[32,32]);
  XmmReg1[64,32] = trunc(XmmReg2[64,32]);
  XmmReg1[96,32] = trunc(XmmReg2[96,32]);
}

:CVTTPS2PI mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2C; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = trunc(m[0,32]);
  mmxreg[32,32] = trunc(m[32,32]);
  FPUTagWord = 0x0000;
}

:CVTTPS2PI mmxreg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2C; xmmmod=3 & mmxreg1 & XmmReg2
{
  mmxreg1[0,32] = trunc(XmmReg2[0,32]);
  mmxreg1[32,32] = trunc(XmmReg2[32,32]);
  FPUTagWord = 0x0000;
}

:CVTTSD2SI Reg32, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2C; Reg32 ... & m64
{
  Reg32 = trunc(m64);
}

:CVTTSD2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg32 & XmmReg2
{
  Reg32 = trunc(XmmReg2[0,64]);
}

@ifdef IA64
:CVTTSD2SI Reg64, m64 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2C; Reg64 ... & m64
{
  Reg64 = trunc(m64);
}

:CVTTSD2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg64 & XmmReg2
{
  Reg64 = trunc(XmmReg2[0,64]);
}
@endif

:CVTTSS2SI Reg32, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2C; Reg32 ... & m32
{
  Reg32 = trunc(m32);
}

:CVTTSS2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg32 & XmmReg2
{
  Reg32 = trunc(XmmReg2[0,32]);
}

@ifdef IA64
:CVTTSS2SI Reg64, m32 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2C; Reg64 ... & m32
{
  Reg64 = trunc(m32);
}

:CVTTSS2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg64 & XmmReg2
{
  Reg64 = trunc(XmmReg2[0,32]);
}
@endif

define pcodeop divpd;

:DIVPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5E; XmmReg ... & m128 { XmmReg = divpd(XmmReg, m128); }
:DIVPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = divpd(XmmReg1, XmmReg2); }

define pcodeop divps;

:DIVPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5E; XmmReg ... & m128 { XmmReg = divps(XmmReg, m128); }
:DIVPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = divps(XmmReg1, XmmReg2); }

:DIVSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5E; m64 & XmmReg ...
{
  XmmReg[0,64] = XmmReg[0,64] f/ m64;
}

:DIVSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f/ XmmReg2[0,64];
}

:DIVSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5E; m32 & XmmReg ...
{
  XmmReg[0,32] = XmmReg[0,32] f/ m32;
}

:DIVSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f/ XmmReg2[0,32];
}

:EMMS is vexMode=0 & byte=0x0F; byte=0x77 { FPUTagWord = 0xFFFF; }

:HADDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7C; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f+ XmmReg[64,64];
  XmmReg[64,64] = m[0,64] f+ m[64,64];
}

:HADDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7C; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg1[64,64];
  XmmReg1[64,64] = XmmReg2[0,64] f+ XmmReg2[64,64];
}

:HADDPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7C; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f+ XmmReg[32,32];
  XmmReg[32,32] = XmmReg[64,32] f+ XmmReg[96,32];
  XmmReg[64,32] = m[0,32] f+ m[32,32];
  XmmReg[96,32] = m[64,32] f+ m[96,32];
}

:HADDPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7C; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg1[32,32];
  XmmReg1[32,32] = XmmReg1[64,32] f+ XmmReg1[96,32];
  XmmReg1[64,32] = XmmReg2[0,32] f+ XmmReg2[32,32];
  XmmReg1[96,32] = XmmReg2[64,32] f+ XmmReg2[96,32];
}

:HSUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7D; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f- XmmReg[64,64];
  XmmReg[64,64] = m[0,64] f- m[64,64];
}

:HSUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7D; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg1[64,64];
  XmmReg1[64,64] = XmmReg2[0,64] f- XmmReg2[64,64];
}

:HSUBPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7D; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f- XmmReg[32,32];
  XmmReg[32,32] = XmmReg[64,32] f- XmmReg[96,32];
  XmmReg[64,32] = m[0,32] f- m[32,32];
  XmmReg[96,32] = m[64,32] f- m[96,32];
}

:HSUBPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7D; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg1[32,32];
  XmmReg1[32,32] = XmmReg1[64,32] f- XmmReg1[96,32];
  XmmReg1[64,32] = XmmReg2[0,32] f- XmmReg2[32,32];
  XmmReg1[96,32] = XmmReg2[64,32] f- XmmReg2[96,32];
}

#--------------------
#SSE3...
#--------------------

define pcodeop lddqu;

:LDDQU XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xF0; XmmReg ... & m128 { XmmReg = lddqu(XmmReg, m128); }

define pcodeop maskmovdqu;

# approximation: the real instruction byte-masks a store to [EDI]; that side effect is not modeled
:MASKMOVDQU XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF7; XmmReg1 & XmmReg2 { XmmReg1 = maskmovdqu(XmmReg1, XmmReg2); }

define pcodeop maxpd;

:MAXPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5F; XmmReg ... & m128 { XmmReg = maxpd(XmmReg, m128); }
:MAXPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = maxpd(XmmReg1, XmmReg2); }

define pcodeop maxps;

:MAXPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5F; XmmReg ... & m128 { XmmReg = maxps(XmmReg, m128); }
:MAXPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = maxps(XmmReg1, XmmReg2); }

:MAXSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5F; XmmReg ... & m64
{
  local tmp:8 = m64;
  if (tmp f< XmmReg[0,64]) goto inst_next;
  XmmReg[0,64] = tmp;
}

:MAXSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2
{
  if (XmmReg2[0,64] f< XmmReg1[0,64]) goto inst_next;
  XmmReg1[0,64] = XmmReg2[0,64];
}

:MAXSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5F; XmmReg ... & m32
{
  local tmp:4 = m32;
  if (tmp f< XmmReg[0,32]) goto inst_next;
  XmmReg[0,32] = tmp;
}

:MAXSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2
{
  if (XmmReg2[0,32] f< XmmReg1[0,32]) goto inst_next;
  XmmReg1[0,32] = XmmReg2[0,32];
}

define pcodeop minpd;

:MINPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5D; XmmReg ... & m128 { XmmReg = minpd(XmmReg, m128); }
:MINPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = minpd(XmmReg1, XmmReg2); }

define pcodeop minps;

:MINPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5D; XmmReg ... & m128 { XmmReg = minps(XmmReg, m128); }
:MINPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = minps(XmmReg1, XmmReg2); }

:MINSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5D; XmmReg ... & m64
{
  local tmp:8 = m64;
  if (XmmReg[0,64] f< tmp) goto inst_next;
  XmmReg[0,64] = tmp;
}

:MINSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2
{
  if (XmmReg1[0,64] f< XmmReg2[0,64]) goto inst_next;
  XmmReg1[0,64] = XmmReg2[0,64];
}

:MINSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5D; XmmReg ... & m32
{
  local tmp:4 = m32;
  if (XmmReg[0,32] f< tmp) goto inst_next;
  XmmReg[0,32] = tmp;
}

:MINSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2
{
  if (XmmReg1[0,32] f< XmmReg2[0,32]) goto inst_next;
  XmmReg1[0,32] = XmmReg2[0,32];
}
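
# The branch form above matches the Intel MIN/MAX scalar rule: when the
# operands compare unordered (a NaN is present) or equal, the second source
# operand is returned.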

:MOVAPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x28; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = m[0,64];
  XmmReg[64,64] = m[64,64];
}

:MOVAPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg2[64,64];
}

:MOVAPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x29; m128 & XmmReg ...
{
  m128 = XmmReg;
}

:MOVAPD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,64] = XmmReg1[0,64];
  XmmReg2[64,64] = XmmReg1[64,64];
}

:MOVAPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x28; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = m[0,32];
  XmmReg[32,32] = m[32,32];
  XmmReg[64,32] = m[64,32];
  XmmReg[96,32] = m[96,32];
}

:MOVAPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg2[96,32];
}

:MOVAPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x29; m128 & XmmReg ...
{
  m128 = XmmReg;
}

:MOVAPS XmmReg2, XmmReg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,32] = XmmReg1[0,32];
  XmmReg2[32,32] = XmmReg1[32,32];
  XmmReg2[64,32] = XmmReg1[64,32];
  XmmReg2[96,32] = XmmReg1[96,32];
}

:MOVD mmxreg, rm32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6E; rm32 & mmxreg ... { mmxreg = zext(rm32); }
:MOVD rm32, mmxreg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x7E; rm32 & check_rm32_dest ... & mmxreg ... { rm32 = mmxreg(0); build check_rm32_dest; }
:MOVD XmmReg, rm32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6E; rm32 & XmmReg ... { XmmReg = zext(rm32); }
:MOVD rm32, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7E; rm32 & check_rm32_dest ... & XmmReg ... { rm32 = XmmReg(0); build check_rm32_dest; }
@ifdef IA64
:MOVQ mmxreg, rm64 is vexMode=0 & opsize=2 & mandover=0 & byte=0x0F; byte=0x6E; rm64 & mmxreg ... { mmxreg = rm64; }
:MOVQ rm64, mmxreg is vexMode=0 & opsize=2 & mandover=0 & byte=0x0F; byte=0x7E; rm64 & mmxreg ... { rm64 = mmxreg; }
:MOVQ XmmReg, rm64 is vexMode=0 & opsize=2 & $(PRE_66) & byte=0x0F; byte=0x6E; rm64 & XmmReg ... { XmmReg = zext(rm64); }
:MOVQ rm64, XmmReg is vexMode=0 & opsize=2 & $(PRE_66) & byte=0x0F; byte=0x7E; rm64 & XmmReg ... { rm64 = XmmReg(0); }
@endif

:MOVDDUP XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x12; m64 & XmmReg ...
{
  XmmReg[0,64] = m64;
  XmmReg[64,64] = m64;
}

:MOVDDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg2[0,64];
}

:MOVSHDUP XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x16; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = m[32,32];
  XmmReg[32,32] = m[32,32];
  XmmReg[64,32] = m[96,32];
  XmmReg[96,32] = m[96,32];
}

:MOVSHDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x16; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[32,32];
  XmmReg1[32,32] = XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg2[96,32];
  XmmReg1[96,32] = XmmReg2[96,32];
}

:MOVSLDUP XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x12; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = m[0,32];
  XmmReg[32,32] = m[0,32];
  XmmReg[64,32] = m[64,32];
  XmmReg[96,32] = m[64,32];
}

:MOVSLDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg2[0,32];
  XmmReg1[64,32] = XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg2[64,32];
}

:MOVDQA XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6F; XmmReg ... & m128 { XmmReg = m128; }
:MOVDQA XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg2; }
:MOVDQA m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7F; XmmReg ... & m128 { m128 = XmmReg; }
:MOVDQA XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg2 = XmmReg1; }

:MOVDQU XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x6F; XmmReg ... & m128 { XmmReg = m128; }
:MOVDQU XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x6F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg2; }
:MOVDQU m128, XmmReg is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7F; XmmReg ... & m128 { m128 = XmmReg; }
:MOVDQU XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg2 = XmmReg1; }

# moves the low quadword; the x87 tag word / MMX state transition is not modeled
:MOVDQ2Q mmxreg2, XmmReg1 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD6; XmmReg1 & mmxreg2 { mmxreg2 = XmmReg1[0,64]; }

:MOVHLPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg2[64,64]; }

:MOVHPD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x16; XmmReg ... & m64 { XmmReg[64,64] = m64; }

:MOVHPD m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x17; XmmReg ... & m64 { m64 = XmmReg[64,64]; }

:MOVHPS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x16; XmmReg ... & m64 { XmmReg[64,64] = m64; }

:MOVHPS m64, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x17; XmmReg ... & m64 { m64 = XmmReg[64,64]; }

:MOVLHPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x16; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[64,64] = XmmReg2[0,64]; }

:MOVLPD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x12; XmmReg ... & m64 { XmmReg[0,64] = m64; }

:MOVLPD m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x13; XmmReg ... & m64 { m64 = XmmReg[0,64]; }

:MOVLPS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x12; XmmReg ... & m64 { XmmReg[0,64] = m64; }

:MOVLPS m64, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x13; XmmReg ... & m64 { m64 = XmmReg[0,64]; }

define pcodeop movmskpd;

:MOVMSKPD Reg32, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x50; XmmReg2 & Reg32 { Reg32 = movmskpd(Reg32, XmmReg2); }

define pcodeop movmskps;

:MOVMSKPS Reg32, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x50; XmmReg2 & Reg32 { Reg32 = movmskps(Reg32, XmmReg2); }

:MOVNTQ m64, mmxreg is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE7; mmxreg ... & m64 { m64 = mmxreg; }
|
|
|
|
:MOVNTDQ m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE7; XmmReg ... & m128 { m128 = XmmReg; }
|
|
|
|
:MOVNTPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2B; XmmReg ... & m128 { m128 = XmmReg; }
|
|
|
|
:MOVNTPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2B; XmmReg ... & m128 { m128 = XmmReg; }
|
|
|
|
:MOVQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6F; mmxreg ... & m64 { mmxreg = m64; }
|
|
:MOVQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg2; }
|
|
:MOVQ m64, mmxreg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x7F; mmxreg ... & m64 { m64 = mmxreg; }
|
|
:MOVQ mmxreg2, mmxreg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x7F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg2 = mmxreg1; }
|
|
|
|
:MOVQ XmmReg, m64 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7E; XmmReg ... & m64
|
|
{
|
|
XmmReg = zext(m64);
|
|
}
|
|
|
|
:MOVQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7E; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg1 = zext(XmmReg2[0,64]);
|
|
}
|
|
|
|
:MOVQ m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD6; m64 & XmmReg ...
|
|
{
|
|
m64 = XmmReg[0,64];
|
|
}
|
|
|
|
:MOVQ XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD6; xmmmod = 3 & XmmReg1 & XmmReg2
|
|
{
|
|
XmmReg2 = zext(XmmReg1[0,64]);
|
|
}

:MOVQ2DQ XmmReg, mmxreg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xD6; XmmReg & mmxreg2
{
  XmmReg = zext(mmxreg2);
  # may need to model x87 FPU state changes too ?????
}

:MOVSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x10; m64 & XmmReg ...
{
  XmmReg[0,64] = m64;
  XmmReg[64,64] = 0;
}

:MOVSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg2[0,64];
}

:MOVSD m64, XmmReg is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x11; m64 & XmmReg ...
{
  m64 = XmmReg[0,64];
}

:MOVSD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,64] = XmmReg1[0,64];
}

:MOVSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x10; m32 & XmmReg ...
{
  XmmReg[0,32] = m32;
  XmmReg[32,32] = 0;
  XmmReg[64,32] = 0;
  XmmReg[96,32] = 0;
}

:MOVSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[0,32];
}

:MOVSS m32, XmmReg is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x11; m32 & XmmReg ...
{
  m32 = XmmReg[0,32];
}

:MOVSS XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,32] = XmmReg1[0,32];
}

:MOVUPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x10; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = m[0,64];
  XmmReg[64,64] = m[64,64];
}

:MOVUPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg2[64,64];
}

:MOVUPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x11; m128 & XmmReg ...
{
  m128 = XmmReg;
}

:MOVUPD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,64] = XmmReg1[0,64];
  XmmReg2[64,64] = XmmReg1[64,64];
}

# Not sure why someone had done it this way ?????
#Xmm2m128: m128 is vexMode=0 & m128 { export m128; }
#Xmm2m128: XmmReg2 is vexMode=0 & xmmmod=3 & XmmReg2 { export XmmReg2; }
#
#define pcodeop movups;
##:MOVUPS XmmReg, m128 is vexMode=0 & byte=0x0F; byte=0x10; XmmReg ... & m128 { XmmReg = movups(XmmReg, m128); }
##:MOVUPS XmmReg1, XmmReg2 is vexMode=0 & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = movups(XmmReg1, XmmReg2); }
#
#:MOVUPS XmmReg,Xmm2m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x10; XmmReg ... & Xmm2m128 { XmmReg = movups(XmmReg, Xmm2m128); }

:MOVUPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x10; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = m[0,32];
  XmmReg[32,32] = m[32,32];
  XmmReg[64,32] = m[64,32];
  XmmReg[96,32] = m[96,32];
}

:MOVUPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg2[96,32];
}

:MOVUPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x11; m128 & XmmReg ...
{
  m128 = XmmReg;
}

:MOVUPS XmmReg2, XmmReg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg2[0,32] = XmmReg1[0,32];
  XmmReg2[32,32] = XmmReg1[32,32];
  XmmReg2[64,32] = XmmReg1[64,32];
  XmmReg2[96,32] = XmmReg1[96,32];
}

:MULPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x59; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f* m[0,64];
  XmmReg[64,64] = XmmReg[64,64] f* m[64,64];
}

:MULPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f* XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] f* XmmReg2[64,64];
}

:MULPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x59; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f* m[0,32];
  XmmReg[32,32] = XmmReg[32,32] f* m[32,32];
  XmmReg[64,32] = XmmReg[64,32] f* m[64,32];
  XmmReg[96,32] = XmmReg[96,32] f* m[96,32];
}

:MULPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f* XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] f* XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] f* XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] f* XmmReg2[96,32];
}

:MULSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x59; m64 & XmmReg ...
{
  XmmReg[0,64] = XmmReg[0,64] f* m64;
}

:MULSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f* XmmReg2[0,64];
}

:MULSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x59; m32 & XmmReg ...
{
  XmmReg[0,32] = XmmReg[0,32] f* m32;
}

:MULSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f* XmmReg2[0,32];
}

:ORPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x56; XmmReg ... & m128 { XmmReg = XmmReg | m128; }
:ORPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x56; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; }

:ORPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x56; XmmReg ... & m128 { XmmReg = XmmReg | m128; }
:ORPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x56; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; }

# what about these ?????

define pcodeop packsswb;
:PACKSSWB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x63; mmxreg ... & m64 { mmxreg = packsswb(mmxreg, m64); }
:PACKSSWB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x63; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = packsswb(mmxreg1, mmxreg2); }

define pcodeop packssdw;
:PACKSSDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6B; mmxreg ... & m64 { mmxreg = packssdw(mmxreg, m64); }
:PACKSSDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6B; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = packssdw(mmxreg1, mmxreg2); }

:PACKSSWB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x63; XmmReg ... & m128 { XmmReg = packsswb(XmmReg, m128); }
:PACKSSWB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x63; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = packsswb(XmmReg1, XmmReg2); }
:PACKSSDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6B; XmmReg ... & m128 { XmmReg = packssdw(XmmReg, m128); }
:PACKSSDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6B; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = packssdw(XmmReg1, XmmReg2); }

# Saturate a signed word to an unsigned byte:
#   sword < 0     : ubyte = 0
#   sword > 0xff  : ubyte = 0xff
#   otherwise     : ubyte = sword
macro sswub(sword, ubyte) {
  ubyte = (sword s> 0xff:2) * 0xff:1;
  ubyte = ubyte + (sword s> 0:2) * (sword s<= 0xff:2) * sword:1;
}
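
# Worked examples of the saturation above (illustrative values only):
#   sword = 0xFF34 (-204) : neither condition holds   -> ubyte = 0x00
#   sword = 0x0123 (291)  : sword s> 0xff             -> ubyte = 0xFF
#   sword = 0x007B (123)  : 0 s< sword s<= 0xff       -> ubyte = 0x7B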

:PACKUSWB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x67; mmxreg ... & m64
{
  local dest_copy:8 = mmxreg;
  local src_copy:8 = m64;
  local ubyte:1 = 0;
  sswub(dest_copy[0,16],ubyte);
  mmxreg[0,8] = ubyte;
  sswub(dest_copy[16,16],ubyte);
  mmxreg[8,8] = ubyte;
  sswub(dest_copy[32,16],ubyte);
  mmxreg[16,8] = ubyte;
  sswub(dest_copy[48,16],ubyte);
  mmxreg[24,8] = ubyte;
  sswub(src_copy[0,16],ubyte);
  mmxreg[32,8] = ubyte;
  sswub(src_copy[16,16],ubyte);
  mmxreg[40,8] = ubyte;
  sswub(src_copy[32,16],ubyte);
  mmxreg[48,8] = ubyte;
  sswub(src_copy[48,16],ubyte);
  mmxreg[56,8] = ubyte;
}

:PACKUSWB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x67; mmxmod = 3 & mmxreg1 & mmxreg2
{
  local dest_copy:8 = mmxreg1;
  local src_copy:8 = mmxreg2;
  local ubyte:1 = 0;
  sswub(dest_copy[0,16],ubyte);
  mmxreg1[0,8] = ubyte;
  sswub(dest_copy[16,16],ubyte);
  mmxreg1[8,8] = ubyte;
  sswub(dest_copy[32,16],ubyte);
  mmxreg1[16,8] = ubyte;
  sswub(dest_copy[48,16],ubyte);
  mmxreg1[24,8] = ubyte;
  sswub(src_copy[0,16],ubyte);
  mmxreg1[32,8] = ubyte;
  sswub(src_copy[16,16],ubyte);
  mmxreg1[40,8] = ubyte;
  sswub(src_copy[32,16],ubyte);
  mmxreg1[48,8] = ubyte;
  sswub(src_copy[48,16],ubyte);
  mmxreg1[56,8] = ubyte;
}

:PACKUSWB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x67; XmmReg ... & m128
{
  local dest_copy:16 = XmmReg;
  local src_copy:16 = m128;
  local ubyte:1 = 0;
  sswub(dest_copy[0,16],ubyte);
  XmmReg[0,8] = ubyte;
  sswub(dest_copy[16,16],ubyte);
  XmmReg[8,8] = ubyte;
  sswub(dest_copy[32,16],ubyte);
  XmmReg[16,8] = ubyte;
  sswub(dest_copy[48,16],ubyte);
  XmmReg[24,8] = ubyte;
  sswub(dest_copy[64,16],ubyte);
  XmmReg[32,8] = ubyte;
  sswub(dest_copy[80,16],ubyte);
  XmmReg[40,8] = ubyte;
  sswub(dest_copy[96,16],ubyte);
  XmmReg[48,8] = ubyte;
  sswub(dest_copy[112,16],ubyte);
  XmmReg[56,8] = ubyte;

  sswub(src_copy[0,16],ubyte);
  XmmReg[64,8] = ubyte;
  sswub(src_copy[16,16],ubyte);
  XmmReg[72,8] = ubyte;
  sswub(src_copy[32,16],ubyte);
  XmmReg[80,8] = ubyte;
  sswub(src_copy[48,16],ubyte);
  XmmReg[88,8] = ubyte;
  sswub(src_copy[64,16],ubyte);
  XmmReg[96,8] = ubyte;
  sswub(src_copy[80,16],ubyte);
  XmmReg[104,8] = ubyte;
  sswub(src_copy[96,16],ubyte);
  XmmReg[112,8] = ubyte;
  sswub(src_copy[112,16],ubyte);
  XmmReg[120,8] = ubyte;
}

:PACKUSWB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x67; xmmmod = 3 & XmmReg1 & XmmReg2
{
  local dest_copy:16 = XmmReg1;
  local src_copy:16 = XmmReg2;
  local ubyte:1 = 0;
  sswub(dest_copy[0,16],ubyte);
  XmmReg1[0,8] = ubyte;
  sswub(dest_copy[16,16],ubyte);
  XmmReg1[8,8] = ubyte;
  sswub(dest_copy[32,16],ubyte);
  XmmReg1[16,8] = ubyte;
  sswub(dest_copy[48,16],ubyte);
  XmmReg1[24,8] = ubyte;
  sswub(dest_copy[64,16],ubyte);
  XmmReg1[32,8] = ubyte;
  sswub(dest_copy[80,16],ubyte);
  XmmReg1[40,8] = ubyte;
  sswub(dest_copy[96,16],ubyte);
  XmmReg1[48,8] = ubyte;
  sswub(dest_copy[112,16],ubyte);
  XmmReg1[56,8] = ubyte;

  sswub(src_copy[0,16],ubyte);
  XmmReg1[64,8] = ubyte;
  sswub(src_copy[16,16],ubyte);
  XmmReg1[72,8] = ubyte;
  sswub(src_copy[32,16],ubyte);
  XmmReg1[80,8] = ubyte;
  sswub(src_copy[48,16],ubyte);
  XmmReg1[88,8] = ubyte;
  sswub(src_copy[64,16],ubyte);
  XmmReg1[96,8] = ubyte;
  sswub(src_copy[80,16],ubyte);
  XmmReg1[104,8] = ubyte;
  sswub(src_copy[96,16],ubyte);
  XmmReg1[112,8] = ubyte;
  sswub(src_copy[112,16],ubyte);
  XmmReg1[120,8] = ubyte;
}

define pcodeop pabsb;
:PABSB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1c; mmxreg ... & m64 { mmxreg = pabsb(mmxreg, m64); }
:PABSB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1c; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pabsb(mmxreg1, mmxreg2); }
:PABSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1c; XmmReg ... & m128 { XmmReg = pabsb(XmmReg, m128); }
:PABSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1c; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pabsb(XmmReg1, XmmReg2); }

define pcodeop pabsw;
:PABSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1d; mmxreg ... & m64 { mmxreg = pabsw(mmxreg, m64); }
:PABSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1d; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pabsw(mmxreg1, mmxreg2); }
:PABSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1d; XmmReg ... & m128 { XmmReg = pabsw(XmmReg, m128); }
:PABSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1d; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pabsw(XmmReg1, XmmReg2); }

define pcodeop pabsd;
:PABSD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1e; mmxreg ... & m64 { mmxreg = pabsd(mmxreg, m64); }
:PABSD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1e; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pabsd(mmxreg1, mmxreg2); }
:PABSD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1e; XmmReg ... & m128 { XmmReg = pabsd(XmmReg, m128); }
:PABSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1e; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pabsd(XmmReg1, XmmReg2); }

:PADDB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFC; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,8] = mmxreg[0,8] + m[0,8];
  mmxreg[8,8] = mmxreg[8,8] + m[8,8];
  mmxreg[16,8] = mmxreg[16,8] + m[16,8];
  mmxreg[24,8] = mmxreg[24,8] + m[24,8];
  mmxreg[32,8] = mmxreg[32,8] + m[32,8];
  mmxreg[40,8] = mmxreg[40,8] + m[40,8];
  mmxreg[48,8] = mmxreg[48,8] + m[48,8];
  mmxreg[56,8] = mmxreg[56,8] + m[56,8];
}

:PADDB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFC; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = mmxreg1[0,8] + mmxreg2[0,8];
  mmxreg1[8,8] = mmxreg1[8,8] + mmxreg2[8,8];
  mmxreg1[16,8] = mmxreg1[16,8] + mmxreg2[16,8];
  mmxreg1[24,8] = mmxreg1[24,8] + mmxreg2[24,8];
  mmxreg1[32,8] = mmxreg1[32,8] + mmxreg2[32,8];
  mmxreg1[40,8] = mmxreg1[40,8] + mmxreg2[40,8];
  mmxreg1[48,8] = mmxreg1[48,8] + mmxreg2[48,8];
  mmxreg1[56,8] = mmxreg1[56,8] + mmxreg2[56,8];
}

:PADDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFD; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = mmxreg[0,16] + m[0,16];
  mmxreg[16,16] = mmxreg[16,16] + m[16,16];
  mmxreg[32,16] = mmxreg[32,16] + m[32,16];
  mmxreg[48,16] = mmxreg[48,16] + m[48,16];
}

:PADDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFD; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = mmxreg1[0,16] + mmxreg2[0,16];
  mmxreg1[16,16] = mmxreg1[16,16] + mmxreg2[16,16];
  mmxreg1[32,16] = mmxreg1[32,16] + mmxreg2[32,16];
  mmxreg1[48,16] = mmxreg1[48,16] + mmxreg2[48,16];
}

:PADDD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFE; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = mmxreg[0,32] + m[0,32];
  mmxreg[32,32] = mmxreg[32,32] + m[32,32];
}

:PADDD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFE; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,32] = mmxreg1[0,32] + mmxreg2[0,32];
  mmxreg1[32,32] = mmxreg1[32,32] + mmxreg2[32,32];
}

:PADDB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,8] = XmmReg[0,8] + m[0,8];
  XmmReg[8,8] = XmmReg[8,8] + m[8,8];
  XmmReg[16,8] = XmmReg[16,8] + m[16,8];
  XmmReg[24,8] = XmmReg[24,8] + m[24,8];
  XmmReg[32,8] = XmmReg[32,8] + m[32,8];
  XmmReg[40,8] = XmmReg[40,8] + m[40,8];
  XmmReg[48,8] = XmmReg[48,8] + m[48,8];
  XmmReg[56,8] = XmmReg[56,8] + m[56,8];
  XmmReg[64,8] = XmmReg[64,8] + m[64,8];
  XmmReg[72,8] = XmmReg[72,8] + m[72,8];
  XmmReg[80,8] = XmmReg[80,8] + m[80,8];
  XmmReg[88,8] = XmmReg[88,8] + m[88,8];
  XmmReg[96,8] = XmmReg[96,8] + m[96,8];
  XmmReg[104,8] = XmmReg[104,8] + m[104,8];
  XmmReg[112,8] = XmmReg[112,8] + m[112,8];
  XmmReg[120,8] = XmmReg[120,8] + m[120,8];
}

## example of bitfield solution
#:PADDB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; xmmmod = 3 & XmmReg1 & XmmReg2
#{
#  XmmReg1[ 0,8] = XmmReg1[ 0,8] + XmmReg2[ 0,8];
#  XmmReg1[ 8,8] = XmmReg1[ 8,8] + XmmReg2[ 8,8];
#  XmmReg1[ 16,8] = XmmReg1[ 16,8] + XmmReg2[ 16,8];
#  XmmReg1[ 24,8] = XmmReg1[ 24,8] + XmmReg2[ 24,8];
#  XmmReg1[ 32,8] = XmmReg1[ 32,8] + XmmReg2[ 32,8];
#  XmmReg1[ 40,8] = XmmReg1[ 40,8] + XmmReg2[ 40,8];
#  XmmReg1[ 48,8] = XmmReg1[ 48,8] + XmmReg2[ 48,8];
#  XmmReg1[ 56,8] = XmmReg1[ 56,8] + XmmReg2[ 56,8];
## XmmReg1[ 64,8] = XmmReg1[ 64,8] + XmmReg2[ 64,8];
## XmmReg1[ 72,8] = XmmReg1[ 72,8] + XmmReg2[ 72,8];
## XmmReg1[ 80,8] = XmmReg1[ 80,8] + XmmReg2[ 80,8];
## XmmReg1[ 88,8] = XmmReg1[ 88,8] + XmmReg2[ 88,8];
## XmmReg1[ 96,8] = XmmReg1[ 96,8] + XmmReg2[ 96,8];
## XmmReg1[104,8] = XmmReg1[104,8] + XmmReg2[104,8];
## XmmReg1[112,8] = XmmReg1[112,8] + XmmReg2[112,8];
## XmmReg1[120,8] = XmmReg1[120,8] + XmmReg2[120,8];
#}

# full set of XMM byte registers
:PADDB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = XmmReg1[0,8] + XmmReg2[0,8];
  XmmReg1[8,8] = XmmReg1[8,8] + XmmReg2[8,8];
  XmmReg1[16,8] = XmmReg1[16,8] + XmmReg2[16,8];
  XmmReg1[24,8] = XmmReg1[24,8] + XmmReg2[24,8];
  XmmReg1[32,8] = XmmReg1[32,8] + XmmReg2[32,8];
  XmmReg1[40,8] = XmmReg1[40,8] + XmmReg2[40,8];
  XmmReg1[48,8] = XmmReg1[48,8] + XmmReg2[48,8];
  XmmReg1[56,8] = XmmReg1[56,8] + XmmReg2[56,8];
  XmmReg1[64,8] = XmmReg1[64,8] + XmmReg2[64,8];
  XmmReg1[72,8] = XmmReg1[72,8] + XmmReg2[72,8];
  XmmReg1[80,8] = XmmReg1[80,8] + XmmReg2[80,8];
  XmmReg1[88,8] = XmmReg1[88,8] + XmmReg2[88,8];
  XmmReg1[96,8] = XmmReg1[96,8] + XmmReg2[96,8];
  XmmReg1[104,8] = XmmReg1[104,8] + XmmReg2[104,8];
  XmmReg1[112,8] = XmmReg1[112,8] + XmmReg2[112,8];
  XmmReg1[120,8] = XmmReg1[120,8] + XmmReg2[120,8];
}

:PADDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFD; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = XmmReg[0,16] + m[0,16];
  XmmReg[16,16] = XmmReg[16,16] + m[16,16];
  XmmReg[32,16] = XmmReg[32,16] + m[32,16];
  XmmReg[48,16] = XmmReg[48,16] + m[48,16];
  XmmReg[64,16] = XmmReg[64,16] + m[64,16];
  XmmReg[80,16] = XmmReg[80,16] + m[80,16];
  XmmReg[96,16] = XmmReg[96,16] + m[96,16];
  XmmReg[112,16] = XmmReg[112,16] + m[112,16];
}

:PADDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFD; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = XmmReg1[0,16] + XmmReg2[0,16];
  XmmReg1[16,16] = XmmReg1[16,16] + XmmReg2[16,16];
  XmmReg1[32,16] = XmmReg1[32,16] + XmmReg2[32,16];
  XmmReg1[48,16] = XmmReg1[48,16] + XmmReg2[48,16];
  XmmReg1[64,16] = XmmReg1[64,16] + XmmReg2[64,16];
  XmmReg1[80,16] = XmmReg1[80,16] + XmmReg2[80,16];
  XmmReg1[96,16] = XmmReg1[96,16] + XmmReg2[96,16];
  XmmReg1[112,16] = XmmReg1[112,16] + XmmReg2[112,16];
}

:PADDD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFE; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] + m[0,32];
  XmmReg[32,32] = XmmReg[32,32] + m[32,32];
  XmmReg[64,32] = XmmReg[64,32] + m[64,32];
  XmmReg[96,32] = XmmReg[96,32] + m[96,32];
}

:PADDD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFE; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] + XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] + XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] + XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] + XmmReg2[96,32];
}

:PADDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD4; mmxreg ... & m64
{
  mmxreg = mmxreg + m64;
}

:PADDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD4; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1 = mmxreg1 + mmxreg2;
}

:PADDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD4; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] + m[0,64];
  XmmReg[64,64] = XmmReg[64,64] + m[64,64];
}

:PADDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD4; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] + XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] + XmmReg2[64,64];
}

define pcodeop paddsb;
:PADDSB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEC; mmxreg ... & m64 { mmxreg = paddsb(mmxreg, m64); }
:PADDSB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEC; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = paddsb(mmxreg1, mmxreg2); }

define pcodeop paddsw;
:PADDSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xED; mmxreg ... & m64 { mmxreg = paddsw(mmxreg, m64); }
:PADDSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xED; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = paddsw(mmxreg1, mmxreg2); }

:PADDSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEC; XmmReg ... & m128 { XmmReg = paddsb(XmmReg, m128); }
:PADDSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEC; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = paddsb(XmmReg1, XmmReg2); }
:PADDSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xED; XmmReg ... & m128 { XmmReg = paddsw(XmmReg, m128); }
:PADDSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xED; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = paddsw(XmmReg1, XmmReg2); }

define pcodeop paddusb;
:PADDUSB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDC; mmxreg ... & m64 { mmxreg = paddusb(mmxreg, m64); }
:PADDUSB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDC; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = paddusb(mmxreg1, mmxreg2); }

define pcodeop paddusw;
:PADDUSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDD; mmxreg ... & m64 { mmxreg = paddusw(mmxreg, m64); }
:PADDUSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDD; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = paddusw(mmxreg1, mmxreg2); }

:PADDUSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDC; XmmReg ... & m128 { XmmReg = paddusb(XmmReg, m128); }
:PADDUSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDC; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = paddusb(XmmReg1, XmmReg2); }
:PADDUSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDD; XmmReg ... & m128 { XmmReg = paddusw(XmmReg, m128); }
:PADDUSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDD; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = paddusw(XmmReg1, XmmReg2); }

:PALIGNR mmxreg, m64, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x3A; byte=0x0F; m64 & mmxreg ...; imm8
{
  temp:16 = ( ( zext(mmxreg) << 64 ) | zext( m64 ) ) >> ( imm8 * 8 );
  mmxreg = temp:8;
}

:PALIGNR mmxreg1, mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x3A; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2; imm8
{
  temp:16 = ( ( zext(mmxreg1) << 64 ) | zext( mmxreg2 ) ) >> ( imm8 * 8 );
  mmxreg1 = temp:8;
}

:PALIGNR XmmReg1, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0F; m128 & XmmReg1 ...; imm8
{
  temp:32 = ( ( zext(XmmReg1) << 128 ) | zext( m128 ) ) >> ( imm8 * 8 );
  XmmReg1 = temp:16;
}

:PALIGNR XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0F; xmmmod=3 & XmmReg1 & XmmReg2; imm8
{
  temp:32 = ( ( zext(XmmReg1) << 128 ) | zext( XmmReg2 ) ) >> ( imm8 * 8 );
  XmmReg1 = temp:16;
}
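
# The PALIGNR constructors above concatenate dest:src into a double-width
# temporary (dest in the high half) and shift right by imm8 bytes, keeping the
# low half.  Illustrative example with 8-byte operands and imm8 = 3: the result
# is bytes 3..10 of the 16-byte concatenation, i.e. (temp >> 24) truncated to 8.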

:PAND mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDB; mmxreg ... & m64 { mmxreg = mmxreg & m64; }
:PAND mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 & mmxreg2; }
:PAND XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDB; XmmReg ... & m128 { XmmReg = XmmReg & m128; }
:PAND XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDB; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 & XmmReg2; }

:PANDN mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDF; mmxreg ... & m64 { mmxreg = ~mmxreg & m64; }
:PANDN mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDF; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = ~mmxreg1 & mmxreg2; }
:PANDN XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDF; XmmReg ... & m128 { XmmReg = ~XmmReg & m128; }
:PANDN XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDF; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = ~XmmReg1 & XmmReg2; }
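
# Note the operand order in PANDN: it is the destination that is complemented,
# dest = (~dest) & src, matching the Intel definition DEST := NOT(DEST) AND SRC.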

define pcodeop pavgb;
:PAVGB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE0; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,8] = pavgb(mmxreg[0,8], m[0,8]);
  mmxreg[8,8] = pavgb(mmxreg[8,8], m[8,8]);
  mmxreg[16,8] = pavgb(mmxreg[16,8], m[16,8]);
  mmxreg[24,8] = pavgb(mmxreg[24,8], m[24,8]);
  mmxreg[32,8] = pavgb(mmxreg[32,8], m[32,8]);
  mmxreg[40,8] = pavgb(mmxreg[40,8], m[40,8]);
  mmxreg[48,8] = pavgb(mmxreg[48,8], m[48,8]);
  mmxreg[56,8] = pavgb(mmxreg[56,8], m[56,8]);
}

:PAVGB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE0; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = pavgb(mmxreg1[0,8], mmxreg2[0,8]);
  mmxreg1[8,8] = pavgb(mmxreg1[8,8], mmxreg2[8,8]);
  mmxreg1[16,8] = pavgb(mmxreg1[16,8], mmxreg2[16,8]);
  mmxreg1[24,8] = pavgb(mmxreg1[24,8], mmxreg2[24,8]);
  mmxreg1[32,8] = pavgb(mmxreg1[32,8], mmxreg2[32,8]);
  mmxreg1[40,8] = pavgb(mmxreg1[40,8], mmxreg2[40,8]);
  mmxreg1[48,8] = pavgb(mmxreg1[48,8], mmxreg2[48,8]);
  mmxreg1[56,8] = pavgb(mmxreg1[56,8], mmxreg2[56,8]);
}

define pcodeop pavgw;
:PAVGW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE3; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = pavgw(mmxreg[0,16], m[0,16]);
  mmxreg[16,16] = pavgw(mmxreg[16,16], m[16,16]);
  mmxreg[32,16] = pavgw(mmxreg[32,16], m[32,16]);
  mmxreg[48,16] = pavgw(mmxreg[48,16], m[48,16]);
}

:PAVGW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE3; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = pavgw(mmxreg1[0,16], mmxreg2[0,16]);
  mmxreg1[16,16] = pavgw(mmxreg1[16,16], mmxreg2[16,16]);
  mmxreg1[32,16] = pavgw(mmxreg1[32,16], mmxreg2[32,16]);
  mmxreg1[48,16] = pavgw(mmxreg1[48,16], mmxreg2[48,16]);
}

:PAVGB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE0; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,8] = pavgb(XmmReg[0,8], m[0,8]);
  XmmReg[8,8] = pavgb(XmmReg[8,8], m[8,8]);
  XmmReg[16,8] = pavgb(XmmReg[16,8], m[16,8]);
  XmmReg[24,8] = pavgb(XmmReg[24,8], m[24,8]);
  XmmReg[32,8] = pavgb(XmmReg[32,8], m[32,8]);
  XmmReg[40,8] = pavgb(XmmReg[40,8], m[40,8]);
  XmmReg[48,8] = pavgb(XmmReg[48,8], m[48,8]);
  XmmReg[56,8] = pavgb(XmmReg[56,8], m[56,8]);
  XmmReg[64,8] = pavgb(XmmReg[64,8], m[64,8]);
  XmmReg[72,8] = pavgb(XmmReg[72,8], m[72,8]);
  XmmReg[80,8] = pavgb(XmmReg[80,8], m[80,8]);
  XmmReg[88,8] = pavgb(XmmReg[88,8], m[88,8]);
  XmmReg[96,8] = pavgb(XmmReg[96,8], m[96,8]);
  XmmReg[104,8] = pavgb(XmmReg[104,8], m[104,8]);
  XmmReg[112,8] = pavgb(XmmReg[112,8], m[112,8]);
  XmmReg[120,8] = pavgb(XmmReg[120,8], m[120,8]);
}

# full set of XMM byte registers
:PAVGB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE0; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = pavgb(XmmReg1[0,8], XmmReg2[0,8]);
  XmmReg1[8,8] = pavgb(XmmReg1[8,8], XmmReg2[8,8]);
  XmmReg1[16,8] = pavgb(XmmReg1[16,8], XmmReg2[16,8]);
  XmmReg1[24,8] = pavgb(XmmReg1[24,8], XmmReg2[24,8]);
  XmmReg1[32,8] = pavgb(XmmReg1[32,8], XmmReg2[32,8]);
  XmmReg1[40,8] = pavgb(XmmReg1[40,8], XmmReg2[40,8]);
  XmmReg1[48,8] = pavgb(XmmReg1[48,8], XmmReg2[48,8]);
  XmmReg1[56,8] = pavgb(XmmReg1[56,8], XmmReg2[56,8]);
  XmmReg1[64,8] = pavgb(XmmReg1[64,8], XmmReg2[64,8]);
  XmmReg1[72,8] = pavgb(XmmReg1[72,8], XmmReg2[72,8]);
  XmmReg1[80,8] = pavgb(XmmReg1[80,8], XmmReg2[80,8]);
  XmmReg1[88,8] = pavgb(XmmReg1[88,8], XmmReg2[88,8]);
  XmmReg1[96,8] = pavgb(XmmReg1[96,8], XmmReg2[96,8]);
  XmmReg1[104,8] = pavgb(XmmReg1[104,8], XmmReg2[104,8]);
  XmmReg1[112,8] = pavgb(XmmReg1[112,8], XmmReg2[112,8]);
  XmmReg1[120,8] = pavgb(XmmReg1[120,8], XmmReg2[120,8]);
}

:PAVGW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE3; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = pavgw(XmmReg[0,16], m[0,16]);
  XmmReg[16,16] = pavgw(XmmReg[16,16], m[16,16]);
  XmmReg[32,16] = pavgw(XmmReg[32,16], m[32,16]);
  XmmReg[48,16] = pavgw(XmmReg[48,16], m[48,16]);
  XmmReg[64,16] = pavgw(XmmReg[64,16], m[64,16]);
  XmmReg[80,16] = pavgw(XmmReg[80,16], m[80,16]);
  XmmReg[96,16] = pavgw(XmmReg[96,16], m[96,16]);
  XmmReg[112,16] = pavgw(XmmReg[112,16], m[112,16]);
}

:PAVGW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE3; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = pavgw(XmmReg1[0,16], XmmReg2[0,16]);
  XmmReg1[16,16] = pavgw(XmmReg1[16,16], XmmReg2[16,16]);
  XmmReg1[32,16] = pavgw(XmmReg1[32,16], XmmReg2[32,16]);
  XmmReg1[48,16] = pavgw(XmmReg1[48,16], XmmReg2[48,16]);
  XmmReg1[64,16] = pavgw(XmmReg1[64,16], XmmReg2[64,16]);
  XmmReg1[80,16] = pavgw(XmmReg1[80,16], XmmReg2[80,16]);
  XmmReg1[96,16] = pavgw(XmmReg1[96,16], XmmReg2[96,16]);
  XmmReg1[112,16] = pavgw(XmmReg1[112,16], XmmReg2[112,16]);
}
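
# The PCMPxx implementations below use a branchless mask idiom: a p-code
# comparison yields 0 or 1, so multiplying by 0xFF (or by 0xFFFF/0xFFFFFFFF
# after zext for the wider lanes) turns the boolean into the all-ones or
# all-zeros lane mask the hardware produces, e.g. (a == b) * 0xFF.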

:PCMPEQB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x74; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,8] = (mmxreg[0,8] == m[0,8]) * 0xFF;
  mmxreg[8,8] = (mmxreg[8,8] == m[8,8]) * 0xFF;
  mmxreg[16,8] = (mmxreg[16,8] == m[16,8]) * 0xFF;
  mmxreg[24,8] = (mmxreg[24,8] == m[24,8]) * 0xFF;
  mmxreg[32,8] = (mmxreg[32,8] == m[32,8]) * 0xFF;
  mmxreg[40,8] = (mmxreg[40,8] == m[40,8]) * 0xFF;
  mmxreg[48,8] = (mmxreg[48,8] == m[48,8]) * 0xFF;
  mmxreg[56,8] = (mmxreg[56,8] == m[56,8]) * 0xFF;
}

:PCMPEQB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x74; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = (mmxreg1[0,8] == mmxreg2[0,8]) * 0xFF;
  mmxreg1[8,8] = (mmxreg1[8,8] == mmxreg2[8,8]) * 0xFF;
  mmxreg1[16,8] = (mmxreg1[16,8] == mmxreg2[16,8]) * 0xFF;
  mmxreg1[24,8] = (mmxreg1[24,8] == mmxreg2[24,8]) * 0xFF;
  mmxreg1[32,8] = (mmxreg1[32,8] == mmxreg2[32,8]) * 0xFF;
  mmxreg1[40,8] = (mmxreg1[40,8] == mmxreg2[40,8]) * 0xFF;
  mmxreg1[48,8] = (mmxreg1[48,8] == mmxreg2[48,8]) * 0xFF;
  mmxreg1[56,8] = (mmxreg1[56,8] == mmxreg2[56,8]) * 0xFF;
}

:PCMPEQW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x75; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = zext(mmxreg[0,16] == m[0,16]) * 0xFFFF;
  mmxreg[16,16] = zext(mmxreg[16,16] == m[16,16]) * 0xFFFF;
  mmxreg[32,16] = zext(mmxreg[32,16] == m[32,16]) * 0xFFFF;
  mmxreg[48,16] = zext(mmxreg[48,16] == m[48,16]) * 0xFFFF;
}

:PCMPEQW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x75; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = zext(mmxreg1[0,16] == mmxreg2[0,16]) * 0xFFFF;
  mmxreg1[16,16] = zext(mmxreg1[16,16] == mmxreg2[16,16]) * 0xFFFF;
  mmxreg1[32,16] = zext(mmxreg1[32,16] == mmxreg2[32,16]) * 0xFFFF;
  mmxreg1[48,16] = zext(mmxreg1[48,16] == mmxreg2[48,16]) * 0xFFFF;
}

:PCMPEQD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x76; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = zext(mmxreg[0,32] == m[0,32]) * 0xFFFFFFFF;
  mmxreg[32,32] = zext(mmxreg[32,32] == m[32,32]) * 0xFFFFFFFF;
}

:PCMPEQD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x76; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,32] = zext(mmxreg1[0,32] == mmxreg2[0,32]) * 0xFFFFFFFF;
  mmxreg1[32,32] = zext(mmxreg1[32,32] == mmxreg2[32,32]) * 0xFFFFFFFF;
}

define pcodeop pcmpeqb;
:PCMPEQB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x74; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,8] = (XmmReg[0,8] == m[0,8]) * 0xFF;
  XmmReg[8,8] = (XmmReg[8,8] == m[8,8]) * 0xFF;
  XmmReg[16,8] = (XmmReg[16,8] == m[16,8]) * 0xFF;
  XmmReg[24,8] = (XmmReg[24,8] == m[24,8]) * 0xFF;
  XmmReg[32,8] = (XmmReg[32,8] == m[32,8]) * 0xFF;
  XmmReg[40,8] = (XmmReg[40,8] == m[40,8]) * 0xFF;
  XmmReg[48,8] = (XmmReg[48,8] == m[48,8]) * 0xFF;
  XmmReg[56,8] = (XmmReg[56,8] == m[56,8]) * 0xFF;
  XmmReg[64,8] = (XmmReg[64,8] == m[64,8]) * 0xFF;
  XmmReg[72,8] = (XmmReg[72,8] == m[72,8]) * 0xFF;
  XmmReg[80,8] = (XmmReg[80,8] == m[80,8]) * 0xFF;
  XmmReg[88,8] = (XmmReg[88,8] == m[88,8]) * 0xFF;
  XmmReg[96,8] = (XmmReg[96,8] == m[96,8]) * 0xFF;
  XmmReg[104,8] = (XmmReg[104,8] == m[104,8]) * 0xFF;
  XmmReg[112,8] = (XmmReg[112,8] == m[112,8]) * 0xFF;
  XmmReg[120,8] = (XmmReg[120,8] == m[120,8]) * 0xFF;
}

# full set of XMM byte registers
:PCMPEQB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x74; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = (XmmReg1[0,8] == XmmReg2[0,8]) * 0xFF;
  XmmReg1[8,8] = (XmmReg1[8,8] == XmmReg2[8,8]) * 0xFF;
  XmmReg1[16,8] = (XmmReg1[16,8] == XmmReg2[16,8]) * 0xFF;
  XmmReg1[24,8] = (XmmReg1[24,8] == XmmReg2[24,8]) * 0xFF;
  XmmReg1[32,8] = (XmmReg1[32,8] == XmmReg2[32,8]) * 0xFF;
  XmmReg1[40,8] = (XmmReg1[40,8] == XmmReg2[40,8]) * 0xFF;
  XmmReg1[48,8] = (XmmReg1[48,8] == XmmReg2[48,8]) * 0xFF;
  XmmReg1[56,8] = (XmmReg1[56,8] == XmmReg2[56,8]) * 0xFF;
  XmmReg1[64,8] = (XmmReg1[64,8] == XmmReg2[64,8]) * 0xFF;
  XmmReg1[72,8] = (XmmReg1[72,8] == XmmReg2[72,8]) * 0xFF;
  XmmReg1[80,8] = (XmmReg1[80,8] == XmmReg2[80,8]) * 0xFF;
  XmmReg1[88,8] = (XmmReg1[88,8] == XmmReg2[88,8]) * 0xFF;
  XmmReg1[96,8] = (XmmReg1[96,8] == XmmReg2[96,8]) * 0xFF;
  XmmReg1[104,8] = (XmmReg1[104,8] == XmmReg2[104,8]) * 0xFF;
  XmmReg1[112,8] = (XmmReg1[112,8] == XmmReg2[112,8]) * 0xFF;
  XmmReg1[120,8] = (XmmReg1[120,8] == XmmReg2[120,8]) * 0xFF;
}

:PCMPEQW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x75; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = zext(XmmReg[0,16] == m[0,16]) * 0xFFFF;
  XmmReg[16,16] = zext(XmmReg[16,16] == m[16,16]) * 0xFFFF;
  XmmReg[32,16] = zext(XmmReg[32,16] == m[32,16]) * 0xFFFF;
  XmmReg[48,16] = zext(XmmReg[48,16] == m[48,16]) * 0xFFFF;
  XmmReg[64,16] = zext(XmmReg[64,16] == m[64,16]) * 0xFFFF;
  XmmReg[80,16] = zext(XmmReg[80,16] == m[80,16]) * 0xFFFF;
  XmmReg[96,16] = zext(XmmReg[96,16] == m[96,16]) * 0xFFFF;
  XmmReg[112,16] = zext(XmmReg[112,16] == m[112,16]) * 0xFFFF;
}

:PCMPEQW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x75; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = zext(XmmReg1[0,16] == XmmReg2[0,16]) * 0xFFFF;
  XmmReg1[16,16] = zext(XmmReg1[16,16] == XmmReg2[16,16]) * 0xFFFF;
  XmmReg1[32,16] = zext(XmmReg1[32,16] == XmmReg2[32,16]) * 0xFFFF;
  XmmReg1[48,16] = zext(XmmReg1[48,16] == XmmReg2[48,16]) * 0xFFFF;
  XmmReg1[64,16] = zext(XmmReg1[64,16] == XmmReg2[64,16]) * 0xFFFF;
  XmmReg1[80,16] = zext(XmmReg1[80,16] == XmmReg2[80,16]) * 0xFFFF;
  XmmReg1[96,16] = zext(XmmReg1[96,16] == XmmReg2[96,16]) * 0xFFFF;
  XmmReg1[112,16] = zext(XmmReg1[112,16] == XmmReg2[112,16]) * 0xFFFF;
}

:PCMPEQD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x76; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = zext(XmmReg[0,32] == m[0,32]) * 0xFFFFFFFF;
  XmmReg[32,32] = zext(XmmReg[32,32] == m[32,32]) * 0xFFFFFFFF;
  XmmReg[64,32] = zext(XmmReg[64,32] == m[64,32]) * 0xFFFFFFFF;
  XmmReg[96,32] = zext(XmmReg[96,32] == m[96,32]) * 0xFFFFFFFF;
}

:PCMPEQD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x76; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = zext(XmmReg1[0,32] == XmmReg2[0,32]) * 0xFFFFFFFF;
  XmmReg1[32,32] = zext(XmmReg1[32,32] == XmmReg2[32,32]) * 0xFFFFFFFF;
  XmmReg1[64,32] = zext(XmmReg1[64,32] == XmmReg2[64,32]) * 0xFFFFFFFF;
  XmmReg1[96,32] = zext(XmmReg1[96,32] == XmmReg2[96,32]) * 0xFFFFFFFF;
}

:PCMPGTB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x64; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,8] = (mmxreg[0,8] s> m[0,8]) * 0xFF;
  mmxreg[8,8] = (mmxreg[8,8] s> m[8,8]) * 0xFF;
  mmxreg[16,8] = (mmxreg[16,8] s> m[16,8]) * 0xFF;
  mmxreg[24,8] = (mmxreg[24,8] s> m[24,8]) * 0xFF;
  mmxreg[32,8] = (mmxreg[32,8] s> m[32,8]) * 0xFF;
  mmxreg[40,8] = (mmxreg[40,8] s> m[40,8]) * 0xFF;
  mmxreg[48,8] = (mmxreg[48,8] s> m[48,8]) * 0xFF;
  mmxreg[56,8] = (mmxreg[56,8] s> m[56,8]) * 0xFF;
}

:PCMPGTB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x64; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = (mmxreg1[0,8] s> mmxreg2[0,8]) * 0xFF;
  mmxreg1[8,8] = (mmxreg1[8,8] s> mmxreg2[8,8]) * 0xFF;
  mmxreg1[16,8] = (mmxreg1[16,8] s> mmxreg2[16,8]) * 0xFF;
  mmxreg1[24,8] = (mmxreg1[24,8] s> mmxreg2[24,8]) * 0xFF;
  mmxreg1[32,8] = (mmxreg1[32,8] s> mmxreg2[32,8]) * 0xFF;
  mmxreg1[40,8] = (mmxreg1[40,8] s> mmxreg2[40,8]) * 0xFF;
  mmxreg1[48,8] = (mmxreg1[48,8] s> mmxreg2[48,8]) * 0xFF;
  mmxreg1[56,8] = (mmxreg1[56,8] s> mmxreg2[56,8]) * 0xFF;
}

:PCMPGTW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x65; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = zext(mmxreg[0,16] s> m[0,16]) * 0xFFFF;
  mmxreg[16,16] = zext(mmxreg[16,16] s> m[16,16]) * 0xFFFF;
  mmxreg[32,16] = zext(mmxreg[32,16] s> m[32,16]) * 0xFFFF;
  mmxreg[48,16] = zext(mmxreg[48,16] s> m[48,16]) * 0xFFFF;
}

:PCMPGTW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x65; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = zext(mmxreg1[0,16] s> mmxreg2[0,16]) * 0xFFFF;
  mmxreg1[16,16] = zext(mmxreg1[16,16] s> mmxreg2[16,16]) * 0xFFFF;
  mmxreg1[32,16] = zext(mmxreg1[32,16] s> mmxreg2[32,16]) * 0xFFFF;
  mmxreg1[48,16] = zext(mmxreg1[48,16] s> mmxreg2[48,16]) * 0xFFFF;
}

:PCMPGTD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x66; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,32] = zext(mmxreg[0,32] s> m[0,32]) * 0xFFFFFFFF;
  mmxreg[32,32] = zext(mmxreg[32,32] s> m[32,32]) * 0xFFFFFFFF;
}

:PCMPGTD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x66; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,32] = zext(mmxreg1[0,32] s> mmxreg2[0,32]) * 0xFFFFFFFF;
  mmxreg1[32,32] = zext(mmxreg1[32,32] s> mmxreg2[32,32]) * 0xFFFFFFFF;
}

:PCMPGTB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x64; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,8] = (XmmReg[0,8] s> m[0,8]) * 0xFF;
  XmmReg[8,8] = (XmmReg[8,8] s> m[8,8]) * 0xFF;
  XmmReg[16,8] = (XmmReg[16,8] s> m[16,8]) * 0xFF;
  XmmReg[24,8] = (XmmReg[24,8] s> m[24,8]) * 0xFF;
  XmmReg[32,8] = (XmmReg[32,8] s> m[32,8]) * 0xFF;
  XmmReg[40,8] = (XmmReg[40,8] s> m[40,8]) * 0xFF;
  XmmReg[48,8] = (XmmReg[48,8] s> m[48,8]) * 0xFF;
  XmmReg[56,8] = (XmmReg[56,8] s> m[56,8]) * 0xFF;
  XmmReg[64,8] = (XmmReg[64,8] s> m[64,8]) * 0xFF;
  XmmReg[72,8] = (XmmReg[72,8] s> m[72,8]) * 0xFF;
  XmmReg[80,8] = (XmmReg[80,8] s> m[80,8]) * 0xFF;
  XmmReg[88,8] = (XmmReg[88,8] s> m[88,8]) * 0xFF;
  XmmReg[96,8] = (XmmReg[96,8] s> m[96,8]) * 0xFF;
  XmmReg[104,8] = (XmmReg[104,8] s> m[104,8]) * 0xFF;
  XmmReg[112,8] = (XmmReg[112,8] s> m[112,8]) * 0xFF;
  XmmReg[120,8] = (XmmReg[120,8] s> m[120,8]) * 0xFF;
}

# full set of XMM byte registers
:PCMPGTB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x64; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = (XmmReg1[0,8] s> XmmReg2[0,8]) * 0xFF;
  XmmReg1[8,8] = (XmmReg1[8,8] s> XmmReg2[8,8]) * 0xFF;
  XmmReg1[16,8] = (XmmReg1[16,8] s> XmmReg2[16,8]) * 0xFF;
  XmmReg1[24,8] = (XmmReg1[24,8] s> XmmReg2[24,8]) * 0xFF;
  XmmReg1[32,8] = (XmmReg1[32,8] s> XmmReg2[32,8]) * 0xFF;
  XmmReg1[40,8] = (XmmReg1[40,8] s> XmmReg2[40,8]) * 0xFF;
  XmmReg1[48,8] = (XmmReg1[48,8] s> XmmReg2[48,8]) * 0xFF;
  XmmReg1[56,8] = (XmmReg1[56,8] s> XmmReg2[56,8]) * 0xFF;
  XmmReg1[64,8] = (XmmReg1[64,8] s> XmmReg2[64,8]) * 0xFF;
  XmmReg1[72,8] = (XmmReg1[72,8] s> XmmReg2[72,8]) * 0xFF;
  XmmReg1[80,8] = (XmmReg1[80,8] s> XmmReg2[80,8]) * 0xFF;
  XmmReg1[88,8] = (XmmReg1[88,8] s> XmmReg2[88,8]) * 0xFF;
  XmmReg1[96,8] = (XmmReg1[96,8] s> XmmReg2[96,8]) * 0xFF;
  XmmReg1[104,8] = (XmmReg1[104,8] s> XmmReg2[104,8]) * 0xFF;
  XmmReg1[112,8] = (XmmReg1[112,8] s> XmmReg2[112,8]) * 0xFF;
  XmmReg1[120,8] = (XmmReg1[120,8] s> XmmReg2[120,8]) * 0xFF;
}

:PCMPGTW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x65; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = zext(XmmReg[0,16] s> m[0,16]) * 0xFFFF;
  XmmReg[16,16] = zext(XmmReg[16,16] s> m[16,16]) * 0xFFFF;
  XmmReg[32,16] = zext(XmmReg[32,16] s> m[32,16]) * 0xFFFF;
  XmmReg[48,16] = zext(XmmReg[48,16] s> m[48,16]) * 0xFFFF;
  XmmReg[64,16] = zext(XmmReg[64,16] s> m[64,16]) * 0xFFFF;
  XmmReg[80,16] = zext(XmmReg[80,16] s> m[80,16]) * 0xFFFF;
  XmmReg[96,16] = zext(XmmReg[96,16] s> m[96,16]) * 0xFFFF;
  XmmReg[112,16] = zext(XmmReg[112,16] s> m[112,16]) * 0xFFFF;
}

:PCMPGTW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x65; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = zext(XmmReg1[0,16] s> XmmReg2[0,16]) * 0xFFFF;
  XmmReg1[16,16] = zext(XmmReg1[16,16] s> XmmReg2[16,16]) * 0xFFFF;
  XmmReg1[32,16] = zext(XmmReg1[32,16] s> XmmReg2[32,16]) * 0xFFFF;
  XmmReg1[48,16] = zext(XmmReg1[48,16] s> XmmReg2[48,16]) * 0xFFFF;
  XmmReg1[64,16] = zext(XmmReg1[64,16] s> XmmReg2[64,16]) * 0xFFFF;
  XmmReg1[80,16] = zext(XmmReg1[80,16] s> XmmReg2[80,16]) * 0xFFFF;
  XmmReg1[96,16] = zext(XmmReg1[96,16] s> XmmReg2[96,16]) * 0xFFFF;
  XmmReg1[112,16] = zext(XmmReg1[112,16] s> XmmReg2[112,16]) * 0xFFFF;
}

:PCMPGTD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x66; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = zext(XmmReg[0,32] s> m[0,32]) * 0xFFFFFFFF;
  XmmReg[32,32] = zext(XmmReg[32,32] s> m[32,32]) * 0xFFFFFFFF;
  XmmReg[64,32] = zext(XmmReg[64,32] s> m[64,32]) * 0xFFFFFFFF;
  XmmReg[96,32] = zext(XmmReg[96,32] s> m[96,32]) * 0xFFFFFFFF;
}

:PCMPGTD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x66; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = zext(XmmReg1[0,32] s> XmmReg2[0,32]) * 0xFFFFFFFF;
  XmmReg1[32,32] = zext(XmmReg1[32,32] s> XmmReg2[32,32]) * 0xFFFFFFFF;
  XmmReg1[64,32] = zext(XmmReg1[64,32] s> XmmReg2[64,32]) * 0xFFFFFFFF;
  XmmReg1[96,32] = zext(XmmReg1[96,32] s> XmmReg2[96,32]) * 0xFFFFFFFF;
}

:PEXTRW Reg32, mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC5; Reg32 & mmxreg2; imm8
{
  temp:8 = mmxreg2 >> ( (imm8 & 0x03) * 16 );
  Reg32 = zext(temp:2);
}

:PEXTRW Reg32, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC5; Reg32 & XmmReg2; imm8
{
  temp:16 = XmmReg2 >> ( (imm8 & 0x07) * 16 );
  Reg32 = zext(temp:2);
}

:PEXTRW Reg32_m16, XmmReg1, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x15; XmmReg1 ... & Reg32_m16; imm8
{
  temp:16 = XmmReg1 >> ( (imm8 & 0x07) * 16 );
  Reg32_m16 = zext(temp:2);
}
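
# PEXTRW selects a 16-bit lane by shifting the source right by 16 * index and
# truncating.  Illustrative example: imm8 = 5 on an XMM source shifts right by
# 80 bits, so temp:2 is the word at bit offset [80,16].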

define pcodeop phaddd;
:PHADDD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x02; mmxreg ... & m64 { mmxreg = phaddd(mmxreg, m64); }
:PHADDD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x02; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = phaddd(mmxreg1, mmxreg2); }
:PHADDD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x02; XmmReg ... & m128 { XmmReg = phaddd(XmmReg, m128); }
:PHADDD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x02; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = phaddd(XmmReg1, XmmReg2); }

define pcodeop phaddw;
:PHADDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x01; mmxreg ... & m64 { mmxreg = phaddw(mmxreg, m64); }
:PHADDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x01; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = phaddw(mmxreg1, mmxreg2); }
:PHADDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x01; XmmReg ... & m128 { XmmReg = phaddw(XmmReg, m128); }
:PHADDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x01; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = phaddw(XmmReg1, XmmReg2); }

define pcodeop phaddsw;
:PHADDSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x03; mmxreg ... & m64 { mmxreg = phaddsw(mmxreg, m64); }
:PHADDSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x03; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = phaddsw(mmxreg1, mmxreg2); }
:PHADDSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x03; XmmReg ... & m128 { XmmReg = phaddsw(XmmReg, m128); }
:PHADDSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x03; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = phaddsw(XmmReg1, XmmReg2); }

define pcodeop phsubd;
:PHSUBD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x06; mmxreg ... & m64 { mmxreg = phsubd(mmxreg, m64); }
:PHSUBD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x06; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = phsubd(mmxreg1, mmxreg2); }
:PHSUBD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x06; XmmReg ... & m128 { XmmReg = phsubd(XmmReg, m128); }
:PHSUBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x06; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = phsubd(XmmReg1, XmmReg2); }

define pcodeop phsubw;
:PHSUBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x05; mmxreg ... & m64 { mmxreg = phsubw(mmxreg, m64); }
:PHSUBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x05; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = phsubw(mmxreg1, mmxreg2); }
:PHSUBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x05; XmmReg ... & m128 { XmmReg = phsubw(XmmReg, m128); }
:PHSUBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x05; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = phsubw(XmmReg1, XmmReg2); }

define pcodeop phsubsw;
:PHSUBSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x07; mmxreg ... & m64 { mmxreg = phsubsw(mmxreg, m64); }
:PHSUBSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x07; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = phsubsw(mmxreg1, mmxreg2); }
:PHSUBSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x07; XmmReg ... & m128 { XmmReg = phsubsw(XmmReg, m128); }
:PHSUBSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x07; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = phsubsw(XmmReg1, XmmReg2); }

define pcodeop pinsrw;
:PINSRW mmxreg, r32, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC4; mmxmod=3 & r32 & mmxreg; imm8 { mmxreg = pinsrw(mmxreg, r32, imm8:8); }
:PINSRW mmxreg, m16, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC4; m16 & mmxreg ... ; imm8 { mmxreg = pinsrw(mmxreg, m16, imm8:8); }
:PINSRW XmmReg, r32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC4; xmmmod=3 & r32 & XmmReg; imm8 { XmmReg = pinsrw(XmmReg, r32, imm8:8); }
:PINSRW XmmReg, m16, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC4; m16 & XmmReg ...; imm8 { XmmReg = pinsrw(XmmReg, m16, imm8:8); }

define pcodeop pmaddubsw;
:PMADDUBSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x04; mmxreg ... & m64 { mmxreg = pmaddubsw(mmxreg, m64); }
:PMADDUBSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x04; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmaddubsw(mmxreg1, mmxreg2); }
:PMADDUBSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x04; XmmReg ... & m128 { XmmReg = pmaddubsw(XmmReg, m128); }
:PMADDUBSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x04; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaddubsw(XmmReg1, XmmReg2); }

define pcodeop pmaddwd;
:PMADDWD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF5; mmxreg ... & m64 { mmxreg = pmaddwd(mmxreg, m64); }
:PMADDWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF5; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmaddwd(mmxreg1, mmxreg2); }
:PMADDWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF5; XmmReg ... & m128 { XmmReg = pmaddwd(XmmReg, m128); }
:PMADDWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF5; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaddwd(XmmReg1, XmmReg2); }

define pcodeop pmaxsw;
:PMAXSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEE; mmxreg ... & m64 { mmxreg = pmaxsw(mmxreg, m64); }
:PMAXSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEE; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmaxsw(mmxreg1, mmxreg2); }
:PMAXSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEE; XmmReg ... & m128 { XmmReg = pmaxsw(XmmReg, m128); }
:PMAXSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEE; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxsw(XmmReg1, XmmReg2); }

define pcodeop pmaxub;
:PMAXUB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDE; mmxreg ... & m64 { mmxreg = pmaxub(mmxreg, m64); }
:PMAXUB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDE; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmaxub(mmxreg1, mmxreg2); }
:PMAXUB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDE; XmmReg ... & m128 { XmmReg = pmaxub(XmmReg, m128); }
:PMAXUB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDE; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxub(XmmReg1, XmmReg2); }

define pcodeop pminsw;
:PMINSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEA; mmxreg ... & m64 { mmxreg = pminsw(mmxreg, m64); }
:PMINSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEA; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pminsw(mmxreg1, mmxreg2); }
:PMINSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEA; XmmReg ... & m128 { XmmReg = pminsw(XmmReg, m128); }
:PMINSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEA; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pminsw(XmmReg1, XmmReg2); }

define pcodeop pminub;
:PMINUB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDA; mmxreg ... & m64 { mmxreg = pminub(mmxreg, m64); }
:PMINUB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDA; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pminub(mmxreg1, mmxreg2); }
:PMINUB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDA; XmmReg ... & m128 { XmmReg = pminub(XmmReg, m128); }
:PMINUB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDA; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pminub(XmmReg1, XmmReg2); }

define pcodeop pmovmskb;
:PMOVMSKB Reg32, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD7; Reg32 & mmxreg2 { Reg32 = pmovmskb(Reg32, mmxreg2); }
:PMOVMSKB Reg32, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD7; Reg32 & XmmReg2 { Reg32 = pmovmskb(Reg32, XmmReg2); }
@ifdef IA64
:PMOVMSKB Reg64, mmxreg2 is vexMode=0 & opsize=2 & mandover=0 & byte=0x0F; byte=0xD7; Reg64 & mmxreg2 { Reg64 = pmovmskb(Reg64, mmxreg2); }
@endif

define pcodeop pmulhrsw;
:PMULHRSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0B; mmxreg ... & m64 { mmxreg = pmulhrsw(mmxreg, m64); }
:PMULHRSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0B; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmulhrsw(mmxreg1, mmxreg2); }
:PMULHRSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0B; XmmReg ... & m128 { XmmReg = pmulhrsw(XmmReg, m128); }
:PMULHRSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0B; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulhrsw(XmmReg1, XmmReg2); }

define pcodeop pmulhuw;
:PMULHUW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE4; mmxreg ... & m64 { mmxreg = pmulhuw(mmxreg, m64); }
:PMULHUW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE4; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmulhuw(mmxreg1, mmxreg2); }
:PMULHUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE4; XmmReg ... & m128 { XmmReg = pmulhuw(XmmReg, m128); }
:PMULHUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE4; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulhuw(XmmReg1, XmmReg2); }

define pcodeop pmulhw;
:PMULHW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE5; mmxreg ... & m64 { mmxreg = pmulhw(mmxreg, m64); }
:PMULHW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE5; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmulhw(mmxreg1, mmxreg2); }
:PMULHW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE5; XmmReg ... & m128 { XmmReg = pmulhw(XmmReg, m128); }
:PMULHW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE5; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulhw(XmmReg1, XmmReg2); }

:PMULLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD5; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = mmxreg[0,16] * m[0,16];
  mmxreg[16,16] = mmxreg[16,16] * m[16,16];
  mmxreg[32,16] = mmxreg[32,16] * m[32,16];
  mmxreg[48,16] = mmxreg[48,16] * m[48,16];
}
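
# A 16-bit p-code multiply already keeps only the low 16 bits of the product,
# which is exactly PMULLW's "low word" result; e.g. 0x8000 * 0x0002 yields
# 0x0000 after the implicit truncation.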

:PMULLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD5; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = mmxreg1[0,16] * mmxreg2[0,16];
  mmxreg1[16,16] = mmxreg1[16,16] * mmxreg2[16,16];
  mmxreg1[32,16] = mmxreg1[32,16] * mmxreg2[32,16];
  mmxreg1[48,16] = mmxreg1[48,16] * mmxreg2[48,16];
}

:PMULLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD5; XmmReg ... & m128
{
  local m:16 = m128;
  XmmReg[0,16] = XmmReg[0,16] * m[0,16];
  XmmReg[16,16] = XmmReg[16,16] * m[16,16];
  XmmReg[32,16] = XmmReg[32,16] * m[32,16];
  XmmReg[48,16] = XmmReg[48,16] * m[48,16];
  XmmReg[64,16] = XmmReg[64,16] * m[64,16];
  XmmReg[80,16] = XmmReg[80,16] * m[80,16];
  XmmReg[96,16] = XmmReg[96,16] * m[96,16];
  XmmReg[112,16] = XmmReg[112,16] * m[112,16];
}

:PMULLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD5; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = XmmReg1[0,16] * XmmReg2[0,16];
  XmmReg1[16,16] = XmmReg1[16,16] * XmmReg2[16,16];
  XmmReg1[32,16] = XmmReg1[32,16] * XmmReg2[32,16];
  XmmReg1[48,16] = XmmReg1[48,16] * XmmReg2[48,16];
  XmmReg1[64,16] = XmmReg1[64,16] * XmmReg2[64,16];
  XmmReg1[80,16] = XmmReg1[80,16] * XmmReg2[80,16];
  XmmReg1[96,16] = XmmReg1[96,16] * XmmReg2[96,16];
  XmmReg1[112,16] = XmmReg1[112,16] * XmmReg2[112,16];
}
|
|
|
|
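# PMULLW keeps only the low 16 bits of each 32-bit product; those bits are the
# same for signed and unsigned operands, so the plain 16-bit `*` above is exact.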
:PMULUDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF4; mmxreg ... & m64
{
  local a:8 = zext(mmxreg[0,32]);
  local b:8 = zext(m64[0,32]);
  mmxreg = a * b;
}

:PMULUDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF4; mmxmod = 3 & mmxreg1 & mmxreg2
{
  local a:8 = zext(mmxreg1[0,32]);
  local b:8 = zext(mmxreg2[0,32]);
  mmxreg1 = a * b;
}

:PMULUDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF4; XmmReg ... & m128
{
  local a:8 = zext(XmmReg[0,32]);
  local b:8 = zext(m128[0,32]);
  XmmReg[0,64] = a * b;
  local c:8 = zext(XmmReg[64,32]);
  local d:8 = zext(m128[64,32]);
  XmmReg[64,64] = c * d;
}

:PMULUDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF4; xmmmod = 3 & XmmReg1 & XmmReg2
{
  local a:8 = zext(XmmReg1[0,32]);
  local b:8 = zext(XmmReg2[0,32]);
  XmmReg1[0,64] = a * b;
  local c:8 = zext(XmmReg1[64,32]);
  local d:8 = zext(XmmReg2[64,32]);
  XmmReg1[64,64] = c * d;
}
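# PMULUDQ multiplies the low unsigned dword of each 64-bit lane into a full
# 64-bit product, hence the zext to 8 bytes before each multiply above.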
:POR mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEB; mmxreg ... & m64 { mmxreg = mmxreg | m64; }
:POR mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 | mmxreg2; }
:POR XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEB; XmmReg ... & m128 { XmmReg = XmmReg | m128; }
:POR XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEB; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; }

define pcodeop psadbw;
:PSADBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF6; mmxreg ... & m64 { mmxreg = psadbw(mmxreg, m64); }
:PSADBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF6; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psadbw(mmxreg1, mmxreg2); }
:PSADBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF6; XmmReg ... & m128 { XmmReg = psadbw(XmmReg, m128); }
:PSADBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF6; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psadbw(XmmReg1, XmmReg2); }
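# PSADBW sketch (per the Intel SDM): sum |a[i] - b[i]| over eight unsigned
# bytes; the MMX form writes the 16-bit sum to the low word and zeroes the
# rest, and the SSE form computes one such sum per 64-bit half.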
# TODO: the byte and word shuffles below are still modeled as opaque pcodeops
# rather than explicit p-code.

define pcodeop pshufb;
:PSHUFB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x00; mmxreg ... & m64 { mmxreg=pshufb(mmxreg,m64); }
:PSHUFB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x00; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pshufb(mmxreg1,mmxreg2); }
:PSHUFB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x00; XmmReg ... & m128 { XmmReg=pshufb(XmmReg,m128); }
:PSHUFB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x00; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pshufb(XmmReg1,XmmReg2); }
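# PSHUFB sketch (per the Intel SDM): for each byte i, the result is 0 when the
# selector byte's bit 7 is set, otherwise it is the source byte indexed by the
# selector's low nibble (low 3 bits for the 64-bit MMX form).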
# determine the total shift required by the bit fields in a shuffle opcode
Order0: order0 is imm8 [ order0 = (( imm8 & 0x3) << 5); ] { export *[const]:1 order0; }
Order1: order1 is imm8 [ order1 = (((imm8 >> 2) & 0x3) << 5); ] { export *[const]:1 order1; }
Order2: order2 is imm8 [ order2 = (((imm8 >> 4) & 0x3) << 5); ] { export *[const]:1 order2; }
Order3: order3 is imm8 [ order3 = (((imm8 >> 6) & 0x3) << 5); ] { export *[const]:1 order3; }
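# Each 2-bit field of imm8 selects one source dword; the "<< 5" scales the
# selector to a bit offset (0, 32, 64 or 96). For example, imm8 = 0x1B
# (0b00011011) yields Order0=96, Order1=64, Order2=32, Order3=0, reversing the
# four dwords, while imm8 = 0xE4 (0b11100100) is the identity permutation.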
:PSHUFD XmmReg1, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x70; (m128 & XmmReg1 ...); imm8 & Order0 & Order1 & Order2 & Order3
{
  shifted:16 = m128 >> Order0;
  XmmReg1[0,32] = shifted:4;

  shifted = m128 >> Order1;
  XmmReg1[32,32] = shifted:4;

  shifted = m128 >> Order2;
  XmmReg1[64,32] = shifted:4;

  shifted = m128 >> Order3;
  XmmReg1[96,32] = shifted:4;
}

:PSHUFD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x70; xmmmod=3 & XmmReg1 & XmmReg2; imm8 & Order0 & Order1 & Order2 & Order3
{
  shifted:16 = XmmReg2 >> Order0;
  XmmReg1[0,32] = shifted:4;

  shifted = XmmReg2 >> Order1;
  XmmReg1[32,32] = shifted:4;

  shifted = XmmReg2 >> Order2;
  XmmReg1[64,32] = shifted:4;

  shifted = XmmReg2 >> Order3;
  XmmReg1[96,32] = shifted:4;
}

define pcodeop pshufhw;
:PSHUFHW XmmReg1, m128, imm8 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x70; m128 & XmmReg1 ...; imm8 { XmmReg1 = pshufhw(XmmReg1, m128, imm8:8); }
:PSHUFHW XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x70; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = pshufhw(XmmReg1, XmmReg2, imm8:8); }

define pcodeop pshuflw;
:PSHUFLW XmmReg1, m128, imm8 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x70; m128 & XmmReg1 ...; imm8 { XmmReg1 = pshuflw(XmmReg1, m128, imm8:8); }
:PSHUFLW XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x70; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = pshuflw(XmmReg1, XmmReg2, imm8:8); }

define pcodeop pshufw;
:PSHUFW mmxreg, m64, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x70; m64 & mmxreg ...; imm8 { mmxreg = pshufw(mmxreg, m64, imm8:8); }
:PSHUFW mmxreg1, mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x70; mmxmod = 3 & mmxreg1 & mmxreg2; imm8 { mmxreg1 = pshufw(mmxreg1, mmxreg2, imm8:8); }
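# PSHUFHW/PSHUFLW permute only the four words of the high/low quadword using
# the same 2-bit imm8 fields as PSHUFD and copy the other quadword through
# unchanged; PSHUFW is the 64-bit MMX analogue.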
define pcodeop psignb;
:PSIGNB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x08; mmxreg ... & m64 { mmxreg=psignb(mmxreg,m64); }
:PSIGNB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x08; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignb(mmxreg1,mmxreg2); }
:PSIGNB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x08; XmmReg ... & m128 { XmmReg=psignb(XmmReg,m128); }
:PSIGNB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x08; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignb(XmmReg1,XmmReg2); }

define pcodeop psignw;
:PSIGNW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x09; mmxreg ... & m64 { mmxreg=psignw(mmxreg,m64); }
:PSIGNW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x09; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignw(mmxreg1,mmxreg2); }
:PSIGNW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x09; XmmReg ... & m128 { XmmReg=psignw(XmmReg,m128); }
:PSIGNW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x09; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignw(XmmReg1,XmmReg2); }

define pcodeop psignd;
:PSIGND mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0a; mmxreg ... & m64 { mmxreg=psignd(mmxreg,m64); }
:PSIGND mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0a; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignd(mmxreg1,mmxreg2); }
:PSIGND XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0a; XmmReg ... & m128 { XmmReg=psignd(XmmReg,m128); }
:PSIGND XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0a; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignd(XmmReg1,XmmReg2); }
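# PSIGNB/W/D sketch (per the Intel SDM): each destination lane is negated where
# the corresponding source lane is negative, zeroed where it is zero, and left
# unchanged where it is positive.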
define pcodeop pslldq;
:PSLLDQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; xmmmod = 3 & reg_opcode=7 & XmmReg2; imm8 { XmmReg2 = pslldq(XmmReg2, imm8:8); }
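# PSLLDQ shifts the whole register left by imm8 *bytes*; a count above 15
# zeroes the register. An inline p-code sketch, mirroring the PSRLDQ
# implementation below:
#   XmmReg2 = XmmReg2 << (imm8 * 8);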
define pcodeop psllw;
:PSLLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF1; mmxreg ... & m64 ... { mmxreg = psllw(mmxreg, m64); }
:PSLLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF1; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psllw(mmxreg1, mmxreg2); }
:PSLLW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8 { mmxreg2 = psllw(mmxreg2, imm8:8); }

:PSLLD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF2; mmxreg ... & m64 ... {
  local m:8 = m64;
  mmxreg[0,32] = mmxreg[0,32] << m[0,32];
  mmxreg[32,32] = mmxreg[32,32] << m[32,32];
}

:PSLLD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF2; mmxmod = 3 & mmxreg1 & mmxreg2 {
  mmxreg1[0,32] = mmxreg1[0,32] << mmxreg2[0,32];
  mmxreg1[32,32] = mmxreg1[32,32] << mmxreg2[32,32];
}

:PSLLD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8 {
  mmxreg2[0,32] = mmxreg2[0,32] << imm8;
  mmxreg2[32,32] = mmxreg2[32,32] << imm8;
}

:PSLLQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF3; mmxreg ... & m64 ... { mmxreg = mmxreg << m64; }
:PSLLQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF3; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 << mmxreg2; }
:PSLLQ mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8 { mmxreg2 = mmxreg2 << imm8:8; }

:PSLLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF1; XmmReg ... & m128 ... { XmmReg = psllw(XmmReg, m128); }
:PSLLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF1; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psllw(XmmReg1, XmmReg2); }
:PSLLW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8 { XmmReg2 = psllw(XmmReg2, imm8:8); }

:PSLLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF2; XmmReg ... & m128 ... {
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] << m[0,32];
  XmmReg[32,32] = XmmReg[32,32] << m[32,32];
  XmmReg[64,32] = XmmReg[64,32] << m[64,32];
  XmmReg[96,32] = XmmReg[96,32] << m[96,32];
}

:PSLLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF2; xmmmod = 3 & XmmReg1 & XmmReg2 {
  XmmReg1[0,32] = XmmReg1[0,32] << XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] << XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] << XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] << XmmReg2[96,32];
}

:PSLLD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8 {
  XmmReg2[0,32] = XmmReg2[0,32] << imm8;
  XmmReg2[32,32] = XmmReg2[32,32] << imm8;
  XmmReg2[64,32] = XmmReg2[64,32] << imm8;
  XmmReg2[96,32] = XmmReg2[96,32] << imm8;
}

:PSLLQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF3; XmmReg ... & m128 ... {
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] << m[0,64];
  XmmReg[64,64] = XmmReg[64,64] << m[64,64];
}

:PSLLQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF3; xmmmod = 3 & XmmReg1 & XmmReg2 {
  XmmReg1[0,64] = XmmReg1[0,64] << XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] << XmmReg2[64,64];
}

:PSLLQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8 {
  XmmReg2[0,64] = XmmReg2[0,64] << imm8;
  XmmReg2[64,64] = XmmReg2[64,64] << imm8;
}

define pcodeop psraw;
:PSRAW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE1; mmxreg ... & m64 ... { mmxreg = psraw(mmxreg, m64); }
:PSRAW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE1; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psraw(mmxreg1, mmxreg2); }
:PSRAW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=4 & mmxreg2; imm8 { mmxreg2 = psraw(mmxreg2, imm8:8); }
:PSRAD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE2; mmxreg ... & m64
{
  # a count greater than 31 fills each lane with the sign bit
  mmxreg[0,32] = mmxreg[0,32] s>> m64;
  mmxreg[32,32] = mmxreg[32,32] s>> m64;
}

:PSRAD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE2; mmxmod = 3 & mmxreg1 & mmxreg2
{
  # a count greater than 31 fills each lane with the sign bit
  mmxreg1[0,32] = mmxreg1[0,32] s>> mmxreg2;
  mmxreg1[32,32] = mmxreg1[32,32] s>> mmxreg2;
}

:PSRAD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=4 & mmxreg2; imm8
{
  # a count greater than 31 fills each lane with the sign bit
  mmxreg2[0,32] = mmxreg2[0,32] s>> imm8;
  mmxreg2[32,32] = mmxreg2[32,32] s>> imm8;
}

:PSRAW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE1; XmmReg ... & m128 ... { XmmReg = psraw(XmmReg, m128); }
:PSRAW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE1; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psraw(XmmReg1, XmmReg2); }
:PSRAW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=4 & XmmReg2; imm8 { XmmReg2 = psraw(XmmReg2, imm8:8); }

:PSRAD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE2; m128 & XmmReg ...
{
  # a count greater than 31 fills each lane with the sign bit
  XmmReg[0,32] = XmmReg[0,32] s>> m128;
  XmmReg[32,32] = XmmReg[32,32] s>> m128;
  XmmReg[64,32] = XmmReg[64,32] s>> m128;
  XmmReg[96,32] = XmmReg[96,32] s>> m128;
}

:PSRAD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE2; xmmmod = 3 & XmmReg1 & XmmReg2
{
  # a count greater than 31 fills each lane with the sign bit
  XmmReg1[0,32] = XmmReg1[0,32] s>> XmmReg2;
  XmmReg1[32,32] = XmmReg1[32,32] s>> XmmReg2;
  XmmReg1[64,32] = XmmReg1[64,32] s>> XmmReg2;
  XmmReg1[96,32] = XmmReg1[96,32] s>> XmmReg2;
}

:PSRAD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=4 & XmmReg2; imm8
{
  # a count greater than 31 fills each lane with the sign bit
  XmmReg2[0,32] = XmmReg2[0,32] s>> imm8;
  XmmReg2[32,32] = XmmReg2[32,32] s>> imm8;
  XmmReg2[64,32] = XmmReg2[64,32] s>> imm8;
  XmmReg2[96,32] = XmmReg2[96,32] s>> imm8;
}
:PSRLDQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; xmmmod=3 & reg_opcode=3 & XmmReg2; imm8
{
  # a count greater than 15 just clears all the bits
  XmmReg2 = XmmReg2 >> (imm8 * 8);
}

:PSRLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD1; mmxreg ... & m64 ...
{
  mmxreg[0,16] = mmxreg[0,16] >> m64;
  mmxreg[16,16] = mmxreg[16,16] >> m64;
  mmxreg[32,16] = mmxreg[32,16] >> m64;
  mmxreg[48,16] = mmxreg[48,16] >> m64;
}

:PSRLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD1; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = mmxreg1[0,16] >> mmxreg2;
  mmxreg1[16,16] = mmxreg1[16,16] >> mmxreg2;
  mmxreg1[32,16] = mmxreg1[32,16] >> mmxreg2;
  mmxreg1[48,16] = mmxreg1[48,16] >> mmxreg2;
}

:PSRLW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8
{
  mmxreg2[0,16] = mmxreg2[0,16] >> imm8;
  mmxreg2[16,16] = mmxreg2[16,16] >> imm8;
  mmxreg2[32,16] = mmxreg2[32,16] >> imm8;
  mmxreg2[48,16] = mmxreg2[48,16] >> imm8;
}

:PSRLD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD2; mmxreg ... & m64 ...
{
  mmxreg[0,32] = mmxreg[0,32] >> m64;
  mmxreg[32,32] = mmxreg[32,32] >> m64;
}

:PSRLD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD2; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,32] = mmxreg1[0,32] >> mmxreg2;
  mmxreg1[32,32] = mmxreg1[32,32] >> mmxreg2;
}

:PSRLD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8
{
  mmxreg2[0,32] = mmxreg2[0,32] >> imm8;
  mmxreg2[32,32] = mmxreg2[32,32] >> imm8;
}

:PSRLQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD3; mmxreg ... & m64 ...
{
  mmxreg = mmxreg >> m64;
}

:PSRLQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD3; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1 = mmxreg1 >> mmxreg2;
}

:PSRLQ mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8
{
  mmxreg2 = mmxreg2 >> imm8;
}

:PSRLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD1; XmmReg ... & m128 ...
{
  XmmReg[0,16] = XmmReg[0,16] >> m128[0,64];
  XmmReg[16,16] = XmmReg[16,16] >> m128[0,64];
  XmmReg[32,16] = XmmReg[32,16] >> m128[0,64];
  XmmReg[48,16] = XmmReg[48,16] >> m128[0,64];
  XmmReg[64,16] = XmmReg[64,16] >> m128[0,64];
  XmmReg[80,16] = XmmReg[80,16] >> m128[0,64];
  XmmReg[96,16] = XmmReg[96,16] >> m128[0,64];
  XmmReg[112,16] = XmmReg[112,16] >> m128[0,64];
}

:PSRLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD1; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = XmmReg1[0,16] >> XmmReg2[0,64];
  XmmReg1[16,16] = XmmReg1[16,16] >> XmmReg2[0,64];
  XmmReg1[32,16] = XmmReg1[32,16] >> XmmReg2[0,64];
  XmmReg1[48,16] = XmmReg1[48,16] >> XmmReg2[0,64];
  XmmReg1[64,16] = XmmReg1[64,16] >> XmmReg2[0,64];
  XmmReg1[80,16] = XmmReg1[80,16] >> XmmReg2[0,64];
  XmmReg1[96,16] = XmmReg1[96,16] >> XmmReg2[0,64];
  XmmReg1[112,16] = XmmReg1[112,16] >> XmmReg2[0,64];
}

:PSRLW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8
{
  XmmReg2[0,16] = XmmReg2[0,16] >> imm8;
  XmmReg2[16,16] = XmmReg2[16,16] >> imm8;
  XmmReg2[32,16] = XmmReg2[32,16] >> imm8;
  XmmReg2[48,16] = XmmReg2[48,16] >> imm8;
  XmmReg2[64,16] = XmmReg2[64,16] >> imm8;
  XmmReg2[80,16] = XmmReg2[80,16] >> imm8;
  XmmReg2[96,16] = XmmReg2[96,16] >> imm8;
  XmmReg2[112,16] = XmmReg2[112,16] >> imm8;
}

:PSRLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD2; XmmReg ... & m128 ...
{
  XmmReg[0,32] = XmmReg[0,32] >> m128[0,64];
  XmmReg[32,32] = XmmReg[32,32] >> m128[0,64];
  XmmReg[64,32] = XmmReg[64,32] >> m128[0,64];
  XmmReg[96,32] = XmmReg[96,32] >> m128[0,64];
}

:PSRLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD2; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] >> XmmReg2[0,64];
  XmmReg1[32,32] = XmmReg1[32,32] >> XmmReg2[0,64];
  XmmReg1[64,32] = XmmReg1[64,32] >> XmmReg2[0,64];
  XmmReg1[96,32] = XmmReg1[96,32] >> XmmReg2[0,64];
}

:PSRLD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8
{
  XmmReg2[0,32] = XmmReg2[0,32] >> imm8;
  XmmReg2[32,32] = XmmReg2[32,32] >> imm8;
  XmmReg2[64,32] = XmmReg2[64,32] >> imm8;
  XmmReg2[96,32] = XmmReg2[96,32] >> imm8;
}

:PSRLQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD3; XmmReg ... & m128 ...
{
  XmmReg[0,64] = XmmReg[0,64] >> m128[0,64];
  XmmReg[64,64] = XmmReg[64,64] >> m128[0,64];
}

:PSRLQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD3; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] >> XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] >> XmmReg2[0,64];
}

:PSRLQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8
{
  XmmReg2[0,64] = XmmReg2[0,64] >> imm8;
  XmmReg2[64,64] = XmmReg2[64,64] >> imm8;
}
:PSUBB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF8; mmxreg ... & m64 ...
{
  local m:8 = m64;
  mmxreg[0,8] = mmxreg[0,8] - m[0,8];
  mmxreg[8,8] = mmxreg[8,8] - m[8,8];
  mmxreg[16,8] = mmxreg[16,8] - m[16,8];
  mmxreg[24,8] = mmxreg[24,8] - m[24,8];
  mmxreg[32,8] = mmxreg[32,8] - m[32,8];
  mmxreg[40,8] = mmxreg[40,8] - m[40,8];
  mmxreg[48,8] = mmxreg[48,8] - m[48,8];
  mmxreg[56,8] = mmxreg[56,8] - m[56,8];
}
:PSUBB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF8; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = mmxreg1[0,8] - mmxreg2[0,8];
  mmxreg1[8,8] = mmxreg1[8,8] - mmxreg2[8,8];
  mmxreg1[16,8] = mmxreg1[16,8] - mmxreg2[16,8];
  mmxreg1[24,8] = mmxreg1[24,8] - mmxreg2[24,8];
  mmxreg1[32,8] = mmxreg1[32,8] - mmxreg2[32,8];
  mmxreg1[40,8] = mmxreg1[40,8] - mmxreg2[40,8];
  mmxreg1[48,8] = mmxreg1[48,8] - mmxreg2[48,8];
  mmxreg1[56,8] = mmxreg1[56,8] - mmxreg2[56,8];
}
:PSUBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF9; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = mmxreg[0,16] - m[0,16];
  mmxreg[16,16] = mmxreg[16,16] - m[16,16];
  mmxreg[32,16] = mmxreg[32,16] - m[32,16];
  mmxreg[48,16] = mmxreg[48,16] - m[48,16];
}

:PSUBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF9; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = mmxreg1[0,16] - mmxreg2[0,16];
  mmxreg1[16,16] = mmxreg1[16,16] - mmxreg2[16,16];
  mmxreg1[32,16] = mmxreg1[32,16] - mmxreg2[32,16];
  mmxreg1[48,16] = mmxreg1[48,16] - mmxreg2[48,16];
}

:PSUBD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFA; mmxreg ... & m64 ... {
  local m:8 = m64;
  mmxreg[0,32] = mmxreg[0,32] - m[0,32];
  mmxreg[32,32] = mmxreg[32,32] - m[32,32];
}

:PSUBD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFA; mmxmod = 3 & mmxreg1 & mmxreg2 {
  mmxreg1[0,32] = mmxreg1[0,32] - mmxreg2[0,32];
  mmxreg1[32,32] = mmxreg1[32,32] - mmxreg2[32,32];
}

:PSUBQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFB; mmxreg ... & m64 ... { mmxreg = mmxreg - m64; }
:PSUBQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 - mmxreg2; }
:PSUBQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFB; XmmReg ... & m128 ... {
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] - m[0,64];
  XmmReg[64,64] = XmmReg[64,64] - m[64,64];
}

:PSUBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFB; xmmmod = 3 & XmmReg1 & XmmReg2 {
  XmmReg1[0,64] = XmmReg1[0,64] - XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] - XmmReg2[64,64];
}

:PSUBB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF8; XmmReg ... & m128 ...
{
  local m:16 = m128;
  XmmReg[0,8] = XmmReg[0,8] - m[0,8];
  XmmReg[8,8] = XmmReg[8,8] - m[8,8];
  XmmReg[16,8] = XmmReg[16,8] - m[16,8];
  XmmReg[24,8] = XmmReg[24,8] - m[24,8];
  XmmReg[32,8] = XmmReg[32,8] - m[32,8];
  XmmReg[40,8] = XmmReg[40,8] - m[40,8];
  XmmReg[48,8] = XmmReg[48,8] - m[48,8];
  XmmReg[56,8] = XmmReg[56,8] - m[56,8];
  XmmReg[64,8] = XmmReg[64,8] - m[64,8];
  XmmReg[72,8] = XmmReg[72,8] - m[72,8];
  XmmReg[80,8] = XmmReg[80,8] - m[80,8];
  XmmReg[88,8] = XmmReg[88,8] - m[88,8];
  XmmReg[96,8] = XmmReg[96,8] - m[96,8];
  XmmReg[104,8] = XmmReg[104,8] - m[104,8];
  XmmReg[112,8] = XmmReg[112,8] - m[112,8];
  XmmReg[120,8] = XmmReg[120,8] - m[120,8];
}

:PSUBB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF8; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = XmmReg1[0,8] - XmmReg2[0,8];
  XmmReg1[8,8] = XmmReg1[8,8] - XmmReg2[8,8];
  XmmReg1[16,8] = XmmReg1[16,8] - XmmReg2[16,8];
  XmmReg1[24,8] = XmmReg1[24,8] - XmmReg2[24,8];
  XmmReg1[32,8] = XmmReg1[32,8] - XmmReg2[32,8];
  XmmReg1[40,8] = XmmReg1[40,8] - XmmReg2[40,8];
  XmmReg1[48,8] = XmmReg1[48,8] - XmmReg2[48,8];
  XmmReg1[56,8] = XmmReg1[56,8] - XmmReg2[56,8];
  XmmReg1[64,8] = XmmReg1[64,8] - XmmReg2[64,8];
  XmmReg1[72,8] = XmmReg1[72,8] - XmmReg2[72,8];
  XmmReg1[80,8] = XmmReg1[80,8] - XmmReg2[80,8];
  XmmReg1[88,8] = XmmReg1[88,8] - XmmReg2[88,8];
  XmmReg1[96,8] = XmmReg1[96,8] - XmmReg2[96,8];
  XmmReg1[104,8] = XmmReg1[104,8] - XmmReg2[104,8];
  XmmReg1[112,8] = XmmReg1[112,8] - XmmReg2[112,8];
  XmmReg1[120,8] = XmmReg1[120,8] - XmmReg2[120,8];
}

:PSUBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF9; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = XmmReg[0,16] - m[0,16];
  XmmReg[16,16] = XmmReg[16,16] - m[16,16];
  XmmReg[32,16] = XmmReg[32,16] - m[32,16];
  XmmReg[48,16] = XmmReg[48,16] - m[48,16];
  XmmReg[64,16] = XmmReg[64,16] - m[64,16];
  XmmReg[80,16] = XmmReg[80,16] - m[80,16];
  XmmReg[96,16] = XmmReg[96,16] - m[96,16];
  XmmReg[112,16] = XmmReg[112,16] - m[112,16];
}

:PSUBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF9; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = XmmReg1[0,16] - XmmReg2[0,16];
  XmmReg1[16,16] = XmmReg1[16,16] - XmmReg2[16,16];
  XmmReg1[32,16] = XmmReg1[32,16] - XmmReg2[32,16];
  XmmReg1[48,16] = XmmReg1[48,16] - XmmReg2[48,16];
  XmmReg1[64,16] = XmmReg1[64,16] - XmmReg2[64,16];
  XmmReg1[80,16] = XmmReg1[80,16] - XmmReg2[80,16];
  XmmReg1[96,16] = XmmReg1[96,16] - XmmReg2[96,16];
  XmmReg1[112,16] = XmmReg1[112,16] - XmmReg2[112,16];
}

:PSUBD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFA; XmmReg ... & m128 ... {
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] - m[0,32];
  XmmReg[32,32] = XmmReg[32,32] - m[32,32];
  XmmReg[64,32] = XmmReg[64,32] - m[64,32];
  XmmReg[96,32] = XmmReg[96,32] - m[96,32];
}

:PSUBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFA; xmmmod = 3 & XmmReg1 & XmmReg2 {
  XmmReg1[0,32] = XmmReg1[0,32] - XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] - XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] - XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] - XmmReg2[96,32];
}
define pcodeop psubsb;
:PSUBSB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE8; mmxreg ... & m64 ... { mmxreg = psubsb(mmxreg, m64); }
:PSUBSB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE8; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psubsb(mmxreg1, mmxreg2); }

define pcodeop psubsw;
:PSUBSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE9; mmxreg ... & m64 ... { mmxreg = psubsw(mmxreg, m64); }
:PSUBSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE9; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psubsw(mmxreg1, mmxreg2); }

:PSUBSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE8; XmmReg ... & m128 ... { XmmReg = psubsb(XmmReg, m128); }
:PSUBSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE8; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psubsb(XmmReg1, XmmReg2); }

:PSUBSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE9; XmmReg ... & m128 ... { XmmReg = psubsw(XmmReg, m128); }
:PSUBSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE9; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psubsw(XmmReg1, XmmReg2); }

define pcodeop psubusb;
:PSUBUSB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD8; mmxreg ... & m64 ... { mmxreg = psubusb(mmxreg, m64); }
:PSUBUSB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD8; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psubusb(mmxreg1, mmxreg2); }

define pcodeop psubusw;
:PSUBUSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD9; mmxreg ... & m64 ... { mmxreg = psubusw(mmxreg, m64); }
:PSUBUSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD9; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psubusw(mmxreg1, mmxreg2); }

:PSUBUSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD8; XmmReg ... & m128 { XmmReg = psubusb(XmmReg, m128); }
:PSUBUSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD8; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psubusb(XmmReg1, XmmReg2); }

:PSUBUSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD9; XmmReg ... & m128 { XmmReg = psubusw(XmmReg, m128); }
:PSUBUSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD9; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psubusw(XmmReg1, XmmReg2); }
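# The saturating forms clamp each difference to the signed (PSUBS*) or
# unsigned (PSUBUS*) range of the lane instead of wrapping; e.g. for PSUBUSB
# each byte is max(a - b, 0).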
:PUNPCKHBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x68; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,8] = mmxreg[32,8];
  mmxreg[8,8] = m[32,8];
  mmxreg[16,8] = mmxreg[40,8];
  mmxreg[24,8] = m[40,8];
  mmxreg[32,8] = mmxreg[48,8];
  mmxreg[40,8] = m[48,8];
  mmxreg[48,8] = mmxreg[56,8];
  mmxreg[56,8] = m[56,8];
}

:PUNPCKHBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x68; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,8] = mmxreg1[32,8];
  mmxreg1[8,8] = mmxreg2[32,8];
  mmxreg1[16,8] = mmxreg1[40,8];
  mmxreg1[24,8] = mmxreg2[40,8];
  mmxreg1[32,8] = mmxreg1[48,8];
  mmxreg1[40,8] = mmxreg2[48,8];
  mmxreg1[48,8] = mmxreg1[56,8];
  mmxreg1[56,8] = mmxreg2[56,8];
}

:PUNPCKHWD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x69; mmxreg ... & m64
{
  local m:8 = m64;
  mmxreg[0,16] = mmxreg[32,16];
  mmxreg[16,16] = m[32,16];
  mmxreg[32,16] = mmxreg[48,16];
  mmxreg[48,16] = m[48,16];
}

:PUNPCKHWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x69; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,16] = mmxreg1[32,16];
  mmxreg1[16,16] = mmxreg2[32,16];
  mmxreg1[32,16] = mmxreg1[48,16];
  mmxreg1[48,16] = mmxreg2[48,16];
}

:PUNPCKHDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6A; mmxreg ... & m64
{
  mmxreg[0,32] = mmxreg[32,32];
  mmxreg[32,32] = m64[32,32];
}

:PUNPCKHDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6A; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[0,32] = mmxreg1[32,32];
  mmxreg1[32,32] = mmxreg2[32,32];
}
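# PUNPCKH* interleave the high halves of destination and source, alternating
# dest/source lanes from low to high; e.g. for the MMX PUNPCKHBW the result
# bytes are { d4, s4, d5, s5, d6, s6, d7, s7 }.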
:PUNPCKHBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x68; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,8] = XmmReg[64,8];
  XmmReg[8,8] = m[64,8];
  XmmReg[16,8] = XmmReg[72,8];
  XmmReg[24,8] = m[72,8];
  XmmReg[32,8] = XmmReg[80,8];
  XmmReg[40,8] = m[80,8];
  XmmReg[48,8] = XmmReg[88,8];
  XmmReg[56,8] = m[88,8];
  XmmReg[64,8] = XmmReg[96,8];
  XmmReg[72,8] = m[96,8];
  XmmReg[80,8] = XmmReg[104,8];
  XmmReg[88,8] = m[104,8];
  XmmReg[96,8] = XmmReg[112,8];
  XmmReg[104,8] = m[112,8];
  XmmReg[112,8] = XmmReg[120,8];
  XmmReg[120,8] = m[120,8];
}

# full set of XMM byte registers
:PUNPCKHBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x68; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,8] = XmmReg1[64,8];
  XmmReg1[8,8] = XmmReg2[64,8];
  XmmReg1[16,8] = XmmReg1[72,8];
  XmmReg1[24,8] = XmmReg2[72,8];
  XmmReg1[32,8] = XmmReg1[80,8];
  XmmReg1[40,8] = XmmReg2[80,8];
  XmmReg1[48,8] = XmmReg1[88,8];
  XmmReg1[56,8] = XmmReg2[88,8];
  XmmReg1[64,8] = XmmReg1[96,8];
  XmmReg1[72,8] = XmmReg2[96,8];
  XmmReg1[80,8] = XmmReg1[104,8];
  XmmReg1[88,8] = XmmReg2[104,8];
  XmmReg1[96,8] = XmmReg1[112,8];
  XmmReg1[104,8] = XmmReg2[112,8];
  XmmReg1[112,8] = XmmReg1[120,8];
  XmmReg1[120,8] = XmmReg2[120,8];
}

:PUNPCKHWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x69; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,16] = XmmReg[64,16];
  XmmReg[16,16] = m[64,16];
  XmmReg[32,16] = XmmReg[80,16];
  XmmReg[48,16] = m[80,16];
  XmmReg[64,16] = XmmReg[96,16];
  XmmReg[80,16] = m[96,16];
  XmmReg[96,16] = XmmReg[112,16];
  XmmReg[112,16] = m[112,16];
}

:PUNPCKHWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x69; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,16] = XmmReg1[64,16];
  XmmReg1[16,16] = XmmReg2[64,16];
  XmmReg1[32,16] = XmmReg1[80,16];
  XmmReg1[48,16] = XmmReg2[80,16];
  XmmReg1[64,16] = XmmReg1[96,16];
  XmmReg1[80,16] = XmmReg2[96,16];
  XmmReg1[96,16] = XmmReg1[112,16];
  XmmReg1[112,16] = XmmReg2[112,16];
}

:PUNPCKHDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6A; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[64,32];
  XmmReg[32,32] = m[64,32];
  XmmReg[64,32] = XmmReg[96,32];
  XmmReg[96,32] = m[96,32];
}

:PUNPCKHDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6A; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[64,32];
  XmmReg1[32,32] = XmmReg2[64,32];
  XmmReg1[64,32] = XmmReg1[96,32];
  XmmReg1[96,32] = XmmReg2[96,32];
}

:PUNPCKHQDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6D; m128 & XmmReg ...
{
  XmmReg[0,64] = XmmReg[64,64];
  XmmReg[64,64] = m128[64,64];
}

:PUNPCKHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6D; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[64,64];
  XmmReg1[64,64] = XmmReg2[64,64];
}

:PUNPCKLBW mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x60; mmxreg ... & m32
{
  local m:4 = m32;
  mmxreg[56,8] = m[24,8];
  mmxreg[48,8] = mmxreg[24,8];
  mmxreg[40,8] = m[16,8];
  mmxreg[32,8] = mmxreg[16,8];
  mmxreg[24,8] = m[8,8];
  mmxreg[16,8] = mmxreg[8,8];
  mmxreg[8,8] = m[0,8];
  # mmxreg[0,8] = mmxreg[0,8]; superfluous
}

:PUNPCKLBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x60; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[56,8] = mmxreg2[24,8];
  mmxreg1[48,8] = mmxreg1[24,8];
  mmxreg1[40,8] = mmxreg2[16,8];
  mmxreg1[32,8] = mmxreg1[16,8];
  mmxreg1[24,8] = mmxreg2[8,8];
  mmxreg1[16,8] = mmxreg1[8,8];
  mmxreg1[8,8] = mmxreg2[0,8];
  # mmxreg1[0,8] = mmxreg1[0,8]; superfluous
}

:PUNPCKLWD mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x61; mmxreg ... & m32
{
  local m:4 = m32;
  mmxreg[48,16] = m[16,16];
  mmxreg[32,16] = mmxreg[16,16];
  mmxreg[16,16] = m[0,16];
  # mmxreg[0,16] = mmxreg[0,16]; superfluous
}

:PUNPCKLWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x61; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[48,16] = mmxreg2[16,16];
  mmxreg1[32,16] = mmxreg1[16,16];
  mmxreg1[16,16] = mmxreg2[0,16];
  # mmxreg1[0,16] = mmxreg1[0,16]; superfluous
}

:PUNPCKLDQ mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x62; mmxreg ... & m32
{
  mmxreg[32,32] = m32;
  # mmxreg[0,32] = mmxreg[0,32]; superfluous
}

:PUNPCKLDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x62; mmxmod = 3 & mmxreg1 & mmxreg2
{
  mmxreg1[32,32] = mmxreg2[0,32];
  # mmxreg1[0,32] = mmxreg1[0,32]; superfluous
}
:PUNPCKLBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x60; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[120,8] = m[56,8];
  XmmReg[112,8] = XmmReg[56,8];
  XmmReg[104,8] = m[48,8];
  XmmReg[96,8] = XmmReg[48,8];
  XmmReg[88,8] = m[40,8];
  XmmReg[80,8] = XmmReg[40,8];
  XmmReg[72,8] = m[32,8];
  XmmReg[64,8] = XmmReg[32,8];
  XmmReg[56,8] = m[24,8];
  XmmReg[48,8] = XmmReg[24,8];
  XmmReg[40,8] = m[16,8];
  XmmReg[32,8] = XmmReg[16,8];
  XmmReg[24,8] = m[8,8];
  XmmReg[16,8] = XmmReg[8,8];
  XmmReg[8,8] = m[0,8];
  # XmmReg[0,8] = XmmReg[0,8]; superfluous
}

:PUNPCKLBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x60; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[120,8] = XmmReg2[56,8];
  XmmReg1[112,8] = XmmReg1[56,8];
  XmmReg1[104,8] = XmmReg2[48,8];
  XmmReg1[96,8] = XmmReg1[48,8];
  XmmReg1[88,8] = XmmReg2[40,8];
  XmmReg1[80,8] = XmmReg1[40,8];
  XmmReg1[72,8] = XmmReg2[32,8];
  XmmReg1[64,8] = XmmReg1[32,8];
  XmmReg1[56,8] = XmmReg2[24,8];
  XmmReg1[48,8] = XmmReg1[24,8];
  XmmReg1[40,8] = XmmReg2[16,8];
  XmmReg1[32,8] = XmmReg1[16,8];
  XmmReg1[24,8] = XmmReg2[8,8];
  XmmReg1[16,8] = XmmReg1[8,8];
  XmmReg1[8,8] = XmmReg2[0,8];
  # XmmReg1[0,8] = XmmReg1[0,8]; superfluous
}

:PUNPCKLWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x61; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[112,16] = m[48,16];
  XmmReg[96,16] = XmmReg[48,16];
  XmmReg[80,16] = m[32,16];
  XmmReg[64,16] = XmmReg[32,16];
  XmmReg[48,16] = m[16,16];
  XmmReg[32,16] = XmmReg[16,16];
  XmmReg[16,16] = m[0,16];
  # XmmReg[0,16] = XmmReg[0,16]; superfluous
}

:PUNPCKLWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x61; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[112,16] = XmmReg2[48,16];
  XmmReg1[96,16] = XmmReg1[48,16];
  XmmReg1[80,16] = XmmReg2[32,16];
  XmmReg1[64,16] = XmmReg1[32,16];
  XmmReg1[48,16] = XmmReg2[16,16];
  XmmReg1[32,16] = XmmReg1[16,16];
  XmmReg1[16,16] = XmmReg2[0,16];
  # XmmReg1[0,16] = XmmReg1[0,16]; superfluous
}

:PUNPCKLDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x62; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[96,32] = m[32,32];
  XmmReg[64,32] = XmmReg[32,32];
  XmmReg[32,32] = m[0,32];
  # XmmReg[0,32] = XmmReg[0,32]; superfluous
}

:PUNPCKLDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x62; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[96,32] = XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[32,32];
  XmmReg1[32,32] = XmmReg2[0,32];
  # XmmReg1[0,32] = XmmReg1[0,32]; superfluous
}

define pcodeop punpcklqdq;  # note: the forms below are modeled inline, so this pcodeop is currently unused
:PUNPCKLQDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6C; m128 & XmmReg ...
{
  XmmReg[64,64] = m128[0,64];
  # XmmReg[0,64] = XmmReg[0,64]; superfluous
}

:PUNPCKLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6C; xmmmod = 3 & XmmReg1 & XmmReg2
{
  XmmReg1[64,64] = XmmReg2[0,64];
  # XmmReg1[0,64] = XmmReg1[0,64]; superfluous
}
:PXOR mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEF; mmxreg ... & m64 { mmxreg = mmxreg ^ m64; }
:PXOR mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEF; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 ^ mmxreg2; }
:PXOR XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEF; XmmReg ... & m128 { XmmReg = XmmReg ^ m128; }
:PXOR XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEF; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 ^ XmmReg2; }

define pcodeop rcpps;
:RCPPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x53; XmmReg ... & m128 { XmmReg = rcpps(XmmReg, m128); }
:RCPPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x53; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rcpps(XmmReg1, XmmReg2); }

define pcodeop rcpss;
:RCPSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x53; XmmReg ... & m32 { XmmReg = rcpss(XmmReg, m32); }
:RCPSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x53; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rcpss(XmmReg1, XmmReg2); }

define pcodeop rsqrtps;
:RSQRTPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x52; XmmReg ... & m128 { XmmReg = rsqrtps(XmmReg, m128); }
:RSQRTPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x52; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rsqrtps(XmmReg1, XmmReg2); }

define pcodeop rsqrtss;
:RSQRTSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x52; XmmReg ... & m32 { XmmReg = rsqrtss(XmmReg, m32); }
:RSQRTSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x52; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rsqrtss(XmmReg1, XmmReg2); }

define pcodeop shufpd;
:SHUFPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC6; XmmReg ... & m128; imm8 { XmmReg = shufpd(XmmReg, m128, imm8:8); }
:SHUFPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC6; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = shufpd(XmmReg1, XmmReg2, imm8:8); }
:SHUFPS XmmReg, m128, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC6; (m128 & XmmReg ...); imm8 & Order0 & Order1 & Order2 & Order3
{
  shifted:16 = XmmReg >> Order0;
  tempA:4 = shifted:4;

  shifted = XmmReg >> Order1;
  tempB:4 = shifted:4;

  shifted = m128 >> Order2;
  tempC:4 = shifted:4;

  shifted = m128 >> Order3;
  tempD:4 = shifted:4;

  XmmReg[0,32] = tempA;
  XmmReg[32,32] = tempB;
  XmmReg[64,32] = tempC;
  XmmReg[96,32] = tempD;
}

:SHUFPS XmmReg1, XmmReg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC6; xmmmod=3 & XmmReg1 & XmmReg2; imm8 & Order0 & Order1 & Order2 & Order3
{
  shifted:16 = XmmReg1 >> Order0;
  tempA:4 = shifted:4;

  shifted = XmmReg1 >> Order1;
  tempB:4 = shifted:4;

  shifted = XmmReg2 >> Order2;
  tempC:4 = shifted:4;

  shifted = XmmReg2 >> Order3;
  tempD:4 = shifted:4;

  XmmReg1[0,32] = tempA;
  XmmReg1[32,32] = tempB;
  XmmReg1[64,32] = tempC;
  XmmReg1[96,32] = tempD;
}
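# SHUFPS picks the two low result dwords from the destination and the two high
# result dwords from the source, each selected by a 2-bit imm8 field decoded by
# Order0..Order3 above; the temporaries keep the result correct when the two
# operands alias.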
define pcodeop sqrtpd;
:SQRTPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x51; XmmReg ... & m128 { XmmReg = sqrtpd(XmmReg, m128); }
:SQRTPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = sqrtpd(XmmReg1, XmmReg2); }

define pcodeop sqrtps;
:SQRTPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x51; XmmReg ... & m128 { XmmReg = sqrtps(XmmReg, m128); }
:SQRTPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = sqrtps(XmmReg1, XmmReg2); }

:SQRTSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x51; XmmReg ... & m64 { XmmReg[0,64] = sqrt(m64); }
:SQRTSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = sqrt(XmmReg2[0,64]); }

:SQRTSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x51; XmmReg ... & m32 { XmmReg[0,32] = sqrt(m32); }
:SQRTSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = sqrt(XmmReg2[0,32]); }

:SUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5C; XmmReg ... & m128
{
  local m:16 = m128;
  XmmReg[0,64] = XmmReg[0,64] f- m[0,64];
  XmmReg[64,64] = XmmReg[64,64] f- m[64,64];
}

:SUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64];
  XmmReg1[64,64] = XmmReg1[64,64] f- XmmReg2[64,64];
}

:SUBPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5C; XmmReg ... & m128
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[0,32] f- m[0,32];
  XmmReg[32,32] = XmmReg[32,32] f- m[32,32];
  XmmReg[64,32] = XmmReg[64,32] f- m[64,32];
  XmmReg[96,32] = XmmReg[96,32] f- m[96,32];
}

:SUBPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32];
  XmmReg1[32,32] = XmmReg1[32,32] f- XmmReg2[32,32];
  XmmReg1[64,32] = XmmReg1[64,32] f- XmmReg2[64,32];
  XmmReg1[96,32] = XmmReg1[96,32] f- XmmReg2[96,32];
}

:SUBSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5C; XmmReg ... & m64 { XmmReg[0,64] = XmmReg[0,64] f- m64; }
:SUBSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64]; }

:SUBSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5C; XmmReg ... & m32 { XmmReg[0,32] = XmmReg[0,32] f- m32; }
:SUBSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32]; }
# Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS
# RESULT <- UnorderedCompare(SRC1[63-0] <> SRC2[63-0])
# (* Set EFLAGS *) CASE (RESULT) OF
#   UNORDERED:    ZF,PF,CF <- 111;
#   GREATER_THAN: ZF,PF,CF <- 000;
#   LESS_THAN:    ZF,PF,CF <- 001;
#   EQUAL:        ZF,PF,CF <- 100;
# ESAC;
# OF,AF,SF <- 0;

:UCOMISD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2E; m64 & XmmReg ...
{
  fucompe(XmmReg[0,64], m64);
}

:UCOMISD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2E; xmmmod=3 & XmmReg1 & XmmReg2
{
  fucompe(XmmReg1[0,64], XmmReg2[0,64]);
}

# Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS
# RESULT <- UnorderedCompare(SRC1[31-0] <> SRC2[31-0])
# (* Set EFLAGS *) CASE (RESULT) OF
#   UNORDERED:    ZF,PF,CF <- 111;
#   GREATER_THAN: ZF,PF,CF <- 000;
#   LESS_THAN:    ZF,PF,CF <- 001;
#   EQUAL:        ZF,PF,CF <- 100;
# ESAC;
# OF,AF,SF <- 0;

:UCOMISS XmmReg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2E; m32 & XmmReg ...
{
  fucompe(XmmReg[0,32], m32);
}

:UCOMISS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2E; xmmmod=3 & XmmReg1 & XmmReg2
{
  fucompe(XmmReg1[0,32], XmmReg2[0,32]);
}
:UNPCKHPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x15; m128 & XmmReg ...
{
  XmmReg[0,64] = XmmReg[64,64];
  XmmReg[64,64] = m128[64,64];
}

:UNPCKHPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = XmmReg1[64,64];
  XmmReg1[64,64] = XmmReg2[64,64];
}

:UNPCKHPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x15; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = XmmReg[64,32];
  XmmReg[64,32] = XmmReg[96,32];
  XmmReg[32,32] = m[64,32];
  XmmReg[96,32] = m[96,32];
}

:UNPCKHPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = XmmReg1[64,32];
  XmmReg1[32,32] = XmmReg2[64,32];
  XmmReg1[64,32] = XmmReg1[96,32]; # XmmReg1 and XmmReg2 may be the same register; the old XmmReg1[64,32] was read above, so only overwrite it here
  XmmReg1[96,32] = XmmReg2[96,32];
}

:UNPCKLPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x14; m128 & XmmReg ...
{
  # XmmReg[0,64] = XmmReg[0,64]; superfluous
  XmmReg[64,64] = m128[0,64];
}

:UNPCKLPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2
{
  # XmmReg1[0,64] = XmmReg1[0,64]; superfluous
  XmmReg1[64,64] = XmmReg2[0,64];
}

:UNPCKLPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x14; m128 & XmmReg ...
{
  local m:16 = m128;
  # XmmReg[0,32] = XmmReg[0,32]; superfluous
  XmmReg[64,32] = XmmReg[32,32];
  XmmReg[32,32] = m[0,32];
  XmmReg[96,32] = m[32,32];
}

:UNPCKLPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2
{
  # XmmReg1[0,32] = XmmReg1[0,32]; superfluous
  XmmReg1[64,32] = XmmReg1[32,32];
  XmmReg1[96,32] = XmmReg2[32,32];
  XmmReg1[32,32] = XmmReg2[0,32]; # write XmmReg1[32,32] last: its old value feeds the two assignments above when XmmReg1 and XmmReg2 alias
}
:XORPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x57; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,64] = ( XmmReg[0,64] ^ m[0,64] );
  XmmReg[64,64] = ( XmmReg[64,64] ^ m[64,64] );
}

:XORPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x57; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,64] = ( XmmReg1[0,64] ^ XmmReg2[0,64] );
  XmmReg1[64,64] = ( XmmReg1[64,64] ^ XmmReg2[64,64] );
}

:XORPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x57; m128 & XmmReg ...
{
  local m:16 = m128;
  XmmReg[0,32] = ( XmmReg[0,32] ^ m[0,32] );
  XmmReg[32,32] = ( XmmReg[32,32] ^ m[32,32] );
  XmmReg[64,32] = ( XmmReg[64,32] ^ m[64,32] );
  XmmReg[96,32] = ( XmmReg[96,32] ^ m[96,32] );
}

:XORPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x57; xmmmod=3 & XmmReg1 & XmmReg2
{
  XmmReg1[0,32] = ( XmmReg1[0,32] ^ XmmReg2[0,32] );
  XmmReg1[32,32] = ( XmmReg1[32,32] ^ XmmReg2[32,32] );
  XmmReg1[64,32] = ( XmmReg1[64,32] ^ XmmReg2[64,32] );
  XmmReg1[96,32] = ( XmmReg1[96,32] ^ XmmReg2[96,32] );
}

####
#### VIA Padlock instructions
####

define pcodeop xstore_available;
define pcodeop xstore;
define pcodeop xcrypt_ecb;
define pcodeop xcrypt_cbc;
define pcodeop xcrypt_ctr;
define pcodeop xcrypt_cfb;
define pcodeop xcrypt_ofb;
define pcodeop montmul;
define pcodeop xsha1;
define pcodeop xsha256;
:XSTORE is vexMode=0 & mandover=0 & byte=0x0F; byte=0xA7; byte=0xC0 {
  EAX = xstore_available(EDX,EDI);
}

:XSTORE.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xC0 {
  EAX = xstore(ECX,EDX,EDI);
  ECX = 0;
}

:XCRYPTECB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xC8 {
  xcrypt_ecb(ECX,EDX,EBX,ESI,EDI);
}

:XCRYPTCBC.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xD0 {
  xcrypt_cbc(ECX,EAX,EDX,EBX,ESI,EDI);
}

:XCRYPTCTR.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xD8 {
  xcrypt_ctr(ECX,EAX,EDX,EBX,ESI,EDI);
}

:XCRYPTCFB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xE0 {
  xcrypt_cfb(ECX,EAX,EDX,EBX,ESI,EDI);
}

:XCRYPTOFB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xE8 {
  xcrypt_ofb(ECX,EAX,EDX,EBX,ESI,EDI);
}

:MONTMUL.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xC0 {
  montmul(EAX,ECX,ESI);
  ECX = 0;
  EDX = 0;
}

:XSHA1.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xC8 {
  xsha1(ECX,ESI,EDI);
  EAX = ECX;
}

:XSHA256.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xD0 {
  xsha256(ECX,ESI,EDI);
  EAX = ECX;
}
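# Register-usage sketch for the PadLock ops above (per VIA's programming
# guide, not modeled in detail here): REP XSTORE writes ECX bytes from the
# hardware RNG to ES:[EDI] with the quality factor in EDX, and the XCRYPT*
# ops process ECX blocks from ES:[ESI] to ES:[EDI] with the key at [EBX],
# the control word at [EDX], and (for the chaining modes) the IV at [EAX].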
####
#### SSE4.1 instructions
####

define pcodeop mpsadbw;
:MPSADBW XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x42; XmmReg ... & m128; imm8 { XmmReg = mpsadbw(XmmReg, m128, imm8:8); }
:MPSADBW XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x42; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = mpsadbw(XmmReg1, XmmReg2, imm8:8); }

define pcodeop phminposuw;
:PHMINPOSUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x41; XmmReg ... & m128 { XmmReg = phminposuw(m128); }
:PHMINPOSUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x41; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = phminposuw(XmmReg2); }

define pcodeop pmuldq;
:PMULDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x28; XmmReg ... & m128 { XmmReg = pmuldq(XmmReg, m128); }
:PMULDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmuldq(XmmReg1, XmmReg2); }

define pcodeop pmulld;
:PMULLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x40; XmmReg ... & m128 { XmmReg = pmulld(XmmReg, m128); }
:PMULLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x40; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulld(XmmReg1, XmmReg2); }
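# PMULDQ is the signed counterpart of PMULUDQ above (sign-extend the low dword
# of each 64-bit lane, then multiply); PMULLD keeps the low 32 bits of each
# 32x32 product.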
define pcodeop dpps;
:DPPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x40; XmmReg ... & m128; imm8 { XmmReg = dpps(XmmReg, m128, imm8:8); }
:DPPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x40; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = dpps(XmmReg1, XmmReg2, imm8:8); }

define pcodeop dppd;
:DPPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x41; XmmReg ... & m128; imm8 { XmmReg = dppd(XmmReg, m128, imm8:8); }
:DPPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x41; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = dppd(XmmReg1, XmmReg2, imm8:8); }
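# For DPPS/DPPD (per the Intel SDM), imm8's high nibble masks which input lanes
# enter the dot product and the low nibble selects which result lanes receive
# the sum; the unselected result lanes are zeroed.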
define pcodeop blendps;
:BLENDPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0C; XmmReg ... & m128; imm8 { XmmReg = blendps(XmmReg, m128, imm8:8); }
:BLENDPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0C; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = blendps(XmmReg1, XmmReg2, imm8:8); }

define pcodeop blendpd;
:BLENDPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0D; XmmReg ... & m128; imm8 { XmmReg = blendpd(XmmReg, m128, imm8:8); }
:BLENDPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0D; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = blendpd(XmmReg1, XmmReg2, imm8:8); }

define pcodeop blendvps;
:BLENDVPS XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x14; XmmReg ... & m128 { XmmReg = blendvps(XmmReg, m128, XMM0); }
:BLENDVPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = blendvps(XmmReg1, XmmReg2, XMM0); }

define pcodeop blendvpd;
:BLENDVPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x15; XmmReg ... & m128 { XmmReg = blendvpd(XmmReg, m128, XMM0); }
:BLENDVPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = blendvpd(XmmReg1, XmmReg2, XMM0); }

define pcodeop pblendvb;
:PBLENDVB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x10; XmmReg ... & m128 { XmmReg = pblendvb(XmmReg, m128, XMM0); }
:PBLENDVB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x10; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pblendvb(XmmReg1, XmmReg2, XMM0); }

define pcodeop pblendw;
:PBLENDW XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0E; XmmReg ... & m128; imm8 { XmmReg = pblendw(XmmReg, m128, imm8:8); }
:PBLENDW XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0E; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = pblendw(XmmReg1, XmmReg2, imm8:8); }
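# The BLENDV*/PBLENDVB forms take XMM0 as an implicit third operand: a
# destination lane is replaced by the source lane when the mask lane's most
# significant bit is set. The BLEND*/PBLENDW forms select lanes by imm8 bits
# instead.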
define pcodeop pminsb;
:PMINSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x38; XmmReg ... & m128 { XmmReg = pminsb(XmmReg, m128); }
:PMINSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x38; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pminsb(XmmReg1, XmmReg2); }

define pcodeop pminuw;
:PMINUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3A; XmmReg ... & m128 { XmmReg = pminuw(XmmReg, m128); }
:PMINUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3A; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pminuw(XmmReg1, XmmReg2); }

define pcodeop pminud;
:PMINUD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3B; XmmReg ... & m128 { XmmReg = pminud(XmmReg, m128); }
:PMINUD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3B; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pminud(XmmReg1, XmmReg2); }

define pcodeop pminsd;
:PMINSD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x39; XmmReg ... & m128 { XmmReg = pminsd(XmmReg, m128); }
:PMINSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x39; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pminsd(XmmReg1, XmmReg2); }

define pcodeop pmaxsb;
:PMAXSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3C; XmmReg ... & m128 { XmmReg = pmaxsb(XmmReg, m128); }
:PMAXSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxsb(XmmReg1, XmmReg2); }

define pcodeop pmaxuw;
:PMAXUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3E; XmmReg ... & m128 { XmmReg = pmaxuw(XmmReg, m128); }
:PMAXUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxuw(XmmReg1, XmmReg2); }

define pcodeop pmaxud;
:PMAXUD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3F; XmmReg ... & m128 { XmmReg = pmaxud(XmmReg, m128); }
:PMAXUD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxud(XmmReg1, XmmReg2); }

define pcodeop pmaxsd;
:PMAXSD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3D; XmmReg ... & m128 { XmmReg = pmaxsd(XmmReg, m128); }
:PMAXSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3D; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxsd(XmmReg1, XmmReg2); }

define pcodeop roundps;
:ROUNDPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x08; XmmReg ... & m128; imm8 { XmmReg = roundps(XmmReg, m128, imm8:8); }
:ROUNDPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x08; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundps(XmmReg1, XmmReg2, imm8:8); }

define pcodeop roundss;
:ROUNDSS XmmReg, m32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0A; XmmReg ... & m32; imm8 { XmmReg = roundss(XmmReg, m32, imm8:8); }
:ROUNDSS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0A; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundss(XmmReg1, XmmReg2, imm8:8); }

define pcodeop roundpd;
:ROUNDPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x09; XmmReg ... & m128; imm8 { XmmReg = roundpd(XmmReg, m128, imm8:8); }
:ROUNDPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x09; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundpd(XmmReg1, XmmReg2, imm8:8); }

define pcodeop roundsd;
:ROUNDSD XmmReg, m64, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0B; XmmReg ... & m64; imm8 { XmmReg = roundsd(XmmReg, m64, imm8:8); }
:ROUNDSD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0B; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundsd(XmmReg1, XmmReg2, imm8:8); }
define pcodeop insertps;
|
|
:INSERTPS XmmReg, m32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x21; XmmReg ... & m32; imm8 { XmmReg = insertps(XmmReg, m32, imm8:8); }
|
|
:INSERTPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x21; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = insertps(XmmReg1, XmmReg2, imm8:8); }
|
|
|
|
define pcodeop pinsrb;
|
|
:PINSRB XmmReg, rm8, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x20; XmmReg ... & rm8; imm8 { XmmReg = pinsrb(XmmReg, rm8, imm8:8); }
|
|
|
|
define pcodeop pinsrd;
|
|
:PINSRD XmmReg, rm32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x22; XmmReg ... & rm32; imm8 { XmmReg = pinsrd(XmmReg, rm32, imm8:8); }
|
|
|
|
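# PINSRQ is the 64-bit-only REX.W promotion of the PINSRD encoding (66 0F 3A 22).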
@ifdef IA64
define pcodeop pinsrq;

:PINSRQ XmmReg, rm64, imm8 is vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x22; XmmReg ... & rm64; imm8 { XmmReg = pinsrq(XmmReg, rm64, imm8:8); }
@endif

define pcodeop extractps;

@ifdef IA64
:EXTRACTPS rm64, XmmReg, imm8 is vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x17; XmmReg ... & rm64; imm8 { rm64 = extractps(XmmReg, imm8:8); }
@endif

:EXTRACTPS rm32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x17; XmmReg ... & rm32 & check_rm32_dest ...; imm8 { rm32 = extractps(XmmReg, imm8:8); build check_rm32_dest; }

define pcodeop pextrb;

:PEXTRB rm8, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x14; XmmReg ... & rm8; imm8 { rm8 = pextrb(XmmReg, imm8:8); }

define pcodeop pextrd;

:PEXTRD rm32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x16; XmmReg ... & rm32 & check_rm32_dest ...; imm8 { rm32 = pextrd(XmmReg, imm8:8); build check_rm32_dest; }

@ifdef IA64
define pcodeop pextrq;

:PEXTRQ rm64, XmmReg, imm8 is vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x16; XmmReg ... & rm64; imm8 { rm64 = pextrq(XmmReg, imm8:8); }
@endif

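# The PMOVSX*/PMOVZX* forms sign- or zero-extend packed elements taken from the low
# part of the source; the memory forms read only as much as the narrower element
# group requires, hence the m64/m32/m16 operand sizes below.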
define pcodeop pmovsxbw;

:PMOVSXBW XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x20; XmmReg ... & m64 { XmmReg = pmovsxbw(XmmReg, m64); }
:PMOVSXBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x20; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbw(XmmReg1, XmmReg2); }

define pcodeop pmovsxbd;

:PMOVSXBD XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x21; XmmReg ... & m32 { XmmReg = pmovsxbd(XmmReg, m32); }
:PMOVSXBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x21; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbd(XmmReg1, XmmReg2); }

define pcodeop pmovsxbq;

:PMOVSXBQ XmmReg, m16 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x22; XmmReg ... & m16 { XmmReg = pmovsxbq(XmmReg, m16); }
:PMOVSXBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x22; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbq(XmmReg1, XmmReg2); }

define pcodeop pmovsxwd;

:PMOVSXWD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x23; XmmReg ... & m64 { XmmReg = pmovsxwd(XmmReg, m64); }
:PMOVSXWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x23; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxwd(XmmReg1, XmmReg2); }

define pcodeop pmovsxwq;

:PMOVSXWQ XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x24; XmmReg ... & m32 { XmmReg = pmovsxwq(XmmReg, m32); }
:PMOVSXWQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x24; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxwq(XmmReg1, XmmReg2); }

define pcodeop pmovsxdq;

:PMOVSXDQ XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x25; XmmReg ... & m64 { XmmReg = pmovsxdq(XmmReg, m64); }
:PMOVSXDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x25; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxdq(XmmReg1, XmmReg2); }

define pcodeop pmovzxbw;

:PMOVZXBW XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x30; XmmReg ... & m64 { XmmReg = pmovzxbw(XmmReg, m64); }
:PMOVZXBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x30; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbw(XmmReg1, XmmReg2); }

define pcodeop pmovzxbd;

:PMOVZXBD XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x31; XmmReg ... & m32 { XmmReg = pmovzxbd(XmmReg, m32); }
:PMOVZXBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x31; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbd(XmmReg1, XmmReg2); }

define pcodeop pmovzxbq;

:PMOVZXBQ XmmReg, m16 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x32; XmmReg ... & m16 { XmmReg = pmovzxbq(XmmReg, m16); }
:PMOVZXBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x32; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbq(XmmReg1, XmmReg2); }

define pcodeop pmovzxwd;

:PMOVZXWD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x33; XmmReg ... & m64 { XmmReg = pmovzxwd(XmmReg, m64); }
:PMOVZXWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x33; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxwd(XmmReg1, XmmReg2); }

define pcodeop pmovzxwq;

:PMOVZXWQ XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x34; XmmReg ... & m32 { XmmReg = pmovzxwq(XmmReg, m32); }
:PMOVZXWQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x34; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxwq(XmmReg1, XmmReg2); }

define pcodeop pmovzxdq;

:PMOVZXDQ XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x35; XmmReg ... & m64 { XmmReg = pmovzxdq(XmmReg, m64); }
:PMOVZXDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x35; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxdq(XmmReg1, XmmReg2); }

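# PTEST is modeled directly rather than as a pcodeop: ZF is set when (dest AND src)
# is zero, CF is set when (src AND NOT dest) is zero, and AF/OF/PF/SF are cleared.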
:PTEST XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x17; XmmReg ... & m128 {
  local tmp = m128 & XmmReg;
  ZF = tmp == 0;
  local tmp2 = m128 & ~XmmReg;
  CF = tmp2 == 0;
  AF = 0;
  OF = 0;
  PF = 0;
  SF = 0;
}

:PTEST XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x17; xmmmod=3 & XmmReg1 & XmmReg2 {
  local tmp = XmmReg2 & XmmReg1;
  ZF = tmp == 0;
  local tmp2 = XmmReg2 & ~XmmReg1;
  CF = tmp2 == 0;
  AF = 0;
  OF = 0;
  PF = 0;
  SF = 0;
}

define pcodeop pcmpeqq;

:PCMPEQQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; XmmReg ... & m128 { XmmReg = pcmpeqq(XmmReg, m128); }
:PCMPEQQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pcmpeqq(XmmReg1, XmmReg2); }

define pcodeop packusdw;

:PACKUSDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2B; XmmReg ... & m128 { XmmReg = packusdw(XmmReg, m128); }
:PACKUSDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2B; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = packusdw(XmmReg1, XmmReg2); }

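# MOVNTDQA is a non-temporal (streaming) 128-bit load; the cache hint has no p-code
# equivalent, so the load is kept opaque.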
define pcodeop movntdqa;

:MOVNTDQA XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2A; XmmReg ... & m128 { XmmReg = movntdqa(XmmReg, m128); }

####
#### SSE4.2 instructions
####

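# CRC32 (F2 0F 38 F0/F1) accumulates a CRC-32C (Castagnoli polynomial) checksum;
# the computation itself is left opaque.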
define pcodeop crc32;

:CRC32 Reg32, rm8 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF0; Reg32 ... & check_Reg32_dest ... & rm8 { Reg32 = crc32(Reg32, rm8); build check_Reg32_dest; }
:CRC32 Reg32, rm16 is vexMode=0 & opsize=0 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF1; Reg32 ... & check_Reg32_dest ... & rm16 { Reg32 = crc32(Reg32, rm16); build check_Reg32_dest; }
:CRC32 Reg32, rm32 is vexMode=0 & opsize=1 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF1; Reg32 ... & check_Reg32_dest ... & rm32 { Reg32 = crc32(Reg32, rm32); build check_Reg32_dest; }

@ifdef IA64
:CRC32 Reg32, rm8 is vexMode=0 & opsize=1 & $(PRE_F2) & $(REX) & byte=0x0F; byte=0x38; byte=0xF0; Reg32 ... & check_Reg32_dest ... & rm8 { Reg32 = crc32(Reg32, rm8); build check_Reg32_dest; }
:CRC32 Reg64, rm8 is vexMode=0 & opsize=2 & $(PRE_F2) & $(REX_W) & byte=0x0F; byte=0x38; byte=0xF0; Reg64 ... & rm8 { Reg64 = crc32(Reg64, rm8); }
:CRC32 Reg64, rm64 is vexMode=0 & opsize=2 & $(PRE_F2) & $(REX_W) & byte=0x0F; byte=0x38; byte=0xF1; Reg64 ... & rm64 { Reg64 = crc32(Reg64, rm64); }
@endif

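# Packed string compares: PCMPESTRI/PCMPISTRI return an index in ECX, while
# PCMPESTRM/PCMPISTRM return a mask in XMM0. The EFLAGS results these instructions
# produce are not modeled here.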
define pcodeop pcmpestri;

:PCMPESTRI XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x61; XmmReg ... & m128; imm8 { ECX = pcmpestri(XmmReg, m128, imm8:8); }
:PCMPESTRI XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x61; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { ECX = pcmpestri(XmmReg1, XmmReg2, imm8:8); }

define pcodeop pcmpestrm;

:PCMPESTRM XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x60; XmmReg ... & m128; imm8 { XMM0 = pcmpestrm(XmmReg, m128, imm8:8); }
:PCMPESTRM XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x60; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XMM0 = pcmpestrm(XmmReg1, XmmReg2, imm8:8); }

define pcodeop pcmpistri;

:PCMPISTRI XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x63; XmmReg ... & m128; imm8 { ECX = pcmpistri(XmmReg, m128, imm8:8); }
:PCMPISTRI XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x63; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { ECX = pcmpistri(XmmReg1, XmmReg2, imm8:8); }

define pcodeop pcmpistrm;

:PCMPISTRM XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x62; XmmReg ... & m128; imm8 { XMM0 = pcmpistrm(XmmReg, m128, imm8:8); }
:PCMPISTRM XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x62; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XMM0 = pcmpistrm(XmmReg1, XmmReg2, imm8:8); }

define pcodeop pcmpgtq;

:PCMPGTQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x37; XmmReg ... & m128 { XmmReg = pcmpgtq(XmmReg, m128); }
:PCMPGTQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x37; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pcmpgtq(XmmReg1, XmmReg2); }

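# In hardware POPCNT also writes EFLAGS (ZF reflects a zero source, the remaining
# arithmetic flags are cleared); those side effects are not modeled here.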
define pcodeop popcnt;

:POPCNT Reg16, rm16 is vexMode=0 & opsize=0 & $(PRE_F3) & byte=0x0F; byte=0xB8; Reg16 ... & rm16 { Reg16 = popcnt(rm16); }
:POPCNT Reg32, rm32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0F; byte=0xB8; Reg32 ... & check_Reg32_dest ... & rm32 { Reg32 = popcnt(rm32); build check_Reg32_dest; }

@ifdef IA64
:POPCNT Reg64, rm64 is vexMode=0 & opsize=2 & $(PRE_F3) & $(REX_W) & byte=0x0F; byte=0xB8; Reg64 ... & rm64 { Reg64 = popcnt(rm64); }
@endif

####
#### AESNI instructions
####

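# The VEX-encoded AES forms zero the upper bits of the destination YMM register,
# expressed below as YmmReg1 = zext(XmmReg1).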
define pcodeop aesdec;

:AESDEC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xde; XmmReg1 ... & XmmReg2_m128 {
  XmmReg1 = aesdec(XmmReg1, XmmReg2_m128);
}

:VAESDEC XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xde; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
  XmmReg1 = aesdec(vexVVVV_XmmReg, XmmReg2_m128);
  YmmReg1 = zext(XmmReg1);
}

define pcodeop aesdeclast;

:AESDECLAST XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdf; XmmReg1 ... & XmmReg2_m128 {
  XmmReg1 = aesdeclast(XmmReg1, XmmReg2_m128);
}

:VAESDECLAST XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdf; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
  XmmReg1 = aesdeclast(vexVVVV_XmmReg, XmmReg2_m128);
  YmmReg1 = zext(XmmReg1);
}

define pcodeop aesenc;

:AESENC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdc; XmmReg1 ... & XmmReg2_m128 {
  XmmReg1 = aesenc(XmmReg1, XmmReg2_m128);
}

:VAESENC XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdc; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
  XmmReg1 = aesenc(vexVVVV_XmmReg, XmmReg2_m128);
  YmmReg1 = zext(XmmReg1);
}

define pcodeop aesenclast;

:AESENCLAST XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdd; XmmReg1 ... & XmmReg2_m128 {
  XmmReg1 = aesenclast(XmmReg1, XmmReg2_m128);
}

:VAESENCLAST XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdd; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
  XmmReg1 = aesenclast(vexVVVV_XmmReg, XmmReg2_m128);
  YmmReg1 = zext(XmmReg1);
}

define pcodeop aesimc;

:AESIMC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdb; XmmReg1 ... & XmmReg2_m128 {
  XmmReg1 = aesimc(XmmReg2_m128);
}

:VAESIMC XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0xdb; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 {
  XmmReg1 = aesimc(XmmReg2_m128);
  YmmReg1 = zext(XmmReg1);
}

define pcodeop aeskeygenassist;

:AESKEYGENASSIST XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0xdf; XmmReg1 ... & XmmReg2_m128; imm8 {
  XmmReg1 = aeskeygenassist(XmmReg2_m128, imm8:1);
}

:VAESKEYGENASSIST XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0xdf; (XmmReg1 & YmmReg1) ... & XmmReg2_m128; imm8 {
  XmmReg1 = aeskeygenassist(XmmReg2_m128, imm8:1);
  YmmReg1 = zext(XmmReg1);
}

####
#### Deprecated 3DNow! instructions
####

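# All 3DNow! instructions share the two-byte opcode 0F 0F; the operation is selected
# by a trailing suffix byte (matched via suffix3D) rather than by the opcode itself.
# Each operation is modeled as an opaque pcodeop.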
define pcodeop PackedIntToFloatingDwordConv;

:PI2FD mmxreg, m64 is vexMode=0 & suffix3D=0x0D & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedIntToFloatingDwordConv(mmxreg, m64); }
:PI2FD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x0D & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedIntToFloatingDwordConv(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingToIntDwordConv;

:PF2ID mmxreg, m64 is vexMode=0 & suffix3D=0x1D & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingToIntDwordConv(mmxreg, m64); }
:PF2ID mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x1D & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingToIntDwordConv(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingCompareGE;

:PFCMPGE mmxreg, m64 is vexMode=0 & suffix3D=0x90 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingCompareGE(mmxreg, m64); }
:PFCMPGE mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x90 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareGE(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingCompareGT;

:PFCMPGT mmxreg, m64 is vexMode=0 & suffix3D=0xA0 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingCompareGT(mmxreg, m64); }
:PFCMPGT mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA0 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareGT(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingCompareEQ;

:PFCMPEQ mmxreg, m64 is vexMode=0 & suffix3D=0xB0 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingCompareEQ(mmxreg, m64); }
:PFCMPEQ mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB0 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareEQ(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingAccumulate;

:PFACC mmxreg, m64 is vexMode=0 & suffix3D=0xAE & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingAccumulate(mmxreg, m64); }
:PFACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xAE & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingAccumulate(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingADD;

:PFADD mmxreg, m64 is vexMode=0 & suffix3D=0x9E & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingADD(mmxreg, m64); }
:PFADD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x9E & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingADD(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingSUB;

:PFSUB mmxreg, m64 is vexMode=0 & suffix3D=0x9A & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingSUB(mmxreg, m64); }
:PFSUB mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x9A & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingSUB(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingSUBR;

:PFSUBR mmxreg, m64 is vexMode=0 & suffix3D=0xAA & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingSUBR(mmxreg, m64); }
:PFSUBR mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xAA & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingSUBR(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingMIN;

:PFMIN mmxreg, m64 is vexMode=0 & suffix3D=0x94 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMIN(mmxreg, m64); }
:PFMIN mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x94 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMIN(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingMAX;

:PFMAX mmxreg, m64 is vexMode=0 & suffix3D=0xA4 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMAX(mmxreg, m64); }
:PFMAX mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA4 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMAX(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingMUL;

:PFMUL mmxreg, m64 is vexMode=0 & suffix3D=0xB4 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMUL(mmxreg, m64); }
:PFMUL mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB4 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMUL(mmxreg1, mmxreg2); }

define pcodeop FloatingReciprocalAprox;

:PFRCP mmxreg, m64 is vexMode=0 & suffix3D=0x96 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = FloatingReciprocalAprox(mmxreg, m64); }
:PFRCP mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x96 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = FloatingReciprocalAprox(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingReciprocalSQRAprox;

:PFRSQRT mmxreg, m64 is vexMode=0 & suffix3D=0x97 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalSQRAprox(mmxreg, m64); }
:PFRSQRT mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x97 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalSQRAprox(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingReciprocalIter1;

:PFRCPIT1 mmxreg, m64 is vexMode=0 & suffix3D=0xA6 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalIter1(mmxreg, m64); }
:PFRCPIT1 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA6 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalIter1(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingReciprocalSQRIter1;

:PFRSQIT1 mmxreg, m64 is vexMode=0 & suffix3D=0xA7 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalSQRIter1(mmxreg, m64); }
:PFRSQIT1 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA7 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalSQRIter1(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingReciprocalIter2;

:PFRCPIT2 mmxreg, m64 is vexMode=0 & suffix3D=0xB6 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalIter2(mmxreg, m64); }
:PFRCPIT2 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB6 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalIter2(mmxreg1, mmxreg2); }

define pcodeop PackedAverageUnsignedBytes;

:PAVGUSB mmxreg, m64 is vexMode=0 & suffix3D=0xBF & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedAverageUnsignedBytes(mmxreg, m64); }
:PAVGUSB mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xBF & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedAverageUnsignedBytes(mmxreg1, mmxreg2); }

define pcodeop PackedMultiplyHighRoundedWord;

:PMULHRW mmxreg, m64 is vexMode=0 & suffix3D=0xB7 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedMultiplyHighRoundedWord(mmxreg, m64); }
:PMULHRW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB7 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedMultiplyHighRoundedWord(mmxreg1, mmxreg2); }

define pcodeop FastExitMediaState;

:FEMMS is vexMode=0 & byte=0x0F; byte=0x0E { FastExitMediaState(); }

#define pcodeop PrefetchDataIntoCache;
#:PREFETCH m8 is vexMode=0 & byte=0x0F; byte=0x18; m8 { PrefetchDataIntoCache(m8); }

#define pcodeop PrefetchDataIntoCacheWrite;
#:PREFETCHW m8 is vexMode=0 & byte=0x0F; byte=0x0D; reg_opcode=1 ... & m8 { PrefetchDataIntoCacheWrite(m8); }

# 3DNow! extensions

define pcodeop PackedFloatingToIntWord;

:PF2IW mmxreg, m64 is vexMode=0 & suffix3D=0x1C & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingToIntWord(mmxreg, m64); }
:PF2IW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x1C & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingToIntWord(mmxreg1, mmxreg2); }

define pcodeop PackedIntToFloatingWord;

:PI2FW mmxreg, m64 is vexMode=0 & suffix3D=0x0C & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedIntToFloatingWord(mmxreg, m64); }
:PI2FW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x0C & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedIntToFloatingWord(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingNegAccumulate;

:PFNACC mmxreg, m64 is vexMode=0 & suffix3D=0x8A & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingNegAccumulate(mmxreg, m64); }
:PFNACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x8A & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingNegAccumulate(mmxreg1, mmxreg2); }

define pcodeop PackedFloatingPosNegAccumulate;

:PFPNACC mmxreg, m64 is vexMode=0 & suffix3D=0x8E & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingPosNegAccumulate(mmxreg, m64); }
:PFPNACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x8E & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingPosNegAccumulate(mmxreg1, mmxreg2); }

define pcodeop PackedSwapDWords;

:PSWAPD mmxreg, m64 is vexMode=0 & suffix3D=0xBB & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedSwapDWords(mmxreg, m64); }
:PSWAPD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xBB & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedSwapDWords(mmxreg1, mmxreg2); }

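# In hardware MASKMOVQ performs a byte-masked store to [(E)DI]; the store and the
# implicit addressing are not modeled, so the operation is kept opaque on its
# register operands.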
define pcodeop MaskedMoveQWord;

:MASKMOVQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF7; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = MaskedMoveQWord(mmxreg1, mmxreg2); }
