Merge remote-tracking branch 'origin/GP-1185_x64_vector_ops--SQUASHED'

Ryan Kurtz 2021-08-17 13:57:19 -04:00
commit 54c426f117
6 changed files with 404 additions and 102 deletions


@@ -1224,36 +1224,6 @@ define pcodeop vmovddup_avx ;
# TODO ZmmReg1 = zext(YmmReg1)
}
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61667
define pcodeop vmovdqa_avx ;
:VMOVDQA XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; (XmmReg1 & YmmReg1) ... & XmmReg2_m128
{
local tmp:16 = vmovdqa_avx( XmmReg2_m128 );
YmmReg1 = zext(tmp);
# TODO ZmmReg1 = zext(XmmReg1)
}
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61669
:VMOVDQA XmmReg2_m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; XmmReg1 ... & XmmReg2_m128
{
XmmReg2_m128 = vmovdqa_avx( XmmReg1 );
# TODO ZmmReg2 = zext(XmmReg2)
}
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61671
:VMOVDQA YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; YmmReg1 ... & YmmReg2_m256
{
YmmReg1 = vmovdqa_avx( YmmReg2_m256 );
# TODO ZmmReg1 = zext(YmmReg1)
}
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61673
:VMOVDQA YmmReg2_m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; YmmReg1 ... & YmmReg2_m256
{
YmmReg2_m256 = vmovdqa_avx( YmmReg1 );
# TODO ZmmReg2 = zext(YmmReg2)
}
# MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61930
define pcodeop vmovdqu_avx ;
:VMOVDQU XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; (XmmReg1 & YmmReg1) ... & XmmReg2_m128
@@ -1891,15 +1861,6 @@ define pcodeop vpcmpeqd_avx ;
# TODO ZmmReg1 = zext(XmmReg1)
}
# PCMPEQQ 4-250 PAGE 1370 LINE 71169
define pcodeop vpcmpeqq_avx ;
:VPCMPEQQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x29; (XmmReg1 & YmmReg1) ... & XmmReg2_m128
{
local tmp:16 = vpcmpeqq_avx( vexVVVV_XmmReg, XmmReg2_m128 );
YmmReg1 = zext(tmp);
# TODO ZmmReg1 = zext(XmmReg1)
}
# PCMPESTRI 4-253 PAGE 1373 LINE 71311
define pcodeop vpcmpestri_avx ;
:VPCMPESTRI XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A); byte=0x61; XmmReg1 ... & XmmReg2_m128; imm8
@@ -2234,14 +2195,6 @@ define pcodeop vpminud_avx ;
# TODO ZmmReg1 = zext(XmmReg1)
}
# PMOVMSKB 4-338 PAGE 1458 LINE 75651
define pcodeop vpmovmskb_avx ;
:VPMOVMSKB Reg32, XmmReg2 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xD7; Reg32 & (mod=0x3 & XmmReg2)
{
Reg32 = vpmovmskb_avx( XmmReg2 );
# TODO Reg64 = zext(Reg32)
}
# PMOVSX 4-340 PAGE 1460 LINE 75770
define pcodeop vpmovsxbw_avx ;
:VPMOVSXBW XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x20; (XmmReg1 & YmmReg1) ... & XmmReg2_m64


@@ -218,14 +218,6 @@ define pcodeop vpcmpeqd_avx2 ;
# TODO ZmmReg1 = zext(YmmReg1)
}
# PCMPEQQ 4-250 PAGE 1370 LINE 71171
define pcodeop vpcmpeqq_avx2 ;
:VPCMPEQQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x29; YmmReg1 ... & YmmReg2_m256
{
YmmReg1 = vpcmpeqq_avx2( vexVVVV_YmmReg, YmmReg2_m256 );
# TODO ZmmReg1 = zext(YmmReg1)
}
# PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71508
define pcodeop vpcmpgtb_avx2 ;
:VPCMPGTB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x64; YmmReg1 ... & YmmReg2_m256
@@ -418,14 +410,6 @@ define pcodeop vpminud_avx2 ;
# TODO ZmmReg1 = zext(YmmReg1)
}
# PMOVMSKB 4-338 PAGE 1458 LINE 75655
define pcodeop vpmovmskb_avx2 ;
:VPMOVMSKB Reg32, YmmReg2 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xD7; Reg32 & (mod=0x3 & YmmReg2)
{
Reg32 = vpmovmskb_avx2( YmmReg2 );
# TODO Reg64 = zext(Reg32)
}
# PMOVSX 4-340 PAGE 1460 LINE 75782
define pcodeop vpmovsxbw_avx2 ;
:VPMOVSXBW YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x20; YmmReg1 ... & XmmReg2_m128


@@ -109,6 +109,15 @@ define pcodeop vgatherqps ;
}
@endif
# PCMPEQQ 4-250 PAGE 1370 LINE 71171
:VPCMPEQQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x29; YmmReg1 ... & YmmReg2_m256
{
YmmReg1[0,64] = zext(vexVVVV_YmmReg[0,64] == YmmReg2_m256[0,64]) * 0xffffffffffffffff:8;
YmmReg1[64,64] = zext(vexVVVV_YmmReg[64,64] == YmmReg2_m256[64,64]) * 0xffffffffffffffff:8;
YmmReg1[128,64] = zext(vexVVVV_YmmReg[128,64] == YmmReg2_m256[128,64]) * 0xffffffffffffffff:8;
YmmReg1[192,64] = zext(vexVVVV_YmmReg[192,64] == YmmReg2_m256[192,64]) * 0xffffffffffffffff:8;
# TODO ZmmReg1 = zext(YmmReg1)
}
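The zext(a == b) * 0xff...ff idiom above turns each quadword comparison into an all-ones or all-zeros lane mask. As a reference, a minimal C sketch of one 64-bit lane (pcmpeqq_lane is a hypothetical name, not part of this spec):

#include <stdint.h>

/* One 64-bit lane of (V)PCMPEQQ: the 0/1 comparison result multiplied by
   an all-ones constant yields the lane mask, exactly as
   zext(a == b) * 0xffffffffffffffff:8 does in the p-code above. */
static uint64_t pcmpeqq_lane(uint64_t a, uint64_t b) {
    return (uint64_t)(a == b) * UINT64_MAX;
}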
# VPGATHERDD/VPGATHERQD 5-273 PAGE 2097 LINE 107884
define pcodeop vpgatherdd ;
@@ -201,3 +210,43 @@ define pcodeop vpgatherqq ;
@endif
# PMOVMSKB 4-338 PAGE 1458 LINE 75655
:VPMOVMSKB Reg32, YmmReg2 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xD7; Reg32 & (mod=0x3 & YmmReg2) & check_Reg32_dest
{
local byte_mask:4 = 0:4;
byte_mask[0,1] = YmmReg2[7,1];
byte_mask[1,1] = YmmReg2[15,1];
byte_mask[2,1] = YmmReg2[23,1];
byte_mask[3,1] = YmmReg2[31,1];
byte_mask[4,1] = YmmReg2[39,1];
byte_mask[5,1] = YmmReg2[47,1];
byte_mask[6,1] = YmmReg2[55,1];
byte_mask[7,1] = YmmReg2[63,1];
byte_mask[8,1] = YmmReg2[71,1];
byte_mask[9,1] = YmmReg2[79,1];
byte_mask[10,1] = YmmReg2[87,1];
byte_mask[11,1] = YmmReg2[95,1];
byte_mask[12,1] = YmmReg2[103,1];
byte_mask[13,1] = YmmReg2[111,1];
byte_mask[14,1] = YmmReg2[119,1];
byte_mask[15,1] = YmmReg2[127,1];
byte_mask[16,1] = YmmReg2[135,1];
byte_mask[17,1] = YmmReg2[143,1];
byte_mask[18,1] = YmmReg2[151,1];
byte_mask[19,1] = YmmReg2[159,1];
byte_mask[20,1] = YmmReg2[167,1];
byte_mask[21,1] = YmmReg2[175,1];
byte_mask[22,1] = YmmReg2[183,1];
byte_mask[23,1] = YmmReg2[191,1];
byte_mask[24,1] = YmmReg2[199,1];
byte_mask[25,1] = YmmReg2[207,1];
byte_mask[26,1] = YmmReg2[215,1];
byte_mask[27,1] = YmmReg2[223,1];
byte_mask[28,1] = YmmReg2[231,1];
byte_mask[29,1] = YmmReg2[239,1];
byte_mask[30,1] = YmmReg2[247,1];
byte_mask[31,1] = YmmReg2[255,1];
Reg32 = zext(byte_mask);
build check_Reg32_dest;
}
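The bit-by-bit assignments above gather the sign bit (bit 7) of each source byte into consecutive bits of the destination. A compact C model of the same computation, viewing the source register as a byte array (pmovmskb_ref is a hypothetical name):

#include <stdint.h>

/* Reference model of (V)PMOVMSKB: bit i of the result is the top bit of
   source byte i. n is 8 for MMX, 16 for XMM, and 32 for YMM sources. */
static uint32_t pmovmskb_ref(const uint8_t *src, int n) {
    uint32_t mask = 0;
    for (int i = 0; i < n; i++) {
        mask |= (uint32_t)(src[i] >> 7) << i;  /* bit 7 of byte i -> mask bit i */
    }
    return mask;
}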


@@ -1,3 +1,39 @@
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61667
# Note: we do not model the exception generated if VMOVDQA is used with a memory operand which is not 16-byte aligned
:VMOVDQA XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; (XmmReg1 & YmmReg1) ... & XmmReg2_m128
{
YmmReg1 = zext(XmmReg2_m128);
# TODO ZmmReg1 = zext(XmmReg1)
}
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61669
:VMOVDQA XmmReg2, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; XmmReg1 & (mod = 3 & XmmReg2 & YmmReg2)
{
YmmReg2 = zext(XmmReg1);
# TODO ZmmReg2 = zext(XmmReg2)
}
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61669
:VMOVDQA m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; XmmReg1 ... & m128
{
m128 = XmmReg1;
# TODO ZmmReg2 = zext(XmmReg2)
}
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61671
:VMOVDQA YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; YmmReg1 ... & YmmReg2_m256
{
YmmReg1 = YmmReg2_m256;
# TODO ZmmReg1 = zext(YmmReg1)
}
# MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61673
:VMOVDQA YmmReg2_m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; YmmReg1 ... & YmmReg2_m256
{
YmmReg2_m256 = YmmReg1;
# TODO ZmmReg2 = zext(YmmReg2)
}
# MOVSD 4-111 PAGE 1231 LINE 63970
:VMOVSD XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x10; XmmReg1 & YmmReg1 & (mod=0x3 & XmmReg2)
{
@@ -70,3 +106,40 @@
{
YmmReg2_m256 = YmmReg1;
}
# PCMPEQQ 4-250 PAGE 1370 LINE 71169
:VPCMPEQQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x29; (XmmReg1 & YmmReg1) ... & XmmReg2_m128
{
XmmReg1[0,64] = zext(vexVVVV_XmmReg[0,64] == XmmReg2_m128[0,64]) * 0xffffffffffffffff:8;
XmmReg1[64,64] = zext(vexVVVV_XmmReg[64,64] == XmmReg2_m128[64,64]) * 0xffffffffffffffff:8;
YmmReg1 = zext(XmmReg1);
# TODO ZmmReg1 = zext(XmmReg1)
}
# PMOVMSKB 4-338 PAGE 1458 LINE 75651
:VPMOVMSKB Reg32, XmmReg2 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xD7; Reg32 & (mod=0x3 & XmmReg2) & check_Reg32_dest
{
local byte_mask:2 = 0:2;
byte_mask[0,1] = XmmReg2[7,1];
byte_mask[1,1] = XmmReg2[15,1];
byte_mask[2,1] = XmmReg2[23,1];
byte_mask[3,1] = XmmReg2[31,1];
byte_mask[4,1] = XmmReg2[39,1];
byte_mask[5,1] = XmmReg2[47,1];
byte_mask[6,1] = XmmReg2[55,1];
byte_mask[7,1] = XmmReg2[63,1];
byte_mask[8,1] = XmmReg2[71,1];
byte_mask[9,1] = XmmReg2[79,1];
byte_mask[10,1] = XmmReg2[87,1];
byte_mask[11,1] = XmmReg2[95,1];
byte_mask[12,1] = XmmReg2[103,1];
byte_mask[13,1] = XmmReg2[111,1];
byte_mask[14,1] = XmmReg2[119,1];
byte_mask[15,1] = XmmReg2[127,1];
Reg32 = zext(byte_mask);
build check_Reg32_dest;
}


@@ -6380,7 +6380,6 @@ define pcodeop pavgw;
mmxreg1[32,32] = zext(mmxreg1[32,32] == mmxreg2[32,32]) * 0xFFFFFFFF;
}
define pcodeop pcmpeqb;
:PCMPEQB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x74; m128 & XmmReg ...
{
local m:16 = m128;
@@ -6690,11 +6689,73 @@ define pcodeop pmaxsw;
:PMAXSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEE; XmmReg ... & m128 { XmmReg = pmaxsw(XmmReg, m128); }
:PMAXSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEE; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxsw(XmmReg1, XmmReg2); }
define pcodeop pmaxub;
:PMAXUB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDE; mmxreg ... & m64 { mmxreg = pmaxub(mmxreg, m64); }
:PMAXUB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDE; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmaxub(mmxreg1, mmxreg2); }
:PMAXUB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDE; XmmReg ... & m128 { XmmReg = pmaxub(XmmReg, m128); }
:PMAXUB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDE; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxub(XmmReg1, XmmReg2); }
macro assignUnsignedGreater(dest, x, y){
dest = (zext(x >= y) * x) + (zext(x < y) * y);
}
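Because exactly one of x >= y and x < y is true, exactly one multiplier is 1 and the sum selects the larger operand without a branch. The same selection on one 8-bit lane in C (umax8 is a hypothetical name):

#include <stdint.h>

/* Branchless unsigned max, mirroring assignUnsignedGreater: exactly one
   comparison term is 1, so the sum is x when x >= y and y otherwise. */
static uint8_t umax8(uint8_t x, uint8_t y) {
    return (uint8_t)((uint8_t)(x >= y) * x + (uint8_t)(x < y) * y);
}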
:PMAXUB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDE; mmxreg ... & m64
{
assignUnsignedGreater(mmxreg[0,8],mmxreg[0,8],m64[0,8]);
assignUnsignedGreater(mmxreg[8,8],mmxreg[8,8],m64[8,8]);
assignUnsignedGreater(mmxreg[16,8],mmxreg[16,8],m64[16,8]);
assignUnsignedGreater(mmxreg[24,8],mmxreg[24,8],m64[24,8]);
assignUnsignedGreater(mmxreg[32,8],mmxreg[32,8],m64[32,8]);
assignUnsignedGreater(mmxreg[40,8],mmxreg[40,8],m64[40,8]);
assignUnsignedGreater(mmxreg[48,8],mmxreg[48,8],m64[48,8]);
assignUnsignedGreater(mmxreg[56,8],mmxreg[56,8],m64[56,8]);
}
:PMAXUB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDE; mmxmod = 3 & mmxreg1 & mmxreg2
{
assignUnsignedGreater(mmxreg1[0,8],mmxreg1[0,8],mmxreg2[0,8]);
assignUnsignedGreater(mmxreg1[8,8],mmxreg1[8,8],mmxreg2[8,8]);
assignUnsignedGreater(mmxreg1[16,8],mmxreg1[16,8],mmxreg2[16,8]);
assignUnsignedGreater(mmxreg1[24,8],mmxreg1[24,8],mmxreg2[24,8]);
assignUnsignedGreater(mmxreg1[32,8],mmxreg1[32,8],mmxreg2[32,8]);
assignUnsignedGreater(mmxreg1[40,8],mmxreg1[40,8],mmxreg2[40,8]);
assignUnsignedGreater(mmxreg1[48,8],mmxreg1[48,8],mmxreg2[48,8]);
assignUnsignedGreater(mmxreg1[56,8],mmxreg1[56,8],mmxreg2[56,8]);
}
:PMAXUB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDE; XmmReg ... & m128
{
assignUnsignedGreater(XmmReg[0,8],XmmReg[0,8],m128[0,8]);
assignUnsignedGreater(XmmReg[8,8],XmmReg[8,8],m128[8,8]);
assignUnsignedGreater(XmmReg[16,8],XmmReg[16,8],m128[16,8]);
assignUnsignedGreater(XmmReg[24,8],XmmReg[24,8],m128[24,8]);
assignUnsignedGreater(XmmReg[32,8],XmmReg[32,8],m128[32,8]);
assignUnsignedGreater(XmmReg[40,8],XmmReg[40,8],m128[40,8]);
assignUnsignedGreater(XmmReg[48,8],XmmReg[48,8],m128[48,8]);
assignUnsignedGreater(XmmReg[56,8],XmmReg[56,8],m128[56,8]);
assignUnsignedGreater(XmmReg[64,8],XmmReg[64,8],m128[64,8]);
assignUnsignedGreater(XmmReg[72,8],XmmReg[72,8],m128[72,8]);
assignUnsignedGreater(XmmReg[80,8],XmmReg[80,8],m128[80,8]);
assignUnsignedGreater(XmmReg[88,8],XmmReg[88,8],m128[88,8]);
assignUnsignedGreater(XmmReg[96,8],XmmReg[96,8],m128[96,8]);
assignUnsignedGreater(XmmReg[104,8],XmmReg[104,8],m128[104,8]);
assignUnsignedGreater(XmmReg[112,8],XmmReg[112,8],m128[112,8]);
assignUnsignedGreater(XmmReg[120,8],XmmReg[120,8],m128[120,8]);
}
:PMAXUB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDE; xmmmod = 3 & XmmReg1 & XmmReg2
{
assignUnsignedGreater(XmmReg1[0,8],XmmReg1[0,8],XmmReg2[0,8]);
assignUnsignedGreater(XmmReg1[8,8],XmmReg1[8,8],XmmReg2[8,8]);
assignUnsignedGreater(XmmReg1[16,8],XmmReg1[16,8],XmmReg2[16,8]);
assignUnsignedGreater(XmmReg1[24,8],XmmReg1[24,8],XmmReg2[24,8]);
assignUnsignedGreater(XmmReg1[32,8],XmmReg1[32,8],XmmReg2[32,8]);
assignUnsignedGreater(XmmReg1[40,8],XmmReg1[40,8],XmmReg2[40,8]);
assignUnsignedGreater(XmmReg1[48,8],XmmReg1[48,8],XmmReg2[48,8]);
assignUnsignedGreater(XmmReg1[56,8],XmmReg1[56,8],XmmReg2[56,8]);
assignUnsignedGreater(XmmReg1[64,8],XmmReg1[64,8],XmmReg2[64,8]);
assignUnsignedGreater(XmmReg1[72,8],XmmReg1[72,8],XmmReg2[72,8]);
assignUnsignedGreater(XmmReg1[80,8],XmmReg1[80,8],XmmReg2[80,8]);
assignUnsignedGreater(XmmReg1[88,8],XmmReg1[88,8],XmmReg2[88,8]);
assignUnsignedGreater(XmmReg1[96,8],XmmReg1[96,8],XmmReg2[96,8]);
assignUnsignedGreater(XmmReg1[104,8],XmmReg1[104,8],XmmReg2[104,8]);
assignUnsignedGreater(XmmReg1[112,8],XmmReg1[112,8],XmmReg2[112,8]);
assignUnsignedGreater(XmmReg1[120,8],XmmReg1[120,8],XmmReg2[120,8]);
}
define pcodeop pminsw;
:PMINSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEA; mmxreg ... & m64 { mmxreg = pminsw(mmxreg, m64); }
@@ -6702,19 +6763,116 @@ define pcodeop pminsw;
:PMINSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEA; XmmReg ... & m128 { XmmReg = pminsw(XmmReg, m128); }
:PMINSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEA; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pminsw(XmmReg1, XmmReg2); }
define pcodeop pminub;
:PMINUB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDA; mmxreg ... & m64 { mmxreg = pminub(mmxreg, m64); }
:PMINUB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDA; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pminub(mmxreg1, mmxreg2); }
:PMINUB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDA; XmmReg ... & m128 { XmmReg = pminub(XmmReg, m128); }
:PMINUB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDA; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pminub(XmmReg1, XmmReg2); }
macro assignUnsignedLesser(dest, x, y){
dest = (zext(x <= y) * x) + (zext(y < x) * y);
}
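This is the dual of assignUnsignedGreater above, selecting the smaller operand branchlessly; in C terms (umin8 is a hypothetical name):

#include <stdint.h>

/* Branchless unsigned min, mirroring assignUnsignedLesser:
   picks x when x <= y, and y otherwise. */
static uint8_t umin8(uint8_t x, uint8_t y) {
    return (uint8_t)((uint8_t)(x <= y) * x + (uint8_t)(y < x) * y);
}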
define pcodeop pmovmskb;
:PMOVMSKB Reg32, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD7; Reg32 & mmxreg2 { Reg32 = pmovmskb(Reg32, mmxreg2); }
:PMOVMSKB Reg32, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD7; Reg32 & XmmReg2 { Reg32 = pmovmskb(Reg32, XmmReg2); }
@ifdef IA64
:PMOVMSKB Reg64, mmxreg2 is vexMode=0 & opsize=2 & mandover=0 & byte=0x0F; byte=0xD7; Reg64 & mmxreg2 { Reg64 = pmovmskb(Reg64, mmxreg2); }
@endif
:PMINUB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDA; mmxreg ... & m64
{
assignUnsignedLesser(mmxreg[0,8],mmxreg[0,8],m64[0,8]);
assignUnsignedLesser(mmxreg[8,8],mmxreg[8,8],m64[8,8]);
assignUnsignedLesser(mmxreg[16,8],mmxreg[16,8],m64[16,8]);
assignUnsignedLesser(mmxreg[24,8],mmxreg[24,8],m64[24,8]);
assignUnsignedLesser(mmxreg[32,8],mmxreg[32,8],m64[32,8]);
assignUnsignedLesser(mmxreg[40,8],mmxreg[40,8],m64[40,8]);
assignUnsignedLesser(mmxreg[48,8],mmxreg[48,8],m64[48,8]);
assignUnsignedLesser(mmxreg[56,8],mmxreg[56,8],m64[56,8]);
}
:PMINUB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDA; mmxmod = 3 & mmxreg1 & mmxreg2
{
assignUnsignedLesser(mmxreg1[0,8],mmxreg1[0,8],mmxreg2[0,8]);
assignUnsignedLesser(mmxreg1[8,8],mmxreg1[8,8],mmxreg2[8,8]);
assignUnsignedLesser(mmxreg1[16,8],mmxreg1[16,8],mmxreg2[16,8]);
assignUnsignedLesser(mmxreg1[24,8],mmxreg1[24,8],mmxreg2[24,8]);
assignUnsignedLesser(mmxreg1[32,8],mmxreg1[32,8],mmxreg2[32,8]);
assignUnsignedLesser(mmxreg1[40,8],mmxreg1[40,8],mmxreg2[40,8]);
assignUnsignedLesser(mmxreg1[48,8],mmxreg1[48,8],mmxreg2[48,8]);
assignUnsignedLesser(mmxreg1[56,8],mmxreg1[56,8],mmxreg2[56,8]);
}
:PMINUB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDA; XmmReg ... & m128
{
assignUnsignedLesser(XmmReg[0,8],XmmReg[0,8],m128[0,8]);
assignUnsignedLesser(XmmReg[8,8],XmmReg[8,8],m128[8,8]);
assignUnsignedLesser(XmmReg[16,8],XmmReg[16,8],m128[16,8]);
assignUnsignedLesser(XmmReg[24,8],XmmReg[24,8],m128[24,8]);
assignUnsignedLesser(XmmReg[32,8],XmmReg[32,8],m128[32,8]);
assignUnsignedLesser(XmmReg[40,8],XmmReg[40,8],m128[40,8]);
assignUnsignedLesser(XmmReg[48,8],XmmReg[48,8],m128[48,8]);
assignUnsignedLesser(XmmReg[56,8],XmmReg[56,8],m128[56,8]);
assignUnsignedLesser(XmmReg[64,8],XmmReg[64,8],m128[64,8]);
assignUnsignedLesser(XmmReg[72,8],XmmReg[72,8],m128[72,8]);
assignUnsignedLesser(XmmReg[80,8],XmmReg[80,8],m128[80,8]);
assignUnsignedLesser(XmmReg[88,8],XmmReg[88,8],m128[88,8]);
assignUnsignedLesser(XmmReg[96,8],XmmReg[96,8],m128[96,8]);
assignUnsignedLesser(XmmReg[104,8],XmmReg[104,8],m128[104,8]);
assignUnsignedLesser(XmmReg[112,8],XmmReg[112,8],m128[112,8]);
assignUnsignedLesser(XmmReg[120,8],XmmReg[120,8],m128[120,8]);
}
:PMINUB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDA; xmmmod = 3 & XmmReg1 & XmmReg2
{
assignUnsignedLesser(XmmReg1[0,8],XmmReg1[0,8],XmmReg2[0,8]);
assignUnsignedLesser(XmmReg1[8,8],XmmReg1[8,8],XmmReg2[8,8]);
assignUnsignedLesser(XmmReg1[16,8],XmmReg1[16,8],XmmReg2[16,8]);
assignUnsignedLesser(XmmReg1[24,8],XmmReg1[24,8],XmmReg2[24,8]);
assignUnsignedLesser(XmmReg1[32,8],XmmReg1[32,8],XmmReg2[32,8]);
assignUnsignedLesser(XmmReg1[40,8],XmmReg1[40,8],XmmReg2[40,8]);
assignUnsignedLesser(XmmReg1[48,8],XmmReg1[48,8],XmmReg2[48,8]);
assignUnsignedLesser(XmmReg1[56,8],XmmReg1[56,8],XmmReg2[56,8]);
assignUnsignedLesser(XmmReg1[64,8],XmmReg1[64,8],XmmReg2[64,8]);
assignUnsignedLesser(XmmReg1[72,8],XmmReg1[72,8],XmmReg2[72,8]);
assignUnsignedLesser(XmmReg1[80,8],XmmReg1[80,8],XmmReg2[80,8]);
assignUnsignedLesser(XmmReg1[88,8],XmmReg1[88,8],XmmReg2[88,8]);
assignUnsignedLesser(XmmReg1[96,8],XmmReg1[96,8],XmmReg2[96,8]);
assignUnsignedLesser(XmmReg1[104,8],XmmReg1[104,8],XmmReg2[104,8]);
assignUnsignedLesser(XmmReg1[112,8],XmmReg1[112,8],XmmReg2[112,8]);
assignUnsignedLesser(XmmReg1[120,8],XmmReg1[120,8],XmmReg2[120,8]);
}
#in 64-bit mode the default operand size is 64 bits
#note that gcc assembles pmovmskb eax, mm0 and pmovmskb rax, mm0 to 0f d7 c0
:PMOVMSKB Reg32, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD7; mod = 3 & Reg32 & mmxreg2 & check_Reg32_dest
{
local byte_mask:1 = 0:1;
byte_mask[0,1] = mmxreg2[7,1];
byte_mask[1,1] = mmxreg2[15,1];
byte_mask[2,1] = mmxreg2[23,1];
byte_mask[3,1] = mmxreg2[31,1];
byte_mask[4,1] = mmxreg2[39,1];
byte_mask[5,1] = mmxreg2[47,1];
byte_mask[6,1] = mmxreg2[55,1];
byte_mask[7,1] = mmxreg2[63,1];
Reg32 = zext(byte_mask);
build check_Reg32_dest;
}
#in 64-bit mode the default operand size is 64 bits
#note that gcc assembles pmovmskb eax, xmm0 and pmovmskb rax, xmm0 to 66 0f d7 c0
:PMOVMSKB Reg32, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD7; mod = 3 & Reg32 & XmmReg2 & check_Reg32_dest
{
local byte_mask:2 = 0:2;
byte_mask[0,1] = XmmReg2[7,1];
byte_mask[1,1] = XmmReg2[15,1];
byte_mask[2,1] = XmmReg2[23,1];
byte_mask[3,1] = XmmReg2[31,1];
byte_mask[4,1] = XmmReg2[39,1];
byte_mask[5,1] = XmmReg2[47,1];
byte_mask[6,1] = XmmReg2[55,1];
byte_mask[7,1] = XmmReg2[63,1];
byte_mask[8,1] = XmmReg2[71,1];
byte_mask[9,1] = XmmReg2[79,1];
byte_mask[10,1] = XmmReg2[87,1];
byte_mask[11,1] = XmmReg2[95,1];
byte_mask[12,1] = XmmReg2[103,1];
byte_mask[13,1] = XmmReg2[111,1];
byte_mask[14,1] = XmmReg2[119,1];
byte_mask[15,1] = XmmReg2[127,1];
Reg32 = zext(byte_mask);
build check_Reg32_dest;
}
define pcodeop pmulhrsw;
:PMULHRSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0B; mmxreg ... & m64 { mmxreg=pmulhrsw(mmxreg,m64); }
:PMULHRSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0B; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pmulhrsw(mmxreg1,mmxreg2); }
@@ -6892,8 +7050,23 @@ define pcodeop psignd;
:PSIGND XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0a; XmmReg ... & m128 { XmmReg=psignd(XmmReg,m128); }
:PSIGND XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0a; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignd(XmmReg1,XmmReg2); }
define pcodeop pslldq;
:PSLLDQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; xmmmod = 3 & reg_opcode=7 & XmmReg2; imm8 { XmmReg2 = pslldq(XmmReg2, imm8:8); }
#break into two 64-bit chunks so the decompiler can follow constants
:PSLLDQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; xmmmod = 3 & reg_opcode=7 & XmmReg2; imm8
{
if (imm8:1 > 15:1) goto <zero>;
local low64copy:8 = XmmReg2[0,64];
XmmReg2[0,64] = XmmReg2[0,64] << (8:1 * imm8:1);
if (imm8:1 > 8:1) goto <greater>;
XmmReg2[64,64] = (XmmReg2[64,64] << (8:1 * imm8:1)) | (low64copy >> (8:1 * (8 - imm8:1)));
goto <end>;
<greater>
XmmReg2[64,64] = low64copy << (8:1 * (imm8 - 8));
goto <end>;
<zero>
XmmReg2[0,64] = 0:8;
XmmReg2[64,64] = 0:8;
<end>
}
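The three-way branch above models the 128-bit byte shift as two 64-bit halves with a carry from the low half into the high half. A C sketch of the equivalent logic (pslldq_ref is a hypothetical name; note that p-code shifts of 64 or more bits yield zero, while in C they are undefined, hence the extra guards):

#include <stdint.h>

/* Reference model of PSLLDQ: shift the 128-bit value (lo, hi) left by
   imm whole bytes, split into 64-bit halves as in the p-code above. */
static void pslldq_ref(uint64_t *lo, uint64_t *hi, unsigned imm) {
    if (imm > 15) {                   /* everything shifted out */
        *lo = 0;
        *hi = 0;
    } else if (imm >= 8) {            /* low half moves entirely into the high half */
        *hi = *lo << (8 * (imm - 8));
        *lo = 0;
    } else if (imm > 0) {             /* carry the top bytes of lo into hi */
        *hi = (*hi << (8 * imm)) | (*lo >> (8 * (8 - imm)));
        *lo = *lo << (8 * imm);
    }                                 /* imm == 0: no change */
}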
define pcodeop psllw;
:PSLLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF1; mmxreg ... & m64 ... { mmxreg = psllw(mmxreg, m64); }
@@ -8003,13 +8176,43 @@ define pcodeop pminsb;
:PMINSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x38; XmmReg ... & m128 { XmmReg = pminsb(XmmReg, m128); }
:PMINSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x38; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pminsb(XmmReg1, XmmReg2); }
define pcodeop pminuw;
:PMINUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3A; XmmReg ... & m128 { XmmReg = pminuw(XmmReg, m128); }
:PMINUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3A; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pminuw(XmmReg1, XmmReg2); }
:PMINUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3A; XmmReg ... & m128
{
assignUnsignedLesser(XmmReg[0,16],XmmReg[0,16],m128[0,16]);
assignUnsignedLesser(XmmReg[16,16],XmmReg[16,16],m128[16,16]);
assignUnsignedLesser(XmmReg[32,16],XmmReg[32,16],m128[32,16]);
assignUnsignedLesser(XmmReg[48,16],XmmReg[48,16],m128[48,16]);
assignUnsignedLesser(XmmReg[64,16],XmmReg[64,16],m128[64,16]);
assignUnsignedLesser(XmmReg[80,16],XmmReg[80,16],m128[80,16]);
assignUnsignedLesser(XmmReg[96,16],XmmReg[96,16],m128[96,16]);
assignUnsignedLesser(XmmReg[112,16],XmmReg[112,16],m128[112,16]);
}
:PMINUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3A; xmmmod=3 & XmmReg1 & XmmReg2
{
assignUnsignedLesser(XmmReg1[0,16],XmmReg1[0,16],XmmReg2[0,16]);
assignUnsignedLesser(XmmReg1[16,16],XmmReg1[16,16],XmmReg2[16,16]);
assignUnsignedLesser(XmmReg1[32,16],XmmReg1[32,16],XmmReg2[32,16]);
assignUnsignedLesser(XmmReg1[48,16],XmmReg1[48,16],XmmReg2[48,16]);
assignUnsignedLesser(XmmReg1[64,16],XmmReg1[64,16],XmmReg2[64,16]);
assignUnsignedLesser(XmmReg1[80,16],XmmReg1[80,16],XmmReg2[80,16]);
assignUnsignedLesser(XmmReg1[96,16],XmmReg1[96,16],XmmReg2[96,16]);
assignUnsignedLesser(XmmReg1[112,16],XmmReg1[112,16],XmmReg2[112,16]);
}
define pcodeop pminud;
:PMINUD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3B; XmmReg ... & m128 { XmmReg = pminud(XmmReg, m128); }
:PMINUD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3B; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pminud(XmmReg1, XmmReg2); }
:PMINUD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3B; XmmReg ... & m128
{
assignUnsignedLesser(XmmReg[0,32],XmmReg[0,32],m128[0,32]);
assignUnsignedLesser(XmmReg[32,32],XmmReg[32,32],m128[32,32]);
assignUnsignedLesser(XmmReg[64,32],XmmReg[64,32],m128[64,32]);
assignUnsignedLesser(XmmReg[96,32],XmmReg[96,32],m128[96,32]);
}
:PMINUD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3B; xmmmod=3 & XmmReg1 & XmmReg2
{
assignUnsignedLesser(XmmReg1[0,32],XmmReg1[0,32],XmmReg2[0,32]);
assignUnsignedLesser(XmmReg1[32,32],XmmReg1[32,32],XmmReg2[32,32]);
assignUnsignedLesser(XmmReg1[64,32],XmmReg1[64,32],XmmReg2[64,32]);
assignUnsignedLesser(XmmReg1[96,32],XmmReg1[96,32],XmmReg2[96,32]);
}
define pcodeop pminsd;
:PMINSD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x39; XmmReg ... & m128 { XmmReg = pminsd(XmmReg, m128); }
@@ -8019,13 +8222,46 @@ define pcodeop pmaxsb;
:PMAXSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3C; XmmReg ... & m128 { XmmReg = pmaxsb(XmmReg, m128); }
:PMAXSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxsb(XmmReg1, XmmReg2); }
define pcodeop pmaxuw;
:PMAXUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3E; XmmReg ... & m128 { XmmReg = pmaxuw(XmmReg, m128); }
:PMAXUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxuw(XmmReg1, XmmReg2); }
define pcodeop pmaxud;
:PMAXUD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3F; XmmReg ... & m128 { XmmReg = pmaxud(XmmReg, m128); }
:PMAXUD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaxud(XmmReg1, XmmReg2); }
:PMAXUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3E; XmmReg ... & m128
{
assignUnsignedGreater(XmmReg[0,16],XmmReg[0,16],m128[0,16]);
assignUnsignedGreater(XmmReg[16,16],XmmReg[16,16],m128[16,16]);
assignUnsignedGreater(XmmReg[32,16],XmmReg[32,16],m128[32,16]);
assignUnsignedGreater(XmmReg[48,16],XmmReg[48,16],m128[48,16]);
assignUnsignedGreater(XmmReg[64,16],XmmReg[64,16],m128[64,16]);
assignUnsignedGreater(XmmReg[80,16],XmmReg[80,16],m128[80,16]);
assignUnsignedGreater(XmmReg[96,16],XmmReg[96,16],m128[96,16]);
assignUnsignedGreater(XmmReg[112,16],XmmReg[112,16],m128[112,16]);
}
:PMAXUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3E; xmmmod=3 & XmmReg1 & XmmReg2
{
assignUnsignedGreater(XmmReg1[0,16],XmmReg1[0,16],XmmReg2[0,16]);
assignUnsignedGreater(XmmReg1[16,16],XmmReg1[16,16],XmmReg2[16,16]);
assignUnsignedGreater(XmmReg1[32,16],XmmReg1[32,16],XmmReg2[32,16]);
assignUnsignedGreater(XmmReg1[48,16],XmmReg1[48,16],XmmReg2[48,16]);
assignUnsignedGreater(XmmReg1[64,16],XmmReg1[64,16],XmmReg2[64,16]);
assignUnsignedGreater(XmmReg1[80,16],XmmReg1[80,16],XmmReg2[80,16]);
assignUnsignedGreater(XmmReg1[96,16],XmmReg1[96,16],XmmReg2[96,16]);
assignUnsignedGreater(XmmReg1[112,16],XmmReg1[112,16],XmmReg2[112,16]);
}
:PMAXUD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3F; XmmReg ... & m128
{
assignUnsignedGreater(XmmReg[0,32],XmmReg[0,32],m128[0,32]);
assignUnsignedGreater(XmmReg[32,32],XmmReg[32,32],m128[32,32]);
assignUnsignedGreater(XmmReg[64,32],XmmReg[64,32],m128[64,32]);
assignUnsignedGreater(XmmReg[96,32],XmmReg[96,32],m128[96,32]);
}
:PMAXUD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3F; xmmmod=3 & XmmReg1 & XmmReg2
{
assignUnsignedGreater(XmmReg1[0,32],XmmReg1[0,32],XmmReg2[0,32]);
assignUnsignedGreater(XmmReg1[32,32],XmmReg1[32,32],XmmReg2[32,32]);
assignUnsignedGreater(XmmReg1[64,32],XmmReg1[64,32],XmmReg2[64,32]);
assignUnsignedGreater(XmmReg1[96,32],XmmReg1[96,32],XmmReg2[96,32]);
}
define pcodeop pmaxsd;
:PMAXSD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3D; XmmReg ... & m128 { XmmReg = pmaxsd(XmmReg, m128); }
@@ -8149,9 +8385,16 @@ define pcodeop pmovzxdq;
SF = 0;
}
define pcodeop pcmpeqq;
:PCMPEQQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; XmmReg ... & m128 { XmmReg = pcmpeqq(XmmReg, m128); }
:PCMPEQQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pcmpeqq(XmmReg1, XmmReg2); }
:PCMPEQQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; XmmReg ... & m128
{
XmmReg[0,64] = zext(XmmReg[0,64] == m128[0,64]) * 0xffffffffffffffff:8;
XmmReg[64,64] = zext(XmmReg[64,64] == m128[64,64]) * 0xffffffffffffffff:8;
}
:PCMPEQQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2
{
XmmReg1[0,64] = zext(XmmReg1[0,64] == XmmReg2[0,64]) * 0xffffffffffffffff:8;
XmmReg1[64,64] = zext(XmmReg1[64,64] == XmmReg2[64,64]) * 0xffffffffffffffff:8;
}
define pcodeop packusdw;
:PACKUSDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2B; XmmReg ... & m128 { XmmReg = packusdw(XmmReg, m128); }


@@ -5,7 +5,7 @@
endian="little"
size="32"
variant="default"
version="2.10"
version="2.11"
slafile="x86.sla"
processorspec="x86.pspec"
manualindexfile="../manuals/x86.idx"
@@ -33,7 +33,7 @@
endian="little"
size="32"
variant="System Management Mode"
version="2.10"
version="2.11"
slafile="x86.sla"
processorspec="x86-16.pspec"
manualindexfile="../manuals/x86.idx"
@@ -46,7 +46,7 @@
endian="little"
size="16"
variant="Real Mode"
version="2.10"
version="2.11"
slafile="x86.sla"
processorspec="x86-16-real.pspec"
manualindexfile="../manuals/x86.idx"
@@ -66,7 +66,7 @@
endian="little"
size="16"
variant="Protected Mode"
version="2.10"
version="2.11"
slafile="x86.sla"
processorspec="x86-16.pspec"
manualindexfile="../manuals/x86.idx"
@@ -81,7 +81,7 @@
endian="little"
size="64"
variant="default"
version="2.10"
version="2.11"
slafile="x86-64.sla"
processorspec="x86-64.pspec"
manualindexfile="../manuals/x86.idx"