Use bitrange operator instead of special constructors when accessing vector register lanes
caheckman 2019-11-16 10:28:09 -05:00
parent 05ee2c14b9
commit 64d15b3ea0
4 changed files with 1275 additions and 1890 deletions
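
The change follows one pattern throughout: where the old semantics reached a 64- or 128-bit lane through a dedicated overlapping-register constructor (XmmReg1_Qa, YmmReg1_DQb, and so on), the new semantics apply SLEIGH's bitrange operator, register[bit-offset,bit-width], to the full register. A minimal before/after sketch, reusing identifiers that appear in the diff below:

# old style: each lane is a separate table symbol the pattern must bind
#   XmmReg1_Qa = tmpa;       # low quadword via a special constructor
#   XmmReg1_Qb = tmpb;       # high quadword via a special constructor
# new style: the bitrange operator addresses the lane directly
XmmReg1[0,64] = tmpa;        # bits 0..63
XmmReg1[64,64] = tmpb;       # bits 64..127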


@@ -1,6 +1,6 @@
# VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109785
define pcodeop vinserti128 ;
-:VINSERTI128 YmmReg1, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg & vexVVVV_YmmReg_DQa & vexVVVV_YmmReg_DQb; byte=0x38; (YmmReg1 & YmmReg1_DQa & YmmReg1_DQb) ... & XmmReg2_m128; imm8 & imm8_0 {
+:VINSERTI128 YmmReg1, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg & vexVVVV_YmmReg_DQa & vexVVVV_YmmReg_DQb; byte=0x38; YmmReg1 ... & XmmReg2_m128; imm8 & imm8_0 {
local tmp:16 = XmmReg2_m128;
# ignoring all but the least significant bit
@@ -8,13 +8,13 @@ define pcodeop vinserti128 ;
if (imm8_0:1 == 1) goto <case1>;
<case0>
-YmmReg1_DQa = tmp;
-YmmReg1_DQb = vexVVVV_YmmReg_DQb;
+YmmReg1[0,128] = tmp;
+YmmReg1[128,128] = vexVVVV_YmmReg_DQb;
goto <done>;
<case1>
-YmmReg1_DQa = vexVVVV_YmmReg_DQa;
-YmmReg1_DQb = tmp;
+YmmReg1[0,128] = vexVVVV_YmmReg_DQa;
+YmmReg1[128,128] = tmp;
<done>
}
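
Reading the rewritten body: bit 0 of the immediate selects which 128-bit lane of the destination receives the inserted value, and the other lane is copied from the VEX.vvvv source. When imm8 bit 0 is set, for instance, the <case1> path amounts to (restating the lines above):

YmmReg1[0,128] = vexVVVV_YmmReg_DQa;     # low lane carried over from vvvv
YmmReg1[128,128] = tmp;                  # high lane receives the 128-bit operand

The match pattern also shrinks from (YmmReg1 & YmmReg1_DQa & YmmReg1_DQb) to plain YmmReg1, since the lane symbols no longer need to be bound at decode time.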


@@ -1,44 +1,46 @@
# MOVSD 4-111 PAGE 1231 LINE 63970
-:VMOVSD XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg & vexVVVV_XmmReg_Qb; byte=0x10; (XmmReg1 & XmmReg1_Qa & XmmReg1_Qb & YmmReg1) & (mod=0x3 & XmmReg2 & XmmReg2_Qa)
+:VMOVSD XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg & vexVVVV_XmmReg_Qb; byte=0x10; XmmReg1 & YmmReg1 & (mod=0x3 & XmmReg2)
{
-local tmpa:8 = XmmReg2_Qa;
+local tmpa:8 = XmmReg2[0,64];
local tmpb:8 = vexVVVV_XmmReg_Qb;
YmmReg1 = 0;
-XmmReg1_Qa = tmpa;
-XmmReg1_Qb = tmpb;
+XmmReg1[0,64] = tmpa;
+XmmReg1[64,64] = tmpb;
# TODO ZmmReg1 = zext(XmmReg1)
}
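
The temporaries here are load-bearing: XmmReg2 or vexVVVV_XmmReg may be the same register as XmmReg1, and YmmReg1 = 0 would clobber them, so both quadwords are read out before the destination is zeroed. A sketch of the hazard the ordering avoids (hypothetical code, not from the commit):

# wrong when XmmReg1 == XmmReg2:
#   YmmReg1 = 0;                     # zeroes the source too
#   XmmReg1[0,64] = XmmReg2[0,64];   # now copies zero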
# MOVSD 4-111 PAGE 1231 LINE 63972
:VMOVSD XmmReg1, m64 is $(VEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x10; (XmmReg1 & YmmReg1) ... & m64
{
-local tmp:16 = zext(m64);
-YmmReg1 = zext(tmp);
+YmmReg1[0,64] = m64;
+YmmReg1[64,64] = 0;
# TODO ZmmReg1 = zext(XmmReg1)
}
# MOVSD 4-111 PAGE 1231 LINE 63974
-:VMOVSD XmmReg2, vexVVVV_XmmReg, XmmReg1 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg & vexVVVV_XmmReg_Qb; byte=0x11; XmmReg1 & XmmReg1_Qa & (mod=0x3 & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb & YmmReg2))
+:VMOVSD XmmReg2, vexVVVV_XmmReg, XmmReg1 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg & vexVVVV_XmmReg_Qb; byte=0x11; XmmReg1 & (mod=0x3 & (XmmReg2 & YmmReg2))
{
-local tmpa:8 = XmmReg1_Qa;
+local tmpa:8 = XmmReg1[0,64];
local tmpb:8 = vexVVVV_XmmReg_Qb;
YmmReg2 = 0;
-XmmReg2_Qa = tmpa;
-XmmReg2_Qb = tmpb;
+XmmReg2[0,64] = tmpa;
+XmmReg2[64,64] = tmpb;
# TODO ZmmReg2 = zext(XmmReg2)
}
# MOVSD 4-111 PAGE 1231 LINE 63976
-:VMOVSD m64, XmmReg1 is $(VEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x11; (XmmReg1 & XmmReg1_Qa) ... & m64
+:VMOVSD m64, XmmReg1 is $(VEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x11; XmmReg1 ... & m64
{
-m64 = XmmReg1_Qa;
+m64 = XmmReg1[0,64];
}
# MOVUPS 4-130 PAGE 1250 LINE 64872
:VMOVUPS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x10; (XmmReg1 & YmmReg1) ... & XmmReg2_m128
{
local tmp:16 = XmmReg2_m128;
-YmmReg1 = zext(tmp);
+YmmReg1[0,128] = tmp;
+YmmReg1[128,64] = 0;
+YmmReg1[192,64] = 0;
}
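
The VMOVUPS rewrite shows the general recipe for retiring zext-based zero extension: one bitrange write per destination lane, plus explicit zero stores for the cleared bits. The two forms should be equivalent (tmp is the 16-byte temporary from the body above):

# old: zero extension clears bits 128..255 implicitly
#   YmmReg1 = zext(tmp);
# new: the same effect, written lane by lane
YmmReg1[0,128] = tmp;
YmmReg1[128,64] = 0;
YmmReg1[192,64] = 0;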
# MOVUPS 4-130 PAGE 1250 LINE 64874

File diff suppressed because it is too large.


@@ -1,98 +1,97 @@
# Due to limitations on variable length matching that preclude opcode matching afterwards, all memory addressing forms of PCLMULQDQ are decoded to PCLMULQDQ, not the macro names.
# Display is non-standard, but semantics and decompilation should be correct.
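
Concretely, once a pattern uses the variable-length ... form to match a memory operand, the byte that follows can no longer be constrained to a fixed opcode value, so the memory forms cannot dispatch to the macro mnemonics the way the register forms below do. Schematically (an illustration of the two pattern shapes, not real constructors):

# register form: xmmmod=3 fixes the operand length, so the trailing selector
# can still be matched, e.g.  xmmmod=3 & XmmReg1 & XmmReg2; byte=0x00
# memory form:   XmmReg ... & m128; imm8  (the ... absorbs a variable-length
# ModRM/SIB/displacement, leaving the last byte as a generic imm8 operand)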
-:PCLMULLQLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & (XmmReg1 & XmmReg1_Qa & XmmReg1_Qb) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); byte=0x00
+:PCLMULLQLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; byte=0x00
{
-XmmReg1 = zext(XmmReg2_Qa) * zext(XmmReg1_Qa);
+XmmReg1 = zext(XmmReg2[0,64]) * zext(XmmReg1[0,64]);
}
-:PCLMULHQLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & (XmmReg1 & XmmReg1_Qa & XmmReg1_Qb) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); byte=0x01
+:PCLMULHQLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; byte=0x01
{
-XmmReg1 = zext(XmmReg2_Qa) * zext(XmmReg1_Qb);
+XmmReg1 = zext(XmmReg2[0,64]) * zext(XmmReg1[64,64]);
}
-:PCLMULLQHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & (XmmReg1 & XmmReg1_Qa & XmmReg1_Qb) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); byte=0x10
+:PCLMULLQHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; byte=0x10
{
-XmmReg1 = zext(XmmReg2_Qb) * zext(XmmReg1_Qa);
+XmmReg1 = zext(XmmReg2[64,64]) * zext(XmmReg1[0,64]);
}
-:PCLMULHQHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & (XmmReg1 & XmmReg1_Qa & XmmReg1_Qb) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); byte=0x11
+:PCLMULHQHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; byte=0x11
{
-XmmReg1 = zext(XmmReg2_Qb) * zext(XmmReg1_Qb);
+XmmReg1 = zext(XmmReg2[64,64]) * zext(XmmReg1[64,64]);
}
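
Across these four forms the trailing byte doubles as the immediate: bit 0 selects XmmReg1's quadword and bit 4 selects XmmReg2's, which is what the L/H letters in the mnemonics encode. Summarizing the bodies above in bitrange terms:

# 0x00  PCLMULLQLQDQ:  XmmReg2[0,64]  * XmmReg1[0,64]
# 0x01  PCLMULHQLQDQ:  XmmReg2[0,64]  * XmmReg1[64,64]
# 0x10  PCLMULLQHQDQ:  XmmReg2[64,64] * XmmReg1[0,64]
# 0x11  PCLMULHQHQDQ:  XmmReg2[64,64] * XmmReg1[64,64]

As before the change, the bodies model the carry-less multiply with an ordinary integer *, so the bitrange rewrite only changes how the quadwords are selected, not that approximation.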
-:PCLMULQDQ XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & (XmmReg1 & XmmReg1_Qa & XmmReg1_Qb) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); imm8 & imm8_4 & imm8_0
+:PCLMULQDQ XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; imm8 & imm8_4 & imm8_0
{
if (imm8_0:1) goto <src1_b>;
-src1:16 = zext(XmmReg1_Qa);
+src1:16 = zext(XmmReg1[0,64]);
goto <done1>;
<src1_b>
-src1 = zext(XmmReg1_Qb);
+src1 = zext(XmmReg1[64,64]);
<done1>
if (imm8_4:1) goto <src2_b>;
-src2:16 = zext(XmmReg2_Qa);
+src2:16 = zext(XmmReg2[0,64]);
goto <done2>;
<src2_b>
-src2 = zext(XmmReg2_Qb);
+src2 = zext(XmmReg2[64,64]);
<done2>
XmmReg1 = src2 * src1;
}
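
The generic form covers every immediate the four fixed-byte constructors above do not. The branches test the immediate's bit fields at run time; since imm8 is a constant in the encoded instruction, the decompiler should fold them away. The :1 suffix truncates each field to a one-byte boolean for the conditional:

# imm8_0 / imm8_4 are bit fields of the immediate token;
# imm8_0:1 truncates to 1 byte so it can serve as a branch condition
if (imm8_0:1) goto <src1_b>;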
-:PCLMULQDQ XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; (XmmReg & XmmReg_Qa & XmmReg_Qb) ... & (m128 & m128_Qa & m128_Qb); imm8 & imm8_4 & imm8_0
+:PCLMULQDQ XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; XmmReg ... & m128; imm8 & imm8_4 & imm8_0
{
if (imm8_0:1) goto <src1_b>;
-src1:16 = zext(XmmReg_Qa);
+src1:16 = zext(XmmReg[0,64]);
goto <done1>;
<src1_b>
-src1 = zext(XmmReg_Qb);
+src1 = zext(XmmReg[64,64]);
<done1>
+local m:16 = m128;
if (imm8_4:1) goto <src2_b>;
-src2:16 = zext(m128_Qa);
+src2:16 = zext(m[0,64]);
goto <done2>;
<src2_b>
-build m128_Qb;
-src2 = zext(m128_Qb);
+src2 = zext(m[64,64]);
<done2>
XmmReg = src2 * src1;
}
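
The memory form gains local m:16 = m128; because a bitrange applies to a varnode, not to a memory dereference: the operand is loaded once into a 16-byte temporary, which replaces the m128_Qa/m128_Qb helper constructors (and the build m128_Qb directive that accompanied them) and guarantees a single 16-byte access. The idiom in isolation:

local m:16 = m128;          # one load of the memory operand
src2:16 = zext(m[0,64]);    # low quadword; m[64,64] is the high one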
-:VPCLMULLQLQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); byte=0x00
+:VPCLMULLQLQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; byte=0x00
{
-tmp:16 = zext(XmmReg2_Qa) * zext(vexVVVV_XmmReg_Qa);
+tmp:16 = zext(XmmReg2[0,64]) * zext(vexVVVV_XmmReg_Qa);
YmmReg1 = zext(tmp);
}
-:VPCLMULHQLQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); byte=0x01
+:VPCLMULHQLQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; byte=0x01
{
-tmp:16 = zext(XmmReg2_Qa) * zext(vexVVVV_XmmReg_Qb);
+tmp:16 = zext(XmmReg2[0,64]) * zext(vexVVVV_XmmReg_Qb);
YmmReg1 = zext(tmp);
}
-:VPCLMULLQHQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); byte=0x10
+:VPCLMULLQHQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; byte=0x10
{
-tmp:16 = zext(XmmReg2_Qb) * zext(vexVVVV_XmmReg_Qa);
+tmp:16 = zext(XmmReg2[64,64]) * zext(vexVVVV_XmmReg_Qa);
YmmReg1 = zext(tmp);
}
-:VPCLMULHQHQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); byte=0x11
+:VPCLMULHQHQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; byte=0x11
{
-tmp:16 = zext(XmmReg2_Qb) * zext(vexVVVV_XmmReg_Qb);
+tmp:16 = zext(XmmReg2[64,64]) * zext(vexVVVV_XmmReg_Qb);
YmmReg1 = zext(tmp);
}
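
In all of these VEX forms the 128-bit product is written back with a widening store, which zeroes the upper YMM lane as VEX-encoded instructions require; the legacy forms above write only XmmReg1 and leave bits 128..255 untouched, matching SSE semantics. Restating the pattern:

tmp:16 = zext(XmmReg2[0,64]) * zext(vexVVVV_XmmReg_Qa);
YmmReg1 = zext(tmp);      # clears bits 128..255 of the YMM destination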
-:VPCLMULQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & (XmmReg2 & XmmReg2_Qa & XmmReg2_Qb); imm8 & imm8_4 & imm8_0
+:VPCLMULQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; imm8 & imm8_4 & imm8_0
{
if (imm8_0:1) goto <src1_b>;
src1:16 = zext(vexVVVV_XmmReg_Qa);
@@ -104,11 +103,11 @@
<done1>
if (imm8_4:1) goto <src2_b>;
-src2:16 = zext(XmmReg2_Qa);
+src2:16 = zext(XmmReg2[0,64]);
goto <done2>;
<src2_b>
-src2 = zext(XmmReg2_Qb);
+src2 = zext(XmmReg2[64,64]);
<done2>
@@ -116,7 +115,7 @@
YmmReg1 = zext(tmp);
}
-:VPCLMULQDQ XmmReg1, vexVVVV_XmmReg, m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; (XmmReg1 & YmmReg1) ... & (m128 & m128_Qa & m128_Qb); imm8 & imm8_4 & imm8_0
+:VPCLMULQDQ XmmReg1, vexVVVV_XmmReg, m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & (vexVVVV_XmmReg & vexVVVV_XmmReg_Qa & vexVVVV_XmmReg_Qb); byte=0x44; (XmmReg1 & YmmReg1) ... & m128; imm8 & imm8_4 & imm8_0
{
if (imm8_0:1) goto <src1_b>;
src1:16 = zext(vexVVVV_XmmReg_Qa);
@@ -127,13 +126,13 @@
<done1>
+local m:16 = m128;
if (imm8_4:1) goto <src2_b>;
-src2:16 = zext(m128_Qa);
+src2:16 = zext(m[0,64]);
goto <done2>;
<src2_b>
-build m128_Qb;
-src2 = zext(m128_Qb);
+src2 = zext(m[64,64]);
<done2>