delta += 2+1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta+2, dis_buf, 0 );
+ if (insn[1] == 0x28/*movaps*/)
+ gen_SEGV_if_not_16_aligned( addr );
putXMMReg( gregOfRexRM(pfx,modrm),
loadLE(Ity_V128, mkexpr(addr)) );
DIP("mov[ua]ps %s,%s\n", dis_buf,
/* fall through; awaiting test case */
} else {
addr = disAMode ( &alen, vbi, pfx, delta+2, dis_buf, 0 );
+ if (insn[1] == 0x29/*movaps*/)
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("mov[ua]ps %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
dis_buf );
modrm = getUChar(delta+2);
if (!epartIsReg(modrm)) {
addr = disAMode ( &alen, vbi, pfx, delta+2, dis_buf, 0 );
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("movntp%s %s,%s\n", sz==2 ? "d" : "s",
dis_buf,
delta += 2+1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta+2, dis_buf, 0 );
+ if (insn[1] == 0x28/*movapd*/ || insn[1] == 0x6F/*movdqa*/)
+ gen_SEGV_if_not_16_aligned( addr );
putXMMReg( gregOfRexRM(pfx,modrm),
loadLE(Ity_V128, mkexpr(addr)) );
DIP("mov%s %s,%s\n", wot, dis_buf,
delta += 2+1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta+2, dis_buf, 0 );
+ if (insn[1] == 0x29/*movapd*/)
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("mov%s %s,%s\n", wot, nameXMMReg(gregOfRexRM(pfx,modrm)),
dis_buf );
nameXMMReg(eregOfRexRM(pfx,modrm)));
} else {
addr = disAMode( &alen, vbi, pfx, delta+2, dis_buf, 0 );
+ gen_SEGV_if_not_16_aligned( addr );
delta += 2+alen;
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("movdqa %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf);
modrm = getUChar(delta+2);
if (!epartIsReg(modrm)) {
addr = disAMode ( &alen, vbi, pfx, delta+2, dis_buf, 0 );
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("movntdq %s,%s\n", dis_buf,
nameXMMReg(gregOfRexRM(pfx,modrm)));
delta += 2+1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta+2, dis_buf, 0 );
+ gen_SEGV_if_not_16_aligned( addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
DIP("movs%cdup %s,%s\n", isH ? 'h' : 'l',
dis_buf,
} else {
addr = disAMode( &alen, vbi, pfx, delta+3, dis_buf,
1/* imm8 is 1 byte after the amode */ );
+ gen_SEGV_if_not_16_aligned( addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)insn[2+alen+1];
delta += 3+alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta+3, dis_buf,
1/* imm8 is 1 byte after the amode */ );
+ gen_SEGV_if_not_16_aligned( addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)insn[3+alen];
delta += 3+alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta+3, dis_buf,
1/* imm8 is 1 byte after the amode */ );
+ gen_SEGV_if_not_16_aligned( addr );
assign( svec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)insn[2+alen+1];
delta += 3+alen+1;
args = mkIRExprVec_3(mkexpr(t0), mkexpr(t1), mkU64(0));
assign(t2,
- mkIRExprCCall(Ity_I64,0, "amd64g_calculate_pclmul", &amd64g_calculate_pclmul, args));
+ mkIRExprCCall(Ity_I64,0, "amd64g_calculate_pclmul",
+ &amd64g_calculate_pclmul, args));
args = mkIRExprVec_3(mkexpr(t0), mkexpr(t1), mkU64(1));
assign(t3,
- mkIRExprCCall(Ity_I64,0, "amd64g_calculate_pclmul", &amd64g_calculate_pclmul, args));
+ mkIRExprCCall(Ity_I64,0, "amd64g_calculate_pclmul",
+ &amd64g_calculate_pclmul, args));
IRTemp res = newTemp(Ity_V128);
assign(res, binop(Iop_64HLtoV128, mkexpr(t3), mkexpr(t2)));
} else {
addr = disAMode( &alen, vbi, pfx, delta+3, dis_buf,
1/* imm8 is 1 byte after the amode */ );
+ gen_SEGV_if_not_16_aligned( addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)insn[2+alen+1];
delta += 3+alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta+3, dis_buf,
1/* imm8 is 1 byte after the amode */ );
+ gen_SEGV_if_not_16_aligned( addr );
assign( xmm2_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)insn[2+alen+1];
delta += 3+alen+1;
*/
if ( have66noF2noF3( pfx ) && sz == 2
&& insn[0] == 0x0F && insn[1] == 0x38 && insn[2] == 0x37) {
+ /* FIXME: this needs an alignment check */
delta = dis_SSEint_E_to_G( vbi, pfx, delta+3,
"pcmpgtq", Iop_CmpGT64Sx2, False );
goto decode_success;
if ( have66noF2noF3( pfx ) && sz == 2
&& insn[0] == 0x0F && insn[1] == 0x38
&& (insn[2] == 0x3D || insn[2] == 0x39)) {
+ /* FIXME: this needs an alignment check */
Bool isMAX = insn[2] == 0x3D;
delta = dis_SSEint_E_to_G(
vbi, pfx, delta+3,
if ( have66noF2noF3( pfx ) && sz == 2
&& insn[0] == 0x0F && insn[1] == 0x38
&& (insn[2] == 0x3F || insn[2] == 0x3B)) {
+ /* FIXME: this needs an alignment check */
Bool isMAX = insn[2] == 0x3F;
delta = dis_SSEint_E_to_G(
vbi, pfx, delta+3,
if ( have66noF2noF3( pfx ) && sz == 2
&& insn[0] == 0x0F && insn[1] == 0x38
&& (insn[2] == 0x3E || insn[2] == 0x3A)) {
+ /* FIXME: this needs an alignment check */
Bool isMAX = insn[2] == 0x3E;
delta = dis_SSEint_E_to_G(
vbi, pfx, delta+3,
if ( have66noF2noF3( pfx ) && sz == 2
&& insn[0] == 0x0F && insn[1] == 0x38
&& (insn[2] == 0x3C || insn[2] == 0x38)) {
+ /* FIXME: this needs an alignment check */
Bool isMAX = insn[2] == 0x3C;
delta = dis_SSEint_E_to_G(
vbi, pfx, delta+3,
nameXMMReg( gregOfRexRM(pfx, modrm) ) );
} else {
addr = disAMode( &alen, vbi, pfx, delta+3, dis_buf, 0 );
+ gen_SEGV_if_not_16_aligned( addr );
assign( argL, loadLE( Ity_V128, mkexpr(addr) ));
delta += 3+alen;
DIP( "pmulld %s,%s\n",
regNoL = 16; /* use XMM16 as an intermediary */
regNoR = gregOfRexRM(pfx, modrm);
addr = disAMode( &alen, vbi, pfx, delta+3, dis_buf, 0 );
+ /* No alignment check; I guess that makes sense, given that
+ these insns are for dealing with C style strings. */
stmt( IRStmt_Put( OFFB_XMM16, loadLE(Ity_V128, mkexpr(addr)) ));
imm = insn[3+alen];
delta += 3+alen+1;
nameXMMReg( gregOfRexRM(pfx, modrm) ) );
} else {
addr = disAMode( &alen, vbi, pfx, delta+3, dis_buf, 0 );
+ gen_SEGV_if_not_16_aligned( addr );
assign(vecE, loadLE( Ity_V128, mkexpr(addr) ));
delta += 3+alen;
DIP( "ptest %s,%s\n",
delta += 2+1;
} else {
addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+ if (insn[1] == 0x28/*movaps*/)
+ gen_SEGV_if_not_16_aligned( addr );
putXMMReg( gregOfRM(modrm),
loadLE(Ity_V128, mkexpr(addr)) );
DIP("mov[ua]ps %s,%s\n", dis_buf,
/* fall through; awaiting test case */
} else {
addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+ if (insn[1] == 0x29/*movaps*/)
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
DIP("mov[ua]ps %s,%s\n", nameXMMReg(gregOfRM(modrm)),
dis_buf );
modrm = getIByte(delta+2);
if (!epartIsReg(modrm)) {
addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
DIP("movntp%s %s,%s\n", sz==2 ? "d" : "s",
dis_buf,
delta += 2+1;
} else {
addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+ if (insn[1] == 0x28/*movapd*/ || insn[1] == 0x6F/*movdqa*/)
+ gen_SEGV_if_not_16_aligned( addr );
putXMMReg( gregOfRM(modrm),
loadLE(Ity_V128, mkexpr(addr)) );
DIP("mov%s %s,%s\n", wot, dis_buf,
/* fall through; awaiting test case */
} else {
addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+ if (insn[1] == 0x29/*movapd*/)
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
DIP("mov%s %s,%s\n", wot, nameXMMReg(gregOfRM(modrm)),
dis_buf );
} else {
addr = disAMode( &alen, sorb, delta+2, dis_buf );
delta += 2+alen;
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
DIP("movdqa %s, %s\n", nameXMMReg(gregOfRM(modrm)), dis_buf);
}
modrm = getIByte(delta+2);
if (sz == 2 && !epartIsReg(modrm)) {
addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+ gen_SEGV_if_not_16_aligned( addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
DIP("movntdq %s,%s\n", dis_buf,
nameXMMReg(gregOfRM(modrm)));
delta += 3+1;
} else {
addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+ gen_SEGV_if_not_16_aligned( addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
DIP("movs%cdup %s,%s\n", isH ? 'h' : 'l',
dis_buf,
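
All of the new checks above funnel through the pre-existing helper gen_SEGV_if_not_16_aligned in guest_amd64_toIR.c (guest_x86_toIR.c carries an analogous copy using 32-bit IROps and guest_EIP_curr_instr). For reference, a minimal sketch of its shape, assuming the three-argument IRStmt_Exit form in use at the time:

static void gen_SEGV_if_not_16_aligned ( IRTemp effective_addr )
{
   /* Side-exit with SIGSEGV if the low 4 bits of the effective
      address are nonzero, i.e. the address is not 16-aligned. */
   stmt(
      IRStmt_Exit(
         binop(Iop_CmpNE64,
               binop(Iop_And64, mkexpr(effective_addr), mkU64(0xF)),
               mkU64(0)),
         Ijk_SigSEGV,
         IRConst_U64(guest_RIP_curr_instr)
      )
   );
}

Because the exit is an ordinary IR statement, dropping a single call after each disAMode is enough to make the aligned-access forms (movaps/movapd, movdqa, movntps/movntpd/movntdq, and the 16-byte SSE3/SSSE3/SSE4 memory forms) raise a guest-visible fault on misaligned operands rather than silently tolerating them.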