break;
}
+ case 0xF7: {
+ IRTemp addr = newTemp(Ity_I32);
+ IRTemp regD = newTemp(Ity_I64);
+ IRTemp regM = newTemp(Ity_I64);
+ IRTemp mask = newTemp(Ity_I64);
+ IRTemp olddata = newTemp(Ity_I64);
+ IRTemp newdata = newTemp(Ity_I64);
+
+ modrm = getIByte(delta);
+ if (sz != 4 || (!epartIsReg(modrm)))
+ goto mmx_decode_failure;
+ delta++;
+
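+ /* MASKMOVQ: byte-wise masked store to [DS:EDI] (segment override
+    honoured via sorb).  Each byte of the G register (regD) is
+    written only if the msb of the corresponding byte of the E
+    register (regM) is set; expressed here as a read-modify-write
+    of the 8 bytes at the destination. */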
+ assign( addr, handleSegOverride( sorb, getIReg(4, R_EDI) ));
+ assign( regM, getMMXReg( eregOfRM(modrm) ));
+ assign( regD, getMMXReg( gregOfRM(modrm) ));
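+ /* mask: arithmetic shift of each byte by 7 gives 0xFF where the byte's msb is set, 0x00 otherwise */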
+ assign( mask, binop(Iop_SarN8x8, mkexpr(regM), mkU8(7)) );
+ assign( olddata, loadLE( Ity_I64, mkexpr(addr) ));
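+ /* merge: take bytes of regD where the mask is set, keep the old memory bytes elsewhere */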
+ assign( newdata,
+ binop(Iop_Or64,
+ binop(Iop_And64,
+ mkexpr(regD),
+ mkexpr(mask) ),
+ binop(Iop_And64,
+ mkexpr(olddata),
+ unop(Iop_Not64, mkexpr(mask)))) );
+ storeLE( mkexpr(addr), mkexpr(newdata) );
+ DIP("maskmovq %s,%s\n", nameMMXReg( eregOfRM(modrm) ),
+ nameMMXReg( gregOfRM(modrm) ) );
+ break;
+ }
+
/* --- MMX decode failure --- */
default:
mmx_decode_failure:
*decode_ok = False;
return delta; /* ignored */
}
+ /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+ /* 0F F7 = MASKMOVQ -- 8x8 masked store */
+ if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xF7) {
+ Bool ok = False;
+ delta = dis_MMX( &ok, sorb, sz, delta+1 );
+ if (!ok)
+ goto decode_failure;
+ goto decode_success;
+ }
+
/* 0F 5F = MAXPS -- max 32Fx4 from R/M to R */
if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x5F) {
delta = dis_SSE_E_to_G_all( sorb, delta+2, "maxps", Iop_Max32Fx4 );
goto decode_success;
}
+ /* 66 0F F7 = MASKMOVDQU -- store selected bytes of double quadword */
+ if (insn[0] == 0x0F && insn[1] == 0xF7) {
+ modrm = getIByte(delta+2);
+ if (sz == 2 && epartIsReg(modrm)) {
+ IRTemp regD = newTemp(Ity_V128);
+ IRTemp mask = newTemp(Ity_V128);
+ IRTemp olddata = newTemp(Ity_V128);
+ IRTemp newdata = newTemp(Ity_V128);
+ addr = newTemp(Ity_I32);
+
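+ /* 128-bit analogue of maskmovq above: store the bytes of the XMM
+    G register to [DS:EDI] wherever the msb of the corresponding
+    byte of the XMM E register is set. */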
+ assign( addr, handleSegOverride( sorb, getIReg(4, R_EDI) ));
+ assign( regD, getXMMReg( gregOfRM(modrm) ));
+
+ /* Unfortunately can't do the obvious thing with SarN8x16
+ here since that can't be re-emitted as SSE2 code - no such
+ insn. */
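+ /* Instead, build the mask 64 bits at a time with SarN8x8 and reassemble the halves with 64HLtoV128. */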
+ assign(
+ mask,
+ binop(Iop_64HLtoV128,
+ binop(Iop_SarN8x8,
+ getXMMRegLane64( eregOfRM(modrm), 1 ),
+ mkU8(7) ),
+ binop(Iop_SarN8x8,
+ getXMMRegLane64( eregOfRM(modrm), 0 ),
+ mkU8(7) ) ));
+ assign( olddata, loadLE( Ity_V128, mkexpr(addr) ));
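+ /* merge as for maskmovq: regD bytes where the mask is set, old memory bytes elsewhere */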
+ assign( newdata,
+ binop(Iop_OrV128,
+ binop(Iop_AndV128,
+ mkexpr(regD),
+ mkexpr(mask) ),
+ binop(Iop_AndV128,
+ mkexpr(olddata),
+ unop(Iop_NotV128, mkexpr(mask)))) );
+ storeLE( mkexpr(addr), mkexpr(newdata) );
+
+ delta += 2+1;
+ DIP("maskmovdqu %s,%s\n", nameXMMReg( eregOfRM(modrm) ),
+ nameXMMReg( gregOfRM(modrm) ) );
+ goto decode_success;
+ }
+ /* else fall through */
+ }
+
/* 66 0F E7 = MOVNTDQ -- for us, just a plain SSE store. */
if (insn[0] == 0x0F && insn[1] == 0xE7) {
modrm = getIByte(delta+2);
/* shifts: we don't care about out-of-range ones, since
that is dealt with at a higher level. */
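+/* arithmetic (sign-propagating) shift right of a single byte lane */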
+static inline UChar sar8 ( UChar v, UInt n )
+{
+ return toUChar(((Char)v) >> n);
+}
+
static inline UShort shl16 ( UShort v, UInt n )
{
return toUShort(v << n);
}
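+
+/* Lane-wise arithmetic shift right of 8 bytes by nn.  A shift by 7
+   maps each byte to 0x00 or 0xFF according to its msb, which is the
+   mask trick used by maskmovq/maskmovdqu above; e.g.
+   h_generic_calc_SarN8x8(0x80FF7F0180FF7F01ULL, 7) == 0xFFFF0000FFFF0000ULL. */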
+ULong h_generic_calc_SarN8x8 ( ULong xx, UInt nn )
+{
+ /* vassert(nn < 8); */
+ nn &= 7;
+ return mk8x8(
+ sar8( sel8x8_7(xx), nn ),
+ sar8( sel8x8_6(xx), nn ),
+ sar8( sel8x8_5(xx), nn ),
+ sar8( sel8x8_4(xx), nn ),
+ sar8( sel8x8_3(xx), nn ),
+ sar8( sel8x8_2(xx), nn ),
+ sar8( sel8x8_1(xx), nn ),
+ sar8( sel8x8_0(xx), nn )
+ );
+}
+
/* ------------ Averaging ------------ */
ULong h_generic_calc_Avg8Ux8 ( ULong xx, ULong yy )
case Iop_ShlN32x2: vex_printf("ShlN32x2"); return;
case Iop_ShrN16x4: vex_printf("ShrN16x4"); return;
case Iop_ShrN32x2: vex_printf("ShrN32x2"); return;
+ case Iop_SarN8x8: vex_printf("SarN8x8"); return;
case Iop_SarN16x4: vex_printf("SarN16x4"); return;
case Iop_SarN32x2: vex_printf("SarN32x2"); return;
case Iop_QNarrow16Ux4: vex_printf("QNarrow16Ux4"); return;
case Iop_ShlN32x2: case Iop_ShlN16x4:
case Iop_ShrN32x2: case Iop_ShrN16x4:
- case Iop_SarN32x2: case Iop_SarN16x4:
+ case Iop_SarN32x2: case Iop_SarN16x4: case Iop_SarN8x8:
BINARY(Ity_I64,Ity_I8, Ity_I64);
case Iop_Shl8: case Iop_Shr8: case Iop_Sar8:
Iop_CmpGT8Sx8, Iop_CmpGT16Sx4, Iop_CmpGT32Sx2,
/* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
- Iop_ShlN16x4, Iop_ShlN32x2,
- Iop_ShrN16x4, Iop_ShrN32x2,
- Iop_SarN16x4, Iop_SarN32x2,
+ Iop_ShlN16x4, Iop_ShlN32x2,
+ Iop_ShrN16x4, Iop_ShrN32x2,
+ Iop_SarN8x8, Iop_SarN16x4, Iop_SarN32x2,
/* NARROWING -- narrow 2xI64 into 1xI64, hi half from left arg */
Iop_QNarrow16Ux4,