if (epartIsReg(modrm)) {
delta++;
- /* Get it onto the client's stack. */
+ /* Get it onto the client's stack. Oh, this is a horrible
+ kludge. See https://bugs.kde.org/show_bug.cgi?id=245925.
+ Because of the ELF ABI stack redzone, there may be live data
+ up to 128 bytes below %RSP. So we can't just push it on the
+ stack, else we may wind up trashing live data, and causing
+ impossible-to-find simulation errors. (Yes, this did
+ happen.) So we need to drop RSP by at least 128 before
+ pushing it. That unfortunately means hitting Memcheck's
+ fast-case painting code. Ideally we should drop more than
+ 128, to reduce the chances of breaking buggy programs that
+ have live data below -128(%RSP). Memcheck fast-cases moves
+ of 288 bytes due to the need to handle ppc64-linux quickly,
+ so let's use 288. Of course the real fix is to get rid of
+ this kludge entirely. */
t_rsp = newTemp(Ity_I64);
t_addr0 = newTemp(Ity_I64);
- assign( t_rsp, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(sz)) );
+ vassert(vbi->guest_stack_redzone_size == 128);
+ assign( t_rsp, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(288)) );
putIReg64(R_RSP, mkexpr(t_rsp));
storeLE( mkexpr(t_rsp), getIRegE(sz, pfx, modrm) );
standard zero-extend rule */
if (op != BtOpNone)
putIRegE(sz, pfx, modrm, loadLE(szToITy(sz), mkexpr(t_rsp)) );
- putIReg64(R_RSP, binop(Iop_Add64, mkexpr(t_rsp), mkU64(sz)) );
+ putIReg64(R_RSP, binop(Iop_Add64, mkexpr(t_rsp), mkU64(288)) );
}
DIP("bt%s%c %s, %s\n",
static
-UInt dis_bt_G_E ( UChar sorb, Bool locked, Int sz, Int delta, BtOp op )
+UInt dis_bt_G_E ( VexAbiInfo* vbi,
+ UChar sorb, Bool locked, Int sz, Int delta, BtOp op )
{
HChar dis_buf[50];
UChar modrm;
t_esp = newTemp(Ity_I32);
t_addr0 = newTemp(Ity_I32);
- assign( t_esp, binop(Iop_Sub32, getIReg(4, R_ESP), mkU32(sz)) );
+ /* For the choice of the value 128, see comment in dis_bt_G_E in
+ guest_amd64_toIR.c. We point out here only that 128 is
+ fast-cased in Memcheck and is > 0, so it seems like a good
+ choice. */
+ vassert(vbi->guest_stack_redzone_size == 0);
+ assign( t_esp, binop(Iop_Sub32, getIReg(4, R_ESP), mkU32(128)) );
putIReg(4, R_ESP, mkexpr(t_esp));
storeLE( mkexpr(t_esp), getIReg(sz, eregOfRM(modrm)) );
if (epartIsReg(modrm)) {
/* t_esp still points at it. */
putIReg(sz, eregOfRM(modrm), loadLE(szToITy(sz), mkexpr(t_esp)) );
- putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(t_esp), mkU32(sz)) );
+ putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(t_esp), mkU32(128)) );
}
DIP("bt%s%c %s, %s\n",
Bool resteerCisOk,
void* callback_opaque,
Long delta64,
- VexArchInfo* archinfo
+ VexArchInfo* archinfo,
+ VexAbiInfo* vbi
)
{
IRType ty;
/* =-=-=-=-=-=-=-=-=- BT/BTS/BTR/BTC =-=-=-=-=-=-= */
case 0xA3: /* BT Gv,Ev */
- delta = dis_bt_G_E ( sorb, pfx_lock, sz, delta, BtOpNone );
+ delta = dis_bt_G_E ( vbi, sorb, pfx_lock, sz, delta, BtOpNone );
break;
case 0xB3: /* BTR Gv,Ev */
- delta = dis_bt_G_E ( sorb, pfx_lock, sz, delta, BtOpReset );
+ delta = dis_bt_G_E ( vbi, sorb, pfx_lock, sz, delta, BtOpReset );
break;
case 0xAB: /* BTS Gv,Ev */
- delta = dis_bt_G_E ( sorb, pfx_lock, sz, delta, BtOpSet );
+ delta = dis_bt_G_E ( vbi, sorb, pfx_lock, sz, delta, BtOpSet );
break;
case 0xBB: /* BTC Gv,Ev */
- delta = dis_bt_G_E ( sorb, pfx_lock, sz, delta, BtOpComp );
+ delta = dis_bt_G_E ( vbi, sorb, pfx_lock, sz, delta, BtOpComp );
break;
/* =-=-=-=-=-=-=-=-=- CMOV =-=-=-=-=-=-=-=-=-=-=-= */
expect_CAS = False;
dres = disInstr_X86_WRK ( &expect_CAS, put_IP, resteerOkFn,
resteerCisOk,
- callback_opaque, delta, archinfo );
+ callback_opaque,
+ delta, archinfo, abiinfo );
x2 = irsb_IN->stmts_used;
vassert(x2 >= x1);
vex_traceflags |= VEX_TRACE_FE;
dres = disInstr_X86_WRK ( &expect_CAS, put_IP, resteerOkFn,
resteerCisOk,
- callback_opaque, delta, archinfo );
+ callback_opaque,
+ delta, archinfo, abiinfo );
for (i = x1; i < x2; i++) {
vex_printf("\t\t");
ppIRStmt(irsb_IN->stmts[i]);