fp_pop();
break;
-//.. case 5: { /* FLD extended-real */
-//.. /* Uses dirty helper:
-//.. ULong loadF80le ( VexGuestX86State*, UInt )
-//.. addr holds the address. First, do a dirty call to
-//.. get hold of the data. */
-//.. IRTemp val = newTemp(Ity_I64);
-//.. IRExpr** args = mkIRExprVec_1 ( mkexpr(addr) );
-//..
-//.. IRDirty* d = unsafeIRDirty_1_N (
-//.. val,
-//.. 0/*regparms*/,
-//.. "x86g_loadF80le", &x86g_loadF80le,
-//.. args
-//.. );
-//.. /* declare that we're reading memory */
-//.. d->mFx = Ifx_Read;
-//.. d->mAddr = mkexpr(addr);
-//.. d->mSize = 10;
-//..
-//.. /* execute the dirty call, dumping the result in val. */
-//.. stmt( IRStmt_Dirty(d) );
-//.. fp_push();
-//.. put_ST(0, unop(Iop_ReinterpI64asF64, mkexpr(val)));
-//..
-//.. DIP("fldt %s\n", dis_buf);
-//.. break;
-//.. }
-//..
-//.. case 7: { /* FSTP extended-real */
-//.. /* Uses dirty helper: void x86g_storeF80le ( UInt, ULong ) */
-//.. IRExpr** args
-//.. = mkIRExprVec_2( mkexpr(addr),
-//.. unop(Iop_ReinterpF64asI64, get_ST(0)) );
-//..
-//.. IRDirty* d = unsafeIRDirty_0_N (
-//.. 0/*regparms*/,
-//.. "x86g_storeF80le", &x86g_storeF80le,
-//.. args
-//.. );
-//.. /* declare we're writing memory */
-//.. d->mFx = Ifx_Write;
-//.. d->mAddr = mkexpr(addr);
-//.. d->mSize = 10;
-//..
-//.. /* execute the dirty call. */
-//.. stmt( IRStmt_Dirty(d) );
-//.. fp_pop();
-//..
-//.. DIP("fstpt\n %s", dis_buf);
-//.. break;
-//.. }
+ case 5: { /* FLD extended-real */
+ /* Uses dirty helper:
+             ULong amd64g_loadF80le ( ULong addr )
+ addr holds the address. First, do a dirty call to
+ get hold of the data. */
+ IRTemp val = newTemp(Ity_I64);
+ IRExpr** args = mkIRExprVec_1 ( mkexpr(addr) );
+
+ IRDirty* d = unsafeIRDirty_1_N (
+ val,
+ 0/*regparms*/,
+ "amd64g_loadF80le", &amd64g_loadF80le,
+ args
+ );
+ /* declare that we're reading memory */
+ d->mFx = Ifx_Read;
+ d->mAddr = mkexpr(addr);
+ d->mSize = 10;
+
+ /* execute the dirty call, dumping the result in val. */
+ stmt( IRStmt_Dirty(d) );
+ fp_push();
+ put_ST(0, unop(Iop_ReinterpI64asF64, mkexpr(val)));
+
+ DIP("fldt %s\n", dis_buf);
+ break;
+ }
+
+ case 7: { /* FSTP extended-real */
+ /* Uses dirty helper:
+ void amd64g_storeF80le ( ULong addr, ULong data )
+ */
+ IRExpr** args
+ = mkIRExprVec_2( mkexpr(addr),
+ unop(Iop_ReinterpF64asI64, get_ST(0)) );
+
+ IRDirty* d = unsafeIRDirty_0_N (
+ 0/*regparms*/,
+ "amd64g_storeF80le", &amd64g_storeF80le,
+ args
+ );
+ /* declare we're writing memory */
+ d->mFx = Ifx_Write;
+ d->mAddr = mkexpr(addr);
+ d->mSize = 10;
+
+ /* execute the dirty call. */
+ stmt( IRStmt_Dirty(d) );
+ fp_pop();
+
+         DIP("fstpt %s\n", dis_buf);
+ break;
+ }
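Aside: the mFx/mAddr/mSize annotations above tell VEX's optimiser and any
instrumenters exactly which 10 bytes of memory each dirty call touches.
On the C side the two helpers are thin shells around 80-bit <-> 64-bit
conversion routines. A minimal sketch, assuming converters
convert_f80le_to_f64le / convert_f64le_to_f80le along the lines of the
existing x86 ones:

   /* Sketch only. Reads 10 bytes at addr, narrows the extended-real
      value to F64, and returns the result's bit pattern as a ULong. */
   ULong amd64g_loadF80le ( ULong addr )
   {
      ULong f64;
      convert_f80le_to_f64le( (UChar*)addr, (UChar*)&f64 );
      return f64;
   }

   /* Sketch only. Widens the F64 bit pattern in data to 80-bit
      extended real and writes 10 bytes at addr. */
   void amd64g_storeF80le ( ULong addr, ULong data )
   {
      convert_f64le_to_f80le( (UChar*)&data, (UChar*)addr );
   }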
default:
vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
//.. mk_x86g_calculate_condition(X86CondNB)),
//.. get_ST(0), get_ST(r_src)) );
//.. break;
-//..
-//.. case 0xC8 ... 0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */
-//.. r_src = (UInt)modrm - 0xC8;
-//.. DIP("fcmovnz %%st(%d), %%st(0)\n", r_src);
-//.. put_ST_UNCHECKED(0,
-//.. IRExpr_Mux0X(
-//.. unop(Iop_1Uto8,
-//.. mk_x86g_calculate_condition(X86CondNZ)),
-//.. get_ST(0), get_ST(r_src)) );
-//.. break;
-//..
+
+ case 0xC8 ... 0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */
+ r_src = (UInt)modrm - 0xC8;
+ DIP("fcmovnz %%st(%d), %%st(0)\n", r_src);
+ put_ST_UNCHECKED(0,
+ IRExpr_Mux0X(
+ unop(Iop_1Uto8,
+ mk_amd64g_calculate_condition(AMD64CondNZ)),
+ get_ST(0), get_ST(r_src)) );
+ break;
+
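Aside: IRExpr_Mux0X(cond8, expr0, exprX) evaluates to expr0 when the I8
condition is zero and to exprX otherwise, so the FCMOVNZ above writes
ST(r_src) into ST(0) only when the NZ condition holds and leaves ST(0)
untouched otherwise. An illustrative C analogue (not VEX API):

   /* Select semantics of Mux0X: a zero condition keeps expr0. */
   double mux0x ( unsigned char cond8, double expr0, double exprX )
   {
      return cond8 == 0 ? expr0 : exprX;
   }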
//.. case 0xD0 ... 0xD7: /* FCMOVNBE ST(i), ST(0) */
//.. r_src = (UInt)modrm - 0xD0;
//.. DIP("fcmovnbe %%st(%d), %%st(0)\n", r_src);
//.. binop(Iop_And32, get_C3210(), mkU32(0x4700))
//.. )));
//.. break;
-//..
-//.. case 0xE8 ... 0xEF: /* FUCOMIP %st(0),%st(?) */
-//.. fp_do_ucomi_ST0_STi( (UInt)modrm - 0xE8, True );
-//.. break;
-//..
+
+ case 0xE8 ... 0xEF: /* FUCOMIP %st(0),%st(?) */
+ fp_do_ucomi_ST0_STi( (UInt)modrm - 0xE8, True );
+ break;
+
//.. case 0xF0 ... 0xF7: /* FCOMIP %st(0),%st(?) */
//.. /* not really right since COMIP != UCOMIP */
//.. fp_do_ucomi_ST0_STi( (UInt)modrm - 0xF0, True );
return dst;
}
+ /* ReinterpF64asI64(e) */
+ /* Given an IEEE754 double, produce an I64 with the same bit
+ pattern. */
+ case Iop_ReinterpF64asI64: {
+ AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+ HReg dst = newVRegI(env);
+ HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+ /* paranoia */
+ set_SSE_rounding_default(env);
+ addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, src, m8_rsp));
+ addInstr(env, AMD64Instr_Alu64R(
+ Aalu_MOV, AMD64RMI_Mem(m8_rsp), dst));
+ return dst;
+ }
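Aside: unlike the deleted x86 version, which had to move %esp to make a
scratch slot, this writes to -8(%rsp) directly; the AMD64 SysV ABI
guarantees a 128-byte red zone below %rsp, so no stack adjustment is
needed. The store/reload pair is a pure bit copy. As a plain-C sketch of
the operation's meaning (illustrative, not part of the patch):

   #include <string.h>
   #include <stdint.h>

   /* Reinterpret an F64 as an I64: same 64 bits, no conversion and
      no rounding, hence no real dependence on the rounding mode. */
   static uint64_t reinterpF64asI64 ( double d )
   {
      uint64_t i;
      memcpy(&i, &d, sizeof i);
      return i;
   }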
+
case Iop_16to8:
case Iop_32to8:
case Iop_32to16:
//.. return;
//.. }
//..
-//.. /* ReinterpF64asI64(e) */
-//.. /* Given an IEEE754 double, produce an I64 with the same bit
-//.. pattern. */
-//.. case Iop_ReinterpF64asI64: {
-//.. HReg rf = iselDblExpr(env, e->Iex.Unop.arg);
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
-//.. X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());
-//.. X86AMode* four_esp = X86AMode_IR(4, hregX86_ESP());
-//.. /* paranoia */
-//.. set_FPU_rounding_default(env);
-//.. /* subl $8, %esp */
-//.. sub_from_esp(env, 8);
-//.. /* gstD %rf, 0(%esp) */
-//.. addInstr(env,
-//.. X86Instr_FpLdSt(False/*store*/, 8, rf, zero_esp));
-//.. /* movl 0(%esp), %tLo */
-//.. addInstr(env,
-//.. X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(zero_esp), tLo));
-//.. /* movl 4(%esp), %tHi */
-//.. addInstr(env,
-//.. X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(four_esp), tHi));
-//.. /* addl $8, %esp */
-//.. add_to_esp(env, 8);
-//.. *rHi = tHi;
-//.. *rLo = tLo;
-//.. return;
-//.. }
-//..
//.. case Iop_CmpNEZ32x2:
//.. fn = (HWord)h_generic_calc_CmpNEZ32x2; goto unish;
//.. case Iop_CmpNEZ16x4:
//.. add_to_esp(env, 4);
//.. return dst;
//.. }
-//.. case Iop_ReinterpI64asF64: {
-//.. /* Given an I64, produce an IEEE754 double with the same
-//.. bit pattern. */
-//.. HReg dst = newVRegF(env);
-//.. HReg rHi, rLo;
-//.. iselInt64Expr( &rHi, &rLo, env, e->Iex.Unop.arg);
-//.. /* paranoia */
-//.. set_FPU_rounding_default(env);
-//.. addInstr(env, X86Instr_Push(X86RMI_Reg(rHi)));
-//.. addInstr(env, X86Instr_Push(X86RMI_Reg(rLo)));
-//.. addInstr(env, X86Instr_FpLdSt(
-//.. True/*load*/, 8, dst,
-//.. X86AMode_IR(0, hregX86_ESP())));
-//.. add_to_esp(env, 8);
-//.. return dst;
-//.. }
+ case Iop_ReinterpI64asF64: {
+ /* Given an I64, produce an IEEE754 double with the same
+ bit pattern. */
+ AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+ HReg dst = newVRegV(env);
+ AMD64RI* src = iselIntExpr_RI(env, e->Iex.Unop.arg);
+ /* paranoia */
+ set_SSE_rounding_default(env);
+ addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, src, m8_rsp));
+ addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 8, dst, m8_rsp));
+ return dst;
+ }
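Aside: this is the exact inverse of the Iop_ReinterpF64asI64 case above
(a GPR store to the red-zone slot, then an SSE load), so the two compose
to a lossless round trip on every bit pattern, NaN payloads included. A
small self-contained check (illustrative, not VEX code):

   #include <assert.h>
   #include <string.h>
   #include <stdint.h>

   int main ( void )
   {
      uint64_t in = 0x7FF8DEADBEEF0001ULL; /* quiet NaN with payload */
      double   d;
      uint64_t out;
      memcpy(&d, &in, 8);    /* I64 -> F64, bits unchanged */
      memcpy(&out, &d, 8);   /* F64 -> I64, bits unchanged */
      assert(out == in);
      return 0;
   }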
case Iop_F32toF64: {
HReg f32;
HReg f64 = newVRegV(env);