}
#endif
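+/* Execute a rounding/conversion instruction natively on a MIPS host to
+   observe its effect on the FCSR: load the guest FCSR into the host FCSR
+   ($31), move the source guest FP register into $f0 (the 32-bit variant
+   loads the two 32-bit halves into $f0/$f1), execute 'inst', and read the
+   resulting FCSR back into 'ret'. */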
+#define ASM_VOLATILE_ROUND32(fs, inst) \
+ __asm__ volatile("ctc1 %3, $31" "\n\t" \
+ "mtc1 %1, $f0" "\n\t" \
+ "mtc1 %2, $f1" "\n\t" \
+ ""#inst" $f0, $f0" "\n\t" \
+ "cfc1 %0, $31" "\n\t" \
+ : "=r" (ret) \
+ : "r" (addr[fs]), "r" (addr[fs+1]), "r" (fcsr) \
+ : "$f0", "$f1" \
+ );
+
+#define ASM_VOLATILE_ROUND64(fs, inst) \
+ __asm__ volatile("ctc1 %2, $31" "\n\t" \
+ "dmtc1 %1, $f0" "\n\t" \
+ ""#inst" $f0, $f0" "\n\t" \
+ "cfc1 %0, $31" "\n\t" \
+ : "=r" (ret) \
+ : "r" (addr[fs]), "r" (fcsr) \
+ : "$f0" \
+ );
+
+/* TODO: Add cases for all FPU instructions, since every FPU instruction can
+   change the value of the FCSR register. */
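+/* Run 'inst' on the value of guest FP register 'fs', with the host FCSR set
+   from the guest FCSR, and return the FCSR value the instruction produces. */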
+extern UInt mips_dirtyhelper_calculate_FCSR ( void* gs, UInt fs, flt_op inst )
+{
+ UInt ret = 0;
+#if defined(VGA_mips32)
+ VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
+ UInt *addr = (UInt *)&guest_state->guest_f0;
+#define ASM_VOLATILE_ROUND(fs, inst) ASM_VOLATILE_ROUND32(fs, inst)
+#else
+ VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
+ ULong *addr = (ULong *)&guest_state->guest_f0;
+#define ASM_VOLATILE_ROUND(fs, inst) ASM_VOLATILE_ROUND64(fs, inst)
+#endif
+ UInt fcsr = guest_state->guest_FCSR;
+ switch (inst) {
+ case ROUNDWD:
+ ASM_VOLATILE_ROUND(fs, round.w.d)
+ break;
+ case FLOORWS:
+ ASM_VOLATILE_ROUND(fs, floor.w.s)
+ break;
+ case FLOORWD:
+ ASM_VOLATILE_ROUND(fs, floor.w.d)
+ break;
+ case TRUNCWS:
+ ASM_VOLATILE_ROUND(fs, trunc.w.s)
+ break;
+ case TRUNCWD:
+ ASM_VOLATILE_ROUND(fs, trunc.w.d)
+ break;
+ case CEILWS:
+ ASM_VOLATILE_ROUND(fs, ceil.w.s)
+ break;
+ case CEILWD:
+ ASM_VOLATILE_ROUND(fs, ceil.w.d)
+ break;
+ case CEILLS:
+ ASM_VOLATILE_ROUND(fs, ceil.l.s)
+ break;
+ case CEILLD:
+ ASM_VOLATILE_ROUND(fs, ceil.l.d)
+ break;
+ case ROUNDLS:
+ ASM_VOLATILE_ROUND(fs, round.l.s)
+ break;
+ case ROUNDLD:
+ ASM_VOLATILE_ROUND(fs, round.l.d)
+ break;
+ case TRUNCLS:
+ ASM_VOLATILE_ROUND(fs, trunc.l.s)
+ break;
+ case TRUNCLD:
+ ASM_VOLATILE_ROUND(fs, trunc.l.d)
+ break;
+ case CVTDS:
+ ASM_VOLATILE_ROUND(fs, cvt.d.s)
+ break;
+ case CVTDW:
+ ASM_VOLATILE_ROUND(fs, cvt.d.w)
+ break;
+ case CVTDL:
+ ASM_VOLATILE_ROUND(fs, cvt.d.l)
+ break;
+ case CVTSW:
+ ASM_VOLATILE_ROUND(fs, cvt.s.w)
+ break;
+ case CVTSD:
+ ASM_VOLATILE_ROUND(fs, cvt.s.d)
+ break;
+ case CVTSL:
+ ASM_VOLATILE_ROUND(fs, cvt.s.l)
+ break;
+ case CVTWS:
+ ASM_VOLATILE_ROUND(fs, cvt.w.s)
+ break;
+ case CVTWD:
+ ASM_VOLATILE_ROUND(fs, cvt.w.d)
+ break;
+ case CVTLS:
+ ASM_VOLATILE_ROUND(fs, cvt.l.s)
+ break;
+ case CVTLD:
+ ASM_VOLATILE_ROUND(fs, cvt.l.d)
+ break;
+ case FLOORLS:
+ ASM_VOLATILE_ROUND(fs, floor.l.s)
+ break;
+ case FLOORLD:
+ ASM_VOLATILE_ROUND(fs, floor.l.d)
+ break;
+ case ROUNDWS:
+ ASM_VOLATILE_ROUND(fs, round.w.s)
+ break;
+ default:
+ vassert(0);
+ break;
+ }
+ return ret;
+}
+
/*---------------------------------------------------------------*/
/*--- end guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/
stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_FCSR), e));
}
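+/* Emit a dirty call to mips_dirtyhelper_calculate_FCSR for instruction
+   'inst' with source register 'fs', and write the FCSR value it returns
+   into the guest FCSR. */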
+static void calculateFCSR(UInt fs, UInt inst)
+{
+ IRDirty *d;
+ IRTemp fcsr = newTemp(Ity_I32);
+   /* IRExpr_BBPTR() => we need to pass a pointer to the
+      guest state to the helper. */
+ d = unsafeIRDirty_1_N(fcsr, 0,
+ "mips_dirtyhelper_calculate_FCSR",
+ &mips_dirtyhelper_calculate_FCSR,
+ mkIRExprVec_3(IRExpr_BBPTR(),
+ mkU32(fs),
+ mkU32(inst)));
+
+ /* Declare we're reading guest state. */
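+   /* In mips32 mode a 64-bit FP value occupies an even/odd pair of 32-bit
+      guest registers, so two pieces of guest state are read. */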
+ d->nFxState = mode64 ? 1 : 2;
+ vex_bzero(&d->fxState, sizeof(d->fxState));
+
+ d->fxState[0].fx = Ifx_Read; /* read */
+ d->fxState[0].offset = floatGuestRegOffset(fs);
+ if (mode64)
+ d->fxState[0].size = sizeof(ULong);
+ else
+ d->fxState[0].size = sizeof(UInt);
+
+ if (!mode64) {
+ d->fxState[1].fx = Ifx_Read; /* read */
+ d->fxState[1].offset = floatGuestRegOffset(fs+1);
+ d->fxState[1].size = sizeof(UInt);
+ }
+
+ stmt(IRStmt_Dirty(d));
+
+ putFCSR(mkexpr(fcsr));
+}
+
static IRExpr *getULR(void)
{
if (mode64)
switch (fmt) {
case 0x10: /* S */
DIP("round.l.s f%d, f%d", fd, fs);
+ calculateFCSR(fs, ROUNDLS);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, mkU32(0x0),
break;
case 0x11: /* D */
DIP("round.l.d f%d, f%d", fd, fs);
+ calculateFCSR(fs, ROUNDLD);
putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x0),
getFReg(fs)));
break;
switch (fmt) {
case 0x10: /* S */
DIP("trunc.l.s f%d, f%d", fd, fs);
+ calculateFCSR(fs, TRUNCLS);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, mkU32(0x3),
getLoFromF64(Ity_F64, getFReg(fs))));
break;
case 0x11: /* D */
DIP("trunc.l.d f%d, f%d", fd, fs);
+ calculateFCSR(fs, TRUNCLD);
putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x3),
getFReg(fs)));
break;
switch (fmt) {
case 0x10: /* S */
DIP("cvt.d.s f%d, f%d", fd, fs);
+ calculateFCSR(fs, CVTDS);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
putFReg(fd, unop(Iop_F32toF64, mkexpr(t3)));
- break;
- } else {
+ } else
putDReg(fd, unop(Iop_F32toF64, getFReg(fs)));
- break;
- }
+ break;
case 0x14:
DIP("cvt.d.w %d, %d", fd, fs);
+ calculateFCSR(fs, CVTDW);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x15: { /* L */
if (mode64) {
DIP("cvt.d.l %d, %d", fd, fs);
+ calculateFCSR(fs, CVTDL);
t0 = newTemp(Ity_I64);
assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
switch (fmt) {
case 0x14: /* W */
DIP("cvt.s.w %d, %d", fd, fs);
+ calculateFCSR(fs, CVTSW);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
assign(t1, unop(Iop_64to32, mkexpr(t0)));
putFReg(fd, mkWidenFromF32(tyF, binop(Iop_I32StoF32,
get_IR_roundingmode(), mkexpr(t1))));
- break;
} else {
t0 = newTemp(Ity_I32);
assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs)));
putFReg(fd, binop(Iop_I32StoF32, get_IR_roundingmode(),
mkexpr(t0)));
- break;
- }
+ }
+ break;
case 0x11: /* D */
DIP("cvt.s.d %d, %d", fd, fs);
+ calculateFCSR(fs, CVTSD);
if (mode64) {
t0 = newTemp(Ity_F32);
assign(t0, binop(Iop_F64toF32, get_IR_roundingmode(),
case 0x15: /* L */
DIP("cvt.s.l %d, %d", fd, fs);
+ calculateFCSR(fs, CVTSL);
t0 = newTemp(Ity_I64);
assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
switch (fmt) {
case 0x10: /* S */
DIP("cvt.w.s %d, %d", fd, fs);
+ calculateFCSR(fs, CVTWS);
if (mode64) {
putFReg(fd, mkWidenFromF32(tyF, binop(Iop_RoundF32toInt,
get_IR_roundingmode(), getLoFromF64(tyF,
case 0x11:
DIP("cvt.w.d %d, %d", fd, fs);
+ calculateFCSR(fs, CVTWD);
if (mode64) {
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_F32);
switch (fmt) {
case 0x10: /* S */
DIP("cvt.l.s %d, %d", fd, fs);
+ calculateFCSR(fs, CVTLS);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, get_IR_roundingmode(),
case 0x11: { /* D */
DIP("cvt.l.d %d, %d", fd, fs);
+ calculateFCSR(fs, CVTLD);
putFReg(fd, binop(Iop_RoundF64toInt,
get_IR_roundingmode(), getFReg(fs)));
break;
switch (fmt) {
case 0x10: /* S */
DIP("floor.l.s %d, %d", fd, fs);
+ calculateFCSR(fs, FLOORLS);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, mkU32(0x1),
case 0x11: /* D */
DIP("floor.l.d %d, %d", fd, fs);
+ calculateFCSR(fs, FLOORLD);
putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x1),
getFReg(fs)));
break;
switch (fmt) {
case 0x10: /* S */
DIP("round.w.s f%d, f%d", fd, fs);
+ calculateFCSR(fs, ROUNDWS);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
mkexpr(t3)));
putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
- break;
- } else {
+ } else
putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x0),
getFReg(fs)));
- break;
- }
+ break;
case 0x11: /* D */
DIP("round.w.d f%d, f%d", fd, fs);
+ calculateFCSR(fs, ROUNDWD);
if (mode64) {
t0 = newTemp(Ity_I32);
assign(t0, binop(Iop_F64toI32S, mkU32(0x0),
getDReg(fs)));
putFReg(fd, mkWidenFromF32(tyF,
unop(Iop_ReinterpI32asF32, mkexpr(t0))));
- break;
} else {
t0 = newTemp(Ity_I32);
getDReg(fs)));
putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
- break;
}
+ break;
default:
goto decode_failure;
switch (fmt) {
case 0x10: /* S */
DIP("floor.w.s f%d, f%d", fd, fs);
+ calculateFCSR(fs, FLOORWS);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
mkexpr(t3)));
putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
- break;
- } else {
+ } else
putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x1),
getFReg(fs)));
- break;
- }
+ break;
case 0x11: /* D */
DIP("floor.w.d f%d, f%d", fd, fs);
+ calculateFCSR(fs, FLOORWD);
if (mode64) {
t0 = newTemp(Ity_I32);
assign(t0, binop(Iop_F64toI32S, mkU32(0x1),
switch (fmt) {
case 0x10: /* S */
DIP("trunc.w.s %d, %d", fd, fs);
+ calculateFCSR(fs, TRUNCWS);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
mkexpr(t3)));
putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
- break;
- } else {
+ } else
putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x3),
getFReg(fs)));
break;
- }
case 0x11: /* D */
DIP("trunc.w.d %d, %d", fd, fs);
+ calculateFCSR(fs, TRUNCWD);
if (mode64) {
t0 = newTemp(Ity_I32);
putFReg(fd, mkWidenFromF32(tyF,
unop(Iop_ReinterpI32asF32, mkexpr(t0))));
- break;
} else {
t0 = newTemp(Ity_I32);
getDReg(fs)));
putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
- break;
}
+ break;
default:
goto decode_failure;
switch (fmt) {
case 0x10: /* S */
DIP("ceil.w.s %d, %d", fd, fs);
+ calculateFCSR(fs, CEILWS);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x11: /* D */
DIP("ceil.w.d %d, %d", fd, fs);
+ calculateFCSR(fs, CEILWD);
if (!mode64) {
t0 = newTemp(Ity_I32);
assign(t0, binop(Iop_F64toI32S, mkU32(0x2),
getDReg(fs)));
putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
- break;
} else {
t0 = newTemp(Ity_I32);
assign(t0, binop(Iop_F64toI32S, mkU32(0x2),
getDReg(fs)));
putFReg(fd, mkWidenFromF32(tyF,
unop(Iop_ReinterpI32asF32, mkexpr(t0))));
- break;
}
+ break;
default:
goto decode_failure;
switch (fmt) {
case 0x10: /* S */
DIP("ceil.l.s %d, %d", fd, fs);
+ calculateFCSR(fs, CEILLS);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, mkU32(0x2),
case 0x11: /* D */
DIP("ceil.l.d %d, %d", fd, fs);
+ calculateFCSR(fs, CEILLD);
putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x2),
getFReg(fs)));
break;