}
#endif
-#define ASM_VOLATILE_ROUND32(fs, inst) \
- __asm__ volatile("cfc1 $t0, $31" "\n\t" \
- "ctc1 %2, $31" "\n\t" \
- "mtc1 %1, $f0" "\n\t" \
- ""#inst" $f0, $f0" "\n\t" \
- "cfc1 %0, $31" "\n\t" \
- "ctc1 $t0, $31" "\n\t" \
+#define ASM_VOLATILE_UNARY32(inst) \
+ __asm__ volatile("cfc1 $t0, $31" "\n\t" \
+ "ctc1 %2, $31" "\n\t" \
+ "mtc1 %1, $f20" "\n\t" \
+ #inst" $f20, $f20" "\n\t" \
+ "cfc1 %0, $31" "\n\t" \
+ "ctc1 $t0, $31" "\n\t" \
: "=r" (ret) \
- : "r" (loVal), "r" (fcsr) \
- : "t0", "$f0", "$f1" \
+ : "r" (loFsVal), "r" (fcsr) \
+ : "t0", "$f20" \
);
-#define ASM_VOLATILE_ROUND32_DOUBLE(fs, inst) \
- __asm__ volatile("cfc1 $t0, $31" "\n\t" \
- "ctc1 %3, $31" "\n\t" \
- "mtc1 %1, $f0" "\n\t" \
- "mtc1 %2, $f1" "\n\t" \
- ""#inst" $f0, $f0" "\n\t" \
- "cfc1 %0, $31" "\n\t" \
- "ctc1 $t0, $31" "\n\t" \
+#define ASM_VOLATILE_UNARY32_DOUBLE(inst) \
+ __asm__ volatile("cfc1 $t0, $31" "\n\t" \
+ "ctc1 %3, $31" "\n\t" \
+ "mtc1 %1, $f20" "\n\t" \
+ "mtc1 %2, $f21" "\n\t" \
+ #inst" $f20, $f20" "\n\t" \
+ "cfc1 %0, $31" "\n\t" \
+ "ctc1 $t0, $31" "\n\t" \
: "=r" (ret) \
- : "r" (loVal), "r" (hiVal), "r" (fcsr) \
- : "t0", "$f0", "$f1" \
+ : "r" (loFsVal), "r" (hiFsVal), "r" (fcsr) \
+ : "t0", "$f20", "$f21" \
);
-#define ASM_VOLATILE_ROUND64(fs, inst) \
- __asm__ volatile("cfc1 $t0, $31" "\n\t" \
- "ctc1 %2, $31" "\n\t" \
- "dmtc1 %1, $f0" "\n\t" \
- ""#inst" $f0, $f0" "\n\t" \
- "cfc1 %0, $31" "\n\t" \
- "ctc1 $t0, $31" "\n\t" \
+#define ASM_VOLATILE_UNARY64(inst) \
+ __asm__ volatile("cfc1 $t0, $31" "\n\t" \
+ "ctc1 %2, $31" "\n\t" \
+ "dmtc1 %1, $f24" "\n\t" \
+ #inst" $f24, $f24" "\n\t" \
+ "cfc1 %0, $31" "\n\t" \
+ "ctc1 $t0, $31" "\n\t" \
: "=r" (ret) \
- : "r" (addr[fs]), "r" (fcsr) \
- : "t0", "$f0" \
+ : "r" (fsVal), "r" (fcsr) \
+ : "t0", "$f24" \
+ );
+
+#define ASM_VOLATILE_BINARY32(inst) \
+ __asm__ volatile("cfc1 $t0, $31" "\n\t" \
+ "ctc1 %3, $31" "\n\t" \
+ "mtc1 %1, $f20" "\n\t" \
+ "mtc1 %2, $f22" "\n\t" \
+ #inst" $f20, $f20, $f22" "\n\t" \
+ "cfc1 %0, $31" "\n\t" \
+ "ctc1 $t0, $31" "\n\t" \
+ : "=r" (ret) \
+ : "r" (loFsVal), "r" (loFtVal), "r" (fcsr) \
+ : "t0", "$f20", "$f22" \
+ );
+
+#define ASM_VOLATILE_BINARY32_DOUBLE(inst) \
+ __asm__ volatile("cfc1 $t0, $31" "\n\t" \
+ "ctc1 %5, $31" "\n\t" \
+ "mtc1 %1, $f20" "\n\t" \
+ "mtc1 %2, $f21" "\n\t" \
+ "mtc1 %3, $f22" "\n\t" \
+ "mtc1 %4, $f23" "\n\t" \
+ #inst" $f20, $f20, $f22" "\n\t" \
+ "cfc1 %0, $31" "\n\t" \
+ "ctc1 $t0, $31" "\n\t" \
+ : "=r" (ret) \
+ : "r" (loFsVal), "r" (hiFsVal), "r" (loFtVal), \
+ "r" (hiFtVal), "r" (fcsr) \
+ : "t0", "$f20", "$f21", "$f22", "$f23" \
+ );
+
+#define ASM_VOLATILE_BINARY64(inst) \
+ __asm__ volatile("cfc1 $t0, $31" "\n\t" \
+ "ctc1 %3, $31" "\n\t" \
+ "dmtc1 %1, $f24" "\n\t" \
+ "dmtc1 %2, $f25" "\n\t" \
+ #inst" $f24, $f24, $f25" "\n\t" \
+ "cfc1 %0, $31" "\n\t" \
+ "ctc1 $t0, $31" "\n\t" \
+ : "=r" (ret) \
+ : "r" (fsVal), "r" (ftVal), "r" (fcsr) \
+ : "t0", "$f24", "$f25" \
);
/* TODO: Add cases for all fpu instructions because all fpu instructions are
change the value of FCSR register. */
-extern UInt mips_dirtyhelper_calculate_FCSR ( void* gs, UInt fs, flt_op inst )
+extern UInt mips_dirtyhelper_calculate_FCSR ( void* gs, UInt fs, UInt ft,
+ flt_op inst )
{
UInt ret = 0;
#if defined(__mips__)
#if defined(VGA_mips32)
VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
- UInt *addr = (UInt *)&guest_state->guest_f0;
- UInt loVal = addr[fs];
- UInt hiVal = addr[fs+1];
-#define ASM_VOLATILE_ROUND(fs, inst) ASM_VOLATILE_ROUND32(fs, inst)
-#define ASM_VOLATILE_ROUND_DOUBLE(fs, inst) ASM_VOLATILE_ROUND32_DOUBLE(fs, inst)
+ UInt *addr = (UInt *)&guest_state->guest_f0;
+ UInt loFsVal = addr[fs];
+ UInt hiFsVal = addr[fs+1];
+ UInt loFtVal = addr[ft];
+ UInt hiFtVal = addr[ft+1];
+#define ASM_VOLATILE_UNARY(inst) ASM_VOLATILE_UNARY32(inst)
+#define ASM_VOLATILE_UNARY_DOUBLE(inst) ASM_VOLATILE_UNARY32_DOUBLE(inst)
+#define ASM_VOLATILE_BINARY(inst) ASM_VOLATILE_BINARY32(inst)
+#define ASM_VOLATILE_BINARY_DOUBLE(inst) ASM_VOLATILE_BINARY32_DOUBLE(inst)
#else
VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
ULong *addr = (ULong *)&guest_state->guest_f0;
-#define ASM_VOLATILE_ROUND(fs, inst) ASM_VOLATILE_ROUND64(fs, inst)
-#define ASM_VOLATILE_ROUND_DOUBLE(fs, inst) ASM_VOLATILE_ROUND64(fs, inst)
+ ULong fsVal = addr[fs];
+ ULong ftVal = addr[ft];
+#define ASM_VOLATILE_UNARY(inst) ASM_VOLATILE_UNARY64(inst)
+#define ASM_VOLATILE_UNARY_DOUBLE(inst) ASM_VOLATILE_UNARY64(inst)
+#define ASM_VOLATILE_BINARY(inst) ASM_VOLATILE_BINARY64(inst)
+#define ASM_VOLATILE_BINARY_DOUBLE(inst) ASM_VOLATILE_BINARY64(inst)
#endif
UInt fcsr = guest_state->guest_FCSR;
switch (inst) {
case ROUNDWD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, round.w.d)
+ ASM_VOLATILE_UNARY_DOUBLE(round.w.d)
break;
case FLOORWS:
- ASM_VOLATILE_ROUND(fs, floor.w.s)
+ ASM_VOLATILE_UNARY(floor.w.s)
break;
case FLOORWD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, floor.w.d)
+ ASM_VOLATILE_UNARY_DOUBLE(floor.w.d)
break;
case TRUNCWS:
- ASM_VOLATILE_ROUND(fs, trunc.w.s)
+ ASM_VOLATILE_UNARY(trunc.w.s)
break;
case TRUNCWD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, trunc.w.d)
+ ASM_VOLATILE_UNARY_DOUBLE(trunc.w.d)
break;
case CEILWS:
- ASM_VOLATILE_ROUND(fs, ceil.w.s)
+ ASM_VOLATILE_UNARY(ceil.w.s)
break;
case CEILWD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, ceil.w.d)
+ ASM_VOLATILE_UNARY_DOUBLE(ceil.w.d)
break;
case CVTDS:
- ASM_VOLATILE_ROUND(fs, cvt.d.s)
+ ASM_VOLATILE_UNARY(cvt.d.s)
break;
case CVTDW:
- ASM_VOLATILE_ROUND(fs, cvt.d.w)
+ ASM_VOLATILE_UNARY(cvt.d.w)
break;
case CVTSW:
- ASM_VOLATILE_ROUND(fs, cvt.s.w)
+ ASM_VOLATILE_UNARY(cvt.s.w)
break;
case CVTSD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, cvt.s.d)
+ ASM_VOLATILE_UNARY_DOUBLE(cvt.s.d)
break;
case CVTWS:
- ASM_VOLATILE_ROUND(fs, cvt.w.s)
+ ASM_VOLATILE_UNARY(cvt.w.s)
break;
case CVTWD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, cvt.w.d)
+ ASM_VOLATILE_UNARY_DOUBLE(cvt.w.d)
break;
case ROUNDWS:
- ASM_VOLATILE_ROUND(fs, round.w.s)
+ ASM_VOLATILE_UNARY(round.w.s)
break;
#if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
|| (__mips == 64)
case CEILLS:
- ASM_VOLATILE_ROUND(fs, ceil.l.s)
+ ASM_VOLATILE_UNARY(ceil.l.s)
break;
case CEILLD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, ceil.l.d)
+ ASM_VOLATILE_UNARY_DOUBLE(ceil.l.d)
break;
case CVTDL:
- ASM_VOLATILE_ROUND_DOUBLE(fs, cvt.d.l)
+ ASM_VOLATILE_UNARY_DOUBLE(cvt.d.l)
break;
case CVTLS:
- ASM_VOLATILE_ROUND(fs, cvt.l.s)
+ ASM_VOLATILE_UNARY(cvt.l.s)
break;
case CVTLD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, cvt.l.d)
+ ASM_VOLATILE_UNARY_DOUBLE(cvt.l.d)
break;
case CVTSL:
- ASM_VOLATILE_ROUND_DOUBLE(fs, cvt.s.l)
+ ASM_VOLATILE_UNARY_DOUBLE(cvt.s.l)
break;
case FLOORLS:
- ASM_VOLATILE_ROUND(fs, floor.l.s)
+ ASM_VOLATILE_UNARY(floor.l.s)
break;
case FLOORLD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, floor.l.d)
+ ASM_VOLATILE_UNARY_DOUBLE(floor.l.d)
break;
case ROUNDLS:
- ASM_VOLATILE_ROUND(fs, round.l.s)
+ ASM_VOLATILE_UNARY(round.l.s)
break;
case ROUNDLD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, round.l.d)
+ ASM_VOLATILE_UNARY_DOUBLE(round.l.d)
break;
case TRUNCLS:
- ASM_VOLATILE_ROUND(fs, trunc.l.s)
+ ASM_VOLATILE_UNARY(trunc.l.s)
break;
case TRUNCLD:
- ASM_VOLATILE_ROUND_DOUBLE(fs, trunc.l.d)
+ ASM_VOLATILE_UNARY_DOUBLE(trunc.l.d)
break;
#endif
+ case ADDS:
+ ASM_VOLATILE_BINARY(add.s)
+ break;
+ case ADDD:
+ ASM_VOLATILE_BINARY_DOUBLE(add.d)
+ break;
+ case SUBS:
+ ASM_VOLATILE_BINARY(sub.s)
+ break;
+ case SUBD:
+ ASM_VOLATILE_BINARY_DOUBLE(sub.d)
+ break;
+ case DIVS:
+ ASM_VOLATILE_BINARY(div.s)
+ break;
default:
vassert(0);
break;
/* fs - fpu source register number.
inst - fpu instruction that needs to be executed.
- sz32 - size of source register. */
-static void calculateFCSR(UInt fs, UInt inst, Bool sz32)
+ sz32 - size of source register.
+    opN - number of operands:
+ 1 - unary operation.
+ 2 - binary operation. */
+static void calculateFCSR(UInt fs, UInt ft, UInt inst, Bool sz32, UInt opN)
{
IRDirty *d;
IRTemp fcsr = newTemp(Ity_I32);
d = unsafeIRDirty_1_N(fcsr, 0,
"mips_dirtyhelper_calculate_FCSR",
&mips_dirtyhelper_calculate_FCSR,
- mkIRExprVec_3(IRExpr_BBPTR(),
+ mkIRExprVec_4(IRExpr_BBPTR(),
mkU32(fs),
+ mkU32(ft),
mkU32(inst)));
- /* Declare we're reading guest state. */
- if (!mode64 && !sz32)
- d->nFxState = 2;
- else
- d->nFxState = 1;
- vex_bzero(&d->fxState, sizeof(d->fxState));
+ if (opN == 1) { /* Unary operation. */
+ /* Declare we're reading guest state. */
+ if (!mode64 && !sz32)
+ d->nFxState = 3;
+ else
+ d->nFxState = 2;
+ vex_bzero(&d->fxState, sizeof(d->fxState));
- d->fxState[0].fx = Ifx_Read; /* read */
- d->fxState[0].offset = floatGuestRegOffset(fs);
- if (mode64)
- d->fxState[0].size = sizeof(ULong);
- else
+ d->fxState[0].fx = Ifx_Read; /* read */
+ d->fxState[0].offset = offsetof(VexGuestMIPS64State, guest_FCSR);
d->fxState[0].size = sizeof(UInt);
+ d->fxState[1].fx = Ifx_Read; /* read */
+ d->fxState[1].offset = floatGuestRegOffset(fs);
+ if (mode64)
+ d->fxState[1].size = sizeof(ULong);
+ else
+ d->fxState[1].size = sizeof(UInt);
+
+ if (!mode64 && !sz32) {
+ d->fxState[2].fx = Ifx_Read; /* read */
+ d->fxState[2].offset = floatGuestRegOffset(fs+1);
+ d->fxState[2].size = sizeof(UInt);
+ }
+ } else if (opN == 2) { /* Binary operation. */
+ /* Declare we're reading guest state. */
+ if (!mode64 && !sz32)
+ d->nFxState = 5;
+ else
+ d->nFxState = 3;
+ vex_bzero(&d->fxState, sizeof(d->fxState));
- if (!mode64 && !sz32) {
+ d->fxState[0].fx = Ifx_Read; /* read */
+ d->fxState[0].offset = offsetof(VexGuestMIPS64State, guest_FCSR);
+ d->fxState[0].size = sizeof(UInt);
d->fxState[1].fx = Ifx_Read; /* read */
- d->fxState[1].offset = floatGuestRegOffset(fs+1);
- d->fxState[1].size = sizeof(UInt);
+ d->fxState[1].offset = floatGuestRegOffset(fs);
+ d->fxState[2].fx = Ifx_Read; /* read */
+ d->fxState[2].offset = floatGuestRegOffset(ft);
+ if (mode64) {
+ d->fxState[1].size = sizeof(ULong);
+ d->fxState[2].size = sizeof(ULong);
+ } else {
+ d->fxState[1].size = sizeof(UInt);
+ d->fxState[2].size = sizeof(UInt);
+ }
+ if (!mode64 && !sz32) {
+ d->fxState[3].fx = Ifx_Read; /* read */
+ d->fxState[3].offset = floatGuestRegOffset(fs+1);
+ d->fxState[3].size = sizeof(UInt);
+ d->fxState[4].fx = Ifx_Read; /* read */
+ d->fxState[4].offset = floatGuestRegOffset(ft+1);
+ d->fxState[4].size = sizeof(UInt);
+ }
}
stmt(IRStmt_Dirty(d));
case 0x10: /* S */
{
DIP("div.s f%d, f%d, f%d", fd, fs, ft);
+ calculateFCSR(fs, ft, DIVS, False, 2);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32, rm,
getLoFromF64(tyF, getFReg(fs)),
case 0x01: /* SUB.fmt */
switch (fmt) {
- case 0x11: /* D */
- {
+ case 0x11: { /* D */
DIP("sub.d f%d, f%d, f%d", fd, fs, ft);
+ calculateFCSR(fs, ft, SUBD, False, 2);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, triop(Iop_SubF64, rm, getDReg(fs),
getDReg(ft)));
break;
}
- case 0x10: /* S */
- {
+ case 0x10: { /* S */
DIP("sub.s f%d, f%d, f%d", fd, fs, ft);
+ calculateFCSR(fs, ft, SUBS, True, 2);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_SubF32, rm,
getLoFromF64(tyF, getFReg(fs)),
getLoFromF64(tyF, getFReg(ft)))));
break;
}
- default:
- goto decode_failure;
+ default:
+ goto decode_failure;
}
break; /* SUB.fmt */
switch (fmt) {
case 0x10: /* S */
DIP("round.l.s f%d, f%d", fd, fs);
- calculateFCSR(fs, ROUNDLS, True);
+ calculateFCSR(fs, 0, ROUNDLS, True, 1);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, mkU32(0x0),
break;
case 0x11: /* D */
DIP("round.l.d f%d, f%d", fd, fs);
- calculateFCSR(fs, ROUNDLD, False);
+ calculateFCSR(fs, 0, ROUNDLD, False, 1);
putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x0),
getFReg(fs)));
break;
switch (fmt) {
case 0x10: /* S */
DIP("trunc.l.s f%d, f%d", fd, fs);
- calculateFCSR(fs, TRUNCLS, True);
+ calculateFCSR(fs, 0, TRUNCLS, True, 1);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, mkU32(0x3),
getLoFromF64(Ity_F64, getFReg(fs))));
break;
case 0x11: /* D */
DIP("trunc.l.d f%d, f%d", fd, fs);
- calculateFCSR(fs, TRUNCLD, False);
+ calculateFCSR(fs, 0, TRUNCLD, False, 1);
putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x3),
getFReg(fs)));
break;
case 0x0: /* add.fmt */
switch (fmt) {
- case 0x10: /* S */
- {
- DIP("add.s f%d, f%d, f%d", fd, fs, ft);
- IRExpr *rm = get_IR_roundingmode();
- putFReg(fd, mkWidenFromF32(tyF, triop(Iop_AddF32, rm,
- getLoFromF64(tyF, getFReg(fs)),
- getLoFromF64(tyF, getFReg(ft)))));
- break;
- }
+ case 0x10: { /* S */
+ DIP("add.s f%d, f%d, f%d", fd, fs, ft);
+ calculateFCSR(fs, ft, ADDS, True, 2);
+ IRExpr *rm = get_IR_roundingmode();
+ putFReg(fd, mkWidenFromF32(tyF, triop(Iop_AddF32, rm,
+ getLoFromF64(tyF, getFReg(fs)),
+ getLoFromF64(tyF, getFReg(ft)))));
+ break;
+ }
case 0x11: { /* D */
DIP("add.d f%d, f%d, f%d", fd, fs, ft);
+ calculateFCSR(fs, ft, ADDD, False, 2);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, triop(Iop_AddF64, rm, getDReg(fs), getDReg(ft)));
break;
switch (fmt) {
case 0x10: /* S */
DIP("cvt.d.s f%d, f%d", fd, fs);
- calculateFCSR(fs, CVTDS, True);
+ calculateFCSR(fs, 0, CVTDS, True, 1);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x14:
DIP("cvt.d.w %d, %d", fd, fs);
- calculateFCSR(fs, CVTDW, True);
+ calculateFCSR(fs, 0, CVTDW, True, 1);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x15: { /* L */
if (mode64) {
DIP("cvt.d.l %d, %d", fd, fs);
- calculateFCSR(fs, CVTDL, False);
+ calculateFCSR(fs, 0, CVTDL, False, 1);
t0 = newTemp(Ity_I64);
assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
switch (fmt) {
case 0x14: /* W */
DIP("cvt.s.w %d, %d", fd, fs);
- calculateFCSR(fs, CVTSW, True);
+ calculateFCSR(fs, 0, CVTSW, True, 1);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x11: /* D */
DIP("cvt.s.d %d, %d", fd, fs);
- calculateFCSR(fs, CVTSD, False);
+ calculateFCSR(fs, 0, CVTSD, False, 1);
if (mode64) {
t0 = newTemp(Ity_F32);
assign(t0, binop(Iop_F64toF32, get_IR_roundingmode(),
case 0x15: /* L */
DIP("cvt.s.l %d, %d", fd, fs);
- calculateFCSR(fs, CVTSL, False);
+ calculateFCSR(fs, 0, CVTSL, False, 1);
t0 = newTemp(Ity_I64);
assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
switch (fmt) {
case 0x10: /* S */
DIP("cvt.w.s %d, %d", fd, fs);
- calculateFCSR(fs, CVTWS, True);
+ calculateFCSR(fs, 0, CVTWS, True, 1);
if (mode64) {
putFReg(fd, mkWidenFromF32(tyF, binop(Iop_RoundF32toInt,
get_IR_roundingmode(), getLoFromF64(tyF,
case 0x11:
DIP("cvt.w.d %d, %d", fd, fs);
- calculateFCSR(fs, CVTWD, False);
+ calculateFCSR(fs, 0, CVTWD, False, 1);
if (mode64) {
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_F32);
switch (fmt) {
case 0x10: /* S */
DIP("cvt.l.s %d, %d", fd, fs);
- calculateFCSR(fs, CVTLS, True);
+ calculateFCSR(fs, 0, CVTLS, True, 1);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, get_IR_roundingmode(),
case 0x11: { /* D */
DIP("cvt.l.d %d, %d", fd, fs);
- calculateFCSR(fs, CVTLD, False);
+ calculateFCSR(fs, 0, CVTLD, False, 1);
putFReg(fd, binop(Iop_RoundF64toInt,
get_IR_roundingmode(), getFReg(fs)));
break;
switch (fmt) {
case 0x10: /* S */
DIP("floor.l.s %d, %d", fd, fs);
- calculateFCSR(fs, FLOORLS, True);
+ calculateFCSR(fs, 0, FLOORLS, True, 1);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, mkU32(0x1),
case 0x11: /* D */
DIP("floor.l.d %d, %d", fd, fs);
- calculateFCSR(fs, FLOORLD, False);
+ calculateFCSR(fs, 0, FLOORLD, False, 1);
putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x1),
getFReg(fs)));
break;
switch (fmt) {
case 0x10: /* S */
DIP("round.w.s f%d, f%d", fd, fs);
- calculateFCSR(fs, ROUNDWS, True);
+ calculateFCSR(fs, 0, ROUNDWS, True, 1);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x11: /* D */
DIP("round.w.d f%d, f%d", fd, fs);
- calculateFCSR(fs, ROUNDWD, False);
+ calculateFCSR(fs, 0, ROUNDWD, False, 1);
if (mode64) {
t0 = newTemp(Ity_I32);
assign(t0, binop(Iop_F64toI32S, mkU32(0x0),
switch (fmt) {
case 0x10: /* S */
DIP("floor.w.s f%d, f%d", fd, fs);
- calculateFCSR(fs, FLOORWS, True);
+ calculateFCSR(fs, 0, FLOORWS, True, 1);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x11: /* D */
DIP("floor.w.d f%d, f%d", fd, fs);
- calculateFCSR(fs, FLOORWD, False);
+ calculateFCSR(fs, 0, FLOORWD, False, 1);
if (mode64) {
t0 = newTemp(Ity_I32);
assign(t0, binop(Iop_F64toI32S, mkU32(0x1),
switch (fmt) {
case 0x10: /* S */
DIP("trunc.w.s %d, %d", fd, fs);
- calculateFCSR(fs, TRUNCWS, True);
+ calculateFCSR(fs, 0, TRUNCWS, True, 1);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
break;
case 0x11: /* D */
DIP("trunc.w.d %d, %d", fd, fs);
- calculateFCSR(fs, TRUNCWD, False);
+ calculateFCSR(fs, 0, TRUNCWD, False, 1);
if (mode64) {
t0 = newTemp(Ity_I32);
switch (fmt) {
case 0x10: /* S */
DIP("ceil.w.s %d, %d", fd, fs);
- calculateFCSR(fs, CEILWS, True);
+ calculateFCSR(fs, 0, CEILWS, True, 1);
if (mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x11: /* D */
DIP("ceil.w.d %d, %d", fd, fs);
- calculateFCSR(fs, CEILWD, False);
+ calculateFCSR(fs, 0, CEILWD, False, 1);
if (!mode64) {
t0 = newTemp(Ity_I32);
assign(t0, binop(Iop_F64toI32S, mkU32(0x2),
switch (fmt) {
case 0x10: /* S */
DIP("ceil.l.s %d, %d", fd, fs);
- calculateFCSR(fs, CEILLS, True);
+ calculateFCSR(fs, 0, CEILLS, True, 1);
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_F32toI64S, mkU32(0x2),
case 0x11: /* D */
DIP("ceil.l.d %d, %d", fd, fs);
- calculateFCSR(fs, CEILLD, False);
+ calculateFCSR(fs, 0, CEILLD, False, 1);
putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x2),
getFReg(fs)));
break;