/* CPU has FPU and 32 dbl. prec. FP registers. */
static Bool fp_mode64 = False;
+/* FPU works in FRE mode */
+static Bool fp_mode64_fre = False;
+
/* CPU has MSA unit */
static Bool has_msa = False;
if (fmt == 0x0b) {
return True;
}
+
+ /* R6 branches */
+ /* bc1eqz */
+ if (fmt == 0x09) {
+ return True;
+ }
+
+ /* bc1nez */
+ if (fmt == 0x0D) {
+ return True;
+ }
}
/* bposge32 */
return True;
}
- /* bgezal */
+ /* bgezal or bal(r6) */
if (opcode == 0x01 && rt == 0x11) {
return True;
}
binop(Iop_Shr32, src, mkexpr(t0)));
}
+
+static UShort extend_s_9to16(UInt x)
+{
+ return (UShort) ((((Int) x) << 23) >> 23);
+}
+
static UShort extend_s_10to16(UInt x)
{
return (UShort) ((((Int) x) << 22) >> 22);
return (UInt) ((((Int) x) << 14) >> 14);
}
+static UInt extend_s_19to32(UInt x)
+{
+ return (UInt) ((((Int) x) << 13) >> 13);
+}
+
+static UInt extend_s_23to32(UInt x)
+{
+ return (UInt) ((((Int) x) << 9) >> 9);
+}
+
+static UInt extend_s_26to32(UInt x)
+{
+ return (UInt) ((((Int) x) << 6) >> 6);
+}
+
static ULong extend_s_16to64 ( UInt x )
{
return (ULong) ((((Long) x) << 48) >> 48);
return (ULong) ((((Long) x) << 46) >> 46);
}
+static ULong extend_s_19to64(UInt x)
+{
+ return (ULong) ((((Long) x) << 45) >> 45);
+}
+
+static ULong extend_s_23to64(UInt x)
+{
+ return (ULong) ((((Long) x) << 41) >> 41);
+}
+
+static ULong extend_s_26to64(UInt x)
+{
+ return (ULong) ((((Long) x) << 38) >> 38);
+}
+
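+/* All the extend_s_NtoM helpers use the same shift-up/arithmetic-shift-down
+   idiom; e.g. extend_s_19to32(0x40000) == 0xFFFC0000 (-262144), while values
+   with the sign bit clear are returned unchanged. */
+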
static ULong extend_s_32to64 ( UInt x )
{
return (ULong) ((((Long) x) << 32) >> 32);
return src;
}
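+
+/* Get the upper 32 bits of a 64-bit floating-point value, reinterpreted
+   as an F32 (counterpart of getLoFromF64). */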
+static inline IRExpr *getHiFromF64(IRExpr * src)
+{
+ vassert(typeOfIRExpr(irsb->tyenv, src) == Ity_F64);
+ return unop(Iop_ReinterpI32asF32, unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64, src)));
+}
+
static IRExpr *mkWidenFromF32(IRType ty, IRExpr * src)
{
vassert(ty == Ity_F32 || ty == Ity_F64);
(UInt) branch_offset), OFFB_PC);
}
+static void dis_branch_compact(Bool link, IRExpr * guard, UInt imm,
+ DisResult *dres)
+{
+ ULong branch_offset;
+ IRTemp t0;
+
+ if (link) { /* LR (GPR31) = addr of the instr after branch instr */
+ if (mode64)
+ putIReg(31, mkU64(guest_PC_curr_instr + 4));
+ else
+ putIReg(31, mkU32(guest_PC_curr_instr + 4));
+ dres->jk_StopHere = Ijk_Call;
+ } else {
+ dres->jk_StopHere = Ijk_Boring;
+ }
+
+ dres->whatNext = Dis_StopHere;
+
+   /* PC = PC + 4 + (SignExtend(offset) << 2)
+      An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
+      is added to the address of the instruction following the branch to
+      form a PC-relative effective target address.  Compact branches have
+      no delay slot. */
+
+ if (mode64)
+ branch_offset = extend_s_18to64(imm << 2);
+ else
+ branch_offset = extend_s_18to32(imm << 2);
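+   /* Example: imm == 0xFFFF encodes an offset of -4, so the computed target
+      is guest_PC_curr_instr + 4 - 4, i.e. the branch instruction itself. */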
+
+ t0 = newTemp(Ity_I1);
+ assign(t0, guard);
+
+ if (mode64) {
+ stmt(IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring,
+ IRConst_U64(guest_PC_curr_instr + 4 + branch_offset),
+ OFFB_PC));
+ putPC(mkU64(guest_PC_curr_instr + 4));
+ } else {
+ stmt(IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring,
+ IRConst_U32(guest_PC_curr_instr + 4 +
+ (UInt) branch_offset), OFFB_PC));
+ putPC(mkU32(guest_PC_curr_instr + 4));
+ }
+}
+
static IRExpr *getFReg(UInt fregNo)
{
vassert(fregNo < 32);
vassert(dregNo < 32);
IRType ty = fp_mode64 ? Ity_F64 : Ity_F32;
vassert(typeOfIRExpr(irsb->tyenv, e) == ty);
- stmt(IRStmt_Put(floatGuestRegOffset(dregNo), e));
+
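+   /* With FRE active, a write to an odd-numbered single-precision register
+      is also mirrored into the upper half of its even-numbered partner, so
+      that both views of the register pair stay consistent. */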
+ if (fp_mode64_fre) {
+ IRTemp t0 = newTemp(Ity_F32);
+ assign(t0, getLoFromF64(ty, e));
+#if defined (_MIPSEL)
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo), mkexpr(t0)));
+ if (dregNo & 1)
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo) - 4, mkexpr(t0)));
+#else
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo) + 4, mkexpr(t0)));
+ if (dregNo & 1)
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo & (~1)), mkexpr(t0)));
+#endif
+ } else {
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo), e));
+ }
+
if (has_msa && fp_mode64) {
stmt(IRStmt_Put(msaGuestRegOffset(dregNo),
binop(Iop_64HLtoV128,
- mkU64(0), unop(Iop_ReinterpF64asI64, e))));
+ unop(Iop_ReinterpF64asI64, e),
+ unop(Iop_ReinterpF64asI64, e))));
}
}
IRType ty = Ity_F64;
vassert(typeOfIRExpr(irsb->tyenv, e) == ty);
stmt(IRStmt_Put(floatGuestRegOffset(dregNo), e));
+ if (fp_mode64_fre) {
+ IRTemp t0 = newTemp(Ity_F32);
+ if (dregNo & 1) {
+ assign(t0, getLoFromF64(ty, e));
+#if defined (_MIPSEL)
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo) - 4, mkexpr(t0)));
+#else
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo & (~1)), mkexpr(t0)));
+#endif
+ } else {
+ assign(t0, getHiFromF64(e));
+#if defined (_MIPSEL)
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo | 1), mkexpr(t0)));
+#else
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo | 1) + 4, mkexpr(t0)));
+#endif
+ }
+ }
if (has_msa)
stmt(IRStmt_Put(msaGuestRegOffset(dregNo),
binop(Iop_64HLtoV128,
- mkU64(0), unop(Iop_ReinterpF64asI64, e))));
+ unop(Iop_ReinterpF64asI64, e),
+ unop(Iop_ReinterpF64asI64, e))));
} else {
vassert(dregNo < 32);
vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64);
mkU64(0xC3E0000000000000),
mkU64(0xC3E0000000000000)))));
assign(t1,
- unop(Iop_ReinterpF64asI64,
- binop(Iop_RoundF64toInt,
- mkU32(0x3),
- unop(Iop_ReinterpI64asF64,
- unop(Iop_V128to64,
- mkexpr(t3))))));
+ binop(Iop_F64toI64S, mkU32(0x3),
+ unop(Iop_ReinterpI64asF64,
+ unop(Iop_V128to64, mkexpr(t3)))));
assign(t2,
- unop(Iop_ReinterpF64asI64,
- binop(Iop_RoundF64toInt,
- mkU32(0x3),
- unop(Iop_ReinterpI64asF64,
- unop(Iop_V128HIto64,
- mkexpr(t3))))));
+ binop(Iop_F64toI64S, mkU32(0x3),
+ unop(Iop_ReinterpI64asF64,
+ unop(Iop_V128HIto64, mkexpr(t3)))));
putWReg(wd,
binop(Iop_64HLtoV128,
mkexpr(t2), mkexpr(t1)));
tmp[i] = newTemp(Ity_I32);
assign(tmp[i],
unop(Iop_ReinterpF32asI32,
- binop(Iop_I32StoF32, rm,
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- mkexpr(t1),
- mkU8(i))))))));
+ binop(Iop_RoundF32toInt, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ mkexpr(t1), mkU8(i))))));
}
putWReg(wd,
tmp[i] = newTemp(Ity_I64);
assign(tmp[i],
unop(Iop_ReinterpF64asI64,
- binop(Iop_I64StoF64, rm,
- unop(Iop_ReinterpF64asI64,
- binop(Iop_RoundF64toInt, rm,
- unop(Iop_ReinterpI64asF64,
- binop(Iop_GetElem64x2,
- mkexpr(t1),
- mkU8(i))))))));
+ binop(Iop_RoundF64toInt, rm,
+ unop(Iop_ReinterpI64asF64,
+ binop(Iop_GetElem64x2,
+ mkexpr(t1), mkU8(i))))));
}
putWReg(wd,
IRExpr *rm = get_IR_roundingmode_MSA();
assign(t1,
binop(Iop_32HLto64,
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- mkexpr(t3),
- mkU8(1))))),
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- mkexpr(t3),
- mkU8(0)))))));
+ binop(Iop_F32toI32S, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ mkexpr(t3), mkU8(1)))),
+ binop(Iop_F32toI32S, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ mkexpr(t3), mkU8(0))))));
assign(t2,
binop(Iop_32HLto64,
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- mkexpr(t3),
- mkU8(3))))),
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- mkexpr(t3),
- mkU8(2)))))));
+ binop(Iop_F32toI32S, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ mkexpr(t3), mkU8(3)))),
+ binop(Iop_F32toI32S, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ mkexpr(t3), mkU8(2))))));
putWReg(wd,
binop(Iop_64HLtoV128,
mkexpr(t2), mkexpr(t1)));
mkU64(0xC3E0000000000000)))));
IRExpr *rm = get_IR_roundingmode_MSA();
assign(t1,
- unop(Iop_ReinterpF64asI64,
- binop(Iop_RoundF64toInt, rm,
- unop(Iop_ReinterpI64asF64,
- unop(Iop_V128to64,
- mkexpr(t3))))));
+ binop(Iop_F64toI64S, rm,
+ unop(Iop_ReinterpI64asF64,
+ unop(Iop_V128to64, mkexpr(t3)))));
assign(t2,
- unop(Iop_ReinterpF64asI64,
- binop(Iop_RoundF64toInt, rm,
- unop(Iop_ReinterpI64asF64,
- unop(Iop_V128HIto64,
- mkexpr(t3))))));
+ binop(Iop_F64toI64S, rm,
+ unop(Iop_ReinterpI64asF64,
+ unop(Iop_V128HIto64, mkexpr(t3)))));
putWReg(wd,
binop(Iop_64HLtoV128,
mkexpr(t2), mkexpr(t1)));
IRExpr *rm = get_IR_roundingmode_MSA();
assign(t1,
binop(Iop_32HLto64,
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- getWReg(ws),
- mkU8(1))))),
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- getWReg(ws),
- mkU8(0)))))));
+ binop(Iop_F32toI32U, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ getWReg(ws), mkU8(1)))),
+ binop(Iop_F32toI32U, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ getWReg(ws), mkU8(0))))));
assign(t2,
binop(Iop_32HLto64,
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- getWReg(ws),
- mkU8(3))))),
- unop(Iop_ReinterpF32asI32,
- binop(Iop_RoundF32toInt, rm,
- unop(Iop_ReinterpI32asF32,
- binop(Iop_GetElem32x4,
- getWReg(ws),
- mkU8(2)))))));
+ binop(Iop_F32toI32U, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ getWReg(ws), mkU8(3)))),
+ binop(Iop_F32toI32U, rm,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_GetElem32x4,
+ getWReg(ws), mkU8(2))))));
assign(t3,
unop(Iop_NotV128,
binop(Iop_SarN32x4,
case 0x11: { /* COP1 */
if (fmt == 0x3 && fd == 0 && function == 0) { /* MFHC1 */
DIP("mfhc1 r%u, f%u", rt, fs);
- if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps) ||
+ VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
if (fp_mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
assign(t0, unop(Iop_ReinterpF64asI64, getDReg(fs)));
assign(t1, unop(Iop_64HIto32, mkexpr(t0)));
putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True));
- break;
} else {
putIReg(rt, mkWidenFrom32(ty, unop(Iop_ReinterpF32asI32,
getFReg(fs | 1)), True));
- break;
}
+ } else {
+ ILLEGAL_INSTRUCTON;
}
- ILLEGAL_INSTRUCTON;
break;
} else if (fmt == 0x7 && fd == 0 && function == 0) { /* MTHC1 */
DIP("mthc1 r%u, f%u", rt, fs);
- if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps) ||
+ VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
if (fp_mode64) {
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_32HLto64, mkNarrowTo32(ty, getIReg(rt)),
unop(Iop_ReinterpF32asI32,
getLoFromF64(Ity_F64, getDReg(fs)))));
putDReg(fs, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
- break;
} else {
putFReg(fs | 1, unop(Iop_ReinterpI32asF32,
mkNarrowTo32(ty, getIReg(rt))));
- break;
}
+ } else {
+ ILLEGAL_INSTRUCTON;
}
- ILLEGAL_INSTRUCTON;
break;
} else if (fmt == 0x8) { /* BC */
/* FcConditionalCode(bc1_cc) */
unop(Iop_V128HIto64, mkexpr(t1))))));
dis_branch(False,
binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0)), imm, &bstmt);
+ } else if (fmt == 0x09) { /* BC1EQZ */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("bc1eqz f%u, %u", ft, imm);
+ t1 = newTemp(Ity_I1);
+ if (mode64) {
+ assign(t1, binop(Iop_CmpEQ64,
+ binop(Iop_And64,
+ unop(Iop_ReinterpF64asI64, getDReg(ft)),
+ mkU64(1)),
+ mkU64(0)));
+ } else {
+ assign(t1, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64, getDReg(ft))),
+ mkU32(1)),
+ mkU32(0)));
+ }
+ dis_branch(False, mkexpr(t1), imm, &bstmt);
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ } else if (fmt == 0x0D) { /* BC1NEZ */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("bc1nez f%u, %u", ft, imm);
+ t1 = newTemp(Ity_I1);
+ if (mode64) {
+ assign(t1, binop(Iop_CmpNE64,
+ binop(Iop_And64,
+ unop(Iop_ReinterpF64asI64, getDReg(ft)),
+ mkU64(1)),
+ mkU64(0)));
+ } else {
+ assign(t1, binop(Iop_CmpNE32,
+ binop(Iop_And32,
+ unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64, getDReg(ft))),
+ mkU32(1)),
+ mkU32(0)));
+ }
+ dis_branch(False, mkexpr(t1), imm, &bstmt);
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
} else {
+ if (fmt == 0x15) { /* CMP.cond.d */
+ Bool comparison = True;
+ UInt signaling = CMPAFD;
+ DIP("cmp.cond.d f%u, f%u, f%u, cond %u", fd, fs, ft, function);
+ t0 = newTemp(Ity_I32);
+         /* Conditions starting with S should signal an exception on QNaN
+            inputs; each signaling case falls through to its quiet
+            counterpart below. */
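+         /* Iop_CmpF64 yields an IRCmpF64Result: 0x00 GT, 0x01 LT, 0x40 EQ,
+            0x45 UN (unordered); the constants tested below follow that
+            encoding. */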
+ switch (function) {
+ case 8: /* SAF */
+ signaling = CMPSAFD;
+ case 0: /* AF */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(), mkU64(0)));
+ break;
+ case 9: /* SUN */
+ signaling = CMPSAFD;
+ case 1: /* UN */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x45)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(), mkU64(0))));
+ break;
+ case 0x19: /* SOR */
+ signaling = CMPSAFD;
+ case 0x11: /* OR */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x45)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(), mkU64(0)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL))));
+ break;
+ case 0xa: /* SEQ */
+ signaling = CMPSAFD;
+ case 2: /* EQ */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x40)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(), mkU64(0))));
+ break;
+            case 0x1A: /* SUNE */
+               signaling = CMPSAFD;
+            case 0x12: /* UNE */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x40)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(), mkU64(0)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL))));
+ break;
+ case 0xB: /* SUEQ */
+ signaling = CMPSAFD;
+ case 0x3: /* UEQ */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x40)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ IRExpr_ITE(binop(Iop_CmpEQ32,
+ mkexpr(t0), mkU32(0x45)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(),
+ mkU64(0)))));
+ break;
+            case 0x1B: /* SNE */
+               signaling = CMPSAFD;
+            case 0x13: /* NE */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0),mkU32(0x01)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ IRExpr_ITE(binop(Iop_CmpEQ32,
+ mkexpr(t0),mkU32(0x00)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(),
+ mkU64(0)))));
+ break;
+ case 0xC: /* SLT */
+ signaling = CMPSAFD;
+ case 0x4: /* LT */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x01)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(), mkU64(0))));
+ break;
+ case 0xD: /* SULT */
+ signaling = CMPSAFD;
+ case 0x5: /* ULT */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x01)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ IRExpr_ITE(binop(Iop_CmpEQ32,
+ mkexpr(t0), mkU32(0x45)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(),
+ mkU64(0)))));
+ break;
+ case 0xE: /* SLE */
+ signaling = CMPSAFD;
+ case 0x6: /* LE */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0),mkU32(0x01)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ IRExpr_ITE(binop(Iop_CmpEQ32,
+ mkexpr(t0),mkU32(0x40)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(),
+ mkU64(0)))));
+ break;
+ case 0xF: /* SULE */
+ signaling = CMPSAFD;
+ case 0x7: /* ULE */
+ assign(t0, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+ calculateFCSR(fs, ft, signaling, False, 2);
+ putDReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x0)),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(), mkU64(0)),
+ unop(Iop_ReinterpI64asF64,
+ mkU64(0xFFFFFFFFFFFFFFFFULL))));
+ break;
+ default:
+ comparison = False;
+ }
+ if (comparison) {
+ if (!VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
+
+ } else if (fmt == 0x14) {
+ Bool comparison = True;
+ UInt signaling = CMPAFS;
+ DIP("cmp.cond.s f%u, f%u, f%u, cond %u", fd, fs, ft, function);
+ t0 = newTemp(Ity_I32);
+         /* Conditions starting with S should signal an exception on QNaN
+            inputs; each signaling case falls through to its quiet
+            counterpart below. */
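+         /* Iop_CmpF32 yields the same result encoding as Iop_CmpF64:
+            0x00 GT, 0x01 LT, 0x40 EQ, 0x45 UN (unordered). */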
+ switch (function) {
+ case 8: /* SAF */
+ signaling = CMPSAFS;
+ case 0: /* AF */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(), mkU32(0))));
+ break;
+ case 9: /* SUN */
+ signaling = CMPSAFS;
+ case 1: /* UN */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x45)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0)))));
+ break;
+ case 0x19: /* SOR */
+ signaling = CMPSAFS;
+ case 0x11: /* OR */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x45)),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0))),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU)))));
+ break;
+ case 0xa: /* SEQ */
+ signaling = CMPSAFS;
+ case 2: /* EQ */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x40)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0)))));
+ break;
+            case 0x1A: /* SUNE */
+               signaling = CMPSAFS;
+            case 0x12: /* UNE */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x40)),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0))),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU)))));
+ break;
+ case 0xB: /* SUEQ */
+ signaling = CMPSAFS;
+ case 0x3: /* UEQ */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x40)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ IRExpr_ITE(binop(Iop_CmpEQ32,
+ mkexpr(t0), mkU32(0x45)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0))))));
+ break;
+            case 0x1B: /* SNE */
+               signaling = CMPSAFS;
+            case 0x13: /* NE */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0),mkU32(0x01)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ IRExpr_ITE(binop(Iop_CmpEQ32,
+ mkexpr(t0),mkU32(0x00)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0))))));
+ break;
+ case 0xC: /* SLT */
+ signaling = CMPSAFS;
+ case 0x4: /* LT */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x01)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0)))));
+ break;
+ case 0xD: /* SULT */
+ signaling = CMPSAFS;
+ case 0x5: /* ULT */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x01)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ IRExpr_ITE(binop(Iop_CmpEQ32,
+ mkexpr(t0), mkU32(0x45)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0))))));
+ break;
+ case 0xE: /* SLE */
+ signaling = CMPSAFS;
+ case 0x6: /* LE */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0),mkU32(0x01)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ IRExpr_ITE(binop(Iop_CmpEQ32,
+ mkexpr(t0),mkU32(0x40)),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU))),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0))))));
+ break;
+ case 0xF: /* SULE */
+ signaling = CMPSAFS;
+ case 0x7: /* ULE */
+ assign(t0, binop(Iop_CmpF32,
+ getLoFromF64(Ity_F64, getFReg(fs)),
+ getLoFromF64(Ity_F64, getFReg(ft))));
+ calculateFCSR(fs, ft, signaling, True, 2);
+ putFReg(fd,
+ IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x0)),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0))),
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ mkU32(0xFFFFFFFFU)))));
+ break;
+ default:
+ comparison = False;
+ }
+ if (comparison) {
+ if (!VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
+ }
+
switch (function) {
case 0x4: { /* SQRT.fmt */
switch (fmt) {
DIP("round.l.d f%u, f%u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, ROUNDLD, False, 1);
- putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x0),
- getDReg(fs)));
+ putDReg(fd, unop(Iop_ReinterpI64asF64,
+ binop(Iop_F64toI64S,
+ mkU32(0x0),
+ getDReg(fs))));
} else {
ILLEGAL_INSTRUCTON;
}
DIP("trunc.l.d f%u, f%u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, TRUNCLD, False, 1);
- putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x3),
- getDReg(fs)));
+ putDReg(fd, unop(Iop_ReinterpI64asF64,
+ binop(Iop_F64toI64S,
+ mkU32(0x3),
+ getDReg(fs))));
} else {
ILLEGAL_INSTRUCTON;
}
calculateFCSR(fs, 0, CVTWS, True, 1);
putFReg(fd,
mkWidenFromF32(tyF,
- binop(Iop_RoundF32toInt,
- get_IR_roundingmode(),
- getLoFromF64(tyF, getFReg(fs))))
- );
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_F32toI32S,
+ get_IR_roundingmode(),
+ getLoFromF64(tyF,
+ getFReg(fs))))));
break;
case 0x11:
DIP("cvt.l.d %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, CVTLD, False, 1);
- putDReg(fd, binop(Iop_RoundF64toInt,
- get_IR_roundingmode(), getDReg(fs)));
+ putDReg(fd, unop(Iop_ReinterpI64asF64,
+ binop(Iop_F64toI64S,
+ get_IR_roundingmode(),
+ getDReg(fs))));
} else {
ILLEGAL_INSTRUCTON;
}
DIP("floor.l.d %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, FLOORLD, False, 1);
- putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x1),
- getDReg(fs)));
+ putDReg(fd, unop(Iop_ReinterpI64asF64,
+ binop(Iop_F64toI64S,
+ mkU32(0x01),
+ getDReg(fs))));
} else {
ILLEGAL_INSTRUCTON;
}
case 0x10: /* S */
DIP("round.w.s f%u, f%u", fd, fs);
calculateFCSR(fs, 0, ROUNDWS, True, 1);
- if (fp_mode64) {
- t0 = newTemp(Ity_I64);
- t1 = newTemp(Ity_I32);
- t3 = newTemp(Ity_F32);
- t4 = newTemp(Ity_F32);
- /* get lo half of FPR */
- assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
-
- assign(t1, unop(Iop_64to32, mkexpr(t0)));
-
- assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
-
- assign(t4, binop(Iop_RoundF32toInt, mkU32(0x0),
- mkexpr(t3)));
-
- putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
- } else
- putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x0),
- getFReg(fs)));
+ putFReg(fd,
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_F32toI32S,
+ mkU32(0x0),
+ getLoFromF64(tyF,
+ getFReg(fs))))));
break;
case 0x11: /* D */
case 0x10: /* S */
DIP("floor.w.s f%u, f%u", fd, fs);
calculateFCSR(fs, 0, FLOORWS, True, 1);
- if (fp_mode64) {
- t0 = newTemp(Ity_I64);
- t1 = newTemp(Ity_I32);
- t3 = newTemp(Ity_F32);
- t4 = newTemp(Ity_F32);
- /* get lo half of FPR */
- assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
-
- assign(t1, unop(Iop_64to32, mkexpr(t0)));
-
- assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
-
- assign(t4, binop(Iop_RoundF32toInt, mkU32(0x1),
- mkexpr(t3)));
-
- putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
- } else
- putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x1),
- getFReg(fs)));
+ putFReg(fd,
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_F32toI32S,
+ mkU32(0x1),
+ getLoFromF64(tyF,
+ getFReg(fs))))));
break;
case 0x11: /* D */
case 0x10: /* S */
DIP("trunc.w.s %u, %u", fd, fs);
calculateFCSR(fs, 0, TRUNCWS, True, 1);
- if (fp_mode64) {
- t0 = newTemp(Ity_I64);
- t1 = newTemp(Ity_I32);
- t3 = newTemp(Ity_F32);
- t4 = newTemp(Ity_F32);
- /* get lo half of FPR */
- assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
-
- assign(t1, unop(Iop_64to32, mkexpr(t0)));
-
- assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
-
- assign(t4, binop(Iop_RoundF32toInt, mkU32(0x3),
- mkexpr(t3)));
-
- putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
- } else
- putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x3),
- getFReg(fs)));
+ putFReg(fd,
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_F32toI32S,
+ mkU32(0x3),
+ getLoFromF64(tyF,
+ getFReg(fs))))));
break;
case 0x11: /* D */
DIP("trunc.w.d %u, %u", fd, fs);
case 0x10: /* S */
DIP("ceil.w.s %u, %u", fd, fs);
calculateFCSR(fs, 0, CEILWS, True, 1);
- if (fp_mode64) {
- t0 = newTemp(Ity_I64);
- t1 = newTemp(Ity_I32);
- t3 = newTemp(Ity_F32);
- t4 = newTemp(Ity_F32);
- /* get lo half of FPR */
- assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
-
- assign(t1, unop(Iop_64to32, mkexpr(t0)));
-
- assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
-
- assign(t4, binop(Iop_RoundF32toInt, mkU32(0x2),
- mkexpr(t3)));
-
- putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
- } else
- putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x2),
- getFReg(fs)));
- break;
+ putFReg(fd,
+ mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32,
+ binop(Iop_F32toI32S,
+ mkU32(0x2),
+ getLoFromF64(tyF,
+ getFReg(fs))))));
+ break;
case 0x11: /* D */
DIP("ceil.w.d %u, %u", fd, fs);
unop(Iop_ReinterpI32asF32, mkexpr(t0))));
}
break;
+
default:
goto decode_failure;
DIP("ceil.l.d %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, CEILLD, False, 1);
- putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x2),
- getDReg(fs)));
+ putDReg(fd, unop(Iop_ReinterpI64asF64,
+ binop(Iop_F64toI64S,
+ mkU32(0x2),
+ getDReg(fs))));
} else {
ILLEGAL_INSTRUCTON;
}
}
break;
+ case 0x18: /* MADDF.fmt */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("maddf.d f%u, f%u, f%u", fd, fs, ft);
+ IRExpr *rm = get_IR_roundingmode();
+ putDReg(fd, qop(Iop_MAddF64, rm, getDReg(fs), getDReg(ft),
+ getDReg(fd)));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("maddf.s f%u, f%u, f%u", fd, fs, ft);
+ IRExpr *rm = get_IR_roundingmode();
+ t1 = newTemp(Ity_F32);
+ assign(t1, qop(Iop_MAddF32, rm,
+ getLoFromF64(tyF, getFReg(fs)),
+ getLoFromF64(tyF, getFReg(ft)),
+ getLoFromF64(tyF, getFReg(fd))));
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1)));
+ break;
+ }
+
+ default:
+ goto decode_failure;
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+
+ break;
+
+ case 0x19: /* MSUBF.fmt */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("msubf.d f%u, f%u, f%u", fd, fs, ft);
+ IRExpr *rm = get_IR_roundingmode();
+ putDReg(fd, qop(Iop_MSubF64, rm, getDReg(fs),
+ getDReg(ft), getDReg(fd)));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("msubf.s f%u, f%u, f%u", fd, fs, ft);
+ IRExpr *rm = get_IR_roundingmode();
+ t1 = newTemp(Ity_F32);
+ assign(t1, qop(Iop_MSubF32, rm,
+ getLoFromF64(tyF, getFReg(fs)),
+ getLoFromF64(tyF, getFReg(ft)),
+ getLoFromF64(tyF, getFReg(fd))));
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1)));
+ break;
+ }
+
+ default:
+ goto decode_failure;
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+
+ break;
+
+ case 0x1E: /* MAX.fmt */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("max.d f%u, f%u, f%u", fd, fs, ft);
+ calculateFCSR(fs, ft, MAXD, False, 2);
+ putDReg(fd, binop(Iop_MaxNumF64, getDReg(fs), getDReg(ft)));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("max.s f%u, f%u, f%u", fd, fs, ft);
+ calculateFCSR(fs, ft, MAXS, True, 2);
+ putFReg(fd, mkWidenFromF32(tyF, binop(Iop_MaxNumF32,
+ getLoFromF64(Ity_F64,
+ getFReg(fs)),
+ getLoFromF64(Ity_F64,
+ getFReg(ft)))));
+ break;
+ }
+
+ default:
+ goto decode_failure;
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+
+ break;
+
+ case 0x1C: /* MIN.fmt */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("min.d f%u, f%u, f%u", fd, fs, ft);
+ calculateFCSR(fs, ft, MIND, False, 2);
+ putDReg(fd, binop(Iop_MinNumF64, getDReg(fs), getDReg(ft)));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("min.s f%u, f%u, f%u", fd, fs, ft);
+ calculateFCSR(fs, ft, MINS, True, 2);
+ putFReg(fd, mkWidenFromF32(tyF, binop(Iop_MinNumF32,
+ getLoFromF64(Ity_F64,
+ getFReg(fs)),
+ getLoFromF64(Ity_F64,
+ getFReg(ft)))));
+ break;
+ }
+ default:
+ goto decode_failure;
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+
+ break;
+
+ case 0x1F: /* MAXA.fmt */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("maxa.d f%u, f%u, f%u", fd, fs, ft);
+ calculateFCSR(fs, ft, MAXAD, False, 2);
+ t1 = newTemp(Ity_F64);
+ t2 = newTemp(Ity_F64);
+ t3 = newTemp(Ity_F64);
+ t4 = newTemp(Ity_I1);
+ assign(t1, unop(Iop_AbsF64, getFReg(fs)));
+ assign(t2, unop(Iop_AbsF64, getFReg(ft)));
+ assign(t3, binop(Iop_MaxNumF64, mkexpr(t1), mkexpr(t2)));
+ assign(t4, binop(Iop_CmpEQ32,
+ binop(Iop_CmpF64, mkexpr(t3), mkexpr(t1)),
+ mkU32(0x40)));
+ putFReg(fd, IRExpr_ITE(mkexpr(t4),
+ getFReg(fs), getFReg(ft)));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("maxa.s f%u, f%u, f%u", fd, fs, ft);
+ calculateFCSR(fs, ft, MAXAS, True, 2);
+ t1 = newTemp(Ity_F32);
+ t2 = newTemp(Ity_F32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_I1);
+ assign(t1, unop(Iop_AbsF32, getLoFromF64(Ity_F64,
+ getFReg(fs))));
+ assign(t2, unop(Iop_AbsF32, getLoFromF64(Ity_F64,
+ getFReg(ft))));
+ assign(t3, binop(Iop_MaxNumF32, mkexpr(t1), mkexpr(t2)));
+ assign(t4, binop(Iop_CmpEQ32,
+ binop(Iop_CmpF32, mkexpr(t3), mkexpr(t1)),
+ mkU32(0x40)));
+ putFReg(fd, IRExpr_ITE(mkexpr(t4),
+ getFReg(fs), getFReg(ft)));
+ break;
+ }
+
+ default:
+ goto decode_failure;
+ }
+ /* missing in documentation */
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+
+ case 0x1D: /* MINA.fmt */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("mina.d f%u, f%u, f%u", fd, fs, ft);
+ calculateFCSR(fs, ft, MINAD, False, 2);
+ t1 = newTemp(Ity_F64);
+ t2 = newTemp(Ity_F64);
+ t3 = newTemp(Ity_F64);
+ t4 = newTemp(Ity_I1);
+ assign(t1, unop(Iop_AbsF64, getFReg(fs)));
+ assign(t2, unop(Iop_AbsF64, getFReg(ft)));
+ assign(t3, binop(Iop_MinNumF64, mkexpr(t1), mkexpr(t2)));
+ assign(t4, binop(Iop_CmpEQ32,
+ binop(Iop_CmpF64, mkexpr(t3), mkexpr(t1)),
+ mkU32(0x40)));
+ putFReg(fd, IRExpr_ITE(mkexpr(t4),
+ getFReg(fs), getFReg(ft)));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("mina.s f%u, f%u, f%u", fd, fs, ft);
+ calculateFCSR(fs, ft, MINAS, True, 2);
+ t1 = newTemp(Ity_F32);
+ t2 = newTemp(Ity_F32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_I1);
+ assign(t1, unop(Iop_AbsF32, getLoFromF64(Ity_F64,
+ getFReg(fs))));
+ assign(t2, unop(Iop_AbsF32, getLoFromF64(Ity_F64,
+ getFReg(ft))));
+ assign(t3, binop(Iop_MinNumF32, mkexpr(t1), mkexpr(t2)));
+ assign(t4, binop(Iop_CmpEQ32,
+ binop(Iop_CmpF32, mkexpr(t3), mkexpr(t1)),
+ mkU32(0x40)));
+ putFReg(fd, IRExpr_ITE(mkexpr(t4),
+ getFReg(fs), getFReg(ft)));
+ break;
+ }
+
+ default:
+ goto decode_failure;
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+
+ case 0x1A: /* RINT.fmt */
+ if (ft == 0) {
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("rint.d f%u, f%u", fd, fs);
+               calculateFCSR(fs, 0, RINTD, False, 1);
+ IRExpr *rm = get_IR_roundingmode();
+ putDReg(fd, binop(Iop_RoundF64toInt, rm, getDReg(fs)));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("rint.s f%u, f%u", fd, fs);
+               calculateFCSR(fs, 0, RINTS, True, 1);
+ IRExpr *rm = get_IR_roundingmode();
+ putFReg(fd,
+ mkWidenFromF32(tyF,
+ binop(Iop_RoundF32toInt, rm,
+ getLoFromF64(tyF,
+ getFReg(fs)))));
+ break;
+ }
+
+ default:
+ goto decode_failure;
+ }
+
+ }
+ break;
+
+ case 0x10: /* SEL.fmt */
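+         /* SEL.fmt: fd = (bit 0 of fd is set) ? ft : fs */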
+ switch (fmt) {
+ case 0x11: { /* D */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("sel.d f%u, f%u, f%u", fd, fs, ft);
+ t1 = newTemp(Ity_I1);
+ if (mode64) {
+ assign(t1,binop(Iop_CmpNE64,
+ binop(Iop_And64,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fd)),
+ mkU64(1)),
+ mkU64(0)));
+ } else {
+ assign(t1,binop(Iop_CmpNE32,
+ binop(Iop_And32,
+ unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fd))),
+ mkU32(1)),
+ mkU32(0)));
+ }
+ putDReg(fd, IRExpr_ITE(mkexpr(t1),
+ getDReg(ft), getDReg(fs)));
+ break;
+            } else {
+               ILLEGAL_INSTRUCTON;
+            }
+            break;
+         }
+
+ case 0x10: { /* S */
+ DIP("sel.s f%u, f%u, f%u", fd, fs, ft);
+ t1 = newTemp(Ity_I1);
+ assign(t1,binop(Iop_CmpNE32,
+ binop(Iop_And32,
+ unop(Iop_ReinterpF32asI32,
+ getLoFromF64(tyF, getFReg(fd))),
+ mkU32(1)),
+ mkU32(0)));
+ putFReg(fd, IRExpr_ITE( mkexpr(t1),
+ getFReg(ft), getFReg(fs)));
+ break;
+ }
+ default:
+ goto decode_failure;
+ }
+ break;
+
+ case 0x14: /* SELEQZ.fmt */
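+         /* SELEQZ.fmt: fd = (bit 0 of ft is set) ? +0.0 : fs */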
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ switch (fmt) { /* SELEQZ.df */
+ case 0x11: { /* D */
+ DIP("seleqz.d f%u, f%u, f%u", fd, fs, ft);
+ t1 = newTemp(Ity_I1);
+ if (mode64) {
+ assign(t1, binop(Iop_CmpNE64,
+ binop(Iop_And64,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(ft)),
+ mkU64(1)),
+ mkU64(0)));
+ } else {
+ assign(t1, binop(Iop_CmpNE32,
+ binop(Iop_And32,
+ unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(ft))),
+ mkU32(1)),
+ mkU32(0)));
+ }
+ putDReg(fd, IRExpr_ITE( mkexpr(t1),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(),mkU64(0)),
+ getDReg(fs)));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("seleqz.s f%u, f%u, f%u", fd, fs, ft);
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpNE32,
+ binop(Iop_And32,
+ unop(Iop_ReinterpF32asI32,
+ getLoFromF64(tyF, getFReg(ft))),
+ mkU32(1)),
+ mkU32(0)));
+ putFReg(fd, IRExpr_ITE(mkexpr(t1),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0))),
+ getFReg(fs)));
+ break;
+ }
+
+ default:
+ goto decode_failure;
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+
+ case 0x17: /* SELNEZ.fmt */
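+         /* SELNEZ.fmt: fd = (bit 0 of ft is set) ? fs : +0.0 */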
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("selnez.d f%u, f%u, f%u", fd, fs, ft);
+ t1 = newTemp(Ity_I1);
+ if (mode64) {
+ assign(t1, binop(Iop_CmpNE64,
+ binop(Iop_And64,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(ft)),
+ mkU64(1)),
+ mkU64(0)));
+ } else {
+ assign(t1, binop(Iop_CmpNE32,
+ binop(Iop_And32,
+ unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(ft))),
+ mkU32(1)),
+ mkU32(0)));
+ }
+ putDReg(fd, IRExpr_ITE( mkexpr(t1),
+ getDReg(fs),
+ binop(Iop_I64StoF64,
+ get_IR_roundingmode(),
+ mkU64(0))));
+ break;
+ }
+
+ case 0x10: { /* S */
+ DIP("selnez.s f%u, f%u, f%u", fd, fs, ft);
+ t1 = newTemp(Ity_I1);
+ assign(t1,binop(Iop_CmpNE32,
+ binop(Iop_And32,
+ unop(Iop_ReinterpF32asI32,
+ getLoFromF64(tyF, getFReg(ft))),
+ mkU32(1)),
+ mkU32(0)));
+ putFReg(fd, IRExpr_ITE(mkexpr(t1),
+ getFReg(fs),
+ mkWidenFromF32(tyF,
+ binop(Iop_I32StoF32,
+ get_IR_roundingmode(),
+ mkU32(0)))));
+ break;
+ }
+
+ default:
+ goto decode_failure;
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+
+ case 0x1B: /* CLASS.fmt */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+            t0 = newTemp(Ity_I1);  /* exponent all zeros */
+            t1 = newTemp(Ity_I1);  /* exponent all ones */
+            t2 = newTemp(Ity_I1);  /* sign bit set */
+            t3 = newTemp(Ity_I1);  /* quiet bit (MSB of the mantissa) set */
+            t4 = newTemp(Ity_I1);  /* mantissa non-zero */
+ t5 = newTemp(Ity_I32);
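+            /* CLASS.fmt produces a bit mask: bit 0 SNaN, bit 1 QNaN,
+               bits 2..5 negative inf/normal/subnormal/zero, bits 6..9 their
+               positive counterparts.  t5 picks the base value (inf 1,
+               normal 2, subnormal 4, zero 8) and shifts it by 2 (negative)
+               or 6 (positive); NaNs are resolved afterwards using t3. */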
+ switch (fmt) {
+ case 0x11: { /* D */
+ DIP("class.d f%u, f%u", fd, fs);
+ assign(t0, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fs))),
+ mkU32(0x7ff00000)),
+ mkU32(0)));
+ assign(t1, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fs))),
+ mkU32(0x7ff00000)),
+ mkU32(0x7ff00000)));
+ assign(t2, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fs))),
+ mkU32(0x80000000)),
+ mkU32(0x80000000)));
+ assign(t3, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fs))),
+ mkU32(0x00080000)),
+ mkU32(0x00080000)));
+ if (mode64) assign(t4, binop(Iop_CmpNE64,
+ binop(Iop_And64,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fs)),
+ mkU64(0x000fffffffffffffULL)),
+ mkU64(0)));
+ else assign(t4, binop(Iop_CmpNE32,
+ binop(Iop_Or32,
+ binop(Iop_And32,
+ unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fs))),
+ mkU32(0x000fffff)),
+ unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64,
+ getDReg(fs)))),
+ mkU32(0)));
+ assign(t5, binop(Iop_Shl32,
+ IRExpr_ITE(mkexpr(t1),
+ IRExpr_ITE(mkexpr(t4),
+ mkU32(0), mkU32(1)),
+ IRExpr_ITE(mkexpr(t0),
+ IRExpr_ITE(mkexpr(t4),
+ mkU32(0x4),
+ mkU32(0x8)),
+ mkU32(2))),
+ IRExpr_ITE(mkexpr(t2), mkU8(2), mkU8(6))));
+ putDReg(fd, unop(Iop_ReinterpI64asF64,
+ unop(Iop_32Uto64,
+ IRExpr_ITE(binop(Iop_CmpNE32,
+ mkexpr(t5), mkU32(0)),
+ mkexpr(t5),
+ IRExpr_ITE(mkexpr(t3),
+ mkU32(2),
+ mkU32(1))))));
+ break;
+ }
+ case 0x10: { /* S */
+ DIP("class.s f%u, f%u", fd, fs);
+ assign(t0, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_ReinterpF32asI32,
+ getLoFromF64(tyF, getFReg(fs))),
+ mkU32(0x7f800000)),
+ mkU32(0)));
+ assign(t1, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_ReinterpF32asI32,
+ getLoFromF64(tyF, getFReg(fs))),
+ mkU32(0x7f800000)),
+ mkU32(0x7f800000)));
+ assign(t2, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_ReinterpF32asI32,
+ getLoFromF64(tyF, getFReg(fs))),
+ mkU32(0x80000000)),
+ mkU32(0x80000000)));
+ assign(t3, binop(Iop_CmpEQ32,
+ binop(Iop_And32,
+ unop(Iop_ReinterpF32asI32,
+ getLoFromF64(tyF, getFReg(fs))),
+ mkU32(0x00400000)),
+ mkU32(0x00400000)));
+ assign(t4, binop(Iop_CmpNE32,
+ binop(Iop_And32,
+ unop(Iop_ReinterpF32asI32,
+ getLoFromF64(tyF, getFReg(fs))),
+ mkU32(0x007fffff)),
+ mkU32(0)));
+ assign(t5, binop(Iop_Shl32,
+ IRExpr_ITE(mkexpr(t1),
+ IRExpr_ITE(mkexpr(t4),
+ mkU32(0), mkU32(1)),
+ IRExpr_ITE(mkexpr(t0),
+ IRExpr_ITE(mkexpr(t4),
+ mkU32(0x4),
+ mkU32(0x8)), //zero or subnorm
+ mkU32(2))),
+ IRExpr_ITE(mkexpr(t2), mkU8(2), mkU8(6))));
+ putDReg(fd, unop(Iop_ReinterpI64asF64,
+ unop(Iop_32Uto64,
+ IRExpr_ITE(binop(Iop_CmpNE32,
+ mkexpr(t5), mkU32(0)),
+ mkexpr(t5),
+ IRExpr_ITE(mkexpr(t3),
+ mkU32(2),
+ mkU32(1))))));
+ break;
+ }
+ default:
+ goto decode_failure;
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+
default:
if (dis_instr_CCondFmt(cins))
break;
break;
case 0x0F: /* LUI */
- p = (imm << 16);
- DIP("lui r%u, imm: 0x%x", rt, imm);
- if (mode64)
- putIReg(rt, mkU64(extend_s_32to64(p)));
- else
- putIReg(rt, mkU32(p));
+ if (rs == 0) {
+ p = (imm << 16);
+ DIP("lui r%u, imm: 0x%x", rt, imm);
+ if (mode64)
+ putIReg(rt, mkU64(extend_s_32to64(p)));
+ else
+ putIReg(rt, mkU32(p));
+ break;
+ } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) { /* AUI */
+ DIP("aui r%u, imm: 0x%x", rt, imm);
+ if (mode64) {
+ putIReg(rt, unop(Iop_32Sto64,
+ unop(Iop_64to32,
+ binop(Iop_Add64,
+ getIReg(rs),
+ mkU64(extend_s_32to64(imm << 16))))));
+ } else {
+ putIReg(rt, binop(Iop_Add32, getIReg(rs), mkU32(imm << 16)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
break;
case 0x13: /* COP1X */
putIReg(rd, mkexpr(tmpRd));
break;
}
+ case 0x08 ... 0x0f: { /* DALIGN */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("daling r%u, r%u, r%u, %d", rd, rs, rt, lsb & 0x7);
+ UInt bp = (lsb & 0x7) << 3;
+ if (bp) {
+ putIReg(rd, binop(Iop_Or64,
+ binop(Iop_Shl64, getIReg(rt), mkU8(bp)),
+ binop(Iop_Shr64,
+ getIReg(rs), mkU8(64 - bp))));
+ } else
+ putIReg(rd, getIReg(rt));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
+
+ case 0: /* DBITSWAP */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dbitswap r%u, r%u", rd, rt);
+ putIReg(rd, qop(Iop_Rotx64, getIReg(rt), mkU8(7), mkU8(8), mkU8(1)));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
default:
vex_printf("\nop6o10 = %u", lsb);
goto decode_failure;;
case 0x3B: /* RDHWR */
DIP("rdhwr r%u, r%u", rt, rd);
if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps) ||
+ VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) ||
(VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_BROADCOM)) {
if (rd == 29) {
putIReg(rt, getULR());
case 0x20: /* BSHFL */
switch (sa) {
+ case 0x0: /* BITSWAP */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("bitswap r%u, r%u", rd, rt);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Uto64, qop(Iop_Rotx32, unop(Iop_64to32, getIReg(rt)),
+ mkU8(7), mkU8(8), mkU8(1))));
+ } else {
+ putIReg(rd, qop(Iop_Rotx32, getIReg(rt), mkU8(7),
+ mkU8(8), mkU8(1)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+
case 0x02: /* WSBH */
DIP("wsbh r%u, r%u", rd, rt);
t0 = newTemp(Ity_I32);
putIReg(rd, unop(Iop_16Sto32, unop(Iop_32to16, getIReg(rt))));
break;
+ case 0x08 ... 0x0b: /* ALIGN */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
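+            /* ALIGN rd, rs, rt, bp: rd = (rt << 8*bp) | (rs >> (32 - 8*bp)),
+               sign-extended to 64 bits in mode64. */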
+ if (mode64) {
+ UInt bp = (sa & 0x3) << 3;
+ if (bp) {
+ putIReg(rd, unop(Iop_32Sto64,
+ binop(Iop_Or32,
+ binop(Iop_Shl32,
+ unop(Iop_64to32,
+ getIReg(rt)),
+ mkU8(bp)),
+ binop(Iop_Shr32,
+ unop(Iop_64to32,
+ getIReg(rs)),
+ mkU8(32 - bp)))));
+ } else
+ putIReg(rd, getIReg(rt));
+ } else {
+ UInt bp = (sa & 0x3) << 3;
+ if (bp) {
+ putIReg(rd, binop(Iop_Or32,
+ binop(Iop_Shl32,
+ getIReg(rt), mkU8(bp)),
+ binop(Iop_Shr32,
+ getIReg(rs), mkU8(32 - bp))));
+ } else
+ putIReg(rd, getIReg(rt));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+
default:
goto decode_failure;
goto decode_failure_dsp;
}
}
- default:
- goto decode_failure;
+      case 0x35: { /* PREF (R6 encoding), treated as a no-op */
+ DIP("pref");
+ break;
+ }
+ case 0x36: { /* LL */
+ imm = extend_s_9to16((instr_index >> 7) & 0x1ff);
+ DIP("ll r%u, %u(r%u)", rt, imm, rs);
+ LOAD_STORE_PATTERN;
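+         /* Record the reservation address and the loaded value so that a
+            subsequent SC can validate them. */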
+ t2 = newTemp(ty);
+ assign(t2, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), True));
+ putLLaddr(mkexpr(t1));
+ putLLdata(mkexpr(t2));
+ putIReg(rt, mkexpr(t2));
+ break;
+ }
+ case 0x26: { /* SC */
+ imm = extend_s_9to16((instr_index >> 7) & 0x1ff);
+ DIP("sc r%u, %u(r%u)", rt, imm, rs);
+ LOAD_STORE_PATTERN;
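+         /* SC is modelled as: fail immediately if the address no longer
+            matches the LL reservation, otherwise attempt a CAS that expects
+            the value saved by LL; rt receives 1 on success, 0 on failure. */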
- }
- break; /* Special3 */
+ t2 = newTemp(Ity_I1);
+ t3 = newTemp(Ity_I32);
+ assign(t2, binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+ mkexpr(t1), getLLaddr()));
+ assign(t3, mkNarrowTo32(ty, getIReg(rt)));
+ putLLaddr(LLADDR_INVALID);
+ putIReg(rt, getIReg(0));
- case 0x3B:
- if (0x3B == function &&
- (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_BROADCOM)) {
- /*RDHWR*/
- DIP("rdhwr r%u, r%u", rt, rd);
- if (rd == 29) {
- putIReg(rt, getULR());
- } else
+ mips_next_insn_if(mkexpr(t2));
+
+ t4 = newTemp(Ity_I32);
+ t5 = newTemp(Ity_I32);
+
+ assign(t5, mkNarrowTo32(ty, getLLdata()));
+
+ stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID, t4, /* old_mem */
+ MIPS_IEND, mkexpr(t1), /* addr */
+ NULL, mkexpr(t5), /* expected value */
+ NULL, mkexpr(t3) /* new value */)));
+
+ putIReg(rt, unop(mode64 ? Iop_1Uto64 : Iop_1Uto32,
+ binop(Iop_CmpEQ32, mkexpr(t4), mkexpr(t5))));
+ break;
+ }
+ case 0x37: { /* LLD */
+ imm = extend_s_9to16((instr_index >> 7) & 0x1ff);
+ DIP("lld r%u, %u(r%u)", rt, imm, rs);
+ LOAD_STORE_PATTERN;
+
+ t2 = newTemp(Ity_I64);
+ assign(t2, load(Ity_I64, mkexpr(t1)));
+ putLLaddr(mkexpr(t1));
+ putLLdata(mkexpr(t2));
+ putIReg(rt, mkexpr(t2));
+ break;
+ }
+ case 0x27: { /* SCD */
+ imm = extend_s_9to16((instr_index >> 7) & 0x1ff);
+ DIP("sdc r%u, %u(r%u)", rt, imm, rs);
+ LOAD_STORE_PATTERN;
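+         /* Same reservation-check-plus-CAS scheme as SC above, on 64-bit
+            data. */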
+
+ t2 = newTemp(Ity_I1);
+ t3 = newTemp(Ity_I64);
+ assign(t2, binop(Iop_CmpNE64, mkexpr(t1), getLLaddr()));
+ assign(t3, getIReg(rt));
+ putLLaddr(LLADDR_INVALID);
+ putIReg(rt, getIReg(0));
+
+ mips_next_insn_if(mkexpr(t2));
+
+ t4 = newTemp(Ity_I64);
+ t5 = newTemp(Ity_I64);
+
+ assign(t5, getLLdata());
+
+ stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID, t4, /* old_mem */
+ MIPS_IEND, mkexpr(t1), /* addr */
+ NULL, mkexpr(t5), /* expected value */
+ NULL, mkexpr(t3) /* new value */)));
+
+ putIReg(rt, unop(Iop_1Uto64,
+ binop(Iop_CmpEQ64, mkexpr(t4), mkexpr(t5))));
+ break;
+ }
+ default:
+ goto decode_failure;
+
+ }
+ break; /* Special3 */
+
+ case 0x3B: /* PCREL */
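+      /* The PCREL minor opcode lives in the rt field: 0x1E AUIPC,
+         0x1F ALUIPC, 0b00xxx ADDIUPC, 0b01xxx LWPC, 0b10xxx LWUPC and
+         0b110xx LDPC. */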
+ if (rt == 0x1E) { /* AUIPC */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("auipc r%u, %u", rs, imm);
+ if (mode64) {
+               putIReg(rs, mkU64(guest_PC_curr_instr +
+                                 extend_s_32to64(imm << 16)));
+ } else {
+ putIReg(rs, mkU32(guest_PC_curr_instr + (imm << 16)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ } else if (rt == 0x1F) { /* ALUIPC */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("aluipc r%u, %u", rs, imm);
+ if (mode64) {
+ putIReg(rs, mkU64((~0x0FFFFULL) &
+ (guest_PC_curr_instr + extend_s_32to64(imm << 16))));
+ } else {
+ putIReg(rs, mkU32((~0x0FFFFULL) &
+ (guest_PC_curr_instr + (imm << 16))));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ } else if ((rt & 0x18) == 0) { /* ADDIUPC */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("addiupc r%u, %u", rs, instr_index & 0x7FFFF);
+ if (mode64) {
+ putIReg(rs, mkU64(guest_PC_curr_instr +
+ (extend_s_19to64(instr_index & 0x7FFFF) << 2)));
+ } else {
+ putIReg(rs, mkU32(guest_PC_curr_instr +
+ (extend_s_19to32(instr_index & 0x7FFFF) << 2)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ } else if ((rt & 0x18) == 8) { /* LWPC */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("lwpc r%u, %x", rs, instr_index & 0x7FFFF);
+ if (mode64) {
+ t1 = newTemp(Ity_I64);
+ assign(t1, mkU64(guest_PC_curr_instr +
+ (extend_s_19to64(instr_index & 0x7FFFF) << 2)));
+ putIReg(rs, unop(Iop_32Sto64, load(Ity_I32, mkexpr(t1))));
+ } else {
+ t1 = newTemp(Ity_I32);
+ assign(t1, mkU32(guest_PC_curr_instr +
+ (extend_s_19to32(instr_index & 0x7FFFF) << 2)));
+ putIReg(rs, load(Ity_I32, mkexpr(t1)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ } else if ((rt & 0x18) == 16) { /* LWUPC */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("lwupc r%u, %x", rs, instr_index & 0x7FFFF);
+ if (mode64) {
+ t1 = newTemp(Ity_I64);
+ assign(t1, mkU64(guest_PC_curr_instr +
+ (extend_s_19to64(instr_index & 0x7FFFF) << 2)));
+ putIReg(rs, unop(Iop_32Uto64, load(Ity_I32, mkexpr(t1))));
+ } else {
+ t1 = newTemp(Ity_I32);
+ assign(t1, mkU32(guest_PC_curr_instr +
+ (extend_s_19to32(instr_index & 0x7FFFF) << 2)));
+ putIReg(rs, load(Ity_I32, mkexpr(t1)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ } else if ((rt & 0x1C) == 0x18) { /* LDPC */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("ldpc r%u, %x", rs, instr_index & 0x3FFFF);
+ t1 = newTemp(Ity_I64);
+ assign(t1, mkU64(guest_PC_curr_instr +
+ (extend_s_18to64(instr_index & 0x3FFFF) << 3)));
+ putIReg(rs, load(Ity_I64, mkexpr(t1)));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ } else {
+ goto decode_failure;
+ }
+
+ if (0x3B == function &&
+ (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_BROADCOM)) {
+ /*RDHWR*/
+ DIP("rdhwr r%u, r%u", rt, rd);
+ if (rd == 29) {
+ putIReg(rt, getULR());
+ } else
goto decode_failure;
break;
} else {
}
case 0x18: { /* MULT */
- if ( (1 <= ac) && ( 3 >= ac) ) {
- if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
- /* If DSP is present -> DSP ASE MULT */
- UInt retVal = disDSPInstr_MIPS_WRK ( cins );
- if (0 != retVal ) {
- goto decode_failure_dsp;
+ switch (sa & 0x3) {
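+            /* R6 reuses the MULT function code: sa == 2 selects MUL (low
+               32 bits to rd), sa == 3 selects MUH (high 32 bits to rd);
+               sa == 0 is the classic HI/LO form. */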
+ case 0: {
+ if ((1 <= ac) && ( 3 >= ac)) {
+ if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+ /* If DSP is present -> DSP ASE MULT */
+ UInt retVal = disDSPInstr_MIPS_WRK(cins);
+ if (0 != retVal) {
+ goto decode_failure_dsp;
+ }
+ break;
+ } else {
+ goto decode_failure_dsp;
+ }
+ } else {
+ DIP("mult r%u, r%u", rs, rt);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ t2 = newTemp(Ity_I64);
+
+ assign(t2, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)),
+ mkNarrowTo32(ty, getIReg(rt))));
+
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+ break;
+ }
+ }
+ case 2: { /* MUL R6 */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("mul r%u, r%u, r%u", rs, rt, rd);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Sto64,
+ unop(Iop_64to32,
+ binop(Iop_MullS32,
+ unop(Iop_64to32, getIReg(rs)),
+ unop(Iop_64to32, getIReg(rt))))));
+ } else {
+ putIReg(rd, unop(Iop_64to32,
+ binop(Iop_MullS32,
+ getIReg(rs), getIReg(rt))));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
}
break;
- } else {
- goto decode_failure_dsp;
}
- } else {
- DIP("mult r%u, r%u", rs, rt);
- t2 = newTemp(Ity_I64);
- assign(t2, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)),
- mkNarrowTo32(ty, getIReg(rt))));
-
- putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
- putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
- break;
+ case 3: { /* MUH R6 */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("muh r%u, r%u, r%u", rs, rt, rd);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Sto64,
+ unop(Iop_64HIto32,
+ binop(Iop_MullS32,
+ unop(Iop_64to32, getIReg(rs)),
+ unop(Iop_64to32, getIReg(rt))))));
+ } else {
+ putIReg(rd, unop(Iop_64HIto32,
+ binop(Iop_MullS32,
+ getIReg(rs), getIReg(rt))));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
}
+ break;
}
+
case 0x19: { /* MULTU */
- if ( (1 <= ac) && ( 3 >= ac) ) {
- if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
- /* If DSP is present -> DSP ASE MULTU */
- UInt retVal = disDSPInstr_MIPS_WRK ( cins );
- if (0 != retVal ) {
- goto decode_failure_dsp;
+ switch (sa & 0x3) {
+ case 0: {
+ if ((1 <= ac) && ( 3 >= ac)) {
+ if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+ /* If DSP is present -> DSP ASE MULTU */
+ UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+ if (0 != retVal) {
+ goto decode_failure_dsp;
+ }
+ break;
+ } else {
+ goto decode_failure_dsp;
+ }
+ } else {
+ DIP("multu r%u, r%u", rs, rt);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ t2 = newTemp(Ity_I64);
+
+ assign(t2, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)),
+ mkNarrowTo32(ty, getIReg(rt))));
+
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+ break;
+ }
+ }
+ case 2: { /* MULU R6 */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("mulu r%u, r%u, r%u", rs, rt, rd);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Uto64,
+ unop(Iop_64to32,
+ binop(Iop_MullU32,
+ unop(Iop_64to32, getIReg(rs)),
+ unop(Iop_64to32, getIReg(rt))))));
+ } else {
+ putIReg(rd, unop(Iop_64to32,
+ binop(Iop_MullU32,
+ getIReg(rs), getIReg(rt))));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
+ case 3: { /* MUHU R6 */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("muhu r%u, r%u, r%u", rs, rt, rd);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Uto64,
+ unop(Iop_64HIto32,
+ binop(Iop_MullU32,
+ unop(Iop_64to32, getIReg(rs)),
+ unop(Iop_64to32, getIReg(rt))))));
+ } else {
+ putIReg(rd, unop(Iop_64HIto32,
+ binop(Iop_MullU32,
+ getIReg(rs), getIReg(rt))));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
}
break;
- } else {
- goto decode_failure_dsp;
}
- } else {
- DIP("multu r%u, r%u", rs, rt);
- t2 = newTemp(Ity_I64);
-
- assign(t2, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)),
- mkNarrowTo32(ty, getIReg(rt))));
-
- putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
- putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
- break;
}
}
+ break;
+
case 0x20: { /* ADD */
DIP("add r%u, r%u, r%u", rd, rs, rt);
IRTemp tmpRs32 = newTemp(Ity_I32);
putIReg(rd, mkWidenFrom32(ty, mkexpr(t0), True));
break;
}
+
case 0x1A: /* DIV */
- DIP("div r%u, r%u", rs, rt);
- if (mode64) {
- t2 = newTemp(Ity_I64);
+ switch (sa & 0x3) {
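+            /* R6 reuses the DIV function code: sa == 2 selects DIV
+               (quotient to rd), sa == 3 selects MOD (remainder to rd);
+               sa == 0 is the classic HI/LO form. */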
+ case 0:
+ DIP("div r%u, r%u", rs, rt);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ if (mode64) {
+ t2 = newTemp(Ity_I64);
- assign(t2, binop(Iop_DivModS32to32,
- mkNarrowTo32(ty, getIReg(rs)),
- mkNarrowTo32(ty, getIReg(rt))));
+ assign(t2, binop(Iop_DivModS32to32,
+ mkNarrowTo32(ty, getIReg(rs)),
+ mkNarrowTo32(ty, getIReg(rt))));
- putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
- putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
- } else {
- t1 = newTemp(Ity_I64);
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+ } else {
+ t1 = newTemp(Ity_I64);
- assign(t1, binop(Iop_DivModS32to32, getIReg(rs), getIReg(rt)));
+ assign(t1, binop(Iop_DivModS32to32, getIReg(rs), getIReg(rt)));
- putHI(unop(Iop_64HIto32, mkexpr(t1)));
- putLO(unop(Iop_64to32, mkexpr(t1)));
+ putHI(unop(Iop_64HIto32, mkexpr(t1)));
+ putLO(unop(Iop_64to32, mkexpr(t1)));
+ }
+ break;
+ case 2:
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("div r%u, r%u, r%u", rs, rt, rd);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Sto64,
+ binop(Iop_DivS32,
+ unop(Iop_64to32, getIReg(rs)),
+ unop(Iop_64to32, getIReg(rt)))));
+ } else
+ putIReg(rd, binop(Iop_DivS32, getIReg(rs), getIReg(rt)));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ case 3:
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("mod r%u, r%u, r%u", rs, rt, rd);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Sto64,
+ unop(Iop_64HIto32,
+ binop(Iop_DivModS32to32,
+ unop(Iop_64to32, getIReg(rs)),
+ unop(Iop_64to32, getIReg(rt))))));
+ } else {
+ t1 = newTemp(Ity_I64);
+
+ assign(t1, binop(Iop_DivModS32to32, getIReg(rs), getIReg(rt)));
+ putIReg(rd, unop(Iop_64HIto32, mkexpr(t1)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
}
break;
case 0x1B: /* DIVU */
- DIP("divu r%u, r%u", rs, rt);
- if (mode64) {
- t2 = newTemp(Ity_I64);
+ switch (sa & 0x3) {
+ case 0:
+ DIP("divu r%u, r%u", rs, rt);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ if (mode64) {
+ t1 = newTemp(Ity_I64);
- assign(t2, binop(Iop_DivModU32to32,
- mkNarrowTo32(ty, getIReg(rs)),
- mkNarrowTo32(ty, getIReg(rt))));
+ assign(t1, binop(Iop_DivModU32to32,
+ mkNarrowTo32(ty, getIReg(rs)),
+ mkNarrowTo32(ty, getIReg(rt))));
- putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
- putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
- } else {
- t1 = newTemp(Ity_I64);
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t1)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t1)), True));
+ } else {
+ t1 = newTemp(Ity_I64);
- assign(t1, binop(Iop_DivModU32to32, getIReg(rs), getIReg(rt)));
- putHI(unop(Iop_64HIto32, mkexpr(t1)));
- putLO(unop(Iop_64to32, mkexpr(t1)));
- }
- break;
+ assign(t1, binop(Iop_DivModU32to32, getIReg(rs), getIReg(rt)));
+ putHI(unop(Iop_64HIto32, mkexpr(t1)));
+ putLO(unop(Iop_64to32, mkexpr(t1)));
+ }
+ break;
+ case 2:
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("divu r%u, r%u, r%u", rs, rt, rd);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Sto64,
+ binop(Iop_DivU32,
+ unop(Iop_64to32, getIReg(rs)),
+ unop(Iop_64to32, getIReg(rt)))));
+ } else {
+ putIReg(rd, binop(Iop_DivU32, getIReg(rs), getIReg(rt)));
+ }
+ break;
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ case 3:
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("modu r%u, r%u, r%u", rs, rt, rd);
+ if (mode64) {
+ putIReg(rd, unop(Iop_32Uto64,
+ unop(Iop_64HIto32,
+ binop(Iop_DivModU32to32,
+ unop(Iop_64to32, getIReg(rs)),
+ unop(Iop_64to32, getIReg(rt))))));
+ } else {
+ t1 = newTemp(Ity_I64);
+
+ assign(t1, binop(Iop_DivModU32to32, getIReg(rs), getIReg(rt)));
+ putIReg(rd, unop(Iop_64HIto32, mkexpr(t1)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
+ break;
case 0x1C: /* Doubleword Multiply - DMULT; MIPS64 */
- DIP("dmult r%u, r%u", rs, rt);
- t0 = newTemp(Ity_I128);
+ switch (sa) {
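+ /* On R6 the sa field distinguishes legacy DMULT (0) from DMUL (2)
+ and DMUH (3). */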
+ case 0:
+ DIP("dmult r%u, r%u", rs, rt);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ t0 = newTemp(Ity_I128);
- assign(t0, binop(Iop_MullS64, getIReg(rs), getIReg(rt)));
+ assign(t0, binop(Iop_MullS64, getIReg(rs), getIReg(rt)));
- putHI(unop(Iop_128HIto64, mkexpr(t0)));
- putLO(unop(Iop_128to64, mkexpr(t0)));
+ putHI(unop(Iop_128HIto64, mkexpr(t0)));
+ putLO(unop(Iop_128to64, mkexpr(t0)));
+ break;
+ case 2: /* DMUL */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dmul r%u, r%u, r%u", rd, rs, rt);
+ putIReg(rd, unop(Iop_128to64,
+ binop(Iop_MullS64, getIReg(rs), getIReg(rt))));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ /* Note: the documentation gives 111000 as the value of the function field. */
+ break;
+ case 3: /* DMUH */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dmuh r%u, r%u, r%u", rd, rs, rt);
+ putIReg(rd, unop(Iop_128HIto64,
+ binop(Iop_MullS64, getIReg(rs), getIReg(rt))));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
break;
case 0x1D: /* Doubleword Multiply Unsigned - DMULTU; MIPS64 */
- DIP("dmultu r%u, r%u", rs, rt);
- t0 = newTemp(Ity_I128);
+ switch (sa) {
+ case 0:
+ DIP("dmultu r%u, r%u", rs, rt);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ t0 = newTemp(Ity_I128);
- assign(t0, binop(Iop_MullU64, getIReg(rs), getIReg(rt)));
+ assign(t0, binop(Iop_MullU64, getIReg(rs), getIReg(rt)));
- putHI(unop(Iop_128HIto64, mkexpr(t0)));
- putLO(unop(Iop_128to64, mkexpr(t0)));
+ putHI(unop(Iop_128HIto64, mkexpr(t0)));
+ putLO(unop(Iop_128to64, mkexpr(t0)));
+ break;
+ case 2: /* DMULU */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dmulu r%u, r%u, r%u", rd, rs, rt);
+ putIReg(rd, unop(Iop_128to64,
+ binop(Iop_MullU64, getIReg(rs), getIReg(rt))));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ case 3: /* DMUHU */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dmuhu r%u, r%u, r%u", rd, rs, rt);
+ putIReg(rd, unop(Iop_128HIto64,
+ binop(Iop_MullU64, getIReg(rs), getIReg(rt))));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
break;
case 0x1E: /* Doubleword Divide DDIV; MIPS64 */
- DIP("ddiv r%u, r%u", rs, rt);
- t1 = newTemp(Ity_I128);
+ switch (sa) {
+ case 0:
+ DIP("ddiv r%u, r%u", rs, rt);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ t1 = newTemp(Ity_I128);
- assign(t1, binop(Iop_DivModS64to64, getIReg(rs), getIReg(rt)));
+ assign(t1, binop(Iop_DivModS64to64, getIReg(rs), getIReg(rt)));
- putHI(unop(Iop_128HIto64, mkexpr(t1)));
- putLO(unop(Iop_128to64, mkexpr(t1)));
+ putHI(unop(Iop_128HIto64, mkexpr(t1)));
+ putLO(unop(Iop_128to64, mkexpr(t1)));
+ break;
+ case 2: /* DDIV r6 */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("ddiv r%u, r%u, r%u", rs, rt, rd);
+ putIReg(rd, unop(Iop_128to64,
+ binop(Iop_DivModS64to64,
+ getIReg(rs), getIReg(rt))));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ case 3: /* DMOD r6 */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dmod r%u, r%u, r%u", rs, rt, rd);
+ t2 = newTemp(Ity_I128);
+ assign(t2, binop(Iop_DivModS64to64, getIReg(rs), getIReg(rt)));
+ putIReg(rd, unop(Iop_128HIto64, mkexpr(t2)));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
break;
case 0x1F: /* Doubleword Divide Unsigned DDIVU; MIPS64 check this */
- DIP("ddivu r%u, r%u", rs, rt);
- t1 = newTemp(Ity_I128);
+ switch (sa) {
+ case 0:
+ DIP("ddivu r%u, r%u", rs, rt);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ t1 = newTemp(Ity_I128);
- assign(t1, binop(Iop_DivModU64to64, getIReg(rs), getIReg(rt)));
+ assign(t1, binop(Iop_DivModU64to64, getIReg(rs), getIReg(rt)));
- putHI(unop(Iop_128HIto64, mkexpr(t1)));
- putLO(unop(Iop_128to64, mkexpr(t1)));
+ putHI(unop(Iop_128HIto64, mkexpr(t1)));
+ putLO(unop(Iop_128to64, mkexpr(t1)));
+ break;
+ case 2:
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("ddivu r%u, r%u, r%u", rs, rt, rd);
+ putIReg(rd, unop(Iop_128to64, binop(Iop_DivModU64to64,
+ getIReg(rs), getIReg(rt))));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ case 3:
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dmodu r%u, r%u, r%u", rs, rt, rd);
+ putIReg(rd, unop(Iop_128HIto64, binop(Iop_DivModU64to64,
+ getIReg(rs), getIReg(rt))));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
break;
- case 0x10: { /* MFHI */
- if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+ case 0x10: { /* MFHI, CLZ R6 */
+ if (((instr_index >> 6) & 0x1f) == 1) { /* CLZ */
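+ /* R6 re-encodes CLZ in the former MFHI slot, with 1 in the sa field. */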
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("clz r%u, r%u", rd, rs);
+ if (mode64) {
+ IRTemp tmpClz32 = newTemp(Ity_I32);
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+
+ assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+ assign(tmpClz32, unop(Iop_Clz32, mkexpr(tmpRs32)));
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpClz32), True));
+ } else {
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t1),
+ mkU32(0x00000020),
+ unop(Iop_Clz32, getIReg(rs))));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ } else if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
/* If DSP is present -> DSP ASE MFHI */
UInt retVal = disDSPInstr_MIPS_WRK ( cins );
if (0 != retVal ) {
}
}
- case 0x11: { /* MTHI */
- if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+ case 0x11: { /* MTHI, CLO R6 */
+ if (((instr_index >> 6) & 0x1f) == 1) { /* CLO */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("clo r%u, r%u", rd, rs);
+ if (mode64) {
+ IRTemp tmpClo32 = newTemp(Ity_I32);
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+ assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ32, mkexpr(tmpRs32), mkU32(0xffffffff)));
+ assign(tmpClo32, IRExpr_ITE(mkexpr(t1),
+ mkU32(0x00000020),
+ unop(Iop_Clz32, unop(Iop_Not32, mkexpr(tmpRs32)))));
+
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpClo32), True));
+ break;
+ } else {
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0xffffffff)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t1),
+ mkU32(0x00000020),
+ unop(Iop_Clz32,
+ unop(Iop_Not32, getIReg(rs)))));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ } else if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
/* If DSP is present -> DSP ASE MTHI */
UInt retVal = disDSPInstr_MIPS_WRK ( cins );
if (0 != retVal ) {
}
break;
} else {
- DIP("mflo r%u", rd);
- putIReg(rd, getLO());
+ switch (sa) {
+ case 0:
+ DIP("mflo r%u", rd);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ putIReg(rd, getLO());
+ break;
+ case 1:
+ DIP("dclz r%u, r%u", rd, rs);
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ64, getIReg(rs), mkU64(0)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t1),
+ mkU64(0x00000040),
+ unop(Iop_Clz64, getIReg(rs))));
+ break;
+ }
break;
}
}
}
break;
} else {
- DIP("mtlo r%u", rs);
- putLO(getIReg(rs));
+ switch (sa) {
+ case 0:
+ DIP("mtlo r%u", rs);
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) &&
+ !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo->hwcaps)) {
+ ILLEGAL_INSTRUCTON;
+ }
+ putLO(getIReg(rs));
+ break;
+ case 1:
+ DIP("dclo r%u, r%u", rd, rs);
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ64, getIReg(rs),
+ mkU64(0xffffffffffffffffULL)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t1),
+ mkU64(0x40),
+ unop(Iop_Clz64, unop(Iop_Not64,
+ getIReg(rs)))));
+ break;
+ }
break;
}
}
break;
}
}
-
- case 0x05: /* LSA */
- if (has_msa) {
- UInt imm2 = (imm & 0xC0) >> 6;
+ case 0x05: { /* LSA */
+ UInt imm2 = (imm & 0xC0) >> 6;
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) || has_msa) {
DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd, rs, rt, imm2);
if (mode64) {
- putIReg(rd,
- unop(Iop_32Sto64,
- binop(Iop_Add32,
- binop(Iop_Shl32,
- unop(Iop_64to32, getIReg(rs)),
- mkU8(imm2 + 1)),
- unop(Iop_64to32, getIReg(rt)))));
+ DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd, rs, rt, imm2);
+ putIReg(rd, unop(Iop_32Sto64,
+ binop(Iop_Add32,
+ binop(Iop_Shl32,
+ unop(Iop_64to32, getIReg(rs)),
+ mkU8(imm2 + 1)),
+ unop(Iop_64to32, getIReg(rt)))));
+ break;
} else {
- putIReg(rd,
- binop(Iop_Add32,
- binop(Iop_Shl32, getIReg(rs), mkU8(imm2 + 1)),
- getIReg(rt)));
+ DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd, rs, rt, imm2);
+ putIReg(rd, binop(Iop_Add32,
+ binop(Iop_Shl32,
+ getIReg(rs), mkU8(imm2 + 1)), getIReg(rt)));
+ break;
}
} else {
ILLEGAL_INSTRUCTON;
}
- break;
+ break;
+ }
case 0x15:{ /* DLSA */
UInt imm2 = (imm & 0xC0) >> 6;
- DIP("dlsa r%u, r%u, r%u, imm: 0x%x", rd, rs, rt, imm2);
- putIReg(rd,
- binop(Iop_Add64,
- binop(Iop_Shl64, getIReg(rs), mkU8(imm2 + 1)),
- getIReg(rt)));
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps) || has_msa) {
+ DIP("dlsa r%u, r%u, r%u, imm: 0x%x", rd, rs, rt, imm2);
+ putIReg(rd, binop(Iop_Add64,
+ binop(Iop_Shl64, getIReg(rs), mkU8(imm2 + 1)),
+ getIReg(rt)));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
break;
}
}
break;
}
+ case 0x35: { /* SELEQZ */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("seleqz r%u, r%u, r%u", rd, rs, rt);
+ if (mode64) {
+ putIReg(rd, binop(Iop_And64,
+ unop(Iop_Not64,
+ unop(Iop_CmpwNEZ64, getIReg(rt))),
+ getIReg(rs)));
+ } else {
+ putIReg(rd, binop(Iop_And32,
+ unop(Iop_Not32,
+ unop(Iop_CmpwNEZ32, getIReg(rt))),
+ getIReg(rs)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
+
case 0x36: { /* TNE */
DIP("tne r%u, r%u %u", rs, rt, trap_code);
if (mode64) {
}
break;
}
+ case 0x37: { /* SELNEZ */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("selnez r%u, r%u, r%u", rd, rs, rt);
+ if (mode64) {
+ putIReg(rd, binop(Iop_And64,
+ unop(Iop_CmpwNEZ64, getIReg(rt)), getIReg(rs)));
+ } else {
+ putIReg(rd, binop(Iop_And32,
+ unop(Iop_CmpwNEZ32, getIReg(rt)), getIReg(rs)));
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
case 0x14:
case 0x16:
case 0x17: /* DSLLV, DROTRV:DSRLV, DSRAV */
case 0x08: /* TGEI */
DIP("tgei r%u, %u %u", rs, imm, trap_code);
if (mode64) {
- stmt (IRStmt_Exit (unop (Iop_Not1,
- binop (Iop_CmpLT64S,
- getIReg (rs),
- mkU64 (extend_s_16to64 (imm)))),
+ stmt (IRStmt_Exit(unop(Iop_Not1,
+ binop(Iop_CmpLT64S,
+ getIReg(rs),
+ mkU64(extend_s_16to64 (imm)))),
Ijk_SigTRAP,
IRConst_U64(guest_PC_curr_instr + 4),
OFFB_PC));
} else {
- stmt (IRStmt_Exit (unop (Iop_Not1,
- binop (Iop_CmpLT32S,
- getIReg (rs),
- mkU32 (extend_s_16to32 (imm)))),
+ stmt (IRStmt_Exit(unop(Iop_Not1,
+ binop(Iop_CmpLT32S,
+ getIReg(rs),
+ mkU32(extend_s_16to32 (imm)))),
Ijk_SigTRAP,
IRConst_U32(guest_PC_curr_instr + 4),
OFFB_PC));
/* Just ignore it */
break;
+ case 0x06: { /* DAHI */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dahi r%u, %x", rs, imm);
+ putIReg(rs, binop(Iop_Add64,
+ getIReg(rs), mkU64(extend_s_16to64 (imm) << 32)));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
+
+ case 0x1E: { /* DATI */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("dati r%u, %x", rs, imm);
+ putIReg(rs, binop(Iop_Add64,
+ getIReg(rs), mkU64(((ULong) imm) << 48)));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
+ }
+
default:
goto decode_failure;
}
getIReg(rs), getIReg(rt)), imm);
break;
- case 0x07: /* BGTZ */
- DIP("bgtz r%u, %u", rs, imm);
- if (mode64)
- dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE64S, getIReg(rs),
- mkU64(0x00))), imm, &bstmt);
- else
- dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE32S, getIReg(rs),
- mkU32(0x00))), imm, &bstmt);
+ case 0x07: /* BGTZ, BGTZALC, BLTZALC, BLTUC */
+ if (rt == 0) { /* BGTZ */
+ DIP("bgtz r%u, %u", rs, imm);
+ if (mode64)
+ dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE64S, getIReg(rs),
+ mkU64(0x00))), imm, &bstmt);
+ else
+ dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE32S, getIReg(rs),
+ mkU32(0x00))), imm, &bstmt);
+ } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ if (rs == 0) { /* BGTZALC */
+ DIP("bgtzalc r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(True,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE64S,
+ getIReg(rt), mkU64(0x0))),
+ imm, &dres);
+ } else {
+ dis_branch_compact(True,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE32S,
+ getIReg(rt), mkU32(0x0))),
+ imm, &dres);
+ }
+ } else if (rs == rt) { /* BLTZALC */
+ DIP("bltzalc r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(True,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE64S,
+ mkU64(0x0), getIReg(rt))),
+ imm, &dres);
+ } else {
+ dis_branch_compact(True,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE32S,
+ mkU32(0x0), getIReg(rt))),
+ imm, &dres);
+ }
+ } else { /* BLTUC */
+ DIP("bltuc r%u, r%u, %u", rt, rs, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ binop(Iop_CmpLT64U, getIReg(rs), getIReg(rt)),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ binop(Iop_CmpLT32U, getIReg(rs), getIReg(rt)),
+ imm, &dres);
+ }
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
break;
- case 0x17: /* BGTZL */
- DIP("bgtzl r%u, %u", rs, imm);
- if (mode64)
- lastn = dis_branch_likely(binop(Iop_CmpLE64S, getIReg(rs),
- mkU64(0x00)), imm);
- else
- lastn = dis_branch_likely(binop(Iop_CmpLE32S, getIReg(rs),
- mkU32(0x00)), imm);
+ case 0x17: /* BGTZL, BGTZC, BLTZC, BLTC */
+ if (rt == 0) { /* BGTZL */
+ DIP("bgtzl r%u, %u", rs, imm);
+ if (mode64)
+ lastn = dis_branch_likely(binop(Iop_CmpLE64S, getIReg(rs),
+ mkU64(0x00)), imm);
+ else
+ lastn = dis_branch_likely(binop(Iop_CmpLE32S, getIReg(rs),
+ mkU32(0x00)), imm);
+ } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ if (rs == 0) { /* BGTZC */
+ DIP("bgtzc r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE64S,
+ getIReg(rt), mkU64(0x0))),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE32S,
+ getIReg(rt), mkU32(0x0))),
+ imm, &dres);
+ }
+ } else if (rs == rt) { /* BLTZC */
+ DIP("bltzc r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE64S,
+ mkU64(0x0), getIReg(rt))),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE32S,
+ mkU32(0x0), getIReg(rt))),
+ imm, &dres);
+ }
+ } else { /* BLTC */
+ DIP("bltc r%u, r%u, %u", rs, rt, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE64S,
+ getIReg(rt), getIReg(rs))),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpLE32S,
+ getIReg(rt), getIReg(rs))),
+ imm, &dres);
+ }
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
break;
- case 0x06: /* BLEZ */
- DIP("blez r%u, %u", rs, imm);
- if (mode64)
- dis_branch(False, binop(Iop_CmpLE64S, getIReg(rs), mkU64(0x0)),
- imm, &bstmt);
- else
- dis_branch(False,binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x0)), imm,
- &bstmt);
+ case 0x06: /* BLEZ, BLEZALC, BGEZALC, BGEUC */
+ if (rt == 0) { /* BLEZ */
+ DIP("blez r%u, %u", rs, imm);
+ if (mode64)
+ dis_branch(False, binop(Iop_CmpLE64S, getIReg(rs), mkU64(0x0)),
+ imm, &bstmt);
+ else
+ dis_branch(False, binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x0)), imm,
+ &bstmt);
+ } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ if (rs == 0) { /* BLEZALC */
+ DIP("blezalc r%u, %u", rt, imm);
+ if (mode64)
+ dis_branch_compact(True,
+ binop(Iop_CmpLE64S, getIReg(rt), mkU64(0x0)),
+ imm, &dres);
+ else
+ dis_branch_compact(True,
+ binop(Iop_CmpLE32S, getIReg(rt), mkU32(0x0)),
+ imm, &dres);
+ } else if (rt == rs) {/* BGEZALC */
+ DIP("bgezalc r%u, %u", rt, imm);
+ if (mode64)
+ dis_branch_compact(True,
+ binop(Iop_CmpLE64S, mkU64(0x0), getIReg(rt)),
+ imm, &dres);
+ else
+ dis_branch_compact(True,
+ binop(Iop_CmpLE32S, mkU32(0x0), getIReg(rt)),
+ imm, &dres);
+ } else { /* BGEUC */
+ DIP("bgeuc r%u, r%u, %u", rt, rs, imm);
+ if (mode64)
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpLT64U,
+ getIReg(rs), getIReg(rt))),
+ imm, &dres);
+ else
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpLT32U,
+ getIReg(rs), getIReg(rt))),
+ imm, &dres);
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
break;
- case 0x16: /* BLEZL */
- DIP("blezl r%u, %u", rs, imm);
- lastn = dis_branch_likely(unop(Iop_Not1, (binop(mode64 ? Iop_CmpLE64S :
- Iop_CmpLE32S, getIReg(rs), mode64 ?
- mkU64(0x0) : mkU32(0x0)))), imm);
+ case 0x16: /* BLEZL, BLEZC, BGEZC, BGEC */
+ if (rt == 0) { /* BLEZL */
+ DIP("blezl r%u, %u", rs, imm);
+ lastn = dis_branch_likely(unop(Iop_Not1, (binop(mode64 ? Iop_CmpLE64S :
+ Iop_CmpLE32S, getIReg(rs), mode64 ?
+ mkU64(0x0) : mkU32(0x0)))), imm);
+ } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ if (rs == 0) { /* BLEZC */
+ DIP("blezc r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ binop(Iop_CmpLE64S, getIReg(rt), mkU64(0x0)),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ binop(Iop_CmpLE32S, getIReg(rt), mkU32(0x0)),
+ imm, &dres);
+ }
+ } else if (rt == rs) { /* BGEZC */
+ DIP("bgezc r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ binop(Iop_CmpLE64S, mkU64(0x0), getIReg(rt)),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ binop(Iop_CmpLE32S, mkU32(0x0), getIReg(rt)),
+ imm, &dres);
+ }
+ } else { /* BGEC */
+ DIP("bgec r%u, r%u, %u", rs, rt, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ binop(Iop_CmpLE64S, getIReg(rt), getIReg(rs)),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ binop(Iop_CmpLE32S, getIReg(rt), getIReg(rs)),
+ imm, &dres);
+ }
+ }
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
break;
+#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev < 6))
case 0x08: { /* ADDI */
DIP("addi r%u, r%u, %u", rt, rs, imm);
IRTemp tmpRs32 = newTemp(Ity_I32);
putIReg(rt, mkWidenFrom32(ty, mkexpr(t0), True));
break;
}
+#elif defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 6))
+ case 0x08: { /* BEQZALC, BEQC, BOVC */
+ if (rs == 0) { /* BEQZALC */
+ DIP("beqzalc r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(True,
+ binop(Iop_CmpEQ64, getIReg(rt), mkU64(0x0)),
+ imm, &dres);
+ } else {
+ dis_branch_compact(True,
+ binop(Iop_CmpEQ32, getIReg(rt), mkU32(0x0)),
+ imm, &dres);
+ }
+ } else if (rs < rt) { /* BEQC */
+ DIP("beqc r%u, r%u, %u",rs, rt, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ binop(Iop_CmpEQ64, getIReg(rt), getIReg(rs)),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ binop(Iop_CmpEQ32, getIReg(rt), getIReg(rs)),
+ imm, &dres);
+ }
+ } else { /* BOVC */
+ DIP("bovc r%u, r%u, %u",rs, rt, imm);
+ if (mode64) {
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I32);
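+ /* t0/t1/t2 are 1 unless rt, rs and rt+rs respectively lie in
+ [INT32_MIN, INT32_MAX); BOVC is taken when any of them is set. */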
+ assign(t0, IRExpr_ITE(binop(Iop_CmpLT64S,
+ getIReg(rt),
+ mkU64(0xffffffff80000000ULL)),
+ mkU32(1),
+ IRExpr_ITE(binop(Iop_CmpLT64S,
+ getIReg(rt),
+ mkU64(0x7FFFFFFFULL)),
+ mkU32(0),mkU32(1))));
+ assign(t1, IRExpr_ITE(binop(Iop_CmpLT64S,
+ getIReg(rs),
+ mkU64(0xffffffff80000000ULL)),
+ mkU32(1),
+ IRExpr_ITE(binop(Iop_CmpLT64S,
+ getIReg(rs),
+ mkU64(0x7FFFFFFFULL)),
+ mkU32(0), mkU32(1))));
+ assign(t2, IRExpr_ITE(binop(Iop_CmpLT64S,
+ binop(Iop_Add64,
+ getIReg(rt), getIReg(rs)),
+ mkU64(0xffffffff80000000ULL)),
+ mkU32(1),
+ IRExpr_ITE(binop(Iop_CmpLT64S,
+ binop(Iop_Add64,
+ getIReg(rt),
+ getIReg(rs)),
+ mkU64(0x7FFFFFFFULL)),
+ mkU32(0), mkU32(1))));
+ assign(t3, binop(Iop_Add32,
+ mkexpr(t0),
+ binop(Iop_Add32, mkexpr(t1), mkexpr(t2))));
+ dis_branch_compact(False,
+ binop(Iop_CmpNE32, mkexpr(t3), mkU32(0)),
+ imm, &dres);
+ } else {
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+ assign(tmpRs32, getIReg(rs));
+ assign(tmpRt32, getIReg(rt));
+
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I32);
+ t4 = newTemp(Ity_I32);
+ /* dst = src0 + src1
+ if (sign(src0 ) != sign(src1 ))
+ goto no overflow;
+ if (sign(dst) == sign(src0 ))
+ goto no overflow;
+ we have overflow! */
+
+ assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
+ assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
+ assign(t2, unop(Iop_1Uto32,
+ binop(Iop_CmpEQ32,
+ binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)),
+ mkU32(0x80000000))));
+
+ assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32)));
+ assign(t4, unop(Iop_1Uto32,
+ binop(Iop_CmpNE32,
+ binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)),
+ mkU32(0x80000000))));
+
+ dis_branch_compact(False, binop(Iop_CmpEQ32,
+ binop(Iop_Or32, mkexpr(t2), mkexpr(t4)),
+ mkU32(0)), imm, &dres);
+ }
+ }
+ break;
+ /* Note: the documentation states rs > rt for BEQC and rs >= rt for BOVC. */
+ }
+#endif
+
case 0x09: /* ADDIU */
DIP("addiu r%u, r%u, %u", rt, rs, imm);
if (mode64) {
} else
putIReg(rt, binop(Iop_Add32, getIReg(rs),mkU32(extend_s_16to32(imm))));
break;
-
case 0x0C: /* ANDI */
DIP("andi r%u, r%u, %u", rt, rs, imm);
if (mode64) {
mkU32(extend_s_16to32(imm)))));
break;
- case 0x18: { /* Doubleword Add Immidiate - DADD; MIPS64 */
+#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev < 6))
+ case 0x18: { /* Doubleword Add Immediate - DADDI; MIPS64 */
DIP("daddi r%u, r%u, %u", rt, rs, imm);
IRTemp tmpRs64 = newTemp(Ity_I64);
assign(tmpRs64, getIReg(rs));
putIReg(rt, mkexpr(t0));
break;
}
+#elif defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 6))
+ case 0x18: { /* BNEZALC, BNEC, BNVC */
+ if (rs == 0) { /* BNEZALC */
+ DIP("bnezalc r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(True,
+ unop(Iop_Not1,
+ binop(Iop_CmpEQ64, getIReg(rt), mkU64(0x0))),
+ imm, &dres);
+ } else {
+ dis_branch_compact(True,
+ unop(Iop_Not1,
+ binop(Iop_CmpEQ32, getIReg(rt), mkU32(0x0))),
+ imm, &dres);
+ }
+ } else if (rs < rt) { /* BNEC */
+ DIP("bnec r%u, %u", rt, imm);
+ if (mode64) {
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpEQ64,
+ getIReg(rt), getIReg(rs))),
+ imm, &dres);
+ } else {
+ dis_branch_compact(False,
+ unop(Iop_Not1,
+ binop(Iop_CmpEQ32,
+ getIReg(rt), getIReg(rs))),
+ imm, &dres);
+ }
+ } else { /* BNVC */
+ DIP("bnvc r%u, r%u, %u", rs, rt, imm);
+ if (mode64) {
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I32);
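+ /* Same range flags as for BOVC above; BNVC branches only when
+ none of them is set. */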
+ assign(t0, IRExpr_ITE(binop(Iop_CmpLT64S,
+ getIReg(rt),
+ mkU64(0xffffffff80000000ULL)),
+ mkU32(1),
+ IRExpr_ITE(binop(Iop_CmpLT64S,
+ getIReg(rt),
+ mkU64(0x7FFFFFFFULL)),
+ mkU32(0),mkU32(1))));
+ assign(t1, IRExpr_ITE(binop(Iop_CmpLT64S,
+ getIReg(rs),
+ mkU64(0xffffffff80000000ULL)),
+ mkU32(1),
+ IRExpr_ITE(binop(Iop_CmpLT64S,
+ getIReg(rs),
+ mkU64(0x7FFFFFFFULL)),
+ mkU32(0),mkU32(1))));
+ assign(t2, IRExpr_ITE(binop(Iop_CmpLT64S,
+ binop(Iop_Add64,
+ getIReg(rt), getIReg(rs)),
+ mkU64(0xffffffff80000000ULL)),
+ mkU32(1),
+ IRExpr_ITE(binop(Iop_CmpLT64S,
+ binop(Iop_Add64,
+ getIReg(rt),
+ getIReg(rs)),
+ mkU64(0x7FFFFFFFULL)),
+ mkU32(0),mkU32(1))));
+ assign(t3, binop(Iop_Add32,
+ mkexpr(t0),
+ binop(Iop_Add32, mkexpr(t1), mkexpr(t2))));
+ dis_branch_compact(False,
+ binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0)),
+ imm, &dres);
+ } else {
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+
+ assign(tmpRs32, getIReg(rs));
+ assign(tmpRt32, getIReg(rt));
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I32);
+ t4 = newTemp(Ity_I32);
+ /* dst = src0 + src1
+ if (sign(src0 ) != sign(src1 ))
+ goto no overflow;
+ if (sign(dst) == sign(src0 ))
+ goto no overflow;
+ we have overflow! */
+
+ assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
+ assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
+ assign(t2, unop(Iop_1Uto32,
+ binop(Iop_CmpEQ32,
+ binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)),
+ mkU32(0x80000000))));
+
+ assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32)));
+ assign(t4, unop(Iop_1Uto32,
+ binop(Iop_CmpNE32,
+ binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)),
+ mkU32(0x80000000))));
+
+ dis_branch_compact(False, binop(Iop_CmpNE32 ,
+ binop(Iop_Or32, mkexpr(t2), mkexpr(t4)),
+ mkU32(0)), imm, &dres);
+ }
+ }
+ break;
+ }
+#endif
case 0x19: /* Doubleword Add Immidiate Unsigned - DADDIU; MIPS64 */
DIP("daddiu r%u, r%u, %u", rt, rs, imm);
mkNarrowTo32(ty, getIReg(rs))),
mkU32(0x0)),
imm, &bstmt);
- break;
+ } else if (archinfo->hwcaps & VEX_MIPS_CPU_ISA_M32R6) { /* BC */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("bc %x", instr_index & 0x3FFFFFF);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ assign(t0, mkU64(guest_PC_curr_instr +
+ ((extend_s_26to64(instr_index & 0x3FFFFFF) + 1 ) << 2)));
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, mkU32(guest_PC_curr_instr +
+ ((extend_s_26to32(instr_index & 0x3FFFFFF) + 1) << 2)));
+ }
+ putPC(mkexpr(t0));
+ dres.whatNext = Dis_StopHere;
+ dres.jk_StopHere = Ijk_Boring;
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
} else {
goto decode_failure;
}
-
+ break;
case 0x36: /* Branch on Bit Clear Plus 32 - BBIT032; Cavium OCTEON */
/* Cavium Specific instructions. */
if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
getIReg(rs)),
mkU64(0x0)),
imm, &bstmt);
- break;
+ } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ if (rs == 0) { /* JIC */
+ DIP("jic r%u, %u", rt, instr_index & 0xFFFF);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ assign(t0, binop(Iop_Add64, getIReg(rt),
+ mkU64(extend_s_16to64((instr_index & 0xFFFF)))));
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_Add32, getIReg(rt),
+ mkU32(extend_s_16to32((instr_index & 0xFFFF)))));
+ }
+ putPC(mkexpr(t0));
+ dres.whatNext = Dis_StopHere;
+ dres.jk_StopHere = Ijk_Boring;
+ } else { /* BEQZC */
+ DIP("beqzc r%u, %u", rs, imm);
+ dres.jk_StopHere = Ijk_Boring;
+ dres.whatNext = Dis_StopHere;
+ ULong branch_offset;
+ t0 = newTemp(Ity_I1);
+ if (mode64) {
+ branch_offset = extend_s_23to64((instr_index & 0x1fffff) << 2);
+ assign(t0, binop(Iop_CmpEQ64, getIReg(rs), mkU64(0x0)));
+ stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
+ IRConst_U64(guest_PC_curr_instr + 4 + branch_offset),
+ OFFB_PC));
+ putPC(mkU64(guest_PC_curr_instr + 4));
+ } else {
+ branch_offset = extend_s_23to32((instr_index & 0x1fffff) << 2);
+ assign(t0, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0x0)));
+ stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
+ IRConst_U32(guest_PC_curr_instr + 4 +
+ (UInt) branch_offset), OFFB_PC));
+ putPC(mkU32(guest_PC_curr_instr + 4));
+ }
+ }
} else {
- goto decode_failure;
+ ILLEGAL_INSTRUCTON;
}
-
+ break;
case 0x3A: /* Branch on Bit Set - BBIT1; Cavium OCTEON */
/* Cavium Specific instructions. */
if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
mkNarrowTo32(ty, getIReg(rs))),
mkU32(0x0)),
imm, &bstmt);
- break;
+ } else if (archinfo->hwcaps & VEX_MIPS_CPU_ISA_M32R6) {/* BALC */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("balc %x", instr_index & 0x3FFFFFF);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ assign(t0, mkU64(guest_PC_curr_instr +
+ ((extend_s_26to64(instr_index & 0x3FFFFFF) + 1) << 2)));
+ putIReg(31, mkU64(guest_PC_curr_instr + 4));
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, mkU32(guest_PC_curr_instr +
+ ((extend_s_26to32(instr_index & 0x3FFFFFF) + 1) << 2)));
+ putIReg(31, mkU32(guest_PC_curr_instr + 4));
+ }
+ putPC(mkexpr(t0));
+ dres.whatNext = Dis_StopHere;
+ dres.jk_StopHere = Ijk_Call;
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
} else {
goto decode_failure;
}
-
+ break;
case 0x3E: /* Branch on Bit Set Plus 32 - BBIT132; Cavium OCTEON */
/* Cavium Specific instructions. */
if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
getIReg(rs)),
mkU64(0x0)),
imm, &bstmt);
- break;
+ } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ if (rs == 0) {/* JIALC */
+ DIP("jialc r%u, %u", rt, instr_index & 0xFFFF);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ assign(t0, binop(Iop_Add64, getIReg(rt),
+ mkU64(extend_s_16to64((instr_index & 0xFFFF)))));
+ putIReg(31, mkU64(guest_PC_curr_instr + 4));
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_Add32, getIReg(rt),
+ mkU32(extend_s_16to32((instr_index & 0xFFFF)))));
+ putIReg(31, mkU32(guest_PC_curr_instr + 4));
+ }
+ putPC(mkexpr(t0));
+ dres.whatNext = Dis_StopHere;
+ dres.jk_StopHere = Ijk_Call;
+ } else { /* BNEZC */
+ DIP("bnezc r%u, %u", rs, imm);
+ dres.jk_StopHere = Ijk_Boring;
+ dres.whatNext = Dis_StopHere;
+ ULong branch_offset;
+ t0 = newTemp(Ity_I1);
+ if (mode64) {
+ branch_offset = extend_s_23to64((instr_index & 0x1fffff) << 2);
+ assign(t0, unop(Iop_Not1, binop(Iop_CmpEQ64, getIReg(rs), mkU64(0x0))));
+ stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
+ IRConst_U64(guest_PC_curr_instr + 4 + branch_offset),
+ OFFB_PC));
+ putPC(mkU64(guest_PC_curr_instr + 4));
+ } else {
+ branch_offset = extend_s_23to32((instr_index & 0x1fffff) << 2);
+ assign(t0, unop(Iop_Not1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0x0))));
+ stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
+ IRConst_U32(guest_PC_curr_instr + 4 +
+ (UInt) branch_offset), OFFB_PC));
+ putPC(mkU32(guest_PC_curr_instr + 4));
+ }
+ }
} else {
goto decode_failure;
}
+ break;
+
+ case 0x1D: /* DAUI */
+ if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo->hwcaps)) {
+ DIP("daui r%u, r%u, %x", rt, rs, imm);
+ putIReg(rt, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_32to64(imm << 16))));
+ } else {
+ ILLEGAL_INSTRUCTON;
+ }
+ break;
case 0x1E: /* MIPS MSA (SIMD) */
if (has_msa) {
vassert(guest_arch == VexArchMIPS32 || guest_arch == VexArchMIPS64);
mode64 = guest_arch != VexArchMIPS32;
- fp_mode64 = abiinfo->guest_mips_fp_mode64;
+ fp_mode64 = toBool(abiinfo->guest_mips_fp_mode & 1);
+ fp_mode64_fre = toBool(abiinfo->guest_mips_fp_mode & 2);
has_msa = VEX_MIPS_PROC_MSA(archinfo->hwcaps);
vassert(VEX_MIPS_HOST_FP_MODE(archinfo->hwcaps) >= fp_mode64);
case Mfp_CVTWS:
ret = "cvt.w.s";
break;
+ case Mfp_RINTS:
+ ret = "rint.s";
+ break;
+ case Mfp_RINTD:
+ ret = "rint.d";
+ break;
case Mfp_CVTWD:
ret = "cvt.w.d";
break;
case Mfp_CEILLD:
ret = "ceil.l.d";
break;
+#if (__mips_isa_rev >= 6)
+ case Mfp_CMP_UN:
+ ret = "cmp.un.d";
+ break;
+ case Mfp_CMP_EQ:
+ ret = "cmp.eq.d";
+ break;
+ case Mfp_CMP_LT:
+ ret = "cmp.lt.d";
+ break;
+ case Mfp_CMP_NGT:
+ ret = "cmp.ngt.d";
+ break;
+ case Mfp_CMP_UN_S:
+ ret = "cmp.un.s";
+ break;
+ case Mfp_CMP_EQ_S:
+ ret = "cmp.eq.s";
+ break;
+ case Mfp_CMP_LT_S:
+ ret = "cmp.lt.s";
+ break;
+ case Mfp_CMP_NGT_S:
+ ret = "cmp.ngt.s";
+ break;
+ case Mfp_MAXS:
+ ret = "max.s";
+ break;
+ case Mfp_MAXD:
+ ret = "max.d";
+ break;
+ case Mfp_MINS:
+ ret = "min.s";
+ break;
+ case Mfp_MIND:
+ ret = "min.d";
+ break;
+#else
case Mfp_CMP_UN:
ret = "c.un.d";
break;
case Mfp_CMP_NGT:
ret = "c.ngt.d";
break;
+#endif
default:
vex_printf("Unknown op: %d", (Int)op);
vpanic("showMIPSFpOp");
case MMoveCond_movn:
ret = "movn";
break;
+ case MSeleqz:
+ ret = "seleqz";
+ break;
+ case MSelnez:
+ ret = "selnez";
+ break;
+ case MFpSels:
+ ret = "sel.s";
+ break;
+ case MFpSeld:
+ ret = "sel.d";
+ break;
default:
vpanic("showMIPSFpMoveCondOp");
break;
ret = immR ? "xori" : "xor";
break;
case Malu_DADD:
- ret = immR ? "daddi" : "dadd";
+ ret = immR ? "daddiu" : "dadd";
break;
case Malu_DSUB:
ret = immR ? "dsubi" : "dsub";
return ret;
}
+const HChar *showRotxOp(MIPSRotxOp op) {
+ const HChar *ret;
+ switch (op) {
+ case Rotx32:
+ ret = "rotx32";
+ break;
+ case Rotx64:
+ ret = "rotx64";
+ break;
+ default:
+ vpanic("showRotxOp");
+ break;
+ }
+
+ return ret;
+}
+
const HChar *showMsa2RFOp(MSA2RFOp op) {
const HChar *ret;
return i;
}
+MIPSInstr *MIPSInstr_Mulr6(Bool syned, Bool sz32, Bool low, HReg dst,
+ HReg srcL, HReg srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+ i->tag = Min_Mulr6;
+ i->Min.Mulr6.syned = syned;
+ i->Min.Mulr6.sz32 = sz32; /* True = 32 bits */
+ i->Min.Mulr6.low = low;
+ i->Min.Mulr6.dst = dst;
+ i->Min.Mulr6.srcL = srcL;
+ i->Min.Mulr6.srcR = srcR;
+ return i;
+}
+
/* msub */
MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR)
{
return i;
}
+MIPSInstr *MIPSInstr_Divr6(Bool syned, Bool sz32, Bool mod, HReg dst,
+ HReg srcL, HReg srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+ i->tag = Min_Divr6;
+ i->Min.Divr6.syned = syned;
+ i->Min.Divr6.sz32 = sz32; /* True = 32 bits */
+ i->Min.Divr6.mod = mod;
+ i->Min.Divr6.dst = dst;
+ i->Min.Divr6.srcL = srcL;
+ i->Min.Divr6.srcR = srcR;
+ return i;
+}
+
MIPSInstr *MIPSInstr_Call ( MIPSCondCode cond, Addr64 target, UInt argiregs,
HReg src, RetLoc rloc )
{
return i;
}
+MIPSInstr *MIPSInstr_FpMinMax(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+ i->tag = Min_FpMinMax;
+ i->Min.FpMinMax.op = op;
+ i->Min.FpMinMax.dst = dst;
+ i->Min.FpMinMax.srcL = srcL;
+ i->Min.FpMinMax.srcR = srcR;
+ return i;
+}
+
+
MIPSInstr *MIPSInstr_MtFCSR(HReg src)
{
MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
return i;
}
+MIPSInstr *MIPSInstr_Bitswap(MIPSRotxOp op, HReg rd, HReg rt, HReg shift,
+ HReg shiftx, HReg stripe)
+{
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+ i->tag = Min_Rotx;
+ i->Min.Rotx.op = op;
+ i->Min.Rotx.rd = rd;
+ i->Min.Rotx.rt = rt;
+ i->Min.Rotx.shift = shift;
+ i->Min.Rotx.shiftx = shiftx;
+ i->Min.Rotx.stripe = stripe;
+ return i;
+}
+
/* -------- Pretty Print instructions ------------- */
static void ppLoadImm(HReg dst, ULong imm, Bool mode64)
{
ppMIPSRH(rh_srcR, mode64);
return;
}
+ case Min_Rotx: {
+ HReg r_src = i->Min.Rotx.rt;
+ vex_printf("rotx ");
+ ppHRegMIPS(i->Min.Rotx.rd, mode64);
+ vex_printf(",");
+ ppHRegMIPS(r_src, mode64);
+ return;
+ }
case Min_Unary: {
vex_printf("%s ", showMIPSUnaryOp(i->Min.Unary.op));
ppHRegMIPS(i->Min.Unary.dst, mode64);
vex_printf(", %u, %u", i->Min.Ext.pos, i->Min.Ext.size);
return;
}
+ case Min_Mulr6: {
+ if (i->Min.Mulr6.sz32) {
+ vex_printf("%s%s ", i->Min.Mulr6.low ? "mul" : "muh",
+ i->Min.Mulr6.syned ? "" : "u");
+ } else {
+ vex_printf("%s%s ", i->Min.Mulr6.low ? "dmul" : "dmuh",
+ i->Min.Mulr6.syned ? "" : "u");
+ }
+ ppHRegMIPS(i->Min.Mulr6.dst, mode64);
+ vex_printf(", ");
+ ppHRegMIPS(i->Min.Mulr6.srcL, mode64);
+ vex_printf(", ");
+ ppHRegMIPS(i->Min.Mulr6.srcR, mode64);
+ return;
+ }
case Min_Mthi: {
vex_printf("mthi ");
ppHRegMIPS(i->Min.MtHL.src, mode64);
ppHRegMIPS(i->Min.Div.srcR, mode64);
return;
}
+ case Min_Divr6: {
+ if (i->Min.Divr6.sz32) {
+ vex_printf("%s%s ", i->Min.Divr6.mod ? "mod" : "div",
+ i->Min.Divr6.syned ? "" : "u");
+ } else {
+ vex_printf("%s%s ", i->Min.Divr6.mod ? "dmod" : "ddiv",
+ i->Min.Divr6.syned ? "" : "u");
+ }
+ ppHRegMIPS(i->Min.Divr6.dst, mode64);
+ vex_printf(", ");
+ ppHRegMIPS(i->Min.Divr6.srcL, mode64);
+ vex_printf(", ");
+ ppHRegMIPS(i->Min.Divr6.srcR, mode64);
+ return;
+ }
case Min_Call: {
Int n;
vex_printf("call: ");
vex_printf(",");
ppHRegMIPS(i->Min.FpCompare.srcR, mode64);
return;
+ case Min_FpMinMax:
+ vex_printf("%s ", showMIPSFpOp(i->Min.FpMinMax.op));
+ ppHRegMIPS(i->Min.FpMinMax.dst, mode64);
+ vex_printf(",");
+ ppHRegMIPS(i->Min.FpMinMax.srcL, mode64);
+ vex_printf(",");
+ ppHRegMIPS(i->Min.FpMinMax.srcR, mode64);
+ return;
case Min_FpMulAcc:
vex_printf("%s ", showMIPSFpOp(i->Min.FpMulAcc.op));
ppHRegMIPS(i->Min.FpMulAcc.dst, mode64);
addRegUsage_MIPSRH(u, i->Min.Shft.srcR);
addHRegUse(u, HRmWrite, i->Min.Shft.dst);
return;
+ case Min_Rotx:
+ addHRegUse(u, HRmRead, i->Min.Rotx.rt);
+ addHRegUse(u, HRmWrite, i->Min.Rotx.rd);
+ return;
case Min_Cmp:
addHRegUse(u, HRmRead, i->Min.Cmp.srcL);
addHRegUse(u, HRmRead, i->Min.Cmp.srcR);
addHRegUse(u, HRmWrite, i->Min.Ext.dst);
addHRegUse(u, HRmRead, i->Min.Ext.src);
return;
+ case Min_Mulr6:
+ addHRegUse(u, HRmWrite, i->Min.Mulr6.dst);
+ addHRegUse(u, HRmRead, i->Min.Mulr6.srcL);
+ addHRegUse(u, HRmRead, i->Min.Mulr6.srcR);
+ return;
case Min_Mthi:
case Min_Mtlo:
addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
addHRegUse(u, HRmRead, i->Min.Div.srcL);
addHRegUse(u, HRmRead, i->Min.Div.srcR);
return;
+ case Min_Divr6:
+ addHRegUse(u, HRmWrite, i->Min.Divr6.dst);
+ addHRegUse(u, HRmRead, i->Min.Divr6.srcL);
+ addHRegUse(u, HRmRead, i->Min.Divr6.srcR);
+ return;
case Min_Call: {
/* Logic and comments copied/modified from x86, ppc and arm back end.
First off, claim it trashes all the caller-saved regs
addHRegUse(u, HRmRead, i->Min.FpCompare.srcL);
addHRegUse(u, HRmRead, i->Min.FpCompare.srcR);
return;
+ case Min_FpMinMax:
+ addHRegUse(u, HRmWrite, i->Min.FpMinMax.dst);
+ addHRegUse(u, HRmRead, i->Min.FpMinMax.srcL);
+ addHRegUse(u, HRmRead, i->Min.FpMinMax.srcR);
+ return;
case Min_FpGpMove:
addHRegUse(u, HRmWrite, i->Min.FpGpMove.dst);
addHRegUse(u, HRmRead, i->Min.FpGpMove.src);
return;
case Min_MoveCond:
- addHRegUse(u, HRmModify, i->Min.MoveCond.dst);
+ addHRegUse(u, HRmWrite, i->Min.MoveCond.dst);
addHRegUse(u, HRmRead, i->Min.MoveCond.src);
addHRegUse(u, HRmRead, i->Min.MoveCond.cond);
return;
mapRegs_MIPSRH(m, i->Min.Shft.srcR);
mapReg(m, &i->Min.Shft.dst);
return;
+ case Min_Rotx:
+ mapReg(m, &i->Min.Rotx.rt);
+ mapReg(m, &i->Min.Rotx.rd);
+ return;
case Min_Cmp:
mapReg(m, &i->Min.Cmp.srcL);
mapReg(m, &i->Min.Cmp.srcR);
mapReg(m, &i->Min.Ext.src);
mapReg(m, &i->Min.Ext.dst);
return;
+ case Min_Mulr6:
+ mapReg(m, &i->Min.Mulr6.dst);
+ mapReg(m, &i->Min.Mulr6.srcL);
+ mapReg(m, &i->Min.Mulr6.srcR);
+ return;
case Min_Mthi:
case Min_Mtlo:
mapReg(m, &i->Min.MtHL.src);
mapReg(m, &i->Min.Div.srcL);
mapReg(m, &i->Min.Div.srcR);
return;
+
+ case Min_Divr6:
+ mapReg(m, &i->Min.Divr6.dst);
+ mapReg(m, &i->Min.Divr6.srcL);
+ mapReg(m, &i->Min.Divr6.srcR);
+ return;
+
+ case Min_Call:
+ if (i->Min.Call.cond != MIPScc_AL)
+ mapReg(m, &i->Min.Call.src);
+ return;
+
case Msa_MI10:
mapReg(m, &i->Min.MsaMi10.rs);
mapReg(m, &i->Min.MsaMi10.wd);
mapReg(m, &i->Min.Msa2R.ws);
return;
- case Min_Call:
- {
- if (i->Min.Call.cond != MIPScc_AL)
- mapReg(m, &i->Min.Call.src);
- return;
- }
case Msa_3R:
mapReg(m, &i->Min.Msa3R.wt);
mapReg(m, &i->Min.Msa3R.ws);
mapReg(m, &i->Min.FpCompare.srcL);
mapReg(m, &i->Min.FpCompare.srcR);
return;
+ case Min_FpMinMax:
+ mapReg(m, &i->Min.FpMinMax.dst);
+ mapReg(m, &i->Min.FpMinMax.srcL);
+ mapReg(m, &i->Min.FpMinMax.srcR);
+ return;
case Min_MtFCSR:
mapReg(m, &i->Min.MtFCSR.src);
return;
goto done;
}
+ case Min_Rotx: {
+ UInt r_dst = iregNo(i->Min.Rotx.rd, mode64);
+ UInt r_src = iregNo(i->Min.Rotx.rt, mode64);
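+ /* Emitted as SPECIAL3 BITSWAP (function 0x20) / DBITSWAP (function 0x24);
+ only rt and rd are encoded. */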
+ switch (i->Min.Rotx.op) {
+ case Rotx32:
+ p = mkFormR(p, 31, 0, r_src, r_dst, 0, 32);
+ break;
+ case Rotx64:
+ p = mkFormR(p, 31, 0, r_src, r_dst, 0, 36);
+ break;
+ }
+ goto done;
+ }
case Min_Unary: {
UInt r_dst = iregNo(i->Min.Unary.dst, mode64);
UInt r_src = iregNo(i->Min.Unary.src, mode64);
switch (i->Min.Unary.op) {
/* Mun_CLO, Mun_CLZ, Mun_NOP, Mun_DCLO, Mun_DCLZ */
+#if (__mips_isa_rev >= 6)
+ case Mun_CLO: /* clo */
+ p = mkFormR(p, 0, r_src, 0, r_dst, 1, 17);
+ break;
+ case Mun_CLZ: /* clz */
+ p = mkFormR(p, 0, r_src, 0, r_dst, 1, 16);
+ break;
+ case Mun_DCLO: /* clo */
+ p = mkFormR(p, 0, r_src, 0, r_dst, 1, 19);
+ break;
+ case Mun_DCLZ: /* clz */
+ p = mkFormR(p, 0, r_src, 0, r_dst, 1, 18);
+ break;
+#else
case Mun_CLO: /* clo */
p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 33);
break;
case Mun_CLZ: /* clz */
p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 32);
break;
- case Mun_NOP: /* nop (sll r0,r0,0) */
- p = mkFormR(p, 0, 0, 0, 0, 0, 0);
- break;
case Mun_DCLO: /* clo */
p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 37);
break;
case Mun_DCLZ: /* clz */
p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 36);
break;
+#endif
+ case Mun_NOP: /* nop (sll r0,r0,0) */
+ p = mkFormR(p, 0, 0, 0, 0, 0, 0);
+ break;
}
goto done;
}
goto done;
}
+ case Min_Mulr6: {
+ Bool syned = i->Min.Mulr6.syned;
+ Bool sz32 = i->Min.Mulr6.sz32;
+ UInt r_srcL = iregNo(i->Min.Mulr6.srcL, mode64);
+ UInt r_srcR = iregNo(i->Min.Mulr6.srcR, mode64);
+ UInt r_dst = iregNo(i->Min.Mulr6.dst, mode64);
+ Int low = i->Min.Mulr6.low ? 2 : 3;
+ if (sz32) {
+ if (syned)
+ /* mul/muh */
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, low, 24);
+ else
+ /* mulu/muhu */
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, low, 25);
+ } else {
+ if (syned) /* DMUL/DMUH r_dst,r_srcL,r_srcR */
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, low, 28);
+ else /* DMULU/DMUHU r_dst,r_srcL,r_srcR */
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, low, 29);
+ }
+ goto done;
+ }
+
case Min_Macc: {
Bool syned = i->Min.Macc.syned;
UInt r_srcL = iregNo(i->Min.Macc.srcL, mode64);
goto done;
}
}
-
+ case Min_Divr6: {
+ Bool syned = i->Min.Divr6.syned;
+ Bool sz32 = i->Min.Divr6.sz32;
+ UInt r_srcL = iregNo(i->Min.Divr6.srcL, mode64);
+ UInt r_srcR = iregNo(i->Min.Divr6.srcR, mode64);
+ UInt r_dst = iregNo(i->Min.Divr6.dst, mode64);
+ Int mod = i->Min.Divr6.mod ? 3 : 2;
+ if (sz32) {
+ if (syned)
+ /* div/mod */
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, mod, 26);
+ else
+ /* divu/modu */
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, mod, 27);
+ } else {
+ if (syned) /* DDIV/DMOD r_dst,r_srcL,r_srcR */
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, mod, 30);
+ else /* DDIVU/DMODU r_dst,r_srcL,r_srcR */
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, mod, 31);
+ }
+ }
+ goto done;
+ }
case Min_Mthi: {
UInt r_src = iregNo(i->Min.MtHL.src, mode64);
p = mkFormR(p, 0, r_src, 0, 0, 0, 17);
UInt r_src = iregNo(am_addr->Mam.IR.base, mode64);
UInt idx = am_addr->Mam.IR.index;
UInt r_dst = iregNo(i->Min.LoadL.dst, mode64);
-
+#if (__mips_isa_rev >= 6)
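+ /* R6 moves LL/LLD under SPECIAL3 (0x1F): 9-bit offset in bits 15..7,
+ function 0x36 (LL) / 0x37 (LLD). */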
+ if (i->Min.LoadL.sz == 4)
+ p = mkFormI(p, 0x1F, r_src, r_dst, ((idx << 7) & 0xff80) | 0x36);
+ else
+ p = mkFormI(p, 0x1F, r_src, r_dst, ((idx << 7) & 0xff80) | 0x37);
+#else
if (i->Min.LoadL.sz == 4)
p = mkFormI(p, 0x30, r_src, r_dst, idx);
else
p = mkFormI(p, 0x34, r_src, r_dst, idx);
+#endif
goto done;
}
case Min_StoreC: {
UInt r_src = iregNo(i->Min.StoreC.src, mode64);
UInt idx = am_addr->Mam.IR.index;
UInt r_dst = iregNo(am_addr->Mam.IR.base, mode64);
-
+#if (__mips_isa_rev >= 6)
+ /* R6 SC/SCD under SPECIAL3: base in rs, data in rt, 9-bit offset,
+ function 0x26 (SC) / 0x27 (SCD). */
+ if (i->Min.StoreC.sz == 4)
+ p = mkFormI(p, 0x1F, r_dst, r_src, ((idx << 7) & 0xff80) | 0x26);
+ else
+ p = mkFormI(p, 0x1F, r_dst, r_src, ((idx << 7) & 0xff80) | 0x27);
+#else
if (i->Min.StoreC.sz == 4)
p = mkFormI(p, 0x38, r_dst, r_src, idx);
else
p = mkFormI(p, 0x3C, r_dst, r_src, idx);
+#endif
goto done;
}
case Min_Cas: {
* movn old, expd, data
* end:
*/
+#if (__mips_isa_rev >= 6)
+ // ll(d) old, 0(addr)
+ p = mkFormI(p, 0x1F, addr, old, sz8? 0x37: 0x36);
+ // bne old, expd, end
+ p = mkFormI(p, 5, old, expd, 5);
+#else
// ll(d) old, 0(addr)
p = mkFormI(p, sz8 ? 0x34 : 0x30, addr, old, 0);
// bne old, expd, end
p = mkFormI(p, 5, old, expd, 4);
+#endif
// nop
p = mkFormR(p, 0, 0, 0, 0, 0, 0);
- // (d)addiu old, old, 1
- p = mkFormI(p, sz8 ? 25 : 9, old, old, 1);
+ // (d)addiu old, old, 4
+ p = mkFormI(p, sz8 ? 25 : 9, old, old, 4);
+
+#if (__mips_isa_rev >= 6)
+ // sc(d) data, 0(addr)
+ p = mkFormI(p, 0x1F, addr, data, sz8? 0x27: 0x26);
+ //beqzc
+ p = mkFormI(p, 0x36, data, 0, 1);
+ //or
+ p = mkFormR(p, 0, 0, expd, old, 0, 0x25 );
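+ // If the SC failed (data == 0), the beqzc skips the OR, leaving old != expd.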
+#else
// sc(d) data, 0(addr)
p = mkFormI(p, sz8 ? 0x3C : 0x38, addr, data, 0);
// movn old, expd, data
p = mkFormR(p, 0, expd, data, old, 0, 0xb);
+#endif
goto done;
}
UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
+#if (__mips_isa_rev >= 6)
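+ /* R6 drops MADD.S; emulate it with MOV.S dst,src1 followed by
+ MADDF.S dst,src2,src3 (MADDF is fused, unlike the pre-R6 MADD.S). */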
+ p = mkFormR(p, 0x11, 0x10, 0x0, fr_src1, fr_dst, 0x6);
+ p = mkFormR(p, 0x11, 0x10, fr_src3, fr_src2, fr_dst, 0x18);
+#else
p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x20);
+#endif
break;
}
case Mfp_MADDD: {
UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
+#if (__mips_isa_rev >= 6)
+ p = mkFormR(p, 0x11, 0x11, 0x0, fr_src1, fr_dst, 0x6);
+ p = mkFormR(p, 0x11, 0x11, fr_src3, fr_src2, fr_dst, 0x18);
+#else
p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x21);
+#endif
break;
}
case Mfp_MSUBS: {
UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
+#if (__mips_isa_rev >= 6)
+ p = mkFormR(p, 0x11, 0x10, 0x0, fr_src1, fr_dst, 0x6);
+ p = mkFormR(p, 0x11, 0x10, fr_src3, fr_src2, fr_dst, 0x19);
+#else
p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x28);
+#endif
break;
}
case Mfp_MSUBD: {
UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
+#if (__mips_isa_rev >= 6)
+ p = mkFormR(p, 0x11, 0x11, 0x0, fr_src1, fr_dst, 0x6);
+ p = mkFormR(p, 0x11, 0x11, fr_src3, fr_src2, fr_dst, 0x19);
+#else
p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x29);
+#endif
break;
}
default:
fr_src = dregNo(i->Min.FpConvert.src);
p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0B);
break;
+ case Mfp_RINTS:
+ fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+ fr_src = fregNo(i->Min.FpConvert.src, mode64);
+ p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x1A);
+ break;
+ case Mfp_RINTD:
+ fr_dst = dregNo(i->Min.FpConvert.dst);
+ fr_src = dregNo(i->Min.FpConvert.src);
+ p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x1A);
+ break;
default:
goto bad;
}
case Min_FpCompare: {
+#if (__mips_isa_rev >= 6)
+ UInt fr_dst;
+ UInt fr_srcL;
+ UInt fr_srcR;
+
+ UInt op;
+ UInt format;
+ switch (i->Min.FpCompare.op) {
+ case Mfp_CMP_UN:
+ fr_dst = dregNo(i->Min.FpCompare.dst);
+ fr_srcL = dregNo(i->Min.FpCompare.srcL);
+ fr_srcR = dregNo(i->Min.FpCompare.srcR);
+ format = 0x15;
+ op = 1;
+ break;
+ case Mfp_CMP_EQ:
+ fr_dst = dregNo(i->Min.FpCompare.dst);
+ fr_srcL = dregNo(i->Min.FpCompare.srcL);
+ fr_srcR = dregNo(i->Min.FpCompare.srcR);
+ format = 0x15;
+ op = 2;
+ break;
+ case Mfp_CMP_LT:
+ fr_dst = dregNo(i->Min.FpCompare.dst);
+ fr_srcL = dregNo(i->Min.FpCompare.srcL);
+ fr_srcR = dregNo(i->Min.FpCompare.srcR);
+ format = 0x15;
+ op = 4;
+ break;
+ case Mfp_CMP_NGT:
+ fr_dst = dregNo(i->Min.FpCompare.dst);
+ fr_srcL = dregNo(i->Min.FpCompare.srcL);
+ fr_srcR = dregNo(i->Min.FpCompare.srcR);
+ format = 0x15;
+ op = 5;
+ break;
+ case Mfp_CMP_UN_S:
+ fr_dst = fregNo(i->Min.FpCompare.dst, mode64);
+ fr_srcL = fregNo(i->Min.FpCompare.srcL, mode64);
+ fr_srcR = fregNo(i->Min.FpCompare.srcR, mode64);
+ format = 0x14;
+ op = 1;
+ break;
+ case Mfp_CMP_EQ_S:
+ fr_dst = fregNo(i->Min.FpCompare.dst, mode64);
+ fr_srcL = fregNo(i->Min.FpCompare.srcL, mode64);
+ fr_srcR = fregNo(i->Min.FpCompare.srcR, mode64);
+ format = 0x14;
+ op = 2;
+ break;
+ case Mfp_CMP_LT_S:
+ fr_dst = fregNo(i->Min.FpCompare.dst, mode64);
+ fr_srcL = fregNo(i->Min.FpCompare.srcL, mode64);
+ fr_srcR = fregNo(i->Min.FpCompare.srcR, mode64);
+ format = 0x14;
+ op = 4;
+ break;
+ case Mfp_CMP_NGT_S:
+ fr_dst = fregNo(i->Min.FpCompare.dst, mode64);
+ fr_srcL = fregNo(i->Min.FpCompare.srcL, mode64);
+ fr_srcR = fregNo(i->Min.FpCompare.srcR, mode64);
+ format = 0x14;
+ op = 5;
+ break;
+ default:
+ goto bad;
+ }
+ /* cmp.cond.d fr_srcL, fr_srcR */
+ p = mkFormR(p, 0x11, format, fr_srcR, fr_srcL, fr_dst, op);
+#else
UInt r_dst = iregNo(i->Min.FpCompare.dst, mode64);
UInt fr_srcL = dregNo(i->Min.FpCompare.srcL);
UInt fr_srcR = dregNo(i->Min.FpCompare.srcR);
p = mkFormR(p, 0x11, 0x2, r_dst, 31, 0, 0);
p = mkFormS(p, 0, r_dst, 0, r_dst, 23, 2);
p = mkFormI(p, 12, r_dst, r_dst, 1);
+#endif
+ goto done;
+ }
+
+#if (__mips_isa_rev >= 6)
+ case Min_FpMinMax: {
+ UInt r_dst = dregNo(i->Min.FpMinMax.dst);
+ UInt fr_srcL = dregNo(i->Min.FpMinMax.srcL);
+ UInt fr_srcR = dregNo(i->Min.FpMinMax.srcR);
+ UInt format;
+ UInt instr;
+ switch (i->Min.FpMinMax.op) {
+ case Mfp_MAXS:
+ format = 0x10;
+ instr = 0x1E;
+ break;
+ case Mfp_MAXD:
+ format = 0x11;
+ instr = 0x1E;
+ break;
+ case Mfp_MINS:
+ format = 0x10;
+ instr = 0x1C;
+ break;
+ case Mfp_MIND:
+ format = 0x11;
+ instr = 0x1C;
+ break;
+ default:
+ goto bad;
+ }
+ p = mkFormR(p, 0x11, format, fr_srcR, fr_srcL, r_dst, instr);
goto done;
}
+#endif
+
case Min_FpGpMove: {
switch (i->Min.FpGpMove.op) {
s = iregNo(i->Min.MoveCond.src, mode64);
t = iregNo(i->Min.MoveCond.cond, mode64);
p = mkFormR(p, 0, s, t, d, 0, 0xb);
+
+ break;
+ }
+ case MSeleqz: {
+ d = iregNo(i->Min.MoveCond.dst, mode64);
+ s = iregNo(i->Min.MoveCond.src, mode64);
+ t = iregNo(i->Min.MoveCond.cond, mode64);
+ p = mkFormR(p, 0, s, t, d, 0, 0x35);
+ break;
+ }
+ case MSelnez: {
+ d = iregNo(i->Min.MoveCond.dst, mode64);
+ s = iregNo(i->Min.MoveCond.src, mode64);
+ t = iregNo(i->Min.MoveCond.cond, mode64);
+ p = mkFormR(p, 0, s, t, d, 0, 0x37);
+ break;
+ }
+ case MFpSels: {
+ d = fregNo(i->Min.MoveCond.dst, mode64);
+ s = fregNo(i->Min.MoveCond.src, mode64);
+ t = fregNo(i->Min.MoveCond.cond, mode64);
+ p = mkFormR(p, 0x11, 0x10, t, s, d, 0x10);
+ break;
+ }
+ case MFpSeld: {
+ d = fregNo(i->Min.MoveCond.dst, mode64);
+ s = fregNo(i->Min.MoveCond.src, mode64);
+ t = fregNo(i->Min.MoveCond.cond, mode64);
+ p = mkFormR(p, 0x11, 0x11, t, s, d, 0x10);
break;
}
default:
to +infinity | 10 | 10
to -infinity | 11 | 01
*/
- /* rm_MIPS32 = XOR(rm_IR , (rm_IR << 1)) & 2 */
+ /* rm_MIPS32 = XOR(rm_IR , (rm_IR << 1)) & 3 */
HReg irrm = iselWordExpr_R(env, mode);
HReg tmp = newVRegI(env);
HReg fcsr_old = newVRegI(env);
HReg rHi, rLo;
iselInt64Expr(&rHi, &rLo, env, arg);
argiregs |= (1 << (argreg + 4));
- addInstr(env, mk_iMOVds_RR( argregs[argreg++], rHi ));
+ addInstr(env, mk_iMOVds_RR( argregs[argreg++], rLo ));
argiregs |= (1 << (argreg + 4));
- addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
+ addInstr(env, mk_iMOVds_RR( argregs[argreg], rHi));
argreg++;
} else if (arg->tag == Iex_GSPTR) {
vassert(0); // ATC
movn v0, s0, v1 */
addInstr(env, MIPSInstr_Alu(Malu_SLT, tmp, argL, argRH));
+#if (__mips_isa_rev >= 6)
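+ /* R6 removes MOVN; synthesize the conditional move with
+ SELEQZ/SELNEZ and an OR. */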
+ {
+ HReg r_temp = newVRegI(env);
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, r_dst, argL, tmp));
+ addInstr(env, MIPSInstr_MoveCond(MSelnez, r_temp, argR, tmp));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dst, r_dst,
+ MIPSRH_Reg(r_temp)));
+ }
+
+#else
addInstr(env, mk_iMOVds_RR(r_dst, argL));
addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dst, argR, tmp));
+#endif
return r_dst;
}
HReg r_dst = newVRegI(env);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Mulr6(False, True, True,
+ r_dst, r_srcL, r_srcR));
+#else
addInstr(env, MIPSInstr_Mul(r_dst, r_srcL, r_srcR));
+#endif
return r_dst;
}
HReg r_dst = newVRegI(env);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Mulr6(False, False, True,
+ r_dst, r_srcL, r_srcR));
+#else
addInstr(env, MIPSInstr_Mult(True, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mflo(r_dst));
+#endif
return r_dst;
}
HReg r_tmpR = newVRegI(env);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Ext(r_tmpL, r_srcL, 0, 32));
+ addInstr(env, MIPSInstr_Ext(r_tmpR, r_srcR, 0, 32));
+ addInstr(env, MIPSInstr_Mulr6(True, False, True,
+ r_tmpR, r_tmpL, r_tmpR));
+#else
if (VEX_MIPS_CPU_HAS_MIPS64R2(hwcaps_host)) {
addInstr(env, MIPSInstr_Ext(r_tmpL, r_srcL, 0, 32));
addInstr(env, MIPSInstr_Ext(r_tmpR, r_srcR, 0, 32));
}
addInstr(env, MIPSInstr_Mult(False, r_tmpL, r_tmpR));
addInstr(env, MIPSInstr_Mflo(r_tmpR));
+#endif
return r_tmpR;
}
HReg r_dst = newVRegI(env);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+#if (__mips_isa_rev >= 6)
+ if (syned) {
+ Int no_bits = (e->Iex.Binop.op == Iop_MullS16) ? 16 : 24;
+ addInstr(env, MIPSInstr_Shft(Mshft_SLL, True,
+ r_srcL, r_srcL,
+ MIPSRH_Imm(False, no_bits)));
+ addInstr(env, MIPSInstr_Shft(Mshft_SRA, True,
+ r_srcL, r_srcL,
+ MIPSRH_Imm(False, no_bits)));
+ addInstr(env, MIPSInstr_Shft(Mshft_SLL, True,
+ r_srcR, r_srcR,
+ MIPSRH_Imm(False, no_bits)));
+ addInstr(env, MIPSInstr_Shft(Mshft_SRA, True,
+ r_srcR, r_srcR,
+ MIPSRH_Imm(False, no_bits)));
+ }
+ addInstr(env, MIPSInstr_Mulr6(syned, True, True,
+ r_dst, r_srcL, r_srcR));
+#else
if (syned) {
Int no_bits = (e->Iex.Binop.op == Iop_MullS16) ? 16 : 24;
addInstr(env, MIPSInstr_Shft(Mshft_SLL, True,
addInstr(env, MIPSInstr_Mult(syned, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mflo(r_dst));
}
+#endif
return r_dst;
}
r_srcL = iselDblExpr(env, e->Iex.Binop.arg1);
r_srcR = iselDblExpr(env, e->Iex.Binop.arg2);
}
+#if (__mips_isa_rev >= 6)
+ HReg tmp = newVRegI(env);
+ HReg tmpf;
+ HReg result = newVRegI(env);
+ if (mode64) tmpf = newVRegF(env);
+ else tmpf = newVRegD(env);
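+ /* cmp.cond.d writes an all-ones mask to the FP dest on true; AND it with
+ the IRCmpF64Result codes (0x45 UN, 0x01 LT, 0x40 EQ) and OR the pieces. */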
+ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_UN, tmpf, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, tmp, tmpf));
+ addInstr(env, MIPSInstr_Alu(Malu_AND, tmp, tmp,
+ MIPSRH_Imm(False, 0x45)));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, result,
+ hregMIPS_GPR0(env->mode64),
+ MIPSRH_Reg(tmp)));
+ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_LT, tmpf, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, tmp, tmpf));
+ addInstr(env, MIPSInstr_Alu(Malu_AND, tmp, tmp,
+ MIPSRH_Imm(False, 0x1)));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, result, result,
+ MIPSRH_Reg(tmp)));
+ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_EQ, tmpf, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, tmp, tmpf));
+ addInstr(env, MIPSInstr_Alu(Malu_AND, tmp, tmp,
+ MIPSRH_Imm(False, 0x40)));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, result, result,
+ MIPSRH_Reg(tmp)));
+ return result;
+#else
HReg tmp = newVRegI(env);
HReg r_ccMIPS = newVRegI(env);
HReg r_ccIR = newVRegI(env);
addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR, r_ccIR,
MIPSRH_Reg(r_ccIR_b6)));
return r_ccIR;
+#endif
+ }
+
+ if (e->Iex.Binop.op == Iop_CmpF32) {
+#if (__mips_isa_rev >= 6)
+ HReg r_srcL = iselFltExpr(env, e->Iex.Binop.arg1);
+ HReg r_srcR = iselFltExpr(env, e->Iex.Binop.arg2);
+ HReg tmp = newVRegI(env);
+ HReg tmpf;
+ HReg result = newVRegI(env);
+ if (mode64) tmpf = newVRegF(env);
+ else tmpf = newVRegD(env);
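+ /* Same mask-and-OR scheme as the CmpF64 case above, using the
+ single-precision compares. */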
+ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_UN_S, tmpf, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, tmp, tmpf));
+ addInstr(env, MIPSInstr_Alu(Malu_AND, tmp, tmp,
+ MIPSRH_Imm(False, 0x45)));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, result,
+ hregMIPS_GPR0(env->mode64),
+ MIPSRH_Reg(tmp)));
+ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_LT_S, tmpf, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, tmp, tmpf));
+ addInstr(env, MIPSInstr_Alu(Malu_AND, tmp, tmp,
+ MIPSRH_Imm(False, 0x1)));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, result, result,
+ MIPSRH_Reg(tmp)));
+ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_EQ_S, tmpf, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, tmp, tmpf));
+ addInstr(env, MIPSInstr_Alu(Malu_AND, tmp, tmp,
+ MIPSRH_Imm(False, 0x40)));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, result, result,
+ MIPSRH_Reg(tmp)));
+ return result;
+#endif
}
if (e->Iex.Binop.op == Iop_DivModU32to32 ||
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
-
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Divr6(syned /* Unsigned or Signed */ ,
+ True /* 32bit or 64bit div */ ,
+ False /* mod */,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_Divr6(syned /* Unsigned or Signed */ ,
+ True /* 32bit or 64bit div */ ,
+ True /* mod */,
+ tHi, r_srcL, r_srcR));
+#else
addInstr(env, MIPSInstr_Div(syned, True, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mfhi(tHi));
addInstr(env, MIPSInstr_Mflo(tLo));
-
+#endif
addInstr(env, MIPSInstr_Shft(Mshft_SLL, False, tHi_1, tHi,
MIPSRH_Imm(False, 32)));
e->Iex.Binop.op == Iop_DivU32);
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Divr6(syned, div32, False,
+ r_dst, r_srcL, r_srcR));
+#else
addInstr(env, MIPSInstr_Div(syned, div32, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mflo(r_dst));
+#endif
return r_dst;
}
return valS;
}
+ if (e->Iex.Binop.op == Iop_F64toI64S) {
+ vassert(mode64);
+ HReg valS = newVRegI(env);
+ HReg tmpF = newVRegF(env);
+ HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
+
+ /* CVTLD tmpF, valF */
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLD, tmpF, valF));
+ set_MIPS_rounding_default(env);
+
+ /* Doubleword Move from Floating Point
+ dmfc1 valS, tmpF */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmfc1, valS, tmpF));
+
+ return valS;
+ }
+
if (e->Iex.Binop.op == Iop_F64toI32S) {
HReg valD;
if (mode64)
return r_dst;
}
+ if (e->Iex.Binop.op == Iop_F32toI32S) {
+ HReg valS = newVRegF(env);
+ HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
+ HReg r_dst = newVRegI(env);
+
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWS, valS, valF));
+ set_MIPS_rounding_default(env);
+
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, valS));
+
+ return r_dst;
+ }
+
/* -------- DSP ASE -------- */
/* All used cases involving host-side helper calls. */
void* fn = NULL;
* r_dst = r0
* movn r_dst, r1, r_cond
*/
+#if (__mips_isa_rev >= 6)
+ HReg r_temp = newVRegI(env);
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, r_dst, r0, r_cond));
+ addInstr(env, MIPSInstr_MoveCond(MSelnez, r_temp, r1, r_cond));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dst, r_dst,
+ MIPSRH_Reg(r_temp)));
+
+#else
addInstr(env, mk_iMOVds_RR(r_dst, r0));
addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dst, r1, r_cond));
+#endif
return r_dst;
}
break;
return r_dst;
}
+ case Iex_Qop: {
+ HReg dst = newVRegI(env);
+ HReg src1 = iselWordExpr_R(env, e->Iex.Qop.details->arg1);
+ HReg src2 = iselWordExpr_R(env, e->Iex.Qop.details->arg2);
+ HReg src3 = iselWordExpr_R(env, e->Iex.Qop.details->arg3);
+ HReg src4 = iselWordExpr_R(env, e->Iex.Qop.details->arg4);
+ switch (e->Iex.Qop.details->op) {
+#if (__mips_isa_rev >= 6)
+ case Iop_Rotx32:
+ addInstr(env, MIPSInstr_Bitswap(Rotx32, dst, src1, src2, src3, src4));
+ break;
+ case Iop_Rotx64:
+ addInstr(env, MIPSInstr_Bitswap(Rotx64, dst, src1, src2, src3, src4));
+ break;
+#endif
+ default:
+ break;
+ }
+ return dst;
+ }
+
default:
break;
} /* end switch(e->tag) */
Bool syned = toBool(e->Iex.Binop.op == Iop_MullS64);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+#if (__mips_isa_rev >= 6)
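+ /* On R6 the low and high halves of the full product come from separate
+    (D)MUL/(D)MUH instructions instead of HI/LO. */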
+ addInstr(env, MIPSInstr_Mulr6(syned, False, True,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_Mulr6(syned, False, False,
+ tHi, r_srcL, r_srcR));
+#else
addInstr(env, MIPSInstr_Mult(syned, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mfhi(tHi));
addInstr(env, MIPSInstr_Mflo(tLo));
+#endif
*rHi = tHi;
*rLo = tLo;
return;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
Bool syned = toBool(e->Iex.Binop.op == Iop_DivModS64to64);
-
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Divr6(syned/*Unsigned or Signed */ ,
+ False /*32bit or 64bit div */ ,
+ False /*mod*/,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_Divr6(syned/*Unsigned or Signed */ ,
+ False /*32bit or 64bit div */ ,
+ True /*mod*/,
+ tHi, r_srcL, r_srcR));
+#else
addInstr(env, MIPSInstr_Div(syned, False, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mfhi(tHi));
addInstr(env, MIPSInstr_Mflo(tLo));
+#endif
*rHi = tHi;
*rLo = tLo;
return;
* move desHi, expr0Hi
* movn desLo, expr1Lo, cond
* movn desHi, expr1Hi, cond */
+#if (__mips_isa_rev >= 6)
+ {
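+ /* Same seleqz/selnez/or idiom as above, applied to each 32-bit half. */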
+ HReg r_temp = newVRegI(env);
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, desLo, expr0Lo, cond));
+ addInstr(env, MIPSInstr_MoveCond(MSelnez, r_temp, expr1Lo, cond));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, desLo, desLo, MIPSRH_Reg(r_temp)));
+
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, desHi, expr0Hi, cond));
+ addInstr(env, MIPSInstr_MoveCond(MSelnez, r_temp, expr1Hi, cond));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, desHi, desHi, MIPSRH_Reg(r_temp)));
+ }
+#else
addInstr(env, mk_iMOVds_RR(desLo, expr0Lo));
addInstr(env, mk_iMOVds_RR(desHi, expr0Hi));
addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, desLo, expr1Lo, cond));
addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, desHi, expr1Hi, cond));
+#endif
*rHi = desHi;
*rLo = desLo;
Bool syned = toBool(op_binop == Iop_MullS32);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Mulr6(syned, True, True,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_Mulr6(syned, True, False,
+ tHi, r_srcL, r_srcR));
+#else
addInstr(env, MIPSInstr_Mult(syned, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mfhi(tHi));
addInstr(env, MIPSInstr_Mflo(tLo));
+#endif
*rHi = tHi;
*rLo = tLo;
HReg tHi = newVRegI(env);
Bool syned = toBool(e->Iex.Binop.op == Iop_DivModS32to32);
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Divr6(syned /*Unsigned or Signed */ ,
+ True /*32bit or 64bit div */ ,
+ False /*mod*/,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, MIPSInstr_Divr6(syned /*Unsigned or Signed */ ,
+ True /*32bit or 64bit div */ ,
+ True /*mod*/,
+ tHi, r_srcL, r_srcR));
+#else
addInstr(env, MIPSInstr_Div(syned, True, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mfhi(tHi));
addInstr(env, MIPSInstr_Mflo(tLo));
+#endif
*rHi = tHi;
*rLo = tLo;
return;
}
case Iop_Shr64: {
-#if defined (_MIPSEL)
/* 64-bit logical shift right based on what gcc generates:
<shift>:
nor v0, zero, a2
jr ra
movn v1, zero, a0
*/
- HReg a0, a1;
- HReg a0tmp = newVRegI(env);
- HReg a2 = newVRegI(env);
+ HReg r_srcLo, r_srcHi;
+ HReg r_srcLotmp = newVRegI(env);
+ HReg shift = newVRegI(env);
HReg a3 = newVRegI(env);
- HReg v0 = newVRegI(env);
- HReg v1 = newVRegI(env);
- HReg zero = newVRegI(env);
+ HReg r_dstLo = newVRegI(env);
+ HReg r_dstHi = newVRegI(env);
+ HReg zero = hregMIPS_GPR0(env->mode64);
MIPSRH *sa = NULL;
- iselInt64Expr(&a1, &a0, env, e->Iex.Binop.arg1);
+ iselInt64Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg1);
sa = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
if (sa->tag == Mrh_Imm) {
- addInstr(env, MIPSInstr_LI(a2, sa->Mrh.Imm.imm16));
+ addInstr(env, MIPSInstr_LI(shift, sa->Mrh.Imm.imm16));
}
else {
- addInstr(env, MIPSInstr_Alu(Malu_AND, a2, sa->Mrh.Reg.reg,
+ addInstr(env, MIPSInstr_Alu(Malu_AND, shift, sa->Mrh.Reg.reg,
MIPSRH_Imm(False, 0x3f)));
}
-
- addInstr(env, MIPSInstr_LI(zero, 0x00000000));
- /* nor v0, zero, a2 */
- addInstr(env, MIPSInstr_Alu(Malu_NOR, v0, zero, MIPSRH_Reg(a2)));
- /* sll a3, a1, 0x1 */
+ /* nor r_dstLo, zero, shift */
+ addInstr(env, MIPSInstr_Alu(Malu_NOR, r_dstLo, zero, MIPSRH_Reg(shift)));
+ /* sll a3, r_srcHi, 0x1 */
addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
- a3, a1, MIPSRH_Imm(False, 0x1)));
- /* sllv a3, a3, v0 */
+ a3, r_srcHi, MIPSRH_Imm(False, 0x1)));
+ /* sllv a3, a3, r_dstLo */
addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
- a3, a3, MIPSRH_Reg(v0)));
- /* srlv v0, a0, a2 */
- addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
- v0, a0, MIPSRH_Reg(a2)));
- /* srlv v1, a1, a2 */
+ a3, a3, MIPSRH_Reg(r_dstLo)));
+ /* srlv r_dstLo, r_srcLo, shift */
addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
- v1, a1, MIPSRH_Reg(a2)));
- /* andi a0, a2, 0x20 */
- addInstr(env, MIPSInstr_Alu(Malu_AND, a0tmp, a2,
- MIPSRH_Imm(False, 0x20)));
- /* or v0, a3, v0 */
- addInstr(env, MIPSInstr_Alu(Malu_OR, v0, a3, MIPSRH_Reg(v0)));
-
- /* movn v0, v1, a0 */
- addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v0, v1, a0tmp));
- /* movn v1, zero, a0 */
- addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v1, zero, a0tmp));
-
- *rHi = v1;
- *rLo = v0;
- return;
-#elif defined (_MIPSEB)
- /* 64-bit logical shift right based on what gcc generates:
- <shift>:
- nor v0, zero, a2
- sll a3, a0, 0x1
- sllv a3, a3, v0
- srlv v1, a1, a2
- andi v0, a2, 0x20
- or v1, a3, v1
- srlv a2, a0, a2
- movn v1, a2, v0
- movn a2, zero, v0
- jr ra
- move v0, a2
- */
- HReg a0, a1;
- HReg a2 = newVRegI(env);
- HReg a2tmp = newVRegI(env);
- HReg a3 = newVRegI(env);
- HReg v0 = newVRegI(env);
- HReg v1 = newVRegI(env);
- HReg zero = newVRegI(env);
- MIPSRH *sa = NULL;
-
- iselInt64Expr(&a0, &a1, env, e->Iex.Binop.arg1);
- sa = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
-
- if (sa->tag == Mrh_Imm) {
- addInstr(env, MIPSInstr_LI(a2, sa->Mrh.Imm.imm16));
- }
- else {
- addInstr(env, MIPSInstr_Alu(Malu_AND, a2, sa->Mrh.Reg.reg,
- MIPSRH_Imm(False, 0x3f)));
- }
-
- addInstr(env, MIPSInstr_LI(zero, 0x00000000));
- /* nor v0, zero, a2 */
- addInstr(env, MIPSInstr_Alu(Malu_NOR, v0, zero, MIPSRH_Reg(a2)));
- /* sll a3, a0, 0x1 */
- addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
- a3, a0, MIPSRH_Imm(False, 0x1)));
- /* sllv a3, a3, v0 */
- addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
- a3, a3, MIPSRH_Reg(v0)));
- /* srlv v1, a1, a2 */
+ r_dstLo, r_srcLo, MIPSRH_Reg(shift)));
+ /* srlv r_dstHi, r_srcHi, shift */
addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
- v1, a1, MIPSRH_Reg(a2)));
- /* andi v0, a2, 0x20 */
- addInstr(env, MIPSInstr_Alu(Malu_AND, v0, a2,
+ r_dstHi, r_srcHi, MIPSRH_Reg(shift)));
+ /* andi r_srcLotmp, shift, 0x20 */
+ addInstr(env, MIPSInstr_Alu(Malu_AND, r_srcLotmp, shift,
MIPSRH_Imm(False, 0x20)));
- /* or v1, a3, v1 */
- addInstr(env, MIPSInstr_Alu(Malu_OR, v1, a3, MIPSRH_Reg(v1)));
- /* srlv a2, a0, a2 */
- addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
- a2tmp, a0, MIPSRH_Reg(a2)));
-
- /* movn v1, a2, v0 */
- addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v1, a2tmp, v0));
- /* movn a2, zero, v0 */
- addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, a2tmp, zero, v0));
- /* move v0, a2 */
- addInstr(env, mk_iMOVds_RR(v0, a2tmp));
-
- *rHi = v0;
- *rLo = v1;
- return;
+ /* or r_dstLo, a3, r_dstLo */
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dstLo, a3, MIPSRH_Reg(r_dstLo)));
+#if (__mips_isa_rev >= 6)
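+ /* R6 variant of the final movn pair: merge the two candidate results
+    with seleqz/selnez/or. The Shl64 and Sar64 cases below use the same
+    idiom. */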
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, r_dstLo, r_dstLo, r_srcLotmp));
+ addInstr(env, MIPSInstr_MoveCond(MSelnez, a3, r_dstHi, r_srcLotmp));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dstLo, r_dstLo, MIPSRH_Reg(a3)));
+
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, r_dstHi, r_dstHi, r_srcLotmp));
+#else
+ /* movn r_dstLo, r_dstHi, r_srcLotmp */
+ addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dstLo, r_dstHi, r_srcLotmp));
+ /* movn r_dstHi, zero, r_srcLotmp */
+ addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dstHi, zero, r_srcLotmp));
#endif
+ *rHi = r_dstHi;
+ *rLo = r_dstLo;
+ return;
}
case Iop_Shl64: {
jr ra
move v0,a2
*/
- HReg a0, a1;
- HReg a2 = newVRegI(env);
+ HReg r_srcLo, r_srcHi;
+ HReg r_shift = newVRegI(env);
HReg a3 = newVRegI(env);
- HReg v0 = newVRegI(env);
- HReg v1 = newVRegI(env);
- HReg zero = newVRegI(env);
+ HReg r_dstLo = newVRegI(env);
+ HReg r_dstHi = newVRegI(env);
+ HReg zero = hregMIPS_GPR0(env->mode64);
MIPSRH *sa = NULL;
- iselInt64Expr(&a1, &a0, env, e->Iex.Binop.arg1);
+ iselInt64Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg1);
sa = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
if (sa->tag == Mrh_Imm) {
- addInstr(env, MIPSInstr_LI(a2, sa->Mrh.Imm.imm16));
+ addInstr(env, MIPSInstr_LI(r_shift, sa->Mrh.Imm.imm16));
}
else {
- addInstr(env, MIPSInstr_Alu(Malu_AND, a2, sa->Mrh.Reg.reg,
+ addInstr(env, MIPSInstr_Alu(Malu_AND, r_shift, sa->Mrh.Reg.reg,
MIPSRH_Imm(False, 0x3f)));
}
-
- addInstr(env, MIPSInstr_LI(zero, 0x00000000));
- /* nor v0, zero, a2 */
- addInstr(env, MIPSInstr_Alu(Malu_NOR, v0, zero, MIPSRH_Reg(a2)));
- /* srl a3, a0, 0x1 */
+ /* nor r_dstLo, zero, r_shift */
+ addInstr(env, MIPSInstr_Alu(Malu_NOR, r_dstLo, zero, MIPSRH_Reg(r_shift)));
+ /* srl a3, r_srcLo, 0x1 */
addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
- a3, a0, MIPSRH_Imm(False, 0x1)));
- /* srlv a3, a3, v0 */
+ a3, r_srcLo, MIPSRH_Imm(False, 0x1)));
+ /* srlv a3, a3, r_dstLo */
addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
- a3, a3, MIPSRH_Reg(v0)));
- /* sllv v1, a1, a2 */
+ a3, a3, MIPSRH_Reg(r_dstLo)));
+ /* sllv r_dstHi, r_srcHi, r_shift */
addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
- v1, a1, MIPSRH_Reg(a2)));
- /* andi v0, a2, 0x20 */
- addInstr(env, MIPSInstr_Alu(Malu_AND, v0, a2,
+ r_dstHi, r_srcHi, MIPSRH_Reg(r_shift)));
+ /* or r_dstHi, a3, r_dstHi */
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dstHi, a3, MIPSRH_Reg(r_dstHi)));
+ /* andi a3, r_shift, 0x20 */
+ addInstr(env, MIPSInstr_Alu(Malu_AND, a3, r_shift,
MIPSRH_Imm(False, 0x20)));
- /* or v1, a3, v1 */
- addInstr(env, MIPSInstr_Alu(Malu_OR, v1, a3, MIPSRH_Reg(v1)));
- /* sllv a2, a0, a2 */
+ /* sllv r_dstLo, r_srcLo, r_shift */
addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
- a2, a0, MIPSRH_Reg(a2)));
-
- /* movn v1, a2, v0 */
- addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v1, a2, v0));
- /* movn a2, zero, v0 */
- addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, a2, zero, v0));
- addInstr(env, mk_iMOVds_RR(v0, a2));
+ r_dstLo, r_srcLo, MIPSRH_Reg(r_shift)));
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, r_dstHi, r_dstHi, a3));
+ addInstr(env, MIPSInstr_MoveCond(MSelnez, r_shift, r_dstLo, a3));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dstHi, r_dstHi, MIPSRH_Reg(r_shift)));
- *rHi = v1;
- *rLo = v0;
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, r_dstLo, r_dstLo, a3));
+#else
+ /* movn r_dstHi, r_dstLo, a3 */
+ addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dstHi, r_dstLo, a3));
+ /* movn r_dstLo, zero, a3 */
+ addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dstLo, zero, a3));
+#endif
+ *rHi = r_dstHi;
+ *rLo = r_dstLo;
return;
}
jr ra
movn v1, a1, a0
*/
- HReg a0, a1;
- HReg a0tmp = newVRegI(env);
- HReg a1tmp = newVRegI(env);
- HReg a2 = newVRegI(env);
+ HReg r_srcHi, r_srcLo;
+ HReg r_srcHitmp = newVRegI(env);
+ HReg r_srcLotmp = newVRegI(env);
+ HReg r_shift = newVRegI(env);
HReg a3 = newVRegI(env);
- HReg v0 = newVRegI(env);
- HReg v1 = newVRegI(env);
- HReg zero = newVRegI(env);
+ HReg r_dstLo = newVRegI(env);
+ HReg r_dstHi = newVRegI(env);
+ HReg zero = hregMIPS_GPR0(env->mode64);
MIPSRH *sa = NULL;
- iselInt64Expr(&a1, &a0, env, e->Iex.Binop.arg1);
+ iselInt64Expr(&r_srcLo, &r_srcHi, env, e->Iex.Binop.arg1);
sa = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
if (sa->tag == Mrh_Imm) {
- addInstr(env, MIPSInstr_LI(a2, sa->Mrh.Imm.imm16));
+ addInstr(env, MIPSInstr_LI(r_shift, sa->Mrh.Imm.imm16));
}
else {
- addInstr(env, MIPSInstr_Alu(Malu_AND, a2, sa->Mrh.Reg.reg,
+ addInstr(env, MIPSInstr_Alu(Malu_AND, r_shift, sa->Mrh.Reg.reg,
MIPSRH_Imm(False, 0x3f)));
}
-
- addInstr(env, MIPSInstr_LI(zero, 0x00000000));
- /* nor v0, zero, a2 */
- addInstr(env, MIPSInstr_Alu(Malu_NOR, v0, zero, MIPSRH_Reg(a2)));
- /* sll a3, a1, 0x1 */
+ /* nor r_dstLo, zero, r_shift */
+ addInstr(env, MIPSInstr_Alu(Malu_NOR, r_dstLo, zero, MIPSRH_Reg(r_shift)));
+ /* sll a3, r_srcLo, 0x1 */
addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
- a3, a1, MIPSRH_Imm(False, 0x1)));
- /* sllv a3, a3, v0 */
+ a3, r_srcLo, MIPSRH_Imm(False, 0x1)));
+ /* sllv a3, a3, r_dstLo */
addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
- a3, a3, MIPSRH_Reg(v0)));
- /* srlv v0, a0, a2 */
+ a3, a3, MIPSRH_Reg(r_dstLo)));
+ /* srlv r_dstLo, r_srcHi, r_shift */
addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
- v0, a0, MIPSRH_Reg(a2)));
- /* srav v1, a1, a2 */
+ r_dstLo, r_srcHi, MIPSRH_Reg(r_shift)));
+ /* srav r_dstHi, r_srcLo, r_shift */
addInstr(env, MIPSInstr_Shft(Mshft_SRA, True /* 32bit shift */,
- v1, a1, MIPSRH_Reg(a2)));
- /* andi a0, a2, 0x20 */
- addInstr(env, MIPSInstr_Alu(Malu_AND, a0tmp, a2,
+ r_dstHi, r_srcLo, MIPSRH_Reg(r_shift)));
+ /* andi r_srcHitmp, r_shift, 0x20 */
+ addInstr(env, MIPSInstr_Alu(Malu_AND, r_srcHitmp, r_shift,
MIPSRH_Imm(False, 0x20)));
- /* sra a1, a1, 0x1f */
+ /* sra r_srcLotmp, r_srcLo, 0x1f */
addInstr(env, MIPSInstr_Shft(Mshft_SRA, True /* 32bit shift */,
- a1tmp, a1, MIPSRH_Imm(False, 0x1f)));
- /* or v0, a3, v0 */
- addInstr(env, MIPSInstr_Alu(Malu_OR, v0, a3, MIPSRH_Reg(v0)));
-
- /* movn v0, v1, a0 */
- addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v0, v1, a0tmp));
- /* movn v1, a1, a0 */
- addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v1, a1tmp, a0tmp));
-
- *rHi = v1;
- *rLo = v0;
+ r_srcLotmp, r_srcLo, MIPSRH_Imm(False, 0x1f)));
+ /* or r_dstLo, a3, r_dstLo */
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dstLo, a3, MIPSRH_Reg(r_dstLo)));
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, r_dstLo, r_dstLo, r_srcHitmp));
+ addInstr(env, MIPSInstr_MoveCond(MSelnez, a3, r_dstHi, r_srcHitmp));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dstLo, r_dstLo, MIPSRH_Reg(a3)));
+
+ addInstr(env, MIPSInstr_MoveCond(MSeleqz, r_dstHi, r_dstHi, r_srcHitmp));
+ addInstr(env, MIPSInstr_MoveCond(MSelnez, a3, r_srcLotmp, r_srcHitmp));
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_dstHi, r_dstHi, MIPSRH_Reg(a3)));
+#else
+ /* movn r_dstLo, r_dstHi, r_srcHitmp */
+ addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dstLo, r_dstHi, r_srcHitmp));
+ /* movn r_dstHi, r_srcLotmp, r_srcHitmp */
+ addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dstHi, r_srcLotmp, r_srcHitmp));
+#endif
+ *rHi = r_dstHi;
+ *rLo = r_dstLo;
return;
}
iselInt64Expr(&a_H, &a_L, env, e->Iex.Binop.arg1);
iselInt64Expr(&b_H, &b_L, env, e->Iex.Binop.arg2);
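+ /* 64x64->64 multiply from 32-bit halves:
+    lo = lo32(a_L*b_L),
+    hi = lo32(a_H*b_L) + lo32(a_L*b_H) + hi32(a_L*b_L). */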
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_Mulr6(True, True, True,
+ dst_H, a_H, b_L));
+ addInstr(env, MIPSInstr_Mulr6(True, True, True,
+ dst_L, b_H, a_L));
+ addInstr(env, MIPSInstr_Alu(Malu_ADD, dst_H, dst_H,
+ MIPSRH_Reg(dst_L)));
+ addInstr(env, MIPSInstr_Mulr6(False, True, False,
+ dst_L, a_L, b_L));
+
+ addInstr(env, MIPSInstr_Alu(Malu_ADD, dst_H, dst_H,
+ MIPSRH_Reg(dst_L)));
+ addInstr(env, MIPSInstr_Mulr6(False, True, True,
+ dst_L, a_L, b_L));
+#else
addInstr(env, MIPSInstr_Mul(dst_H, a_H, b_L));
addInstr(env, MIPSInstr_Mult(True, b_H, a_L));
addInstr(env, MIPSInstr_Mflo(dst_L));
addInstr(env, MIPSInstr_Alu(Malu_ADD, dst_H, dst_H,
MIPSRH_Reg(dst_L)));
addInstr(env, MIPSInstr_Mflo(dst_L));
+#endif
*rHi = dst_H;
*rLo = dst_L;
return;
return;
}
+ case Iop_F64toI64S: {
+ HReg tmpD = newVRegD(env);
+ HReg valF;
+ HReg tLo = newVRegI(env);
+ HReg tHi = newVRegI(env);
+ MIPSAMode *am_addr;
+
+ if (mode64) {
+ valF = iselFltExpr(env, e->Iex.Binop.arg2);
+ } else {
+ valF = iselDblExpr(env, e->Iex.Binop.arg2);
+ }
+
+ /* CVTLD tmpD, valF */
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLD, tmpD, valF));
+ set_MIPS_rounding_default(env);
+
+ sub_from_sp(env, 16); /* Move SP down 16 bytes */
+ am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+ /* store as F64 */
+ addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, tmpD,
+ am_addr));
+ /* load as 2xI32 */
+#if defined (_MIPSEL)
+ addInstr(env, MIPSInstr_Load(4, tLo, am_addr, mode64));
+ addInstr(env, MIPSInstr_Load(4, tHi, nextMIPSAModeFloat(am_addr),
+ mode64));
+#elif defined (_MIPSEB)
+ addInstr(env, MIPSInstr_Load(4, tHi, am_addr, mode64));
+ addInstr(env, MIPSInstr_Load(4, tLo, nextMIPSAModeFloat(am_addr),
+ mode64));
+#endif
+
+ /* Reset SP */
+ add_to_sp(env, 16);
+
+ *rHi = tHi;
+ *rLo = tLo;
+
+ return;
+ }
+
default:
break;
}
64-bit stuff. */
static HReg iselFltExpr(ISelEnv * env, IRExpr * e)
{
- HReg r = iselFltExpr_wrk(env, e);
- vassert(hregIsVirtual(r));
- return r;
+ HReg r;
+ IRType ty = typeOfIRExpr(env->type_env, e);
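+ /* F32 values, and F64 values when the FPU has 64-bit registers
+    (fp_mode64), live in a single FPR and are handled here; otherwise
+    F64 is delegated to the double-precision path. */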
+ if (ty == Ity_F32 || (ty == Ity_F64 && fp_mode64)) {
+ r = iselFltExpr_wrk(env, e);
+ } else {
+ r = iselDblExpr_wrk(env, e);
+ vassert(hregClass(r) == HRcFlt64);
+ }
+ return r;
}
/* DO NOT CALL THIS DIRECTLY */
HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+#if (__mips_isa_rev >= 6)
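+ /* R6 RINT.S rounds to an integral value in FP format in one step;
+    pre-R6 gets the same effect with cvt.w.s followed by cvt.s.w under
+    the requested rounding mode. */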
+ addInstr(env, MIPSInstr_FpConvert(Mfp_RINTS, valS, valF));
+#else
addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWS, valS, valF));
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSW, valS, valS));
+#endif
set_MIPS_rounding_default(env);
return valS;
}
HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_FpConvert(Mfp_RINTD, valS, valF));
+#else
addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLD, valS, valF));
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDL, valS, valS));
+#endif
set_MIPS_rounding_default(env);
return valS;
}
return r_dst;
}
+#if (__mips_isa_rev >= 6)
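+ /* R6 MIN.fmt/MAX.fmt follow IEEE 754-2008 minNum/maxNum, which is what
+    the Iop_MinNum/Iop_MaxNum operations expect. */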
+ case Iop_MaxNumF32: {
+ HReg src1 = iselFltExpr(env, e->Iex.Binop.arg1);
+ HReg src2 = iselFltExpr(env, e->Iex.Binop.arg2);
+ HReg dst = newVRegF(env);
+ addInstr(env, MIPSInstr_FpMinMax(Mfp_MAXS, dst,
+ src1, src2));
+ return dst;
+ }
+
+ case Iop_MaxNumF64: {
+ HReg src1 = iselFltExpr(env, e->Iex.Binop.arg1);
+ HReg src2 = iselFltExpr(env, e->Iex.Binop.arg2);
+ HReg dst = newVRegF(env);
+ addInstr(env, MIPSInstr_FpMinMax(Mfp_MAXD, dst,
+ src1, src2));
+ return dst;
+ }
+
+ case Iop_MinNumF32: {
+ HReg src1 = iselFltExpr(env, e->Iex.Binop.arg1);
+ HReg src2 = iselFltExpr(env, e->Iex.Binop.arg2);
+ HReg dst = newVRegF(env);
+ addInstr(env, MIPSInstr_FpMinMax(Mfp_MINS, dst,
+ src1, src2));
+ return dst;
+ }
+ case Iop_MinNumF64: {
+ HReg src1 = iselFltExpr(env, e->Iex.Binop.arg1);
+ HReg src2 = iselFltExpr(env, e->Iex.Binop.arg2);
+ HReg dst = newVRegF(env);
+ addInstr(env, MIPSInstr_FpMinMax(Mfp_MIND, dst,
+ src1, src2));
+ return dst;
+ }
+#endif
default:
break;
}
case Iop_MSubF32:
case Iop_MSubF64: {
Int op = 0;
+#if (__mips_isa_rev < 6)
MSADFFlx type = 0;
+#endif
switch (e->Iex.Qop.details->op) {
+#if (__mips_isa_rev >= 6)
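+ /* R6 provides fused MADDF.fmt/MSUBF.fmt, so no MSA fallback is needed
+    on this path. */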
+ case Iop_MAddF32:
+ op = Mfp_MADDS;
+ break;
+ case Iop_MAddF64:
+ op = Mfp_MADDD;
+ break;
+ case Iop_MSubF32:
+ op = Mfp_MSUBS;
+ break;
+ case Iop_MSubF64:
+ op = Mfp_MSUBD;
+ break;
+#else
case Iop_MAddF32:
op = has_msa ? MSA_FMADD : Mfp_MADDS;
type = MSA_F_WH;
op = has_msa ? MSA_FMSUB : Mfp_MSUBD;
type = MSA_F_DW;
break;
+#endif
default:
vassert(0);
}
HReg src1 = iselFltExpr(env, e->Iex.Qop.details->arg2);
HReg src2 = iselFltExpr(env, e->Iex.Qop.details->arg3);
HReg src3 = iselFltExpr(env, e->Iex.Qop.details->arg4);
-
+#if (__mips_isa_rev >= 6)
+ set_MIPS_rounding_mode(env, e->Iex.Qop.details->arg1);
+ addInstr(env, MIPSInstr_FpTernary(op, dst,
+ src3, src1, src2));
+ set_MIPS_rounding_default(env);
+#else
if (has_msa) {
addInstr(env, MIPSInstr_MsaElm(MSA_MOVE, src3, dst, 0));
set_MIPS_rounding_mode_MSA(env, e->Iex.Qop.details->arg1);
src1, src2, src3));
set_MIPS_rounding_default(env);
}
+#endif
return dst;
}
HReg r1 = iselFltExpr(env, e->Iex.ITE.iftrue);
HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
HReg r_dst = newVRegF(env);
+#if (__mips_isa_rev >= 6)
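+ /* R6: move the condition into the destination FPR, then SEL.fmt picks
+    r1 or r0 according to bit 0 of that register. */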
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, r_dst, r_cond));
+ addInstr(env, MIPSInstr_MoveCond(MFpSeld, r_dst, r0, r1));
+#else
addInstr(env, MIPSInstr_FpUnary((ty == Ity_F64) ? Mfp_MOVD : Mfp_MOVS,
r_dst, r0));
addInstr(env, MIPSInstr_MoveCond((ty == Ity_F64) ? MFpMoveCond_movnd :
MFpMoveCond_movns,
r_dst, r1, r_cond));
+#endif
return r_dst;
}
HReg dst = newVRegD(env);
set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_FpConvert(Mfp_RINTD, dst, src));
+#else
addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLD, dst, src));
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDL, dst, dst));
+#endif
set_MIPS_rounding_default(env);
return dst;
set_MIPS_rounding_default_MSA(env);
return r_dst;
}
+#if (__mips_isa_rev >= 6)
+ case Iop_MaxNumF64: {
+ HReg src1 = iselDblExpr(env, e->Iex.Binop.arg1);
+ HReg src2 = iselDblExpr(env, e->Iex.Binop.arg2);
+ HReg dst = newVRegD(env);
+ addInstr(env, MIPSInstr_FpMinMax(Mfp_MAXD, dst,
+ src1, src2));
+ return dst;
+ }
+
+ case Iop_MinNumF64: {
+ HReg src1 = iselDblExpr(env, e->Iex.Binop.arg1);
+ HReg src2 = iselDblExpr(env, e->Iex.Binop.arg2);
+ HReg dst = newVRegD(env);
+ addInstr(env, MIPSInstr_FpMinMax(Mfp_MIND, dst,
+ src1, src2));
+ return dst;
+ }
+#endif
default:
break;
}
if (e->tag == Iex_Qop) {
- vassert(has_msa);
switch (e->Iex.Qop.details->op) {
case Iop_MAddF64:
case Iop_MSubF64: {
MSA3RFOp op = 0;
switch (e->Iex.Qop.details->op) {
+#if (__mips_isa_rev >= 6)
+ case Iop_MAddF64:
+ op = Mfp_MADDD;
+ break;
+ case Iop_MSubF64:
+ op = Mfp_MSUBD;
+ break;
+#else
case Iop_MAddF64:
op = MSA_FMADD;
break;
case Iop_MSubF64:
op = MSA_FMSUB;
break;
+#endif
default:
vassert(0);
}
HReg src1 = iselDblExpr(env, e->Iex.Qop.details->arg2);
HReg src2 = iselDblExpr(env, e->Iex.Qop.details->arg3);
HReg src3 = iselDblExpr(env, e->Iex.Qop.details->arg4);
+#if (__mips_isa_rev >= 6)
+ set_MIPS_rounding_mode(env, e->Iex.Qop.details->arg1);
+ addInstr(env, MIPSInstr_FpTernary(op, dst,
+ src3, src1, src2));
+ set_MIPS_rounding_default(env);
+#else
+ vassert(has_msa);
addInstr(env, MIPSInstr_MsaElm(MSA_MOVE, src3, dst, 0));
set_MIPS_rounding_mode_MSA(env, e->Iex.Qop.details->arg1);
addInstr(env, MIPSInstr_Msa3RF(op, MSA_F_DW, dst, src1, src2));
set_MIPS_rounding_default_MSA(env);
+#endif
return dst;
}
+ case Iop_I64StoF64: {
+ HReg r_dst = newVRegD(env);
+ MIPSAMode *am_addr;
+ HReg tmp, fr_src;
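+ /* CVT.D.L takes its 64-bit integer operand from an FPR, so move the
+    value there first: through the stack in mode64, or from the 32-bit
+    register pair otherwise. */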
+ if (mode64) {
+ tmp = newVRegF(env);
+ fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2);
+ /* Move SP down 8 bytes */
+ sub_from_sp(env, 8);
+ am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+ /* store as I64 */
+ addInstr(env, MIPSInstr_Store(8, am_addr, fr_src, mode64));
+
+ /* load as Ity_F64 */
+ addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, tmp, am_addr));
+
+ /* Reset SP */
+ add_to_sp(env, 8);
+ } else {
+ HReg Hi, Lo;
+ tmp = newVRegD(env);
+ iselInt64Expr(&Hi, &Lo, env, e->Iex.Binop.arg2);
+ tmp = mk_LoadRR32toFPR(env, Hi, Lo); /* 2*I32 -> F64 */
+ }
+
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDL, r_dst, tmp));
+ set_MIPS_rounding_default(env);
+
+ return r_dst;
+ }
default:
break;
HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue);
HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
HReg r_dst = newVRegD(env);
-
+#if (__mips_isa_rev >= 6)
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, r_dst, r_cond));
+ addInstr(env, MIPSInstr_MoveCond(MFpSeld, r_dst, r0, r1));
+#else
addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, r_dst, r0));
addInstr(env, MIPSInstr_MoveCond(MFpMoveCond_movnd, r_dst, r1,
r_cond));
+#endif
return r_dst;
}
}
}
if (ty == Ity_F64) {
- HReg fr_src = iselFltExpr(env, stmt->Ist.Put.data);
- MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset,
- GuestStatePointer(mode64));
- addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
- am_addr));
+ if (mode64) {
+ HReg fr_src = iselFltExpr(env, stmt->Ist.Put.data);
+ MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset,
+ GuestStatePointer(mode64));
+ addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
+ am_addr));
+ } else {
+ HReg fr_src = iselDblExpr(env, stmt->Ist.Put.data);
+ MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset,
+ GuestStatePointer(mode64));
+ addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
+ am_addr));
+ }
return;
}
if (ty == Ity_V128) {