This file is part of Valgrind, a dynamic binary instrumentation
framework.
- Copyright (C) 2010-2012 RT-RK
+ Copyright (C) 2010-2013 RT-RK
mips-valgrind@rt-rk.com
This program is free software; you can redistribute it and/or
#include "libvex_ir.h"
#include "libvex.h"
#include "libvex_guest_mips32.h"
+#include "libvex_guest_mips64.h"
#include "main_util.h"
#include "main_globals.h"
#include "guest_mips_defs.h"
/*------------------------------------------------------------*/
-/*--- Globals ---*/
+/*--- Globals ---*/
/*------------------------------------------------------------*/
/* These are set at the start of the translation of a instruction, so
- that we don't have to pass them around endlessly. CONST means does
- not change during translation of the instruction.
-*/
+ that we don't have to pass them around endlessly. CONST means does
+ not change during translation of the instruction. */
/* CONST: is the host bigendian? This has to do with float vs double
register accesses on VFP, but it's complex and not properly thought
/* Pointer to the guest code area. */
static UChar *guest_code;
-/* The guest address corresponding to guest_code[0]. */
-static Addr32 guest_PC_bbstart;
-
/* CONST: The guest address for the instruction currently being
translated. */
+#if defined(VGP_mips32_linux)
static Addr32 guest_PC_curr_instr;
+#else
+static Addr64 guest_PC_curr_instr;
+#endif
/* MOD: The IRSB* into which we're generating code. */
static IRSB *irsb;
-/* Is our guest binary 32 or 64bit? Set at each call to
+/* Is our guest binary 32 or 64bit? Set at each call to
disInstr_MIPS below. */
static Bool mode64 = False;
/*------------------------------------------------------------*/
-/*--- Debugging output ---*/
+/*--- Debugging output ---*/
/*------------------------------------------------------------*/
#define DIP(format, args...) \
registers are accessed, but I don't think that ever happens on
MIPS. */
UInt ret;
- switch (iregNo) {
- case 0:
- ret = offsetof(VexGuestMIPS32State, guest_r0); break;
- case 1:
- ret = offsetof(VexGuestMIPS32State, guest_r1); break;
- case 2:
- ret = offsetof(VexGuestMIPS32State, guest_r2); break;
- case 3:
- ret = offsetof(VexGuestMIPS32State, guest_r3); break;
- case 4:
- ret = offsetof(VexGuestMIPS32State, guest_r4); break;
- case 5:
- ret = offsetof(VexGuestMIPS32State, guest_r5); break;
- case 6:
- ret = offsetof(VexGuestMIPS32State, guest_r6); break;
- case 7:
- ret = offsetof(VexGuestMIPS32State, guest_r7); break;
- case 8:
- ret = offsetof(VexGuestMIPS32State, guest_r8); break;
- case 9:
- ret = offsetof(VexGuestMIPS32State, guest_r9); break;
- case 10:
- ret = offsetof(VexGuestMIPS32State, guest_r10); break;
- case 11:
- ret = offsetof(VexGuestMIPS32State, guest_r11); break;
- case 12:
- ret = offsetof(VexGuestMIPS32State, guest_r12); break;
- case 13:
- ret = offsetof(VexGuestMIPS32State, guest_r13); break;
- case 14:
- ret = offsetof(VexGuestMIPS32State, guest_r14); break;
- case 15:
- ret = offsetof(VexGuestMIPS32State, guest_r15); break;
- case 16:
- ret = offsetof(VexGuestMIPS32State, guest_r16); break;
- case 17:
- ret = offsetof(VexGuestMIPS32State, guest_r17); break;
- case 18:
- ret = offsetof(VexGuestMIPS32State, guest_r18); break;
- case 19:
- ret = offsetof(VexGuestMIPS32State, guest_r19); break;
- case 20:
- ret = offsetof(VexGuestMIPS32State, guest_r20); break;
- case 21:
- ret = offsetof(VexGuestMIPS32State, guest_r21); break;
- case 22:
- ret = offsetof(VexGuestMIPS32State, guest_r22); break;
- case 23:
- ret = offsetof(VexGuestMIPS32State, guest_r23); break;
- case 24:
- ret = offsetof(VexGuestMIPS32State, guest_r24); break;
- case 25:
- ret = offsetof(VexGuestMIPS32State, guest_r25); break;
- case 26:
- ret = offsetof(VexGuestMIPS32State, guest_r26); break;
- case 27:
- ret = offsetof(VexGuestMIPS32State, guest_r27); break;
- case 28:
- ret = offsetof(VexGuestMIPS32State, guest_r28); break;
- case 29:
- ret = offsetof(VexGuestMIPS32State, guest_r29); break;
- case 30:
- ret = offsetof(VexGuestMIPS32State, guest_r30); break;
- case 31:
- ret = offsetof(VexGuestMIPS32State, guest_r31); break;
- default:
- vassert(0);
- break;
- }
+ if (!mode64)
+ switch (iregNo) {
+ case 0:
+ ret = offsetof(VexGuestMIPS32State, guest_r0); break;
+ case 1:
+ ret = offsetof(VexGuestMIPS32State, guest_r1); break;
+ case 2:
+ ret = offsetof(VexGuestMIPS32State, guest_r2); break;
+ case 3:
+ ret = offsetof(VexGuestMIPS32State, guest_r3); break;
+ case 4:
+ ret = offsetof(VexGuestMIPS32State, guest_r4); break;
+ case 5:
+ ret = offsetof(VexGuestMIPS32State, guest_r5); break;
+ case 6:
+ ret = offsetof(VexGuestMIPS32State, guest_r6); break;
+ case 7:
+ ret = offsetof(VexGuestMIPS32State, guest_r7); break;
+ case 8:
+ ret = offsetof(VexGuestMIPS32State, guest_r8); break;
+ case 9:
+ ret = offsetof(VexGuestMIPS32State, guest_r9); break;
+ case 10:
+ ret = offsetof(VexGuestMIPS32State, guest_r10); break;
+ case 11:
+ ret = offsetof(VexGuestMIPS32State, guest_r11); break;
+ case 12:
+ ret = offsetof(VexGuestMIPS32State, guest_r12); break;
+ case 13:
+ ret = offsetof(VexGuestMIPS32State, guest_r13); break;
+ case 14:
+ ret = offsetof(VexGuestMIPS32State, guest_r14); break;
+ case 15:
+ ret = offsetof(VexGuestMIPS32State, guest_r15); break;
+ case 16:
+ ret = offsetof(VexGuestMIPS32State, guest_r16); break;
+ case 17:
+ ret = offsetof(VexGuestMIPS32State, guest_r17); break;
+ case 18:
+ ret = offsetof(VexGuestMIPS32State, guest_r18); break;
+ case 19:
+ ret = offsetof(VexGuestMIPS32State, guest_r19); break;
+ case 20:
+ ret = offsetof(VexGuestMIPS32State, guest_r20); break;
+ case 21:
+ ret = offsetof(VexGuestMIPS32State, guest_r21); break;
+ case 22:
+ ret = offsetof(VexGuestMIPS32State, guest_r22); break;
+ case 23:
+ ret = offsetof(VexGuestMIPS32State, guest_r23); break;
+ case 24:
+ ret = offsetof(VexGuestMIPS32State, guest_r24); break;
+ case 25:
+ ret = offsetof(VexGuestMIPS32State, guest_r25); break;
+ case 26:
+ ret = offsetof(VexGuestMIPS32State, guest_r26); break;
+ case 27:
+ ret = offsetof(VexGuestMIPS32State, guest_r27); break;
+ case 28:
+ ret = offsetof(VexGuestMIPS32State, guest_r28); break;
+ case 29:
+ ret = offsetof(VexGuestMIPS32State, guest_r29); break;
+ case 30:
+ ret = offsetof(VexGuestMIPS32State, guest_r30); break;
+ case 31:
+ ret = offsetof(VexGuestMIPS32State, guest_r31); break;
+ default:
+ vassert(0);
+ break;
+ }
+ else
+ switch (iregNo) {
+ case 0:
+ ret = offsetof(VexGuestMIPS64State, guest_r0); break;
+ case 1:
+ ret = offsetof(VexGuestMIPS64State, guest_r1); break;
+ case 2:
+ ret = offsetof(VexGuestMIPS64State, guest_r2); break;
+ case 3:
+ ret = offsetof(VexGuestMIPS64State, guest_r3); break;
+ case 4:
+ ret = offsetof(VexGuestMIPS64State, guest_r4); break;
+ case 5:
+ ret = offsetof(VexGuestMIPS64State, guest_r5); break;
+ case 6:
+ ret = offsetof(VexGuestMIPS64State, guest_r6); break;
+ case 7:
+ ret = offsetof(VexGuestMIPS64State, guest_r7); break;
+ case 8:
+ ret = offsetof(VexGuestMIPS64State, guest_r8); break;
+ case 9:
+ ret = offsetof(VexGuestMIPS64State, guest_r9); break;
+ case 10:
+ ret = offsetof(VexGuestMIPS64State, guest_r10); break;
+ case 11:
+ ret = offsetof(VexGuestMIPS64State, guest_r11); break;
+ case 12:
+ ret = offsetof(VexGuestMIPS64State, guest_r12); break;
+ case 13:
+ ret = offsetof(VexGuestMIPS64State, guest_r13); break;
+ case 14:
+ ret = offsetof(VexGuestMIPS64State, guest_r14); break;
+ case 15:
+ ret = offsetof(VexGuestMIPS64State, guest_r15); break;
+ case 16:
+ ret = offsetof(VexGuestMIPS64State, guest_r16); break;
+ case 17:
+ ret = offsetof(VexGuestMIPS64State, guest_r17); break;
+ case 18:
+ ret = offsetof(VexGuestMIPS64State, guest_r18); break;
+ case 19:
+ ret = offsetof(VexGuestMIPS64State, guest_r19); break;
+ case 20:
+ ret = offsetof(VexGuestMIPS64State, guest_r20); break;
+ case 21:
+ ret = offsetof(VexGuestMIPS64State, guest_r21); break;
+ case 22:
+ ret = offsetof(VexGuestMIPS64State, guest_r22); break;
+ case 23:
+ ret = offsetof(VexGuestMIPS64State, guest_r23); break;
+ case 24:
+ ret = offsetof(VexGuestMIPS64State, guest_r24); break;
+ case 25:
+ ret = offsetof(VexGuestMIPS64State, guest_r25); break;
+ case 26:
+ ret = offsetof(VexGuestMIPS64State, guest_r26); break;
+ case 27:
+ ret = offsetof(VexGuestMIPS64State, guest_r27); break;
+ case 28:
+ ret = offsetof(VexGuestMIPS64State, guest_r28); break;
+ case 29:
+ ret = offsetof(VexGuestMIPS64State, guest_r29); break;
+ case 30:
+ ret = offsetof(VexGuestMIPS64State, guest_r30); break;
+ case 31:
+ ret = offsetof(VexGuestMIPS64State, guest_r31); break;
+ default:
+ vassert(0);
+ break;
+ }
return ret;
}
+/* Offset of the guest program counter in the guest state, selected
+   per target: MIPS32 state on mips32-linux, MIPS64 state otherwise. */
+#if defined(VGP_mips32_linux)
#define OFFB_PC offsetof(VexGuestMIPS32State, guest_PC)
+#else
+#define OFFB_PC offsetof(VexGuestMIPS64State, guest_PC)
+#endif
/* ---------------- Floating point registers ---------------- */
{
vassert(fregNo < 32);
UInt ret;
- switch (fregNo) {
- case 0:
- ret = offsetof(VexGuestMIPS32State, guest_f0); break;
- case 1:
- ret = offsetof(VexGuestMIPS32State, guest_f1); break;
- case 2:
- ret = offsetof(VexGuestMIPS32State, guest_f2); break;
- case 3:
- ret = offsetof(VexGuestMIPS32State, guest_f3); break;
- case 4:
- ret = offsetof(VexGuestMIPS32State, guest_f4); break;
- case 5:
- ret = offsetof(VexGuestMIPS32State, guest_f5); break;
- case 6:
- ret = offsetof(VexGuestMIPS32State, guest_f6); break;
- case 7:
- ret = offsetof(VexGuestMIPS32State, guest_f7); break;
- case 8:
- ret = offsetof(VexGuestMIPS32State, guest_f8); break;
- case 9:
- ret = offsetof(VexGuestMIPS32State, guest_f9); break;
- case 10:
- ret = offsetof(VexGuestMIPS32State, guest_f10); break;
- case 11:
- ret = offsetof(VexGuestMIPS32State, guest_f11); break;
- case 12:
- ret = offsetof(VexGuestMIPS32State, guest_f12); break;
- case 13:
- ret = offsetof(VexGuestMIPS32State, guest_f13); break;
- case 14:
- ret = offsetof(VexGuestMIPS32State, guest_f14); break;
- case 15:
- ret = offsetof(VexGuestMIPS32State, guest_f15); break;
- case 16:
- ret = offsetof(VexGuestMIPS32State, guest_f16); break;
- case 17:
- ret = offsetof(VexGuestMIPS32State, guest_f17); break;
- case 18:
- ret = offsetof(VexGuestMIPS32State, guest_f18); break;
- case 19:
- ret = offsetof(VexGuestMIPS32State, guest_f19); break;
- case 20:
- ret = offsetof(VexGuestMIPS32State, guest_f20); break;
- case 21:
- ret = offsetof(VexGuestMIPS32State, guest_f21); break;
- case 22:
- ret = offsetof(VexGuestMIPS32State, guest_f22); break;
- case 23:
- ret = offsetof(VexGuestMIPS32State, guest_f23); break;
- case 24:
- ret = offsetof(VexGuestMIPS32State, guest_f24); break;
- case 25:
- ret = offsetof(VexGuestMIPS32State, guest_f25); break;
- case 26:
- ret = offsetof(VexGuestMIPS32State, guest_f26); break;
- case 27:
- ret = offsetof(VexGuestMIPS32State, guest_f27); break;
- case 28:
- ret = offsetof(VexGuestMIPS32State, guest_f28); break;
- case 29:
- ret = offsetof(VexGuestMIPS32State, guest_f29); break;
- case 30:
- ret = offsetof(VexGuestMIPS32State, guest_f30); break;
- case 31:
- ret = offsetof(VexGuestMIPS32State, guest_f31); break;
- default:
- vassert(0);
- break;
- }
+ if (!mode64)
+ switch (fregNo) {
+ case 0:
+ ret = offsetof(VexGuestMIPS32State, guest_f0); break;
+ case 1:
+ ret = offsetof(VexGuestMIPS32State, guest_f1); break;
+ case 2:
+ ret = offsetof(VexGuestMIPS32State, guest_f2); break;
+ case 3:
+ ret = offsetof(VexGuestMIPS32State, guest_f3); break;
+ case 4:
+ ret = offsetof(VexGuestMIPS32State, guest_f4); break;
+ case 5:
+ ret = offsetof(VexGuestMIPS32State, guest_f5); break;
+ case 6:
+ ret = offsetof(VexGuestMIPS32State, guest_f6); break;
+ case 7:
+ ret = offsetof(VexGuestMIPS32State, guest_f7); break;
+ case 8:
+ ret = offsetof(VexGuestMIPS32State, guest_f8); break;
+ case 9:
+ ret = offsetof(VexGuestMIPS32State, guest_f9); break;
+ case 10:
+ ret = offsetof(VexGuestMIPS32State, guest_f10); break;
+ case 11:
+ ret = offsetof(VexGuestMIPS32State, guest_f11); break;
+ case 12:
+ ret = offsetof(VexGuestMIPS32State, guest_f12); break;
+ case 13:
+ ret = offsetof(VexGuestMIPS32State, guest_f13); break;
+ case 14:
+ ret = offsetof(VexGuestMIPS32State, guest_f14); break;
+ case 15:
+ ret = offsetof(VexGuestMIPS32State, guest_f15); break;
+ case 16:
+ ret = offsetof(VexGuestMIPS32State, guest_f16); break;
+ case 17:
+ ret = offsetof(VexGuestMIPS32State, guest_f17); break;
+ case 18:
+ ret = offsetof(VexGuestMIPS32State, guest_f18); break;
+ case 19:
+ ret = offsetof(VexGuestMIPS32State, guest_f19); break;
+ case 20:
+ ret = offsetof(VexGuestMIPS32State, guest_f20); break;
+ case 21:
+ ret = offsetof(VexGuestMIPS32State, guest_f21); break;
+ case 22:
+ ret = offsetof(VexGuestMIPS32State, guest_f22); break;
+ case 23:
+ ret = offsetof(VexGuestMIPS32State, guest_f23); break;
+ case 24:
+ ret = offsetof(VexGuestMIPS32State, guest_f24); break;
+ case 25:
+ ret = offsetof(VexGuestMIPS32State, guest_f25); break;
+ case 26:
+ ret = offsetof(VexGuestMIPS32State, guest_f26); break;
+ case 27:
+ ret = offsetof(VexGuestMIPS32State, guest_f27); break;
+ case 28:
+ ret = offsetof(VexGuestMIPS32State, guest_f28); break;
+ case 29:
+ ret = offsetof(VexGuestMIPS32State, guest_f29); break;
+ case 30:
+ ret = offsetof(VexGuestMIPS32State, guest_f30); break;
+ case 31:
+ ret = offsetof(VexGuestMIPS32State, guest_f31); break;
+ default:
+ vassert(0);
+ break;
+ }
+ else
+ switch (fregNo) {
+ case 0:
+ ret = offsetof(VexGuestMIPS64State, guest_f0); break;
+ case 1:
+ ret = offsetof(VexGuestMIPS64State, guest_f1); break;
+ case 2:
+ ret = offsetof(VexGuestMIPS64State, guest_f2); break;
+ case 3:
+ ret = offsetof(VexGuestMIPS64State, guest_f3); break;
+ case 4:
+ ret = offsetof(VexGuestMIPS64State, guest_f4); break;
+ case 5:
+ ret = offsetof(VexGuestMIPS64State, guest_f5); break;
+ case 6:
+ ret = offsetof(VexGuestMIPS64State, guest_f6); break;
+ case 7:
+ ret = offsetof(VexGuestMIPS64State, guest_f7); break;
+ case 8:
+ ret = offsetof(VexGuestMIPS64State, guest_f8); break;
+ case 9:
+ ret = offsetof(VexGuestMIPS64State, guest_f9); break;
+ case 10:
+ ret = offsetof(VexGuestMIPS64State, guest_f10); break;
+ case 11:
+ ret = offsetof(VexGuestMIPS64State, guest_f11); break;
+ case 12:
+ ret = offsetof(VexGuestMIPS64State, guest_f12); break;
+ case 13:
+ ret = offsetof(VexGuestMIPS64State, guest_f13); break;
+ case 14:
+ ret = offsetof(VexGuestMIPS64State, guest_f14); break;
+ case 15:
+ ret = offsetof(VexGuestMIPS64State, guest_f15); break;
+ case 16:
+ ret = offsetof(VexGuestMIPS64State, guest_f16); break;
+ case 17:
+ ret = offsetof(VexGuestMIPS64State, guest_f17); break;
+ case 18:
+ ret = offsetof(VexGuestMIPS64State, guest_f18); break;
+ case 19:
+ ret = offsetof(VexGuestMIPS64State, guest_f19); break;
+ case 20:
+ ret = offsetof(VexGuestMIPS64State, guest_f20); break;
+ case 21:
+ ret = offsetof(VexGuestMIPS64State, guest_f21); break;
+ case 22:
+ ret = offsetof(VexGuestMIPS64State, guest_f22); break;
+ case 23:
+ ret = offsetof(VexGuestMIPS64State, guest_f23); break;
+ case 24:
+ ret = offsetof(VexGuestMIPS64State, guest_f24); break;
+ case 25:
+ ret = offsetof(VexGuestMIPS64State, guest_f25); break;
+ case 26:
+ ret = offsetof(VexGuestMIPS64State, guest_f26); break;
+ case 27:
+ ret = offsetof(VexGuestMIPS64State, guest_f27); break;
+ case 28:
+ ret = offsetof(VexGuestMIPS64State, guest_f28); break;
+ case 29:
+ ret = offsetof(VexGuestMIPS64State, guest_f29); break;
+ case 30:
+ ret = offsetof(VexGuestMIPS64State, guest_f30); break;
+ case 31:
+ ret = offsetof(VexGuestMIPS64State, guest_f31); break;
+ default:
+ vassert(0);
+ break;
+ }
return ret;
}
| BITS4((_b3),(_b2),(_b1),(_b0)))
#define LOAD_STORE_PATTERN \
- t1 = newTemp(Ity_I32); \
- assign(t1, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm)))); \
+ t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
+ if(!mode64) \
+ assign(t1, binop(Iop_Add32, getIReg(rs), \
+ mkU32(extend_s_16to32(imm)))); \
+ else \
+ assign(t1, binop(Iop_Add64, getIReg(rs), \
+ mkU64(extend_s_16to64(imm)))); \
+
+/* Split the address in t1 for an unaligned word access (64-bit mode):
+   t2 = t1 with the low 2 bits cleared (word-aligned base),
+   t4 = low 2 bits of t1 (byte offset within the word), narrowed to I32. */
+#define LWX_SWX_PATTERN64 \
+ t2 = newTemp(Ity_I64); \
+ assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFC))); \
+ t4 = newTemp(Ity_I32); \
+ assign(t4, mkNarrowTo32( ty, binop(Iop_And64, \
+ mkexpr(t1), mkU64(0x3))));
+
+/* As LWX_SWX_PATTERN64, but for doubleword accesses:
+   t2 = t1 with the low 3 bits cleared (doubleword-aligned base),
+   t4 = low 3 bits of t1 (byte offset within the doubleword), kept as I64. */
+#define LWX_SWX_PATTERN64_1 \
+ t2 = newTemp(Ity_I64); \
+ assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8))); \
+ t4 = newTemp(Ity_I64); \
+ assign(t4, binop(Iop_And64, mkexpr(t1), mkU64(0x7)));
#define LWX_SWX_PATTERN \
t2 = newTemp(Ity_I32); \
) \
)
+/* Variable-amount 32-bit shift for 64-bit mode:
+   rd = widen(op(low32(rt), low32(rs) & 0x1F)).
+   The shift amount is masked to 5 bits, as the hardware does; the
+   True flag to mkWidenFrom32 presumably requests sign-widening of
+   the 32-bit result — confirm against mkWidenFrom32's definition. */
+#define SXXV_PATTERN64(op) \
+ putIReg(rd, mkWidenFrom32(ty, binop(op, \
+ mkNarrowTo32(ty, getIReg(rt)), \
+ unop(Iop_32to8, \
+ binop(Iop_And32, \
+ mkNarrowTo32(ty, getIReg(rs)), \
+ mkU32(0x0000001F) \
+ ) \
+ ) \
+ ), True \
+ ))
+
#define SXX_PATTERN(op) \
putIReg(rd, binop(op, getIReg(rt), mkU8(sa)));
#define ALUI_PATTERN64(op) \
putIReg(rt, binop(op, getIReg(rs), mkU64(imm)));
+/* 32-bit ALU op in 64-bit mode: narrow both operands to I32, apply
+   op, then widen the result back to register width (True flag to
+   mkWidenFrom32 — presumably sign-widen; confirm). */
+#define ALU_PATTERN64(op) \
+ putIReg(rd, mkWidenFrom32(ty, binop(op, \
+ mkNarrowTo32(ty, getIReg(rs)), \
+ mkNarrowTo32(ty, getIReg(rt))), True));
+
+
#define FP_CONDITIONAL_CODE \
t3 = newTemp(Ity_I32); \
assign(t3, binop(Iop_And32, \
mkU32(0x1)));
/*------------------------------------------------------------*/
-/*--- Field helpers ---*/
+/*--- Field helpers ---*/
/*------------------------------------------------------------*/
static UInt get_opcode(UInt mipsins)
return IRExpr_Triop(op, a1, a2, a3);
}
+/* Build a quaternary (4-operand) IR expression node. */
+static IRExpr *qop ( IROp op, IRExpr * a1, IRExpr * a2, IRExpr * a3,
+ IRExpr * a4 )
+{
+ return IRExpr_Qop(op, a1, a2, a3, a4);
+}
+
static IRExpr *load(IRType ty, IRExpr * addr)
{
IRExpr *load1 = NULL;
return (UInt) ((((Int) x) << 14) >> 14);
}
-static void jmp_lit( /*MOD*/DisResult* dres,
- IRJumpKind kind, Addr32 d32 )
+/* Sign-extend the low 16 bits of x to 64 bits.
+   NOTE(review): left-shifting a signed Long is technically UB if the
+   value goes negative; kept as-is to match the 32-bit helpers. */
+static ULong extend_s_16to64 ( UInt x )
+{
+ return (ULong) ((((Long) x) << 48) >> 48);
+}
+
+/* Sign-extend the low 18 bits of x to 64 bits.  Used for branch
+   displacements, which are a 16-bit immediate shifted left by 2. */
+static ULong extend_s_18to64 ( UInt x )
+{
+ return (ULong) ((((Long) x) << 46) >> 46);
+}
+
+/* Sign-extend the low 32 bits of x to 64 bits. */
+static ULong extend_s_32to64 ( UInt x )
+{
+ return (ULong) ((((Long) x) << 32) >> 32);
+}
+
+static void jmp_lit32 ( /*MOD*/ DisResult* dres, IRJumpKind kind, Addr32 d32 )
{
vassert(dres->whatNext == Dis_Continue);
vassert(dres->len == 0);
stmt( IRStmt_Put( OFFB_PC, mkU32(d32) ) );
}
+/* Terminate the block with a jump to the 64-bit literal address d64:
+   mark disassembly as stopped with jump kind 'kind' and write d64
+   into the guest PC.  Asserts that dres is still in its pristine
+   "continue" state so this is only called once per instruction. */
+static void jmp_lit64 ( /*MOD*/ DisResult* dres, IRJumpKind kind, Addr64 d64 )
+{
+ vassert(dres->whatNext == Dis_Continue);
+ vassert(dres->len == 0);
+ vassert(dres->continueAt == 0);
+ vassert(dres->jk_StopHere == Ijk_INVALID);
+ dres->whatNext = Dis_StopHere;
+ dres->jk_StopHere = kind;
+ stmt(IRStmt_Put(OFFB_PC, mkU64(d64)));
+}
+
/* Fetch a byte from the guest insn stream. */
static UChar getIByte(Int delta)
{
static IRExpr *getHI(void)
{
+ /* Read the guest HI register: Ity_I64 in 64-bit mode, Ity_I32 otherwise. */
- return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_HI), Ity_I32);
+ if (mode64)
+ return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_HI), Ity_I64);
+ else
+ return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_HI), Ity_I32);
}
static IRExpr *getLO(void)
{
+ /* Read the guest LO register: Ity_I64 in 64-bit mode, Ity_I32 otherwise. */
- return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_LO), Ity_I32);
+ if (mode64)
+ return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_LO), Ity_I64);
+ else
+ return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_LO), Ity_I32);
}
static IRExpr *getFCSR(void)
{
+ /* FCSR is read as Ity_I32 in both modes; only the guest state
+    struct holding it differs. */
- return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_FCSR), Ity_I32);
+ if (mode64)
+ return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_FCSR), Ity_I32);
+ else
+ return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_FCSR), Ity_I32);
}
static void putFCSR(IRExpr * e)
{
+ /* Write e into the guest FCSR of whichever state struct is active. */
- stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_FCSR), e));
+ if (mode64)
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_FCSR), e));
+ else
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_FCSR), e));
}
static IRExpr *getULR(void)
{
+ /* Read guest_ULR (presumably the UserLocal/TLS register — confirm
+    against the guest state definition), at the mode-appropriate width. */
- return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_ULR), Ity_I32);
+ if (mode64)
+ return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_ULR), Ity_I64);
+ else
+ return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_ULR), Ity_I32);
}
static void putIReg(UInt archreg, IRExpr * e)
static void putLO(IRExpr * e)
{
+ /* Write e into the guest LO register of the active state struct. */
- stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_LO), e));
+ if (mode64)
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_LO), e));
+ else
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_LO), e));
}
static void putHI(IRExpr * e)
{
+ /* Write e into the guest HI register of the active state struct. */
- stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_HI), e));
+ if (mode64)
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_HI), e));
+ else
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_HI), e));
+}
+
+/* Truncate src (which must be Ity_I32 or Ity_I64, per ty) to an I8. */
+static IRExpr *mkNarrowTo8 ( IRType ty, IRExpr * src )
+{
+ vassert(ty == Ity_I32 || ty == Ity_I64);
+ return ty == Ity_I64 ? unop(Iop_64to8, src) : unop(Iop_32to8, src);
}
static void putPC(IRExpr * e)
static IRExpr *mkWidenFromF32(IRType ty, IRExpr * src)
{
vassert(ty == Ity_F32 || ty == Ity_F64);
+ /* For ty == Ity_F64 this now performs a bit-level move, not a numeric
+    conversion: the raw F32 bit pattern is placed in the low 32 bits of
+    an I64 (upper half zero) and reinterpreted as F64.  Presumably this
+    models how a single fits in a MIPS 64-bit FPR — confirm at callers. */
- return ty == Ity_F64 ? unop(Iop_F32toF64, src) : src;
+ if (ty == Ity_F64) {
+ IRTemp t0 = newTemp(Ity_I32);
+ IRTemp t1 = newTemp(Ity_I64);
+ assign(t0, unop(Iop_ReinterpF32asI32, src));
+ assign(t1, binop(Iop_32HLto64, mkU32(0x0), mkexpr(t0)));
+ return unop(Iop_ReinterpI64asF64, mkexpr(t1));
+ } else
+ return src;
}
static IRExpr *dis_branch_likely(IRExpr * guard, UInt imm)
is added to the address of the instruction following
the branch (not the branch itself), in the branch delay slot, to form
a PC-relative effective target address. */
- branch_offset = extend_s_18to32(imm << 2);
+ if (mode64)
+ branch_offset = extend_s_18to64(imm << 2);
+ else
+ branch_offset = extend_s_18to32(imm << 2);
t0 = newTemp(Ity_I1);
assign(t0, guard);
- stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
- IRConst_U32(guest_PC_curr_instr + 8), OFFB_PC));
+ if (mode64)
+ stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
+ IRConst_U64(guest_PC_curr_instr + 8), OFFB_PC));
+ else
+ stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
+ IRConst_U32(guest_PC_curr_instr + 8), OFFB_PC));
irsb->jumpkind = Ijk_Boring;
- return mkU32(guest_PC_curr_instr + 4 + branch_offset);
+ if (mode64)
+ return mkU64(guest_PC_curr_instr + 4 + branch_offset);
+ else
+ return mkU32(guest_PC_curr_instr + 4 + branch_offset);
}
static void dis_branch(Bool link, IRExpr * guard, UInt imm, IRStmt ** set)
ULong branch_offset;
IRTemp t0;
- if (link) { // LR (GPR31) = addr of the 2nd instr after branch instr
- putIReg(31, mkU32(guest_PC_curr_instr + 8));
+ if (link) { /* LR (GPR31) = addr of the 2nd instr after branch instr */
+ if (mode64)
+ putIReg(31, mkU64(guest_PC_curr_instr + 8));
+ else
+ putIReg(31, mkU32(guest_PC_curr_instr + 8));
}
/* PC = PC + (SignExtend(signed_immed_24) << 2)
the branch (not the branch itself), in the branch delay slot, to form
a PC-relative effective target address. */
- branch_offset = extend_s_18to32(imm << 2);
+ if (mode64)
+ branch_offset = extend_s_18to64(imm << 2);
+ else
+ branch_offset = extend_s_18to32(imm << 2);
t0 = newTemp(Ity_I1);
assign(t0, guard);
- *set = IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring,
- IRConst_U32(guest_PC_curr_instr + 4 + (UInt) branch_offset),
- OFFB_PC);
+ if (mode64)
+ *set = IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring,
+ IRConst_U64(guest_PC_curr_instr + 4 + branch_offset),
+ OFFB_PC);
+ else
+ *set = IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring,
+ IRConst_U32(guest_PC_curr_instr + 4 +
+ (UInt) branch_offset), OFFB_PC);
}
static IRExpr *getFReg(UInt dregNo)
static IRExpr *getDReg(UInt dregNo)
{
- vassert(dregNo < 32);
- IRTemp t0 = newTemp(Ity_F32);
- IRTemp t1 = newTemp(Ity_F32);
- IRTemp t2 = newTemp(Ity_F64);
- IRTemp t3 = newTemp(Ity_I32);
- IRTemp t4 = newTemp(Ity_I32);
- IRTemp t5 = newTemp(Ity_I64);
+ if (mode64) {
+ /* 64-bit mode: each FPR is a full 64-bit register; read it directly. */
+ vassert(dregNo < 32);
+ IRType ty = Ity_F64;
+ return IRExpr_Get(floatGuestRegOffset(dregNo), ty);
+ } else {
+ /* 32-bit mode: an F64 value lives in the FPR pair (dregNo, dregNo+1).
+    Reassemble it by reinterpreting both halves as I32 and gluing them
+    with 32HLto64; which register supplies the high half depends on
+    guest endianness (_MIPSEL vs _MIPSEB). */
+ vassert(dregNo < 32);
+ IRTemp t0 = newTemp(Ity_F32);
+ IRTemp t1 = newTemp(Ity_F32);
+ IRTemp t2 = newTemp(Ity_F64);
+ IRTemp t3 = newTemp(Ity_I32);
+ IRTemp t4 = newTemp(Ity_I32);
+ IRTemp t5 = newTemp(Ity_I64);
#if defined (_MIPSEL)
- assign(t0, getFReg(dregNo));
- assign(t1, getFReg(dregNo + 1));
+ assign(t0, getFReg(dregNo));
+ assign(t1, getFReg(dregNo + 1));
#elif defined (_MIPSEB)
- assign(t0, getFReg(dregNo + 1));
- assign(t1, getFReg(dregNo));
+ assign(t0, getFReg(dregNo + 1));
+ assign(t1, getFReg(dregNo));
#endif
- assign(t3, unop(Iop_ReinterpF32asI32, mkexpr(t0)));
- assign(t4, unop(Iop_ReinterpF32asI32, mkexpr(t1)));
- assign(t5, binop(Iop_32HLto64, mkexpr(t4), mkexpr(t3)));
- assign(t2, unop(Iop_ReinterpI64asF64, mkexpr(t5)));
+ assign(t3, unop(Iop_ReinterpF32asI32, mkexpr(t0)));
+ assign(t4, unop(Iop_ReinterpF32asI32, mkexpr(t1)));
+ assign(t5, binop(Iop_32HLto64, mkexpr(t4), mkexpr(t3)));
+ assign(t2, unop(Iop_ReinterpI64asF64, mkexpr(t5)));
- return mkexpr(t2);
+ return mkexpr(t2);
+ }
}
static void putFReg(UInt dregNo, IRExpr * e)
static void putDReg(UInt dregNo, IRExpr * e)
{
- vassert(dregNo < 32);
- vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64);
- IRTemp t1 = newTemp(Ity_F64);
- IRTemp t4 = newTemp(Ity_I32);
- IRTemp t5 = newTemp(Ity_I32);
- IRTemp t6 = newTemp(Ity_I64);
- assign(t1, e);
- assign(t6, unop(Iop_ReinterpF64asI64, mkexpr(t1)));
- assign(t4, unop(Iop_64HIto32, mkexpr(t6))); // hi
- assign(t5, unop(Iop_64to32, mkexpr(t6))); //lo
+ if (mode64) {
+ /* 64-bit mode: FPRs are 64 bits wide; store the F64 value directly. */
+ vassert(dregNo < 32);
+ IRType ty = Ity_F64;
+ vassert(typeOfIRExpr(irsb->tyenv, e) == ty);
+ stmt(IRStmt_Put(floatGuestRegOffset(dregNo), e));
+ } else {
+ /* 32-bit mode: split the F64 bit pattern into hi/lo I32 halves and
+    write them to the FPR pair (dregNo, dregNo+1); the half-to-register
+    mapping depends on guest endianness (_MIPSEL vs _MIPSEB). */
+ vassert(dregNo < 32);
+ vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64);
+ IRTemp t1 = newTemp(Ity_F64);
+ IRTemp t4 = newTemp(Ity_I32);
+ IRTemp t5 = newTemp(Ity_I32);
+ IRTemp t6 = newTemp(Ity_I64);
+ assign(t1, e);
+ assign(t6, unop(Iop_ReinterpF64asI64, mkexpr(t1)));
+ assign(t4, unop(Iop_64HIto32, mkexpr(t6))); /* hi */
+ assign(t5, unop(Iop_64to32, mkexpr(t6))); /* lo */
#if defined (_MIPSEL)
- putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t5)));
- putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t4)));
+ putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t5)));
+ putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t4)));
#elif defined (_MIPSEB)
- putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t5)));
- putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t4)));
+ putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t5)));
+ putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t4)));
#endif
+ }
}
static void setFPUCondCode(IRExpr * e, UInt cc)
}
}
-static IRExpr */* :: Ity_I32 */get_IR_roundingmode(void)
+static IRExpr* get_IR_roundingmode ( void )
{
/*
rounding mode | MIPS | IR
IRTemp rm_MIPS = newTemp(Ity_I32);
/* Last two bits in FCSR are rounding mode. */
- assign(rm_MIPS, binop(Iop_And32, IRExpr_Get(offsetof(VexGuestMIPS32State,
- guest_FCSR), Ity_I32), mkU32(3)));
+ if (mode64)
+ assign(rm_MIPS, binop(Iop_And32, IRExpr_Get(offsetof(VexGuestMIPS64State,
+ guest_FCSR), Ity_I32), mkU32(3)));
+ else
+ assign(rm_MIPS, binop(Iop_And32, IRExpr_Get(offsetof(VexGuestMIPS32State,
+ guest_FCSR), Ity_I32), mkU32(3)));
- // rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2)
+ /* rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2) */
return binop(Iop_Xor32, mkexpr(rm_MIPS), binop(Iop_And32,
binop(Iop_Shl32, mkexpr(rm_MIPS), mkU8(1)), mkU32(2)));
}
+/* sz, ULong -> IRExpr */
+static IRExpr *mkSzImm ( IRType ty, ULong imm64 )
+{
+ vassert(ty == Ity_I32 || ty == Ity_I64);
+ return ty == Ity_I64 ? mkU64(imm64) : mkU32((UInt) imm64);
+}
+
+static IRConst *mkSzConst ( IRType ty, ULong imm64 )
+{
+ vassert(ty == Ity_I32 || ty == Ity_I64);
+ return (ty == Ity_I64 ? IRConst_U64(imm64) : IRConst_U32((UInt) imm64));
+}
+
+/* Make sure we get valid 32 and 64bit addresses */
+static Addr64 mkSzAddr ( IRType ty, Addr64 addr )
+{
+ vassert(ty == Ity_I32 || ty == Ity_I64);
+ return (ty == Ity_I64 ? (Addr64) addr :
+ (Addr64) extend_s_32to64(toUInt(addr)));
+}
+
+/* Shift and Rotate instructions for MIPS64 */
+static Bool dis_instr_shrt ( UInt theInstr )
+{
+ UInt opc2 = get_function(theInstr);
+ UChar regRs = get_rs(theInstr);
+ UChar regRt = get_rt(theInstr);
+ UChar regRd = get_rd(theInstr);
+ UChar uImmsa = get_sa(theInstr);
+ Long sImmsa = extend_s_16to64(uImmsa);
+ IRType ty = mode64 ? Ity_I64 : Ity_I32;
+ IRTemp tmp = newTemp(ty);
+ IRTemp tmpOr = newTemp(ty);
+ IRTemp tmpRt = newTemp(ty);
+ IRTemp tmpRs = newTemp(ty);
+ IRTemp tmpRd = newTemp(ty);
+
+ assign(tmpRs, getIReg(regRs));
+ assign(tmpRt, getIReg(regRt));
+
+ switch (opc2) {
+ case 0x3A:
+ if ((regRs & 0x01) == 0) {
+ /* Doubleword Shift Right Logical - DSRL; MIPS64 */
+            DIP("dsrl r%u, r%u, %d\n", regRd, regRt, (Int) sImmsa);
+ assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa)));
+ putIReg(regRd, mkexpr(tmpRd));
+ } else if ((regRs & 0x01) == 1) {
+ /* Doubleword Rotate Right - DROTR; MIPS64r2 */
+ vassert(mode64);
+ DIP("drotr r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa);
+ IRTemp tmpL = newTemp(ty);
+ IRTemp tmpR = newTemp(ty);
+ assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa)));
+ assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(63 - uImmsa)));
+ assign(tmpL, binop(Iop_Shl64, mkexpr(tmp), mkU8(1)));
+ assign(tmpRd, binop(Iop_Or64, mkexpr(tmpL), mkexpr(tmpR)));
+ putIReg(regRd, mkexpr(tmpRd));
+ } else
+ return False;
+ break;
+
+ case 0x3E:
+ if ((regRs & 0x01) == 0) {
+ /* Doubleword Shift Right Logical Plus 32 - DSRL32; MIPS64 */
+            DIP("dsrl32 r%u, r%u, %d\n", regRd, regRt, (Int) (sImmsa + 32));
+ assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
+ putIReg(regRd, mkexpr(tmpRd));
+ } else if ((regRs & 0x01) == 1) {
+ /* Doubleword Rotate Right Plus 32 - DROTR32; MIPS64r2 */
+ DIP("drotr32 r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa);
+ vassert(mode64);
+ IRTemp tmpL = newTemp(ty);
+ IRTemp tmpR = newTemp(ty);
+ /* (tmpRt >> sa) | (tmpRt << (64 - sa)) */
+ assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
+ assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt),
+ mkU8(63 - (uImmsa + 32))));
+ assign(tmpL, binop(Iop_Shl64, mkexpr(tmp), mkU8(1)));
+ assign(tmpRd, binop(Iop_Or64, mkexpr(tmpL), mkexpr(tmpR)));
+ putIReg(regRd, mkexpr(tmpRd));
+ } else
+ return False;
+ break;
+
+ case 0x16:
+ if ((uImmsa & 0x01) == 0) {
+ /* Doubleword Shift Right Logical Variable - DSRLV; MIPS64 */
+ DIP("dsrlv r%u, r%u,r%u\n", regRd, regRt, regRs);
+ IRTemp tmpRs8 = newTemp(Ity_I8);
+ /* s = tmpRs[5..0] */
+ assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkU64(63)));
+ assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp)));
+ assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkexpr(tmpRs8)));
+ putIReg(regRd, mkexpr(tmpRd));
+ } else if ((uImmsa & 0x01) == 1) {
+ /* Doubleword Rotate Right Variable - DROTRV; MIPS64r2 */
+ DIP("drotrv r%u, r%u,r%u\n", regRd, regRt, regRs);
+ IRTemp tmpL = newTemp(ty);
+ IRTemp tmpR = newTemp(ty);
+ IRTemp tmpRs8 = newTemp(Ity_I8);
+ IRTemp tmpLs8 = newTemp(Ity_I8);
+ IRTemp tmp64 = newTemp(ty);
+ /* s = tmpRs[5...0]
+ m = 64 - s
+ (tmpRt << s) | (tmpRt >> m) */
+
+ assign(tmp64, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63)));
+ assign(tmp, binop(Iop_Sub64, mkU64(63), mkexpr(tmp64)));
+
+ assign(tmpLs8, mkNarrowTo8(ty, mkexpr(tmp)));
+ assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp64)));
+
+ assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkexpr(tmpRs8)));
+ assign(tmpL, binop(Iop_Shl64, mkexpr(tmpRt), mkexpr(tmpLs8)));
+ assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpL), mkU8(1)));
+ assign(tmpOr, binop(Iop_Or64, mkexpr(tmpRd), mkexpr(tmpR)));
+
+ putIReg(regRd, mkexpr(tmpOr));
+ } else
+ return False;
+ break;
+
+ case 0x38: /* Doubleword Shift Left Logical - DSLL; MIPS64 */
+ DIP("dsll r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa);
+ vassert(mode64);
+ assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa)));
+ putIReg(regRd, mkexpr(tmpRd));
+ break;
+
+ case 0x3C: /* Doubleword Shift Left Logical Plus 32 - DSLL32; MIPS64 */
+ DIP("dsll32 r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa);
+ assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
+ putIReg(regRd, mkexpr(tmpRd));
+ break;
+
+ case 0x14: { /* Doubleword Shift Left Logical Variable - DSLLV; MIPS64 */
+ DIP("dsllv r%u, r%u,r%u\n", regRd, regRt, regRs);
+ IRTemp tmpRs8 = newTemp(Ity_I8);
+
+ assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63)));
+ assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp)));
+ assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkexpr(tmpRs8)));
+ putIReg(regRd, mkexpr(tmpRd));
+ break;
+ }
+
+ case 0x3B: /* Doubleword Shift Right Arithmetic - DSRA; MIPS64 */
+ DIP("dsra r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa);
+ assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkU8(uImmsa)));
+ putIReg(regRd, mkexpr(tmpRd));
+ break;
+
+ case 0x3F: /* Doubleword Shift Right Arithmetic Plus 32 - DSRA32;
+ MIPS64 */
+ DIP("dsra32 r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa);
+ assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
+ putIReg(regRd, mkexpr(tmpRd));
+ break;
+
+ case 0x17: { /* Doubleword Shift Right Arithmetic Variable - DSRAV;
+ MIPS64 */
+ DIP("dsrav r%u, r%u,r%u\n", regRd, regRt, regRs);
+ IRTemp tmpRs8 = newTemp(Ity_I8);
+ assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63)));
+ assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp)));
+ assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkexpr(tmpRs8)));
+ putIReg(regRd, mkexpr(tmpRd));
+ break;
+
+ }
+
+ default:
+ return False;
+
+ }
+ return True;
+}
+
+static IROp mkSzOp ( IRType ty, IROp op8 )
+{
+ Int adj;
+ vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ty == Ity_I64);
+ vassert(op8 == Iop_Add8 || op8 == Iop_Sub8 || op8 == Iop_Mul8
+ || op8 == Iop_Or8 || op8 == Iop_And8 || op8 == Iop_Xor8
+ || op8 == Iop_Shl8 || op8 == Iop_Shr8 || op8 == Iop_Sar8
+ || op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8 || op8 == Iop_Not8);
+ adj = ty == Ity_I8 ? 0 : (ty == Ity_I16 ? 1 : (ty == Ity_I32 ? 2 : 3));
+ return adj + op8;
+}
+
/*********************************************************/
/*--- Floating Point Compare ---*/
/*********************************************************/
-static Bool dis_instr_CCondFmt(UInt cins)
+static Bool dis_instr_CCondFmt ( UInt cins )
{
- IRTemp t0, t1, t2, t3;
+ IRTemp t0, t1, t2, t3, tmp5, tmp6;
IRTemp ccIR = newTemp(Ity_I32);
IRTemp ccMIPS = newTemp(Ity_I32);
UInt FC = get_FC(cins);
UInt ft = get_ft(cins);
UInt cond = get_cond(cins);
- if (FC == 0x3) { // C.cond.fmt
+ if (FC == 0x3) { /* C.cond.fmt */
UInt fpc_cc = get_fpc_cc(cins);
switch (fmt) {
- case 0x10: { //C.cond.S
+ case 0x10: { /* C.cond.S */
DIP("C.cond.S %d f%d, f%d\n", fpc_cc, fs, ft);
- t0 = newTemp(Ity_I32);
- t1 = newTemp(Ity_I32);
- t2 = newTemp(Ity_I32);
- t3 = newTemp(Ity_I32);
-
- assign(ccIR, binop(Iop_CmpF64, unop(Iop_F32toF64, getFReg(fs)),
- unop(Iop_F32toF64, getFReg(ft))));
- /* Map compare result from IR to MIPS */
- /*
- FP cmp result | MIPS | IR
- --------------------------
- UN | 0x1 | 0x45
- EQ | 0x2 | 0x40
- GT | 0x4 | 0x00
- LT | 0x8 | 0x01
- */
-
- // ccMIPS = Shl(1, (~(ccIR>>5) & 2)
- // | ((ccIR ^ (ccIR>>6)) & 1)
- assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,
- binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32,
- binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))), mkU32(2)),
- binop(Iop_And32, binop(Iop_Xor32, mkexpr(ccIR),
- binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))),
- mkU32(1))))));
- assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1))); // UN
- assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
- mkU8(0x1)), mkU32(0x1))); // EQ
- assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32,
- mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1))); // NGT
- assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
- mkU8(0x3)), mkU32(0x1))); // LT
-
- switch (cond) {
- case 0x0:
- setFPUCondCode(mkU32(0), fpc_cc);
- break;
- case 0x1:
- DIP("unorderd: %d\n", fpc_cc);
- setFPUCondCode(mkexpr(t0), fpc_cc);
- break;
- case 0x2:
- setFPUCondCode(mkexpr(t1), fpc_cc);
- break;
- case 0x3:
- setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
- fpc_cc);
- break;
- case 0x4:
- setFPUCondCode(mkexpr(t3), fpc_cc);
- break;
- case 0x5:
- setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
- fpc_cc);
- break;
- case 0x6:
- setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
- fpc_cc);
- break;
- case 0x7:
- setFPUCondCode(mkexpr(t2), fpc_cc);
- break;
- case 0x8:
- setFPUCondCode(mkU32(0), fpc_cc);
- break;
- case 0x9:
- setFPUCondCode(mkexpr(t0), fpc_cc);
- break;
- case 0xA:
- setFPUCondCode(mkexpr(t1), fpc_cc);
- break;
- case 0xB:
- setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
- fpc_cc);
- break;
- case 0xC:
- setFPUCondCode(mkexpr(t3), fpc_cc);
- break;
- case 0xD:
- setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
- fpc_cc);
- break;
- case 0xE:
- setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
- fpc_cc);
- break;
- case 0xF:
- setFPUCondCode(mkexpr(t2), fpc_cc);
- break;
+ if (mode64) {
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I32);
+
+ tmp5 = newTemp(Ity_F64);
+ tmp6 = newTemp(Ity_F64);
+
+ assign(tmp5, unop(Iop_F32toF64, getLoFromF64(Ity_F64,
+ getFReg(fs))));
+ assign(tmp6, unop(Iop_F32toF64, getLoFromF64(Ity_F64,
+ getFReg(ft))));
+
+ assign(ccIR, binop(Iop_CmpF64, mkexpr(tmp5), mkexpr(tmp6)));
+ putHI(mkWidenFrom32(Ity_I64, mkexpr(ccIR), True));
+ /* Map compare result from IR to MIPS
+ FP cmp result | MIPS | IR
+ --------------------------
+ UN | 0x1 | 0x45
+ EQ | 0x2 | 0x40
+ GT | 0x4 | 0x00
+ LT | 0x8 | 0x01
+ */
+
+ /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
+ assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,
+ binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32,
+ binop(Iop_Shr32, mkexpr(ccIR),mkU8(5))),mkU32(2)),
+ binop(Iop_And32, binop(Iop_Xor32, mkexpr(ccIR),
+ binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))),
+ mkU32(1))))));
+ putLO(mkWidenFrom32(Ity_I64, mkexpr(ccMIPS), True));
+
+ /* UN */
+ assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1)));
+ /* EQ */
+ assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+ mkU8(0x1)), mkU32(0x1)));
+ /* NGT */
+ assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32,
+ mkexpr(ccMIPS), mkU8(0x2))),mkU32(0x1)));
+ /* LT */
+ assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+ mkU8(0x3)), mkU32(0x1)));
+ switch (cond) {
+ case 0x0:
+ setFPUCondCode(mkU32(0), fpc_cc);
+ break;
+ case 0x1:
+                  DIP("unordered: %d\n", fpc_cc);
+ setFPUCondCode(mkexpr(t0), fpc_cc);
+ break;
+ case 0x2:
+ setFPUCondCode(mkexpr(t1), fpc_cc);
+ break;
+ case 0x3:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+ fpc_cc);
+ break;
+ case 0x4:
+ setFPUCondCode(mkexpr(t3), fpc_cc);
+ break;
+ case 0x5:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+ fpc_cc);
+ break;
+ case 0x6:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+ fpc_cc);
+ break;
+ case 0x7:
+ setFPUCondCode(mkexpr(t2), fpc_cc);
+ break;
+ case 0x8:
+ setFPUCondCode(mkU32(0), fpc_cc);
+ break;
+ case 0x9:
+ setFPUCondCode(mkexpr(t0), fpc_cc);
+ break;
+ case 0xA:
+ setFPUCondCode(mkexpr(t1), fpc_cc);
+ break;
+ case 0xB:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+ fpc_cc);
+ break;
+ case 0xC:
+ setFPUCondCode(mkexpr(t3), fpc_cc);
+ break;
+ case 0xD:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+ fpc_cc);
+ break;
+ case 0xE:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+ fpc_cc);
+ break;
+ case 0xF:
+ setFPUCondCode(mkexpr(t2), fpc_cc);
+ break;
+
+ default:
+ return False;
+ }
- default:
- return False;
+ } else {
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I32);
+
+ assign(ccIR, binop(Iop_CmpF64, unop(Iop_F32toF64, getFReg(fs)),
+ unop(Iop_F32toF64, getFReg(ft))));
+ /* Map compare result from IR to MIPS
+ FP cmp result | MIPS | IR
+ --------------------------
+ UN | 0x1 | 0x45
+ EQ | 0x2 | 0x40
+ GT | 0x4 | 0x00
+ LT | 0x8 | 0x01
+ */
+
+ /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
+ assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,
+ binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32,
+ binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))),
+ mkU32(2)), binop(Iop_And32,
+ binop(Iop_Xor32, mkexpr(ccIR),
+ binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))),
+ mkU32(1))))));
+ /* UN */
+ assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1)));
+ /* EQ */
+ assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+ mkU8(0x1)), mkU32(0x1)));
+ /* NGT */
+ assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32,
+ mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1)));
+ /* LT */
+ assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+ mkU8(0x3)), mkU32(0x1)));
+
+ switch (cond) {
+ case 0x0:
+ setFPUCondCode(mkU32(0), fpc_cc);
+ break;
+ case 0x1:
+                  DIP("unordered: %d\n", fpc_cc);
+ setFPUCondCode(mkexpr(t0), fpc_cc);
+ break;
+ case 0x2:
+ setFPUCondCode(mkexpr(t1), fpc_cc);
+ break;
+ case 0x3:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+ fpc_cc);
+ break;
+ case 0x4:
+ setFPUCondCode(mkexpr(t3), fpc_cc);
+ break;
+ case 0x5:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+ fpc_cc);
+ break;
+ case 0x6:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+ fpc_cc);
+ break;
+ case 0x7:
+ setFPUCondCode(mkexpr(t2), fpc_cc);
+ break;
+ case 0x8:
+ setFPUCondCode(mkU32(0), fpc_cc);
+ break;
+ case 0x9:
+ setFPUCondCode(mkexpr(t0), fpc_cc);
+ break;
+ case 0xA:
+ setFPUCondCode(mkexpr(t1), fpc_cc);
+ break;
+ case 0xB:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+ fpc_cc);
+ break;
+ case 0xC:
+ setFPUCondCode(mkexpr(t3), fpc_cc);
+ break;
+ case 0xD:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+ fpc_cc);
+ break;
+ case 0xE:
+ setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+ fpc_cc);
+ break;
+ case 0xF:
+ setFPUCondCode(mkexpr(t2), fpc_cc);
+ break;
+
+ default:
+ return False;
+ }
}
}
break;
- case 0x11: //C.cond.D
+ case 0x11: { /* C.cond.D */
DIP("C.%d.D %d f%d, f%d\n", cond, fpc_cc, fs, ft);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I32);
assign(ccIR, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
- /* Map compare result from IR to MIPS */
- /*
+ /* Map compare result from IR to MIPS
FP cmp result | MIPS | IR
--------------------------
UN | 0x1 | 0x45
LT | 0x8 | 0x01
*/
- // ccMIPS = Shl(1, (~(ccIR>>5) & 2)
- // | ((ccIR ^ (ccIR>>6)) & 1)
+ /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,
binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32,
binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))), mkU32(2)),
binop(Iop_And32, binop(Iop_Xor32, mkexpr(ccIR),
binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))),
mkU32(1))))));
-
- assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1))); // UN
+
+ /* UN */
+ assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1)));
+ /* EQ */
assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
- mkU8(0x1)), mkU32(0x1))); // EQ
+ mkU8(0x1)), mkU32(0x1)));
+ /* NGT */
assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32,
- mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1))); // NGT
+ mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1)));
+ /* LT */
assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
- mkU8(0x3)), mkU32(0x1))); // LT
+ mkU8(0x3)), mkU32(0x1)));
switch (cond) {
case 0x0:
default:
return False;
}
- break;
+ }
+ break;
- default:
- return False;
+ default:
+ return False;
}
} else {
return False;
return True;
}
-/*------------------------------------------------------------*/
-/*--- Disassemble a single instruction ---*/
-/*------------------------------------------------------------*/
+/*********************************************************/
+/*--- Branch Instructions for mips64 ---*/
+/*********************************************************/
+static Bool dis_instr_branch ( UInt theInstr, DisResult * dres,
+ Bool(*resteerOkFn) (void *, Addr64),
+ void *callback_opaque, IRStmt ** set )
+{
+ UInt jmpKind = 0;
+ UChar opc1 = get_opcode(theInstr);
+ UChar regRs = get_rs(theInstr);
+ UChar regRt = get_rt(theInstr);
+ UInt offset = get_imm(theInstr);
+ Long sOffset = extend_s_16to64(offset);
+ IRType ty = mode64 ? Ity_I64 : Ity_I32;
+ IROp opSlt = mode64 ? Iop_CmpLT64S : Iop_CmpLT32S;
-/* Disassemble a single instruction into IR. The instruction is
- located in host memory at guest_instr, and has guest IP of
- guest_PC_curr_instr, which will have been set before the call
- here. */
+ IRTemp tmp = newTemp(ty);
+ IRTemp tmpRs = newTemp(ty);
+ IRTemp tmpRt = newTemp(ty);
+ IRTemp tmpLt = newTemp(ty);
+ IRTemp tmpReg0 = newTemp(ty);
-static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *,
- Addr64),
- Bool resteerCisOk,
- void* callback_opaque,
- Long delta64,
- VexArchInfo* archinfo,
- VexAbiInfo* abiinfo,
- Bool sigill_diag )
-{
- IRTemp t0, t1, t2, t3, t4, t5, t6, t7, t8;
- UInt opcode, cins, rs, rt, rd, sa, ft, fs, fd, fmt, tf, nd, function,
- trap_code, imm, instr_index, p, msb, lsb, size, rot, sel;
+ UChar regLnk = 31; /* reg 31 is link reg in MIPS */
+ Addr64 addrTgt = 0;
+ Addr64 cia = guest_PC_curr_instr;
- DisResult dres;
+ IRExpr *eConst0 = mkSzImm(ty, (UInt) 0);
+ IRExpr *eNia = mkSzImm(ty, cia + 8);
+ IRExpr *eCond = NULL;
- static IRExpr *lastn = NULL; /* last jump addr */
- static IRStmt *bstmt = NULL; /* branch (Exit) stmt */
+ assign(tmpRs, getIReg(regRs));
+ assign(tmpRt, getIReg(regRt));
+ assign(tmpReg0, getIReg(0));
+
+ eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpReg0), mkexpr(tmpReg0));
+
+ switch (opc1) {
+ case 0x01:
+ switch (regRt) {
+ case 0x00: { /* BLTZ rs, offset */
+ addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2));
+ IRTemp tmpLtRes = newTemp(Ity_I1);
+
+ assign(tmp, eConst0);
+ assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp)));
+ assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) :
+ unop(Iop_1Uto32, mkexpr(tmpLtRes)));
+
+ eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpLt),
+ mkexpr(tmpReg0));
+
+ jmpKind = Ijk_Call;
+ break;
+ }
+
+ case 0x01: { /* BGEZ rs, offset */
+ IRTemp tmpLtRes = newTemp(Ity_I1);
+ addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2));
+
+ assign(tmp, eConst0);
+ assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp)));
+ assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) :
+ unop(Iop_1Uto32, mkexpr(tmpLtRes)));
+ eCond = binop(mkSzOp(ty, Iop_CmpEQ8), mkexpr(tmpLt),
+ mkexpr(tmpReg0));
+
+ jmpKind = Ijk_Call;
+ break;
+ }
+
+ case 0x11: { /* BGEZAL rs, offset */
+ addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2));
+ putIReg(regLnk, eNia);
+ IRTemp tmpLtRes = newTemp(Ity_I1);
+
+ assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), eConst0));
+ assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) :
+ unop(Iop_1Uto32, mkexpr(tmpLtRes)));
+
+ eCond = binop(mkSzOp(ty, Iop_CmpEQ8), mkexpr(tmpLt),
+ mkexpr(tmpReg0));
+
+ jmpKind = Ijk_Call;
+ break;
+ }
+
+ case 0x10: { /* BLTZAL rs, offset */
+ IRTemp tmpLtRes = newTemp(Ity_I1);
+ IRTemp tmpRes = newTemp(ty);
+
+ addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2));
+ putIReg(regLnk, eNia);
+
+ assign(tmp, eConst0);
+ assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp)));
+ assign(tmpRes, mode64 ? unop(Iop_1Uto64,
+ mkexpr(tmpLtRes)) : unop(Iop_1Uto32, mkexpr(tmpLtRes)));
+ eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpRes),
+ mkexpr(tmpReg0));
+
+ jmpKind = Ijk_Call;
+ break;
+ }
+
+         default:
+            /* Undecoded rt field: bail out instead of emitting an Exit
+               with jmpKind still 0 (not a valid IRJumpKind). */
+            return False;
+         }
+ break;
+ default:
+ return False;
+ }
+ *set = IRStmt_Exit(eCond, jmpKind, mkSzConst(ty, addrTgt), OFFB_PC);
+ return True;
+}
+
+/*------------------------------------------------------------*/
+/*--- Disassemble a single instruction ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR. The instruction is
+ located in host memory at guest_instr, and has guest IP of
+ guest_PC_curr_instr, which will have been set before the call
+ here. */
+
+static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *,
+ Addr64),
+ Bool resteerCisOk,
+ void* callback_opaque,
+ Long delta64,
+ VexArchInfo* archinfo,
+ VexAbiInfo* abiinfo,
+ Bool sigill_diag )
+{
+ IRTemp t0, t1, t2, t3, t4, t5, t6, t7, t8;
+ UInt opcode, cins, rs, rt, rd, sa, ft, fs, fd, fmt, tf, nd, function,
+ trap_code, imm, instr_index, p, msb, lsb, size, rot, sel;
+
+ DisResult dres;
+
+ static IRExpr *lastn = NULL; /* last jump addr */
+ static IRStmt *bstmt = NULL; /* branch (Exit) stmt */
/* The running delta */
Int delta = (Int) delta64;
"srl $0, $0, 13
"srl $0, $0, 29
"srl $0, $0, 3
- "srl $0, $0, 19 */
- UInt word1 = 0x00000342;
- UInt word2 = 0x00000742;
- UInt word3 = 0x000000C2;
- UInt word4 = 0x000004C2;
+ "srl $0, $0, 19
+
+ ****mips64****
+ dsll $0, $0, 3
+ dsll $0, $0, 13
+ dsll $0, $0, 29
+ dsll $0, $0, 19 */
+
+ UInt word1 = mode64 ? 0xF8 : 0x342;
+ UInt word2 = mode64 ? 0x378 : 0x742;
+ UInt word3 = mode64 ? 0x778 : 0xC2;
+ UInt word4 = mode64 ? 0x4F8 : 0x4C2;
if (getUInt(code + 0) == word1 && getUInt(code + 4) == word2 &&
getUInt(code + 8) == word3 && getUInt(code + 12) == word4) {
- /* Got a "Special" instruction preamble. Which one is it? */
- if (getUInt(code + 16) == 0x01ad6825 /* or t5, t5, t5 */ ) {
- /* v0 = client_request ( t9 ) */
- DIP("v0 = client_request ( t9 )\n");
- putPC(mkU32(guest_PC_curr_instr + 20));
+ /* Got a "Special" instruction preamble. Which one is it? */
+ if (getUInt(code + 16) == 0x01ad6825 /* or $13, $13, $13 */ ) {
+ /* $11 = client_request ( $12 ) */
+ DIP("$11 = client_request ( $12 )\n");
+ if (mode64)
+ putPC(mkU64(guest_PC_curr_instr + 20));
+ else
+ putPC(mkU32(guest_PC_curr_instr + 20));
dres.jk_StopHere = Ijk_ClientReq;
dres.whatNext = Dis_StopHere;
goto decode_success;
- } else if (getUInt(code + 16) == 0x01ce7025 /* or t6,t6,t6 */ ) {
- /* t9 = guest_NRADDR */
- DIP("t9 = guest_NRADDR\n");
+ } else if (getUInt(code + 16) == 0x01ce7025 /* or $14, $14, $14 */ ) {
+ /* $11 = guest_NRADDR */
+ DIP("$11 = guest_NRADDR\n");
dres.len = 20;
delta += 20;
- putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS32State, guest_NRADDR),
- Ity_I32));
+ if (mode64)
+ putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS64State,
+ guest_NRADDR), Ity_I64));
+ else
+ putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS32State,
+ guest_NRADDR), Ity_I32));
goto decode_success;
- } else if (getUInt(code + 16) == 0x01ef7825/* or t7,t7,t7 */ ) {
- /* branch-and-link-to-noredir t9 */
- DIP("branch-and-link-to-noredir t9\n");
- putIReg(31, mkU32(guest_PC_curr_instr + 20));
+ } else if (getUInt(code + 16) == 0x01ef7825 /* or $15, $15, $15 */ ) {
+ /* branch-and-link-to-noredir $25 */
+ DIP("branch-and-link-to-noredir $25\n");
+ if (mode64)
+ putIReg(31, mkU64(guest_PC_curr_instr + 20));
+ else
+ putIReg(31, mkU32(guest_PC_curr_instr + 20));
putPC(getIReg(25));
dres.jk_StopHere = Ijk_NoRedir;
dres.whatNext = Dis_StopHere;
goto decode_success;
- } else if (getUInt(code + 16) == 0x016b5825/* or t3,t3,t3 */ ) {
+ } else if (getUInt(code + 16) == 0x016b5825 /* or $11,$11,$11 */ ) {
/* IR injection */
DIP("IR injection\n");
#if defined (_MIPSEL)
#elif defined (_MIPSEB)
vex_inject_ir(irsb, Iend_BE);
#endif
- stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_TISTART),
- mkU32(guest_PC_curr_instr)));
- stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_TILEN),
- mkU32(20)));
+ if (mode64) {
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_TISTART),
+ mkU64(guest_PC_curr_instr)));
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_TILEN),
+ mkU64(20)));
+
+ putPC(mkU64(guest_PC_curr_instr + 20));
+ } else {
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_TISTART),
+ mkU32(guest_PC_curr_instr)));
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_TILEN),
+ mkU32(20)));
- putPC(mkU32(guest_PC_curr_instr + 20));
+ putPC(mkU32(guest_PC_curr_instr + 20));
+ }
dres.whatNext = Dis_StopHere;
dres.jk_StopHere = Ijk_TInval;
dres.len = 20;
case 0x03: /* JAL */
DIP("jal 0x%x", instr_index);
- putIReg(31, mkU32(guest_PC_curr_instr + 8));
- t0 = newTemp(ty);
- assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) |
- (instr_index << 2)));
+ if (mode64) {
+ putIReg(31, mkU64(guest_PC_curr_instr + 8));
+ t0 = newTemp(ty);
+ assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000) |
+ (instr_index << 2)));
+ } else {
+ putIReg(31, mkU32(guest_PC_curr_instr + 8));
+ t0 = newTemp(ty);
+ assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) |
+ (instr_index << 2)));
+ }
lastn = mkexpr(t0);
break;
case 0x02: /* J */
DIP("j 0x%x", instr_index);
t0 = newTemp(ty);
- assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) |
- (instr_index << 2)));
+ if (mode64)
+ assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000) |
+ (instr_index << 2)));
+ else
+ assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) |
+ (instr_index << 2)));
lastn = mkexpr(t0);
break;
UInt bc1_cc = get_bc1_cc(cins);
if (0x08 == fmt) {
switch (fmt) {
- case 0x08: //BC
+ case 0x08: /* BC */
{
DIP("tf: %d, nd: %d\n", tf, nd);
- //FcConditionalCode(bc1_cc)
+ /* FcConditionalCode(bc1_cc) */
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I1);
));
if (tf == 1 && nd == 0) {
- //branch on true
+ /* branch on true */
DIP("bc1t %d, %d", bc1_cc, imm);
assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
dis_branch(False, mkexpr(t3), imm, &bstmt);
break;
} else if (tf == 0 && nd == 0) {
- //branch on false
+ /* branch on false */
DIP("bc1f %d, %d", bc1_cc, imm);
assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
dis_branch(False, mkexpr(t3), imm, &bstmt);
} else if (nd == 1 && tf == 0) {
DIP("bc1fl %d, %d", bc1_cc, imm);
lastn = dis_branch_likely(binop(Iop_CmpNE32, mkexpr(t2),
- mode64 ? mkU64(0x0) : mkU32(0x0)), imm);
+ mkU32(0x0)), imm);
break;
} else if (nd == 1 && tf == 1) {
DIP("bc1tl %d, %d", bc1_cc, imm);
} else {
switch (function) {
- case 0x4: //SQRT.fmt
+ case 0x4: /* SQRT.fmt */
{
switch (fmt) {
- case 0x10: //S
+ case 0x10: /* S */
{
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, binop(Iop_SqrtF32, rm,
getLoFromF64(tyF, getFReg(fs)))));
}
break;
- case 0x11: //D
+ case 0x11: /* D */
{
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, binop(Iop_SqrtF64, rm, getDReg(fs)));
}
}
break;
- case 0x5: //abs.fmt
+ case 0x5: /* abs.fmt */
switch (fmt) {
- case 0x10: //S
+ case 0x10: /* S */
DIP("abs.s f%d, f%d\n", fd, fs);
putFReg(fd, mkWidenFromF32(tyF, unop(Iop_AbsF32,
getLoFromF64(tyF, getFReg(fs)))));
break;
- case 0x11: //D
+ case 0x11: /* D */
DIP("abs.d f%d, f%d\n", fd, fs);
putDReg(fd, unop(Iop_AbsF64, getDReg(fs)));
break;
default:
goto decode_failure;
}
- break; //case 0x5
+ break; /* case 0x5 */
- case 0x02: // MUL.fmt
+ case 0x02: /* MUL.fmt */
switch (fmt) {
- case 0x11: // D
+ case 0x11: /* D */
{
DIP("mul.d f%d, f%d, f%d", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
getDReg(ft)));
break;
}
- case 0x10: // S
+ case 0x10: /* S */
{
DIP("mul.s f%d, f%d, f%d", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
default:
goto decode_failure;
}
- break; // MUL.fmt
+ break; /* MUL.fmt */
- case 0x03: // DIV.fmt
+ case 0x03: /* DIV.fmt */
switch (fmt) {
- case 0x11: // D
+ case 0x11: /* D */
{
DIP("div.d f%d, f%d, f%d", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
getDReg(ft)));
break;
}
- case 0x10: // S
+ case 0x10: /* S */
{
DIP("div.s f%d, f%d, f%d", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
default:
goto decode_failure;
}
- break; // DIV.fmt
+ break; /* DIV.fmt */
- case 0x01: // SUB.fmt
+ case 0x01: /* SUB.fmt */
switch (fmt) {
- case 0x11: // D
+ case 0x11: /* D */
{
DIP("sub.d f%d, f%d, f%d", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
- putDReg(fd, triop(Iop_SubF64, rm, getDReg(fs), getDReg(ft)));
+ putDReg(fd, triop(Iop_SubF64, rm, getDReg(fs),
+ getDReg(ft)));
break;
}
- case 0x10: // S
+ case 0x10: /* S */
{
DIP("sub.s f%d, f%d, f%d", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
default:
goto decode_failure;
}
- break; // SUB.fmt
+ break; /* SUB.fmt */
- case 0x06: // MOV.fmt
+ case 0x06: /* MOV.fmt */
switch (fmt) {
- case 0x11: // D
- /* TODO: Check this for 64 bit FPU registers. */
- DIP("mov.d f%d, f%d", fd, fs);
- putFReg(fd, getFReg(fs));
- putFReg(fd + 1, getFReg(fs + 1));
- break;
- case 0x10: // S
- DIP("mov.s f%d, f%d", fd, fs);
- putFReg(fd, getFReg(fs));
- break;
- default:
- goto decode_failure;
+ case 0x11: /* D */
+ DIP("mov.d f%d, f%d", fd, fs);
+ if (mode64) {
+ putFReg(fd, getFReg(fs));
+ } else {
+ putFReg(fd, getFReg(fs));
+ putFReg(fd + 1, getFReg(fs + 1));
+ }
+ break;
+ case 0x10: /* S */
+ DIP("mov.s f%d, f%d", fd, fs);
+ putFReg(fd, getFReg(fs));
+ break;
+ default:
+ goto decode_failure;
}
- break; // MOV.fmt
+ break; /* MOV.fmt */
- case 0x7: //neg.fmt
+ case 0x7: /* neg.fmt */
switch (fmt) {
- case 0x10: //S
- DIP("neg.s f%d, f%d", fd, fs);
- putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32,
- getLoFromF64(tyF, getFReg(fs)))));
- break;
- case 0x11: //D
- DIP("neg.d f%d, f%d", fd, fs);
- putDReg(fd, unop(Iop_NegF64, getDReg(fs)));
+ case 0x10: /* S */
+ DIP("neg.s f%d, f%d", fd, fs);
+ putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32,
+ getLoFromF64(tyF, getFReg(fs)))));
+ break;
+ case 0x11: /* D */
+ DIP("neg.d f%d, f%d", fd, fs);
+ putDReg(fd, unop(Iop_NegF64, getDReg(fs)));
+ break;
+ default:
+ goto decode_failure;
+ }
+ break; /* case 0x7 */
+
+ case 0x08: /* ROUND.L.fmt */
+ switch (fmt) {
+ case 0x10: /* S */
+ DIP("round.l.s f%d, f%d\n", fd, fs);
+ t0 = newTemp(Ity_I64);
+
+ assign(t0, binop(Iop_F32toI64S, mkU32(0x0),
+ getLoFromF64(Ity_F64, getFReg(fs))));
+
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
break;
- default:
- goto decode_failure;
+ case 0x11: /* D */
+ DIP("round.l.d f%d, f%d\n", fd, fs);
+ putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x0),
+ getFReg(fs)));
+ break;
+ default:
+ goto decode_failure;
+
}
- break; //case 0x7
+ break; /* ROUND.L.fmt */
+
+ case 0x09: /* TRUNC.L.fmt */
+ switch (fmt) {
+ case 0x10: /* S */
+ DIP("trunc.l.s f%d, f%d\n", fd, fs);
+ t0 = newTemp(Ity_I64);
+ assign(t0, binop(Iop_F32toI64S, mkU32(0x3),
+ getLoFromF64(Ity_F64, getFReg(fs))));
+
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+ break;
+ case 0x11: /* D */
+ DIP("trunc.l.d f%d, f%d\n", fd, fs);
+ putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x3),
+ getFReg(fs)));
+ break;
+ default:
+ goto decode_failure;
+ }
+ break; /* TRUNC.L.fmt */
- case 0x15: //RECIP.fmt
+ case 0x15: /* RECIP.fmt */
switch (fmt) {
- case 0x10:
- { //S
+ case 0x10: { /* S */
DIP("recip.s f%d, f%d\n", fd, fs);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32,
getFReg(fs)))));
break;
}
- case 0x11:
- { //D
+ case 0x11: { /* D */
DIP("recip.d f%d, f%d\n", fd, fs);
#if defined (_MIPSEL)
IRExpr *rm = get_IR_roundingmode();
goto decode_failure;
}
- break; //case 0x15
+ break; /* case 0x15 */
- case 0x13: //MOVN.fmt
+ case 0x13: /* MOVN.fmt */
switch (fmt) {
- case 0x10: // S
+ case 0x10: /* S */
DIP("movn.s f%d, f%d, r%d", fd, fs, rt);
t1 = newTemp(Ity_F64);
t2 = newTemp(Ity_F64);
t3 = newTemp(Ity_I1);
t4 = newTemp(Ity_F64);
-
- assign(t1, unop(Iop_F32toF64, getFReg(fs)));
- assign(t2, unop(Iop_F32toF64, getFReg(fd)));
- assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt)));
+ if (mode64) {
+ assign(t1, getFReg(fs));
+ assign(t2, getFReg(fd));
+ assign(t3, binop(Iop_CmpNE64, mkU64(0), getIReg(rt)));
+ } else {
+ assign(t1, unop(Iop_F32toF64, getFReg(fs)));
+ assign(t2, unop(Iop_F32toF64, getFReg(fd)));
+ assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt)));
+ }
assign(t4, IRExpr_ITE(mkexpr(t3), mkexpr(t1), mkexpr(t2)));
-
- putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
- mkexpr(t4)));
+ if (mode64) {
+ IRTemp f = newTemp(Ity_F64);
+ IRTemp fd_hi = newTemp(Ity_I32);
+ t5 = newTemp(Ity_I64);
+ assign(f, getFReg(fd));
+ assign(fd_hi, unop(Iop_64HIto32, unop(Iop_ReinterpF64asI64,
+ mkexpr(f))));
+
+ assign(t5, mkWidenFrom32(ty, unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64, mkexpr(t4))), True));
+
+ putFReg(fd, unop (Iop_ReinterpI64asF64, mkexpr(t5)));
+ } else
+ putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
+ mkexpr(t4)));
break;
- case 0x11: // D
+ case 0x11: /* D */
DIP("movn.d f%d, f%d, r%d", fd, fs, rt);
t3 = newTemp(Ity_I1);
t4 = newTemp(Ity_F64);
- assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt)));
+ if (mode64)
+ assign(t3, binop(Iop_CmpNE64, mkU64(0), getIReg(rt)));
+ else
+ assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt)));
+
putDReg(fd, IRExpr_ITE(mkexpr(t3), getDReg(fs), getDReg(fd)));
break;
default:
goto decode_failure;
}
- break; // MOVN.fmt
+ break; /* MOVN.fmt */
- case 0x12: //MOVZ.fmt
+ case 0x12: /* MOVZ.fmt */
switch (fmt) {
- case 0x10: // S
+ case 0x10: /* S */
DIP("movz.s f%d, f%d, r%d", fd, fs, rt);
t1 = newTemp(Ity_F64);
t2 = newTemp(Ity_F64);
t3 = newTemp(Ity_I1);
t4 = newTemp(Ity_F64);
-
- assign(t1, unop(Iop_F32toF64, getFReg(fs)));
- assign(t2, unop(Iop_F32toF64, getFReg(fd)));
- assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt)));
+ if (mode64) {
+ assign(t1, getFReg(fs));
+ assign(t2, getFReg(fd));
+ assign(t3, binop(Iop_CmpEQ64, mkU64(0), getIReg(rt)));
+ } else {
+ assign(t1, unop(Iop_F32toF64, getFReg(fs)));
+ assign(t2, unop(Iop_F32toF64, getFReg(fd)));
+ assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt)));
+ }
assign(t4, IRExpr_ITE(mkexpr(t3), mkexpr(t1), mkexpr(t2)));
- putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
- mkexpr(t4)));
+ if (mode64) {
+ IRTemp f = newTemp(Ity_F64);
+ IRTemp fd_hi = newTemp(Ity_I32);
+ t7 = newTemp(Ity_I64);
+ assign(f, getFReg(fd));
+ assign(fd_hi, unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64, mkexpr(f))));
+ assign(t7, mkWidenFrom32(ty, unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64, mkexpr(t4))), True));
+
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7)));
+ } else
+ putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
+ mkexpr(t4)));
break;
- case 0x11: // D
+ case 0x11: /* D */
DIP("movz.d f%d, f%d, r%d", fd, fs, rt);
-
t3 = newTemp(Ity_I1);
t4 = newTemp(Ity_F64);
+ if (mode64)
+ assign(t3, binop(Iop_CmpEQ64, mkU64(0), getIReg(rt)));
+ else
+ assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt)));
- assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt)));
putDReg(fd, IRExpr_ITE(mkexpr(t3), getDReg(fs), getDReg(fd)));
break;
default:
goto decode_failure;
}
- break; // MOVZ.fmt
+ break; /* MOVZ.fmt */
- case 0x11: // MOVT.fmt
+ case 0x11: /* MOVT.fmt */
if (tf == 1) {
UInt mov_cc = get_mov_cc(cins);
- switch (fmt) // MOVCF = 010001
- {
- case 0x11: // D
+ switch (fmt) { /* MOVCF = 010001 */
+ case 0x11: /* D */
DIP("movt.d f%d, f%d, %d", fd, fs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
assign(t4, IRExpr_ITE(mkexpr(t3),
- getDReg(fd), getDReg(fs)));
+ getDReg(fs), getDReg(fd)));
putDReg(fd, mkexpr(t4));
break;
- case 0x10: // S
+ case 0x10: /* S */
DIP("movt.s f%d, f%d, %d", fd, fs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
t6 = newTemp(Ity_F64);
t7 = newTemp(Ity_I64);
- assign(t5, unop(Iop_F32toF64, getFReg(fs)));
- assign(t6, unop(Iop_F32toF64, getFReg(fd)));
+ if (mode64) {
+ assign(t5, getFReg(fs));
+ assign(t6, getFReg(fd));
+ } else {
+ assign(t5, unop(Iop_F32toF64, getFReg(fs)));
+ assign(t6, unop(Iop_F32toF64, getFReg(fd)));
+ }
assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
assign(t2, IRExpr_ITE(mkexpr(t1),
assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
assign(t4, IRExpr_ITE(mkexpr(t3),
- mkexpr(t6), mkexpr(t5)));
-
- putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
- mkexpr(t4)));
+ mkexpr(t5), mkexpr(t6)));
+
+ if (mode64) {
+ IRTemp f = newTemp(Ity_F64);
+ IRTemp fd_hi = newTemp(Ity_I32);
+ assign(f, getFReg(fd));
+ assign(fd_hi, unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64, mkexpr(f))));
+ assign(t7, mkWidenFrom32(ty, unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64, mkexpr(t4))),
+ True));
+
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7)));
+ } else
+ putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
+ mkexpr(t4)));
break;
default:
goto decode_failure;
}
- } else if (tf == 0) //movf.fmt
+ } else if (tf == 0) /* movf.fmt */
{
UInt mov_cc = get_mov_cc(cins);
- switch (fmt) // MOVCF = 010001
+ switch (fmt) /* MOVCF = 010001 */
{
- case 0x11: // D
+ case 0x11: /* D */
DIP("movf.d f%d, f%d, %d", fd, fs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
mkU32(0x1))
));
- assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
+ assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
assign(t4, IRExpr_ITE(mkexpr(t3),
getDReg(fs), getDReg(fd)));
putDReg(fd, mkexpr(t4));
break;
- case 0x10: // S
+ case 0x10: /* S */
DIP("movf.s f%d, f%d, %d", fd, fs, mov_cc);
- {
- t1 = newTemp(Ity_I1);
- t2 = newTemp(Ity_I32);
- t3 = newTemp(Ity_I1);
- t4 = newTemp(Ity_F64);
- t5 = newTemp(Ity_F64);
- t6 = newTemp(Ity_F64);
+ t1 = newTemp(Ity_I1);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I1);
+ t4 = newTemp(Ity_F64);
+ t5 = newTemp(Ity_F64);
+ t6 = newTemp(Ity_F64);
+ if (mode64) {
+ assign(t5, getFReg(fs));
+ assign(t6, getFReg(fd));
+ } else {
assign(t5, unop(Iop_F32toF64, getFReg(fs)));
assign(t6, unop(Iop_F32toF64, getFReg(fd)));
+ }
+
+ assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+ assign(t2, IRExpr_ITE(mkexpr(t1),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
- assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
- assign(t2, IRExpr_ITE(mkexpr(t1),
- binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(),
- mkU8(23)),
- mkU32(0x1)),
- binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(),
- mkU8(24 + mov_cc)),
- mkU32(0x1))
- ));
-
- assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
- assign(t4, IRExpr_ITE(mkexpr(t3),
- mkexpr(t5), mkexpr(t6)));
+ assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
+ assign(t4, IRExpr_ITE(mkexpr(t3),
+ mkexpr(t5), mkexpr(t6)));
+
+ if (mode64) {
+ IRTemp f = newTemp(Ity_F64);
+ IRTemp fd_hi = newTemp(Ity_I32);
+ t7 = newTemp(Ity_I64);
+ assign(f, getFReg(fd));
+ assign(fd_hi, unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64, mkexpr(f))));
+ assign(t7, mkWidenFrom32(ty, unop(Iop_64to32,
+ unop(Iop_ReinterpF64asI64, mkexpr(t4))),
+ True));
+
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7)));
+ } else
putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
mkexpr(t4)));
- }
break;
default:
goto decode_failure;
}
}
- break; // MOVT.fmt
+ break; /* MOVT.fmt */
- case 0x0: //add.fmt
+ case 0x0: /* add.fmt */
switch (fmt) {
- case 0x10: //S
+ case 0x10: /* S */
{
DIP("add.s f%d, f%d, f%d\n", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
getLoFromF64(tyF, getFReg(ft)))));
break;
}
- case 0x11: //D
- {
- DIP("add.d f%d, f%d, f%d\n", fd, fs, ft);
- IRExpr *rm = get_IR_roundingmode();
- putDReg(fd, triop(Iop_AddF64, rm, getDReg(fs),
- getDReg(ft)));
- break;
- }
+ case 0x11: { /* D */
+ DIP("add.d f%d, f%d, f%d\n", fd, fs, ft);
+ IRExpr *rm = get_IR_roundingmode();
+ putDReg(fd, triop(Iop_AddF64, rm, getDReg(fs), getDReg(ft)));
+ break;
+ }
- case 0x4: //MTC1 (Move Word to Floating Point)
+ case 0x4: /* MTC1 (Move Word to Floating Point) */
DIP("mtc1 r%d, f%d", rt, fs);
- putFReg(fs, unop(Iop_ReinterpI32asF32, getIReg(rt)));
+ if (mode64) {
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_F32);
+ assign(t0, unop(Iop_64to32, getIReg(rt)));
+ assign(t1, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+
+ putFReg(fs, mkWidenFromF32(tyF, mkexpr(t1)));
+ } else
+ putFReg(fs, unop(Iop_ReinterpI32asF32, getIReg(rt)));
+ break;
+
+ case 0x5: /* Doubleword Move to Floating Point DMTC1; MIPS64 */
+ DIP("dmtc1 r%d, f%d", rt, fs);
+ vassert(mode64);
+ putFReg(fs, unop(Iop_ReinterpI64asF64, getIReg(rt)));
break;
- case 0x0: //MFC1
+ case 0x0: /* MFC1 */
DIP("mfc1 r%d, f%d", rt, fs);
- putIReg(rt, unop(Iop_ReinterpF32asI32, getFReg(fs)));
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+ assign(t1, unop(Iop_64to32, mkexpr(t0)));
+ putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True));
+ } else
+ putIReg(rt, unop(Iop_ReinterpF32asI32, getFReg(fs)));
break;
- case 0x6: //CTC1
+ case 0x1: /* Doubleword Move from Floating Point DMFC1;
+ MIPS64 */
+ DIP("dmfc1 r%d, f%d", rt, fs);
+ putIReg(rt, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+ break;
+
+ case 0x6: /* CTC1 */
DIP("ctc1 r%d, f%d", rt, fs);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t5 = newTemp(Ity_I32);
t6 = newTemp(Ity_I32);
assign(t0, mkNarrowTo32(ty, getIReg(rt)));
- if (fs == 25) { //FCCR
+ if (fs == 25) { /* FCCR */
assign(t1, binop(Iop_Shl32, binop(Iop_And32, mkexpr(t0),
mkU32(0x000000FE)), mkU8(24)));
assign(t2, binop(Iop_And32, mkexpr(t0),
putFCSR(binop(Iop_Or32, binop(Iop_Or32, mkexpr(t1),
mkexpr(t2)), binop(Iop_Or32, mkexpr(t3),
mkexpr(t4))));
- } else if (fs == 26) { //FEXR
+ } else if (fs == 26) { /* FEXR */
assign(t1, binop(Iop_And32, getFCSR(), mkU32(0xFFFC0000)));
assign(t2, binop(Iop_And32, mkexpr(t0),
mkU32(0x0003F000)));
putFCSR(mkexpr(t0));
}
break;
- case 0x2: //CFC1
+ case 0x2: /* CFC1 */
DIP("cfc1 r%d, f%d", rt, fs);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
default:
goto decode_failure;
}
- break; //case 0x0: //add.fmt
+ break;
- case 0x21: //CVT.D
+ case 0x21: /* CVT.D */
switch (fmt) {
- case 0x10: //S
- DIP("cvt.d.s f%d, f%d", fd, fs);
- putDReg(fd, unop(Iop_F32toF64, getFReg(fs)));
- break;
+ case 0x10: /* S */
+ DIP("cvt.d.s f%d, f%d", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_F32);
+ /* get lo half of FPR */
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+ assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+ assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+ putFReg(fd, unop(Iop_F32toF64, mkexpr(t3)));
+ break;
+ } else {
+ putDReg(fd, unop(Iop_F32toF64, getFReg(fs)));
+ break;
+ }
- case 0x14:
- { //W
+ case 0x14:
DIP("cvt.d.w %d, %d\n", fd, fs);
- t0 = newTemp(Ity_I32);
- assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs)));
- putDReg(fd, unop(Iop_I32StoF64, mkexpr(t0)));
- }
- break;
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_F32);
+ /* get lo half of FPR */
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+ assign(t1, unop(Iop_64to32, mkexpr(t0)));
+ putDReg(fd,unop(Iop_I32StoF64, mkexpr(t1)));
+ break;
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs)));
+ putDReg(fd, unop(Iop_I32StoF64, mkexpr(t0)));
+ break;
+ }
- default:
- goto decode_failure;
+ case 0x15: { /* L */
+ if (mode64) {
+ DIP("cvt.d.l %d, %d\n", fd, fs);
+ t0 = newTemp(Ity_I64);
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+ putFReg(fd, binop(Iop_I64StoF64,
+ get_IR_roundingmode(), mkexpr(t0)));
+ break;
+ } else
+ goto decode_failure;
+ }
+ default:
+ goto decode_failure;
}
- break; //CVT.D
+ break; /* CVT.D */
- case 0x20: //cvt.s
+ case 0x20: /* cvt.s */
switch (fmt) {
- case 0x14: //W
- DIP("cvt.s.w %d, %d\n", fd, fs);
- t0 = newTemp(Ity_I32);
- assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs)));
- putFReg(fd, binop(Iop_I32StoF32, get_IR_roundingmode(),
- mkexpr(t0)));
- break;
+ case 0x14: /* W */
+ DIP("cvt.s.w %d, %d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_F32);
+ /* get lo half of FPR */
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+ assign(t1, unop(Iop_64to32, mkexpr(t0)));
+ putFReg(fd, mkWidenFromF32(tyF, binop(Iop_I32StoF32,
+ get_IR_roundingmode(), mkexpr(t1))));
+ break;
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs)));
+ putFReg(fd, binop(Iop_I32StoF32, get_IR_roundingmode(),
+ mkexpr(t0)));
+ break;
+ }
+
+ case 0x11: /* D */
+ DIP("cvt.s.d %d, %d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_F32);
+ assign(t0, binop(Iop_F64toF32, get_IR_roundingmode(),
+ getFReg(fs)));
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t0)));
+ } else
+ putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
+ getDReg(fs)));
+ break;
- case 0x11: //D
- DIP("cvt.s.d %d, %d\n", fd, fs);
- putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
- getDReg(fs)));
- break;
+ case 0x15: /* L */
+ DIP("cvt.s.l %d, %d\n", fd, fs);
+ t0 = newTemp(Ity_I64);
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
- default:
- goto decode_failure;
+ putFReg(fd, mkWidenFromF32(tyF, binop(Iop_I64StoF32,
+ get_IR_roundingmode(), mkexpr(t0))));
+ break;
+
+ default:
+ goto decode_failure;
}
- break; //cvt.s
+ break; /* cvt.s */
- case 0x24: //cvt.w
+ case 0x24: /* cvt.w */
switch (fmt) {
- case 0x10: //S
+ case 0x10: /* S */
DIP("cvt.w.s %d, %d\n", fd, fs);
- putFReg(fd, binop(Iop_RoundF32toInt, get_IR_roundingmode(),
- getFReg(fs)));
+ if (mode64) {
+ putFReg(fd, mkWidenFromF32(tyF, binop(Iop_RoundF32toInt,
+ get_IR_roundingmode(), getLoFromF64(tyF,
+ getFReg(fs)))));
+ } else
+ putFReg(fd, binop(Iop_RoundF32toInt, get_IR_roundingmode(),
+ getFReg(fs)));
break;
case 0x11:
- { //D
- DIP("cvt.w.d %d, %d\n", fd, fs);
+ DIP("cvt.w.d %d, %d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_F32);
+ assign(t0, binop(Iop_F64toI32S, get_IR_roundingmode(),
+ getFReg(fs)));
+ assign(t1, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1)));
+ } else {
t0 = newTemp(Ity_I32);
assign(t0, binop(Iop_F64toI32S, get_IR_roundingmode(),
}
break;
- case 0x09: //TRUNC.L
+ case 0x25: /* cvt.l */
switch (fmt) {
- case 0x10: //S
- DIP("trunc.l.s %d, %d\n", fd, fs);
- goto decode_failure;
+ case 0x10: /* S */
+ DIP("cvt.l.s %d, %d\n", fd, fs);
+ t0 = newTemp(Ity_I64);
- case 0x11: //D
- DIP("trunc.l.d %d, %d\n", fd, fs);
- goto decode_failure;
+ assign(t0, binop(Iop_F32toI64S, get_IR_roundingmode(),
+ getLoFromF64(Ity_F64, getFReg(fs))));
- default:
- goto decode_failure;
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+ break;
+
+ case 0x11: { /* D */
+ DIP("cvt.l.d %d, %d\n", fd, fs);
+ putFReg(fd, binop(Iop_RoundF64toInt,
+ get_IR_roundingmode(), getFReg(fs)));
+ break;
+ }
+ default:
+ goto decode_failure;
}
- break; //trunc.l
+ break;
- case 0x0C: //ROUND.W.fmt
+ case 0x0B: /* FLOOR.L.fmt */
switch (fmt) {
- case 0x10: //S
- DIP("round.w.s f%d, f%d\n", fd, fs);
- putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x0),
- getFReg(fs)));
- break;
+ case 0x10: /* S */
+ DIP("floor.l.s %d, %d\n", fd, fs);
+ t0 = newTemp(Ity_I64);
- case 0x11: //D
- DIP("round.w.d f%d, f%d\n", fd, fs);
- t0 = newTemp(Ity_I32);
-
- assign(t0, binop(Iop_F64toI32S, mkU32(0x0), getDReg(fs)));
-
- putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
- break;
+ assign(t0, binop(Iop_F32toI64S, mkU32(0x1),
+ getLoFromF64(Ity_F64, getFReg(fs))));
- default:
- goto decode_failure;
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+ break;
+ case 0x11: /* D */
+ DIP("floor.l.d %d, %d\n", fd, fs);
+ putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x1),
+ getFReg(fs)));
+ break;
+ default:
+ goto decode_failure;
}
- break; //ROUND.W.fmt
+ break;
- case 0x0F: //FLOOR.W.fmt
+ case 0x0C: /* ROUND.W.fmt */
switch (fmt) {
- case 0x10: //S
- DIP("floor.w.s f%d, f%d\n", fd, fs);
- putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x1),
- getFReg(fs)));
- break;
+ case 0x10: /* S */
+ DIP("round.w.s f%d, f%d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_F32);
+ /* get lo half of FPR */
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+ assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+ assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+ assign(t4, binop(Iop_RoundF32toInt, mkU32(0x0),
+ mkexpr(t3)));
+
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
+ break;
+ } else {
+ putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x0),
+ getFReg(fs)));
+ break;
+ }
- case 0x11: //D
- DIP("floor.w.d f%d, f%d\n", fd, fs);
- t0 = newTemp(Ity_I32);
+ case 0x11: /* D */
+ DIP("round.w.d f%d, f%d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_F64toI32S, mkU32(0x0),
+ getDReg(fs)));
+ putFReg(fd, mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32, mkexpr(t0))));
+ break;
+ } else {
+ t0 = newTemp(Ity_I32);
+
+ assign(t0, binop(Iop_F64toI32S, mkU32(0x0),
+ getDReg(fs)));
+
+ putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+ break;
+ }
+ default:
+ goto decode_failure;
- assign(t0, binop(Iop_F64toI32S, mkU32(0x1), getDReg(fs)));
+ }
+ break; /* ROUND.W.fmt */
- putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
- break;
+ case 0x0F: /* FLOOR.W.fmt */
+ switch (fmt) {
+ case 0x10: /* S */
+ DIP("floor.w.s f%d, f%d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_F32);
+ /* get lo half of FPR */
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+ assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+ assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+ assign(t4, binop(Iop_RoundF32toInt, mkU32(0x1),
+ mkexpr(t3)));
+
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
+ break;
+ } else {
+ putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x1),
+ getFReg(fs)));
+ break;
+ }
- default:
- goto decode_failure;
+ case 0x11: /* D */
+ DIP("floor.w.d f%d, f%d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_F64toI32S, mkU32(0x1),
+ getDReg(fs)));
+ putFReg(fd, mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32, mkexpr(t0))));
+ break;
+ } else {
+ t0 = newTemp(Ity_I32);
+
+ assign(t0, binop(Iop_F64toI32S, mkU32(0x1),
+ getDReg(fs)));
+
+ putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+ break;
+ }
+ default:
+ goto decode_failure;
}
- break; //FLOOR.W.fmt
+ break; /* FLOOR.W.fmt */
- case 0x0D: //TRUNC.W
+ case 0x0D: /* TRUNC.W */
switch (fmt) {
- case 0x10: //S
- DIP("trunc.w.s %d, %d\n", fd, fs);
- putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x3),
- getFReg(fs)));
- break;
+ case 0x10: /* S */
+ DIP("trunc.w.s %d, %d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_F32);
+ /* get lo half of FPR */
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+ assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+ assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+ assign(t4, binop(Iop_RoundF32toInt, mkU32(0x3),
+ mkexpr(t3)));
+
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
+ break;
+ } else {
+ putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x3),
+ getFReg(fs)));
+ break;
+ }
+ case 0x11: /* D */
+ DIP("trunc.w.d %d, %d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I32);
- case 0x11: //D
- DIP("trunc.w.d %d, %d\n", fd, fs);
- t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_F64toI32S, mkU32(0x3),
+ getFReg(fs)));
- assign(t0, binop(Iop_F64toI32S, mkU32(0x3), getDReg(fs)));
+ putFReg(fd, mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32, mkexpr(t0))));
+ break;
+ } else {
+ t0 = newTemp(Ity_I32);
- putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
- break;
+ assign(t0, binop(Iop_F64toI32S, mkU32(0x3),
+ getDReg(fs)));
- default:
- goto decode_failure;
+ putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+ break;
+ }
+ default:
+ goto decode_failure;
}
break;
- case 0x0E: //CEIL.W.fmt
- switch (fmt) {
- case 0x10: //S
- DIP("ceil.w.s %d, %d\n", fd, fs);
- putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x2),
- getFReg(fs)));
- break;
-
- case 0x11: //D
- DIP("ceil.w.d %d, %d\n", fd, fs);
- t0 = newTemp(Ity_I32);
- assign(t0, binop(Iop_F64toI32S, mkU32(0x2), getDReg(fs)));
-
- putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
- break;
+ case 0x0E: /* CEIL.W.fmt */
+ switch (fmt) {
+ case 0x10: /* S */
+ DIP("ceil.w.s %d, %d\n", fd, fs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_F32);
+ /* get lo half of FPR */
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+ assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+ assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+ assign(t4, binop(Iop_RoundF32toInt, mkU32(0x2),
+ mkexpr(t3)));
+
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
+ } else
+ putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x2),
+ getFReg(fs)));
+ break;
- default:
- goto decode_failure;
+ case 0x11: /* D */
+ DIP("ceil.w.d %d, %d\n", fd, fs);
+ if (!mode64) {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_F64toI32S, mkU32(0x2),
+ getDReg(fs)));
+ putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+ break;
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_F64toI32S, mkU32(0x2),
+ getDReg(fs)));
+ putFReg(fd, mkWidenFromF32(tyF,
+ unop(Iop_ReinterpI32asF32, mkexpr(t0))));
+ break;
+ }
+ default:
+ goto decode_failure;
}
break;
- case 0x0A: //CEIL.L.fmt
+
+ case 0x0A: /* CEIL.L.fmt */
switch (fmt) {
- case 0x10: //S
- DIP("ceil.l.s %d, %d\n", fd, fs);
- goto decode_failure;
+ case 0x10: /* S */
+ DIP("ceil.l.s %d, %d\n", fd, fs);
+ t0 = newTemp(Ity_I64);
- case 0x11: //D
- DIP("ceil.l.d %d, %d\n", fd, fs);
+ assign(t0, binop(Iop_F32toI64S, mkU32(0x2),
+ getLoFromF64(Ity_F64, getFReg(fs))));
- goto decode_failure;
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+ break;
- default:
- goto decode_failure;
+ case 0x11: /* D */
+ DIP("ceil.l.d %d, %d\n", fd, fs);
+ putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x2),
+ getFReg(fs)));
+ break;
+
+ default:
+ goto decode_failure;
}
break;
- case 0x16: //RSQRT.fmt
+ case 0x16: /* RSQRT.fmt */
switch (fmt) {
- case 0x10:
- { //S
+ case 0x10: { /* S */
DIP("rsqrt.s %d, %d\n", fd, fs);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32, rm,
getFReg(fs))))));
break;
}
- case 0x11:
- { //D
+ case 0x11: { /* D */
DIP("rsqrt.d %d, %d\n", fd, fs);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, triop(Iop_DivF64, rm,
binop(Iop_SqrtF64, rm, getDReg(fs))));
break;
}
- default:
- goto decode_failure;
+ default:
+ goto decode_failure;
}
break;
}
}
- break; /*COP1 */
- case 0x10: /* COP0 */
- if (rs == 0) { /* MFC0 */
+ break; /* COP1 */
+ case 0x10: /* COP0 */
+ if (rs == 0) { /* MFC0 */
DIP("mfc0 r%d, r%d, %d", rt, rd, sel);
-
IRTemp val = newTemp(Ity_I32);
IRExpr** args = mkIRExprVec_2 (mkU32(rd), mkU32(sel));
IRDirty *d = unsafeIRDirty_1_N(val,
"mips32_dirtyhelper_mfc0",
&mips32_dirtyhelper_mfc0,
args);
-
stmt(IRStmt_Dirty(d));
putIReg(rt, mkexpr(val));
+ } else if (rs == 1) {
+ /* Doubleword Move from Coprocessor 0 - DMFC0; MIPS64 */
+ DIP("dmfc0 r%d, r%d, %d", rt, rd, sel);
+ IRTemp val = newTemp(Ity_I64);
+ IRExpr** args = mkIRExprVec_2 (mkU64(rd), mkU64(sel));
+ IRDirty *d = unsafeIRDirty_1_N(val,
+ 0,
+ "mips64_dirtyhelper_dmfc0",
+ &mips64_dirtyhelper_dmfc0,
+ args);
+ stmt(IRStmt_Dirty(d));
+ putDReg(rt, mkexpr(val));
} else
goto decode_failure;
break;
- case 0x31: /* LWC1 */
- /* Load Word to Floating Point - LWC1 (MIPS32) */
- LOAD_STORE_PATTERN;
- putFReg(ft, load(Ity_F32, mkexpr(t1)));
+ case 0x31: /* LWC1 */
+ /* Load Word to Floating Point - LWC1 (MIPS32) */
DIP("lwc1 f%d, %d(r%d)", ft, imm, rs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_F32);
+ t2 = newTemp(Ity_I64);
+ /* new LO */
+ assign(t0, binop(Iop_Add64, getIReg(rs),
+ mkU64(extend_s_16to64(imm))));
+ assign(t1, load(Ity_F32, mkexpr(t0)));
+ assign(t2, mkWidenFrom32(ty, unop(Iop_ReinterpF32asI32,
+ mkexpr(t1)), True));
+ putFReg(ft, unop(Iop_ReinterpI64asF64, mkexpr(t2)));
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_Add32, getIReg(rs),
+ mkU32(extend_s_16to32(imm))));
+ putFReg(ft, load(Ity_F32, mkexpr(t0)));
+ }
break;
- case 0x39: /* SWC1 */
- LOAD_STORE_PATTERN;
- store(mkexpr(t1), getFReg(ft));
+ case 0x39: /* SWC1 */
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t2 = newTemp(Ity_I32);
+ LOAD_STORE_PATTERN;
+ assign(t0, unop(Iop_ReinterpF64asI64, getFReg(ft)));
+ assign(t2, unop(Iop_64to32, mkexpr(t0)));
+ store(mkexpr(t1), unop(Iop_ReinterpI32asF32, mkexpr(t2)));
+ } else {
+ LOAD_STORE_PATTERN;
+ store(mkexpr(t1), getFReg(ft));
+ }
DIP("swc1 f%d, %d(r%d)", ft, imm, rs);
break;
- case 0x33: /* PREF */
+ case 0x33: /* PREF */
DIP("pref");
break;
case 0x35:
/* Load Doubleword to Floating Point - LDC1 (MIPS32) */
LOAD_STORE_PATTERN;
- putDReg(ft, load(Ity_F64, mkexpr(t1)));
- DIP("ldc1 f%d, %d(%d) \n", rt, imm, rs);
+ if (mode64)
+ putFReg(ft, load(Ity_F64, mkexpr(t1)));
+ else
+ putDReg(ft, load(Ity_F64, mkexpr(t1)));
+ DIP("ldc1 f%d, %d(%d)", rt, imm, rs);
break;
case 0x3D:
/* Store Doubleword from Floating Point - SDC1 */
LOAD_STORE_PATTERN;
- store(mkexpr(t1), getDReg(ft));
+ if (mode64)
+ store(mkexpr(t1), getFReg(ft));
+ else
+ store(mkexpr(t1), getDReg(ft));
DIP("sdc1 f%d, %d(%d)", ft, imm, rs);
break;
- case 0x23: /* LW */
+ case 0x23: /* LW */
DIP("lw r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
putIReg(rt, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), True));
break;
- case 0x20: /* LB */
+ case 0x20: /* LB */
DIP("lb r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
- putIReg(rt, unop(Iop_8Sto32, load(Ity_I8, mkexpr(t1))));
+ if (mode64)
+ putIReg(rt, unop(Iop_8Sto64, load(Ity_I8, mkexpr(t1))));
+ else
+ putIReg(rt, unop(Iop_8Sto32, load(Ity_I8, mkexpr(t1))));
break;
- case 0x24: /* LBU */
+ case 0x24: /* LBU */
DIP("lbu r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
- putIReg(rt, unop(Iop_8Uto32, load(Ity_I8, mkexpr(t1))));
+ if (mode64)
+ putIReg(rt, unop(Iop_8Uto64, load(Ity_I8, mkexpr(t1))));
+ else
+ putIReg(rt, unop(Iop_8Uto32, load(Ity_I8, mkexpr(t1))));
break;
- case 0x21: /* LH */
+ case 0x21: /* LH */
DIP("lh r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
- putIReg(rt, unop(Iop_16Sto32, load(Ity_I16, mkexpr(t1))));
+ if (mode64)
+ putIReg(rt, unop(Iop_16Sto64, load(Ity_I16, mkexpr(t1))));
+ else
+ putIReg(rt, unop(Iop_16Sto32, load(Ity_I16, mkexpr(t1))));
break;
- case 0x25: /* LHU */
+ case 0x25: /* LHU */
DIP("lhu r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
- putIReg(rt, unop(Iop_16Uto32, load(Ity_I16, mkexpr(t1))));
+ if (mode64)
+ putIReg(rt, unop(Iop_16Uto64, load(Ity_I16, mkexpr(t1))));
+ else
+ putIReg(rt, unop(Iop_16Uto32, load(Ity_I16, mkexpr(t1))));
break;
- case 0x0F: /* LUI */
+ case 0x0F: /* LUI */
p = (imm << 16);
DIP("lui rt: %d, imm: %d, imm << 16: %d", rt, imm, p);
if ((vex_traceflags & VEX_TRACE_FE) && !mode64)
ppIRExpr(mkU32(p));
- putIReg(rt, mkU32(p));
+ if (mode64)
+ putIReg(rt, mkU64(extend_s_32to64(p)));
+ else
+ putIReg(rt, mkU32(p));
break;
- case 0x13: /* COP1X */
+ case 0x13: /* COP1X */
switch (function) {
- case 0x0: { /* LWXC1 */
+ case 0x0: { /* LWXC1 */
/* Load Word Indexed to Floating Point - LWXC1 (MIPS32r2) */
DIP("lwxc1 f%d, r%d(r%d) \n", fd, rt, rs);
- t0 = newTemp(Ity_I32);
- assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
- putFReg(fd, load(Ity_F32, mkexpr(t0)));
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I64);
+ t3 = newTemp(Ity_F32);
+ t4 = newTemp(Ity_I64);
+
+ /* new LO */
+ assign(t2, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
+ assign(t3, load(Ity_F32, mkexpr(t2)));
+
+ assign(t4, mkWidenFrom32(ty, unop(Iop_ReinterpF32asI32,
+ mkexpr(t3)), True));
+
+ putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t4)));
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+ putFReg(fd, load(Ity_F32, mkexpr(t0)));
+ }
break;
}
- case 0x1: { /* LDXC1 */
- /* Load Doubleword Indexed to Floating Point - LDXC1 (MIPS32r2) */
- t0 = newTemp(Ity_I32);
- assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+ case 0x1: { /* LDXC1 */
+ /* Load Doubleword Indexed to Floating Point
+ LDXC1 (MIPS32r2 and MIPS64) */
+ if (mode64) {
+ DIP("ldxc1 f%d, r%d(r%d) \n", fd, rt, rs);
+ t0 = newTemp(Ity_I64);
+ assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
+ putFReg(fd, load(Ity_F64, mkexpr(t0)));
+ break;
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
- t1 = newTemp(Ity_I32);
- assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4)));
+ t1 = newTemp(Ity_I32);
+ assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4)));
#if defined (_MIPSEL)
- putFReg(fd, load(Ity_F32, mkexpr(t0)));
- putFReg(fd + 1, load(Ity_F32, mkexpr(t1)));
+ putFReg(fd, load(Ity_F32, mkexpr(t0)));
+ putFReg(fd + 1, load(Ity_F32, mkexpr(t1)));
#elif defined (_MIPSEB)
- putFReg(fd + 1, load(Ity_F32, mkexpr(t0)));
- putFReg(fd, load(Ity_F32, mkexpr(t1)));
+ putFReg(fd + 1, load(Ity_F32, mkexpr(t0)));
+ putFReg(fd, load(Ity_F32, mkexpr(t1)));
#endif
- DIP("ldxc1 f%d, r%d(r%d) \n", fd, rt, rs);
- break;
+ break;
+ }
}
- case 0x5: // Load Doubleword Indexed Unaligned
- // to Floating Point - LUXC1; MIPS32r2
+ case 0x5: /* Load Doubleword Indexed Unaligned to Floating Point - LUXC1;
+ MIPS32r2 */
DIP("luxc1 f%d, r%d(r%d) \n", fd, rt, rs);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
- assign(t1, binop(Iop_And64, mkexpr(t0), mkU64(0xfffffffffffffff8ULL)));
+ assign(t1, binop(Iop_And64, mkexpr(t0),
+ mkU64(0xfffffffffffffff8ULL)));
putFReg(fd, load(Ity_F64, mkexpr(t1)));
break;
- case 0x8: { /* SWXC1 */
- /* Store Word Indexed from Floating Point - SWXC1 */
- t0 = newTemp(Ity_I32);
- assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+ case 0x8: { /* Store Word Indexed from Floating Point - SWXC1 */
+ DIP("swxc1 f%d, r%d(r%d)", ft, rt, rs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
- store(mkexpr(t0), getFReg(fs));
+ store(mkexpr(t0), getLoFromF64(tyF, getFReg(fs)));
- DIP("swxc1 f%d, r%d(r%d)", ft, rt, rs);
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+
+ store(mkexpr(t0), getFReg(fs));
+ }
break;
}
- case 0x9: { /* SDXC1 */
- /* Store Doubleword Indexed from Floating Point - SDXC1 */
- t0 = newTemp(Ity_I32);
- assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+ case 0x9: { /* Store Doubleword Indexed from Floating Point - SDXC1 */
+ DIP("sdc1 f%d, %d(%d)", ft, imm, rs);
+ if (mode64) {
+ t0 = newTemp(Ity_I64);
+ assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
+ store(mkexpr(t0), getFReg(fs));
+ } else {
+ t0 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
- t1 = newTemp(Ity_I32);
- assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4)));
+ t1 = newTemp(Ity_I32);
+ assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4)));
#if defined (_MIPSEL)
- store(mkexpr(t0), getFReg(fs));
- store(mkexpr(t1), getFReg(fs + 1));
+ store(mkexpr(t0), getFReg(fs));
+ store(mkexpr(t1), getFReg(fs + 1));
#elif defined (_MIPSEB)
- store(mkexpr(t0), getFReg(fs + 1));
- store(mkexpr(t1), getFReg(fs));
+ store(mkexpr(t0), getFReg(fs + 1));
+ store(mkexpr(t1), getFReg(fs));
#endif
-
- DIP("sdxc1 f%d, %d(%d)", ft, imm, rs);
+ }
break;
}
+ case 0xD: /* Store Doubleword Indexed Unaligned from Floating Point -
+ SUXC1; MIPS64 MIPS32r2 */
+ DIP("suxc1 f%d, r%d(r%d) \n", fd, rt, rs);
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I64);
+ assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
+ assign(t1, binop(Iop_And64, mkexpr(t0), mkU64(0xfffffffffffffff8)));
+ store(mkexpr(t1), getFReg(fs));
+ break;
+
case 0x0F: {
DIP("prefx");
break;
}
- case 0x20: { /* MADD.S */
- DIP("madd.s f%d, f%d, f%d, f%d", fmt, ft, fs, fd);
+ case 0x20: { /* MADD.S */
+ DIP("madd.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F32);
- assign(t1, triop(Iop_MulF32, rm, getLoFromF64(tyF, getFReg(fs)),
- getLoFromF64(tyF, getFReg(ft))));
-
- putFReg(fd, mkWidenFromF32(tyF, triop(Iop_AddF32, rm, mkexpr(t1),
- getLoFromF64(tyF, getFReg(fmt)))));
- break; /* MADD.S */
+ assign(t1, qop(Iop_MAddF32, rm,
+ getLoFromF64(tyF, getFReg(fmt)),
+ getLoFromF64(tyF, getFReg(fs)),
+ getLoFromF64(tyF, getFReg(ft))));
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1)));
+ break; /* MADD.S */
}
- case 0x21: { /* MADD.D */
- DIP("madd.d f%d, f%d, f%d, f%d", fmt, ft, fs, fd);
+ case 0x21: { /* MADD.D */
+ DIP("madd.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
- t1 = newTemp(Ity_F64);
- assign(t1, triop(Iop_MulF64, rm, getDReg(fs), getDReg(ft)));
-
- putDReg(fd, triop(Iop_AddF64, rm, mkexpr(t1), getDReg(fmt)));
- break; /* MADD.D */
+ putDReg(fd, qop(Iop_MAddF64, rm, getDReg(fmt), getDReg(fs),
+ getDReg(ft)));
+ break; /* MADD.D */
}
- case 0x28: { /* MSUB.S */
- DIP("msub.s f%d, f%d, f%d, f%d", fmt, ft, fs, fd);
+ case 0x28: { /* MSUB.S */
+ DIP("msub.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F32);
- assign(t1, triop(Iop_MulF32, rm, getLoFromF64(tyF, getFReg(fs)),
- getLoFromF64(tyF, getFReg(ft))));
-
- putFReg(fd, mkWidenFromF32(tyF, triop(Iop_SubF32, rm,
- mkexpr(t1), getLoFromF64(tyF, getFReg(fmt)))));
- break; /* MSUB.S */
+ assign(t1, qop(Iop_MSubF32, rm,
+ getLoFromF64(tyF, getFReg(fmt)),
+ getLoFromF64(tyF, getFReg(fs)),
+ getLoFromF64(tyF, getFReg(ft))));
+ putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1)));
+ break; /* MSUB.S */
}
- case 0x29: { /* MSUB.D */
- DIP("msub.d f%d, f%d, f%d, f%d", fmt, ft, fs, fd);
+ case 0x29: { /* MSUB.D */
+ DIP("msub.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
- t1 = newTemp(Ity_F64);
- assign(t1, triop(Iop_MulF64, rm, getDReg(fs), getDReg(ft)));
-
- putDReg(fd, triop(Iop_SubF64, rm, mkexpr(t1), getDReg(fmt)));
- break; /* MSUB.D */
+ putDReg(fd, qop(Iop_MSubF64, rm, getDReg(fmt), getDReg(fs),
+ getDReg(ft)));
+ break; /* MSUB.D */
}
- case 0x30: { /* NMADD.S */
- DIP("nmadd.s f%d, f%d, f%d, f%d", fmt, ft, fs, fd);
+ case 0x30: { /* NMADD.S */
+ DIP("nmadd.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F32);
- t2 = newTemp(Ity_F32);
- assign(t1, triop(Iop_MulF32, rm, getLoFromF64(tyF, getFReg(fs)),
- getLoFromF64(tyF, getFReg(ft))));
+ assign(t1, qop(Iop_MAddF32, rm,
+ getLoFromF64(tyF, getFReg(fmt)),
+ getLoFromF64(tyF, getFReg(fs)),
+ getLoFromF64(tyF, getFReg(ft))));
- assign(t2, triop(Iop_AddF32, rm, mkexpr(t1),
- getLoFromF64(tyF, getFReg(fmt))));
-
- putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t2))));
- break; /* NMADD.S */
+ putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t1))));
+ break; /* NMADD.S */
}
- case 0x31: { /* NMADD.D */
- DIP("nmadd.d f%d, f%d, f%d, f%d", fmt, ft, fs, fd);
+ case 0x31: { /* NMADD.D */
+ DIP("nmadd.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F64);
- t2 = newTemp(Ity_F64);
- assign(t1, triop(Iop_MulF64, rm, getDReg(fs), getDReg(ft)));
-
- assign(t2, triop(Iop_AddF64, rm, mkexpr(t1), getDReg(fmt)));
- putDReg(fd, unop(Iop_NegF64, mkexpr(t2)));
- break; /* NMADD.D */
+ assign(t1, qop(Iop_MAddF64, rm, getDReg(fmt), getDReg(fs),
+ getDReg(ft)));
+ putDReg(fd, unop(Iop_NegF64, mkexpr(t1)));
+ break; /* NMADD.D */
}
- case 0x38: { /* NMSUBB.S */
- DIP("nmsub.s f%d, f%d, f%d, f%d", fmt, ft, fs, fd);
+ case 0x38: { /* NMSUBB.S */
+ DIP("nmsub.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F32);
- t2 = newTemp(Ity_F32);
- assign(t1, triop(Iop_MulF32, rm, getLoFromF64(tyF, getFReg(fs)),
- getLoFromF64(tyF, getFReg(ft))));
-
- assign(t2, triop(Iop_SubF32, rm, mkexpr(t1), getLoFromF64(tyF,
- getFReg(fmt))));
+ assign(t1, qop(Iop_MSubF32, rm,
+ getLoFromF64(tyF, getFReg(fmt)),
+ getLoFromF64(tyF, getFReg(fs)),
+ getLoFromF64(tyF, getFReg(ft))));
- putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t2))));
- break; /* NMSUBB.S */
+ putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t1))));
+ break; /* NMSUBB.S */
}
- case 0x39: { /* NMSUBB.D */
- DIP("nmsub.d f%d, f%d, f%d, f%d", fmt, ft, fs, fd);
+ case 0x39: { /* NMSUBB.D */
+ DIP("nmsub.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F64);
- t2 = newTemp(Ity_F64);
- assign(t1, triop(Iop_MulF64, rm, getDReg(fs), getDReg(ft)));
-
- assign(t2, triop(Iop_SubF64, rm, mkexpr(t1), getDReg(fmt)));
- putDReg(fd, unop(Iop_NegF64, mkexpr(t2)));
- break; /* NMSUBB.D */
+ assign(t1, qop(Iop_MSubF64, rm, getDReg(fmt), getDReg(fs),
+ getDReg(ft)));
+ putDReg(fd, unop(Iop_NegF64, mkexpr(t1)));
+ break; /* NMSUBB.D */
}
default:
}
break;
- case 0x22: /* LWL */
+ case 0x22: /* LWL */
DIP("lwl r%d, %d(r%d)", rt, imm, rs);
- {
+ if (mode64) {
+ /* t1 = addr */
+#if defined (_MIPSEL)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Xor64, mkU64(0x3),
+ binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))));
+#endif
+
+ /* t2 = word addr */
+ /* t4 = addr mod 4 */
+ LWX_SWX_PATTERN64;
+
+ /* t3 = word content - shifted */
+ t3 = newTemp(Ity_I32);
+ assign(t3, binop(Iop_Shl32, mkNarrowTo32(ty, load(Ity_I64,
+ mkexpr(t2))), narrowTo(Ity_I8, binop(Iop_Shl32,
+ binop(Iop_Sub32, mkU32(0x03), mkexpr(t4)), mkU8(3)))));
+
+ /* rt content - adjusted */
+ t5 = newTemp(Ity_I32);
+ assign(t5, binop(Iop_And32, mkNarrowTo32(ty, getIReg(rt)),
+ binop(Iop_Shr32, mkU32(0xFFFFFFFF), narrowTo(Ity_I8,
+ binop(Iop_Shl32, binop(Iop_Add32, mkexpr(t4), mkU32(0x1)),
+ mkU8(0x3))))));
+
+ putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t5),
+ mkexpr(t3)), True));
+ } else {
/* t1 = addr */
t1 = newTemp(Ity_I32);
#if defined (_MIPSEL)
}
break;
- case 0x26: /* LWR */
+ case 0x26: /* LWR */
DIP("lwr r%d, %d(r%d)", rt, imm, rs);
- {
+ if (mode64) {
+ /* t1 = addr */
+#if defined (_MIPSEL)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+#endif
+
+ /* t2 = word addr */
+ /* t4 = addr mod 4 */
+ LWX_SWX_PATTERN64;
+
+ /* t3 = word content - shifted */
+ t3 = newTemp(Ity_I32);
+ assign(t3, binop(Iop_Shr32, mkNarrowTo32(ty, load(Ity_I64,mkexpr(t2))),
+ narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(3)))));
+
+ /* rt content - adjusted */
+ t5 = newTemp(Ity_I32);
+ assign(t5, binop(Iop_And32, mkNarrowTo32(ty, getIReg(rt)),
+ unop(Iop_Not32, binop(Iop_Shr32, mkU32(0xFFFFFFFF),
+ narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(0x3)))))));
+
+ putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t5),
+ mkexpr(t3)), True));
+ } else {
/* t1 = addr */
t1 = newTemp(Ity_I32);
#if defined (_MIPSEL)
}
break;
- case 0x2B: /* SW */
+ case 0x2B: /* SW */
DIP("sw r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
store(mkexpr(t1), mkNarrowTo32(ty, getIReg(rt)));
break;
- case 0x28: /* SB */
+ case 0x2C: { /* SDL rt, offset(base) MIPS64 */
+ DIP("sdl r%u,%d(r%u)\n", rt, (Int) imm, rs);
+ /* t1 = addr */
+#if defined (_MIPSEL)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Xor64, mkU64(0x7), binop(Iop_Add64, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+#endif
+
+ /* t2 = word addr */
+ /* t4 = addr mod 8 */
+ LWX_SWX_PATTERN64_1;
+
+ /* t3 = rt content - shifted */
+ t3 = newTemp(Ity_I64);
+ assign(t3, binop(Iop_Shr64, getIReg(rt), narrowTo(Ity_I8, binop(Iop_Shl64,
+ binop(Iop_Sub64, mkU64(0x07), mkexpr(t4)), mkU8(3)))));
+
+ /* word content - adjusted */
+ t5 = newTemp(Ity_I64);
+ t6 = newTemp(Ity_I64);
+ t7 = newTemp(Ity_I64);
+ t8 = newTemp(Ity_I64);
+
+ /* neg(shr(0xFFFFFFFFFFFFFFFF, mul(sub(7,n), 8))) */
+ assign(t5, binop(Iop_Mul64, binop(Iop_Sub64, mkU64(0x7), mkexpr(t4)),
+ mkU64(0x8)));
+
+ assign(t6, binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFF),
+ narrowTo(Ity_I8, mkexpr(t5))));
+ assign(t7, binop(Iop_Xor64, mkU64(0xFFFFFFFFFFFFFFFF), mkexpr(t6)));
+ assign(t8, binop(Iop_And64, load(Ity_I64, mkexpr(t2)), mkexpr(t7)));
+ store(mkexpr(t2), binop(Iop_Or64, mkexpr(t8), mkexpr(t3)));
+ break;
+ }
+
+ case 0x2D: {
+ /* SDR rt, offset(base) - MIPS64 */
+ vassert(mode64);
+ DIP("sdr r%u,%d(r%u)\n", rt, imm, rs);
+ /* t1 = addr */
+#if defined (_MIPSEL)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Xor64, mkU64(0x7), binop(Iop_Add64, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+#endif
+
+ /* t2 = word addr */
+ /* t4 = addr mod 8 */
+ LWX_SWX_PATTERN64_1;
+
+ /* t3 = rt content - shifted */
+ t3 = newTemp(Ity_I64);
+ assign(t3, binop(Iop_Shl64, getIReg(rt), narrowTo(Ity_I8,
+ binop(Iop_Shl64, mkexpr(t4), mkU8(3)))));
+
+ /* word content - adjusted */
+ t5 = newTemp(Ity_I64);
+ assign(t5, binop(Iop_And64, load(Ity_I64, mkexpr(t2)), unop(Iop_Not64,
+ binop(Iop_Shl64, mkU64(0xFFFFFFFFFFFFFFFF),
+ narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(0x3)))))));
+
+ store(mkexpr(t2), binop(Iop_Xor64, mkexpr(t5), mkexpr(t3)));
+ }
+
+ case 0x28: /* SB */
DIP("sb r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
store(mkexpr(t1), narrowTo(Ity_I8, getIReg(rt)));
break;
- case 0x29: /* SH */
+ case 0x29: /* SH */
DIP("sh r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
store(mkexpr(t1), narrowTo(Ity_I16, getIReg(rt)));
break;
- case 0x2A: /* SWL */
-
+ case 0x2A: /* SWL */
DIP("swl r%d, %d(r%d)", rt, imm, rs);
- {
+ if (mode64) {
+#if defined (_MIPSEL)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+#endif
+ /* t2 = word addr */
+ /* t4 = addr mod 4 */
+ LWX_SWX_PATTERN64;
+
+ /* t3 = rt content - shifted */
+ t3 = newTemp(Ity_I32);
+ assign(t3, binop(Iop_Shr32, mkNarrowTo32(ty, getIReg(rt)),
+ narrowTo(Ity_I8, binop(Iop_Shl32, binop(Iop_Sub32,
+ mkU32(0x03), mkexpr(t4)), mkU8(3)))));
+
+ /* word content - adjusted */
+ t5 = newTemp(Ity_I32);
+ t6 = newTemp(Ity_I32);
+ t7 = newTemp(Ity_I32);
+ t8 = newTemp(Ity_I32);
+
+ /* neg(shr(0xFFFFFFFF, mul(sub(3,n), 8))) */
+ assign(t5, binop(Iop_Mul32, binop(Iop_Sub32, mkU32(0x3), mkexpr(t4)),
+ mkU32(0x8)));
+
+ assign(t6, binop(Iop_Shr32, mkU32(0xFFFFFFFF), narrowTo(Ity_I8,
+ mkexpr(t5))));
+ assign(t7, binop(Iop_Xor32, mkU32(0xFFFFFFFF), mkexpr(t6)));
+ assign(t8, binop(Iop_And32, load(Ity_I32, mkexpr(t2)), mkexpr(t7)));
+ store(mkexpr(t2), binop(Iop_Or32, mkexpr(t8), mkexpr(t3)));
+ } else {
/* t1 = addr */
t1 = newTemp(Ity_I32);
#if defined (_MIPSEL)
assign(t1, binop(Iop_Xor32, mkU32(0x3), binop(Iop_Add32, getIReg(rs),
mkU32(extend_s_16to32(imm)))));
#endif
-
/* t2 = word addr */
/* t4 = addr mod 4 */
LWX_SWX_PATTERN;
t7 = newTemp(Ity_I32);
t8 = newTemp(Ity_I32);
- // neg(shr(0xFFFFFFFF, mul(sub(3,n), 8)))
+ /* neg(shr(0xFFFFFFFF, mul(sub(3,n), 8))) */
assign(t5, binop(Iop_Mul32, binop(Iop_Sub32, mkU32(0x3), mkexpr(t4)),
mkU32(0x8)));
}
break;
- case 0x2E: /* SWR */
+ case 0x2E: /* SWR */
DIP("swr r%d, %d(r%d)", rt, imm, rs);
- {
+ if (mode64) {
+ /* t1 = addr */
+#if defined (_MIPSEL)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+#endif
+
+ /* t2 = word addr */
+ /* t4 = addr mod 4 */
+ LWX_SWX_PATTERN64;
+
+ /* t3 = rt content - shifted */
+ t3 = newTemp(Ity_I32);
+ assign(t3, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rt)),
+ narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(3)))));
+
+ /* word content - adjusted */
+ t5 = newTemp(Ity_I32);
+ assign(t5, binop(Iop_And32, load(Ity_I32, mkexpr(t2)), unop(Iop_Not32,
+ binop(Iop_Shl32, mkU32(0xFFFFFFFF), narrowTo(Ity_I8,
+ binop(Iop_Shl32, mkexpr(t4), mkU8(0x3)))))));
+
+ store(mkexpr(t2), binop(Iop_Xor32, mkexpr(t5), mkexpr(t3)));
+ } else {
/* t1 = addr */
t1 = newTemp(Ity_I32);
#if defined (_MIPSEL)
}
break;
- case 0x1C: /*Special2 */
+ case 0x1C: /* Special2 */
switch (function) {
- case 0x02: { /* MUL */
+ case 0x02: { /* MUL */
DIP("mul r%d, r%d, r%d", rd, rs, rt);
- putIReg(rd, binop(Iop_Mul32, getIReg(rs), getIReg(rt)));
+ if (mode64) {
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+ IRTemp tmpRes = newTemp(Ity_I32);
+
+ assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+ assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+ assign(tmpRes, binop(Iop_Mul32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpRes), True));
+ } else
+ putIReg(rd, binop(Iop_Mul32, getIReg(rs), getIReg(rt)));
break;
}
- case 0x00: { /* MADD */
+ case 0x00: { /* MADD */
DIP("madd r%d, r%d", rs, rt);
- t1 = newTemp(Ity_I32);
- t2 = newTemp(Ity_I32);
- t3 = newTemp(Ity_I64);
- t4 = newTemp(Ity_I32);
- t5 = newTemp(Ity_I32);
- t6 = newTemp(Ity_I32);
+ if (mode64) {
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I64);
+ t5 = newTemp(Ity_I64);
+ t6 = newTemp(Ity_I32);
+
+ assign(t1, mkNarrowTo32(ty, getHI()));
+ assign(t2, mkNarrowTo32(ty, getLO()));
+
+ assign(t3, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)),
+ mkNarrowTo32(ty, getIReg(rt))));
+
+ assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2)));
+ assign(t5, binop(Iop_Add64, mkexpr(t3), mkexpr(t4)));
+
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True));
+ } else {
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I32);
+ t5 = newTemp(Ity_I32);
+ t6 = newTemp(Ity_I32);
- assign(t1, getHI());
- assign(t2, getLO());
+ assign(t1, getHI());
+ assign(t2, getLO());
- assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt)));
+ assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt)));
- assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32,
- mkexpr(t3))));
+ assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32,
+ mkexpr(t3))));
- assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4),
- unop(Iop_64to32, mkexpr(t3)))));
- assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1)));
+ assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4),
+ unop(Iop_64to32, mkexpr(t3)))));
+ assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1)));
- putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
- putLO(mkexpr(t4));
+ putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
+ putLO(mkexpr(t4));
+ }
break;
}
- case 0x01: { /* MADDU */
+ case 0x01: { /* MADDU */
DIP("maddu r%d, r%d", rs, rt);
- t1 = newTemp(Ity_I32);
- t2 = newTemp(Ity_I32);
- t3 = newTemp(Ity_I64);
- t4 = newTemp(Ity_I32);
- t5 = newTemp(Ity_I32);
- t6 = newTemp(Ity_I32);
+ if (mode64) {
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I64);
+ t5 = newTemp(Ity_I64);
+ t6 = newTemp(Ity_I32);
+
+ assign(t1, mkNarrowTo32(ty, getHI()));
+ assign(t2, mkNarrowTo32(ty, getLO()));
+
+ assign(t3, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)),
+ mkNarrowTo32(ty, getIReg(rt))));
+
+ assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2)));
+ assign(t5, binop(Iop_Add64, mkexpr(t3), mkexpr(t4)));
+
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True));
+ } else {
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I32);
+ t5 = newTemp(Ity_I32);
+ t6 = newTemp(Ity_I32);
- assign(t1, getHI());
- assign(t2, getLO());
+ assign(t1, getHI());
+ assign(t2, getLO());
- assign(t3, binop(Iop_MullU32, getIReg(rs), getIReg(rt)));
+ assign(t3, binop(Iop_MullU32, getIReg(rs), getIReg(rt)));
- assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32,
- mkexpr(t3))));
- assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4),
- unop(Iop_64to32, mkexpr(t3)))));
- assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1)));
+ assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32,
+ mkexpr(t3))));
+ assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4),
+ unop(Iop_64to32, mkexpr(t3)))));
+ assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1)));
- putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
- putLO(mkexpr(t4));
+ putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
+ putLO(mkexpr(t4));
+ }
break;
}
- case 0x04: { /* MSUB */
+ case 0x04: { /* MSUB */
DIP("msub r%d, r%d", rs, rt);
- t1 = newTemp(Ity_I32);
- t2 = newTemp(Ity_I32);
- t3 = newTemp(Ity_I64);
- t4 = newTemp(Ity_I32);
- t5 = newTemp(Ity_I1);
- t6 = newTemp(Ity_I32);
+ if (mode64) {
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I64);
+ t5 = newTemp(Ity_I64);
+ t6 = newTemp(Ity_I32);
+
+ assign(t1, mkNarrowTo32(ty, getHI()));
+ assign(t2, mkNarrowTo32(ty, getLO()));
+
+ assign(t3, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)),
+ mkNarrowTo32(ty, getIReg(rt))));
+
+ assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2)));
+ assign(t5, binop(Iop_Sub64, mkexpr(t4), mkexpr(t3)));
+
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True));
+ } else {
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I32);
+ t5 = newTemp(Ity_I1);
+ t6 = newTemp(Ity_I32);
- assign(t1, getHI());
- assign(t2, getLO());
+ assign(t1, getHI());
+ assign(t2, getLO());
- assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt)));
- assign(t4, unop(Iop_64to32, mkexpr(t3))); //new lo
+ assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt)));
+ assign(t4, unop(Iop_64to32, mkexpr(t3))); /* new lo */
- //if lo<lo(mul) hi = hi - 1
- assign(t5, binop(Iop_CmpLT32U, mkexpr(t2), mkexpr(t4)));
+ /* if lo<lo(mul) hi = hi - 1 */
+ assign(t5, binop(Iop_CmpLT32U, mkexpr(t2), mkexpr(t4)));
- assign(t6, IRExpr_ITE(mkexpr(t5),
- binop(Iop_Sub32, mkexpr(t1), mkU32(0x1)),
- mkexpr(t1)));
+ assign(t6, IRExpr_ITE(mkexpr(t5),
+ binop(Iop_Sub32, mkexpr(t1), mkU32(0x1)),
+ mkexpr(t1)));
- putHI(binop(Iop_Sub32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
- putLO(binop(Iop_Sub32, mkexpr(t2), mkexpr(t4)));
+ putHI(binop(Iop_Sub32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
+ putLO(binop(Iop_Sub32, mkexpr(t2), mkexpr(t4)));
+ }
break;
}
case 0x05: { /* MSUBU */
DIP("msubu r%d, r%d", rs, rt);
+ if (mode64) {
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I64);
+ t5 = newTemp(Ity_I64);
+ t6 = newTemp(Ity_I32);
+
+ assign(t1, mkNarrowTo32(ty, getHI()));
+ assign(t2, mkNarrowTo32(ty, getLO()));
+
+ assign(t3, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)),
+ mkNarrowTo32(ty, getIReg(rt))));
+
+ assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2)));
+ assign(t5, binop(Iop_Sub64, mkexpr(t4), mkexpr(t3)));
+
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True));
+ } else {
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I32);
+ t5 = newTemp(Ity_I1);
+ t6 = newTemp(Ity_I32);
+
+ assign(t1, getHI());
+ assign(t2, getLO());
+
+ assign(t3, binop(Iop_MullU32, getIReg(rs), getIReg(rt)));
+ assign(t4, unop(Iop_64to32, mkexpr(t3))); /* new lo */
+
+ /* if lo<lo(mul) hi = hi - 1 */
+ assign(t5, binop(Iop_CmpLT32U, mkexpr(t2), mkexpr(t4)));
+
+ assign(t6, IRExpr_ITE(mkexpr(t5),
+ binop(Iop_Sub32, mkexpr(t1), mkU32(0x1)),
+ mkexpr(t1)));
+
+ putHI(binop(Iop_Sub32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
+ putLO(binop(Iop_Sub32, mkexpr(t2), mkexpr(t4)));
+ }
+ break;
+ }
+
+ case 0x6: /* dmul MIPS64 - Netlogic */
+ DIP("dmul r%u, r%u, r%u\n", rd, rs, rt);
+ t0 = newTemp(Ity_I128);
+
+ assign(t0, binop(Iop_MullU64, getIReg(rs), getIReg(rt)));
+
+ putIReg(rd, unop(Iop_128to64, mkexpr(t0)));
+ break;
+
+ case 0x10: /* LDADDW - Load Add Word - Netlogic */
+ DIP("ldaddw r%u, r%u\n", rt, rs);
+ t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
t4 = newTemp(Ity_I32);
- t5 = newTemp(Ity_I1);
+ t5 = newTemp(Ity_I32);
t6 = newTemp(Ity_I32);
- assign(t1, getHI());
- assign(t2, getLO());
+ /* v = GPR[rt] */
+ assign(t0, mkNarrowTo32(ty, getIReg(rt)));
+
+ /* GPR[rt] = memory[base]; */
+ assign(t1, load(Ity_I32, getIReg(rs)));
+ putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True));
+
+ /* memory[base] = memory[base] + v; */
+ store(getIReg(rs), binop(Iop_Add32, mkexpr(t0), mkexpr(t1)));
+ break;
- assign(t3, binop(Iop_MullU32, getIReg(rs), getIReg(rt)));
- assign(t4, unop(Iop_64to32, mkexpr(t3))); //new lo
+ case 0x12: /* LDADDD - Load Add Doubleword - Netlogic */
+ DIP("ldaddw r%u, r%u\n", rt, rs);
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I64);
- //if lo<lo(mul) hi = hi - 1
- assign(t5, binop(Iop_CmpLT32U, mkexpr(t2), mkexpr(t4)));
+ /* v = GPR[rt] */
+ assign(t0, getIReg(rt));
- assign(t6, IRExpr_ITE(mkexpr(t5),
- binop(Iop_Sub32, mkexpr(t1), mkU32(0x1)),
- mkexpr(t1)));
+ /* GPR[rt] = memory[base]; */
+ assign(t1, load(Ity_I64, getIReg(rs)));
+ putIReg(rt, mkexpr(t1));
- putHI(binop(Iop_Sub32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
- putLO(binop(Iop_Sub32, mkexpr(t2), mkexpr(t4)));
+ /* memory[base] = memory[base] + v; */
+ store(getIReg(rs), binop(Iop_Add64, mkexpr(t0), mkexpr(t1)));
break;
- }
- case 0x20: { /* CLZ */
+ case 0x14: /* SWAPW - Swap Word - Netlogic */
+ DIP("swapw r%u, r%u\n", rt, rs);
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_I32);
+ assign(t0, mkNarrowTo32(ty, getIReg(rt)));
+ assign(t1, load(Ity_I32, getIReg(rs)));
+ putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True));
+ store(getIReg(rs), mkexpr(t0));
+ break;
+
+ case 0x16: /* SWAPD - Swap Double - Netlogic */
+ DIP("swapw r%u, r%u\n", rt, rs);
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I64);
+ assign(t0, getIReg(rt));
+ assign(t1, load(Ity_I64, getIReg(rs)));
+ putIReg(rt, mkexpr(t1));
+ store(getIReg(rs), mkexpr(t0));
+ break;
+
+ case 0x20: { /* CLZ */
DIP("clz r%d, r%d", rd, rs);
- t1 = newTemp(Ity_I1);
- assign(t1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0)));
- putIReg(rd, IRExpr_ITE(mkexpr(t1),
- mkU32(0x00000020),
- unop(Iop_Clz32, getIReg(rs))));
+ if (mode64) {
+ IRTemp tmpClz32 = newTemp(Ity_I32);
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+
+ assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+ assign(tmpClz32, unop(Iop_Clz32, mkexpr(tmpRs32)));
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpClz32), True));
+ } else {
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t1),
+ mkU32(0x00000020),
+ unop(Iop_Clz32, getIReg(rs))));
+ }
break;
}
- case 0x21: { /* CLO */
+ case 0x21: { /* CLO */
DIP("clo r%d, r%d", rd, rs);
+ if (mode64) {
+ IRTemp tmpClo32 = newTemp(Ity_I32);
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+ assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ32, mkexpr(tmpRs32), mkU32(0xffffffff)));
+ assign(tmpClo32, IRExpr_ITE(mkexpr(t1),
+ mkU32(0x00000020),
+ unop(Iop_Clz32, unop(Iop_Not32, mkexpr(tmpRs32)))));
+
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpClo32), True));
+ break;
+ } else {
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0xffffffff)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t1),
+ mkU32(0x00000020),
+ unop(Iop_Clz32,
+ unop(Iop_Not32, getIReg(rs)))));
+ break;
+ }
+ }
+
+ case 0x24: /* Count Leading Zeros in Doubleword - DCLZ; MIPS64 */
+ DIP("dclz r%d, r%d", rd, rs);
t1 = newTemp(Ity_I1);
- assign(t1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0xffffffff)));
+ assign(t1, binop(Iop_CmpEQ64, getIReg(rs), mkU64(0)));
putIReg(rd, IRExpr_ITE(mkexpr(t1),
- mkU32(0x00000020),
- unop(Iop_Clz32, unop(Iop_Not32, getIReg(rs)))));
+ mkU64(0x00000040),
+ unop(Iop_Clz64, getIReg(rs))));
+ break;
+
+ case 0x25: /* Count Leading Ones in Doubleword - DCLO; MIPS64 */
+ DIP("dclo r%d, r%d", rd, rs);
+ t1 = newTemp(Ity_I1);
+ assign(t1, binop(Iop_CmpEQ64, getIReg(rs),
+ mkU64(0xffffffffffffffff)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t1),
+ mkU64(0x40),
+ unop(Iop_Clz64, unop(Iop_Not64,
+ getIReg(rs)))));
break;
- }
default:
goto decode_failure;
}
break;
- case 0x1F: /*Special3 */
+ case 0x1F: /* Special3 */
switch (function) {
- case 0x3B:
- /*RDHWR*/ {
- DIP("rdhwr r%d, r%d", rt, rd);
+ case 0x01: {
+ /* Doubleword Extract Bit Field - DEXTM; MIPS64r2 */
+ msb = get_msb(cins);
+ lsb = get_lsb(cins);
+ size = msb + 1;
+ UInt srcPos = lsb;
+ UInt dstSz = msb + 33;
+ t1 = newTemp(Ity_I64);
+ DIP("dextm r%u, r%u, %d, %d\n", rt, rs, lsb, msb + 1);
+
+ UChar lsAmt = 64 - (srcPos + dstSz); /* left shift amount; */
+ UChar rsAmt = 64 - dstSz; /* right shift amount; */
+
+ assign(t1, binop(Iop_Shl64, getIReg(rs), mkU8(lsAmt)));
+ putIReg(rt, binop(Iop_Shr64, mkexpr(t1), mkU8(rsAmt)));
+
+ break;
+ }
+ case 0x02: {
+ /* Doubleword Extract Bit Field Upper - DEXTU; MIPS64r2 */
+ msb = get_msb(cins);
+ lsb = get_lsb(cins);
+ size = msb + 1;
+ UInt srcPos = lsb + 32;
+ UInt dstSz = msb + 1;
+ t1 = newTemp(Ity_I64);
+ DIP("dextu r%u, r%u, %d, %d\n", rt, rs, lsb + 32, msb + 1);
+
+ vassert(srcPos >= 32 && srcPos < 64);
+ vassert(dstSz > 0 && dstSz <= 32);
+ vassert((srcPos + dstSz) > 32 && (srcPos + dstSz) <= 64);
+
+ UChar lsAmt = 64 - (srcPos + dstSz); /* left shift amount; */
+ UChar rsAmt = 64 - dstSz; /* right shift amount; */
+
+ assign(t1, binop(Iop_Shl64, getIReg(rs), mkU8(lsAmt)));
+ putIReg(rt, binop(Iop_Shr64, mkexpr(t1), mkU8(rsAmt)));
+ break;
+ }
+ case 0x05: {
+ /* Doubleword Insert Bit Field Middle - DINSM; MIPS64r2 */
+ msb = get_msb(cins);
+ lsb = get_lsb(cins);
+ size = msb + 1;
+ UInt dstPos = lsb;
+ UInt srcSz = msb - lsb + 33;
+ t1 = newTemp(ty);
+ t2 = newTemp(ty);
+ t3 = newTemp(ty);
+ t4 = newTemp(ty);
+ IRTemp tmpT1 = newTemp(ty);
+ IRTemp tmpT2 = newTemp(ty);
+ IRTemp tmpT3 = newTemp(ty);
+ IRTemp tmpT4 = newTemp(ty);
+ IRTemp tmpT5 = newTemp(ty);
+ IRTemp tmpT6 = newTemp(ty);
+ IRTemp tmpT7 = newTemp(ty);
+ IRTemp tmpRs = newTemp(ty);
+ IRTemp tmpRt = newTemp(ty);
+ IRTemp tmpRd = newTemp(ty);
+
+ assign(tmpRs, getIReg(rs));
+ assign(tmpRt, getIReg(rt));
+ DIP("dinsm r%u, r%u, %d, %d\n", rt, rs, lsb, msb);
+
+ UChar lsAmt = dstPos + srcSz - 1; /* left shift amount; */
+ UChar rsAmt = dstPos + srcSz - 1; /* right shift amount; */
+
+ assign(t1, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt)));
+ assign(tmpT1, binop(Iop_Shr64, mkexpr(t1), mkU8(1)));
+ assign(t2, binop(Iop_Shl64, mkexpr(tmpT1), mkU8(lsAmt)));
+ assign(tmpT2, binop(Iop_Shl64, mkexpr(t2), mkU8(1)));
+
+ lsAmt = 63 - dstPos; /* left shift amount; */
+ rsAmt = 63 - dstPos; /* right shift amount; */
+
+ assign(t3, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt)));
+ assign(tmpT3, binop(Iop_Shl64, mkexpr(t3), mkU8(1)));
+ assign(t4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(rsAmt)));
+ assign(tmpT4, binop(Iop_Shr64, mkexpr(t4), mkU8(1)));
+
+ /* extract size from src register */
+ lsAmt = 64 - srcSz; /* left shift amount; */
+ rsAmt = 64 - (lsb + srcSz); /* right shift amount; */
+
+ assign(tmpT5, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt)));
+ assign(tmpT6, binop(Iop_Shr64, mkexpr(tmpT5), mkU8(rsAmt)));
+
+ assign(tmpT7, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT4)));
+ assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT6), mkexpr(tmpT7)));
+ putIReg(rt, mkexpr(tmpRd));
+ break;
+ }
+ case 0x06: {
+ /* Doubleword Insert Bit Field Upper - DINSU; MIPS64r2 */
+ msb = get_msb(cins);
+ lsb = get_lsb(cins);
+ size = msb + 1;
+ UInt dstPos = lsb + 32;
+ UInt srcSz = msb - lsb + 1;
+ IRTemp tmpT1 = newTemp(ty);
+ IRTemp tmpT2 = newTemp(ty);
+ IRTemp tmpT3 = newTemp(ty);
+ IRTemp tmpT4 = newTemp(ty);
+ IRTemp tmpT5 = newTemp(ty);
+ IRTemp tmpT6 = newTemp(ty);
+ IRTemp tmpT7 = newTemp(ty);
+ IRTemp tmpT8 = newTemp(ty);
+ IRTemp tmpT9 = newTemp(ty);
+ IRTemp tmpRs = newTemp(ty);
+ IRTemp tmpRt = newTemp(ty);
+ IRTemp tmpRd = newTemp(ty);
+
+ assign(tmpRs, getIReg(rs));
+ assign(tmpRt, getIReg(rt));
+ DIP("dinsu r%u, r%u, %d, %d\n", rt, rs, lsb, msb);
+
+ UChar lsAmt = 64 - srcSz; /* left shift amount; */
+ UChar rsAmt = 64 - (dstPos + srcSz); /* right shift amount; */
+ assign(tmpT1, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt)));
+ assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(rsAmt)));
+
+ lsAmt = 64 - dstPos; /* left shift amount; */
+ rsAmt = 64 - dstPos; /* right shift amount; */
+ assign(tmpT3, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt)));
+ assign(tmpT4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(rsAmt)));
+
+ lsAmt = dstPos; /* left shift amount; */
+ rsAmt = srcSz; /* right shift amount; */
+ assign(tmpT5, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt)));
+ assign(tmpT6, binop(Iop_Shr64, mkexpr(tmpT5), mkU8(lsAmt)));
+
+ assign(tmpT7, binop(Iop_Shl64, mkexpr(tmpT6), mkU8(rsAmt)));
+ assign(tmpT8, binop(Iop_Shl64, mkexpr(tmpT7), mkU8(lsAmt)));
+
+ assign(tmpT9, binop(Iop_Or64, mkexpr(tmpT8), mkexpr(tmpT4)));
+ assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT9)));
+ putIReg(rt, mkexpr(tmpRd));
+ break;
+ }
+ case 0x07: {
+ /* Doubleword Insert Bit Field - DINS; MIPS64r2 */
+ IRTemp tmp1 = newTemp(ty);
+ IRTemp tmpT1 = newTemp(ty);
+ IRTemp tmpT2 = newTemp(ty);
+ IRTemp tmpT3 = newTemp(ty);
+ IRTemp tmpT4 = newTemp(ty);
+ IRTemp tmpT5 = newTemp(ty);
+ IRTemp tmpT6 = newTemp(ty);
+ IRTemp tmpT7 = newTemp(ty);
+ IRTemp tmpT8 = newTemp(ty);
+ IRTemp tmpT9 = newTemp(ty);
+ IRTemp tmp = newTemp(ty);
+ IRTemp tmpRs = newTemp(ty);
+ IRTemp tmpRt = newTemp(ty);
+ IRTemp tmpRd = newTemp(ty);
+
+ assign(tmpRs, getIReg(rs));
+ assign(tmpRt, getIReg(rt));
+
+ msb = get_msb(cins);
+ lsb = get_lsb(cins);
+ size = msb + 1;
+ DIP("dins r%u, r%u, %d, %d\n", rt, rs, lsb,
+ msb - lsb + 1);
+ UChar lsAmt = 63 - lsb; /* left shift amount; */
+ UChar rsAmt = 63 - lsb; /* right shift amount; */
+ assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt)));
+ assign(tmpT1, binop(Iop_Shl64, mkexpr(tmp), mkU8(1)));
+ assign(tmp1, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(rsAmt)));
+ assign(tmpT2, binop(Iop_Shr64, mkexpr(tmp1), mkU8(1)));
+
+ lsAmt = msb; /* left shift amount; */
+ rsAmt = 1; /*right shift amount; */
+ assign(tmpT3, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt)));
+ assign(tmpT4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(lsAmt)));
+ assign(tmpT5, binop(Iop_Shl64, mkexpr(tmpT4), mkU8(rsAmt)));
+ assign(tmpT6, binop(Iop_Shl64, mkexpr(tmpT5), mkU8(lsAmt)));
+
+ lsAmt = 64 - (msb - lsb + 1); /* left shift amount; */
+ rsAmt = 64 - (msb + 1); /* right shift amount; */
+ assign(tmpT7, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt)));
+ assign(tmpT8, binop(Iop_Shr64, mkexpr(tmpT7), mkU8(rsAmt)));
+
+ assign(tmpT9, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT8)));
+ assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT6), mkexpr(tmpT9)));
+ putIReg(rt, mkexpr(tmpRd));
+ break;
+ }
+ case 0x24: /* DBSHFL */
+ lsb = get_lsb(cins);
+ IRTemp tmpRs = newTemp(ty);
+ IRTemp tmpRt = newTemp(ty);
+ IRTemp tmpRd = newTemp(ty);
+ assign(tmpRs, getIReg(rs));
+ assign(tmpRt, getIReg(rt));
+ switch (lsb) {
+ case 0x02: { /* DSBH */
+ IRTemp tmpT1 = newTemp(ty);
+ IRTemp tmpT2 = newTemp(ty);
+ IRTemp tmpT3 = newTemp(ty);
+ IRTemp tmpT4 = newTemp(ty);
+ IRTemp tmpT5 = newTemp(Ity_I64);
+ IRTemp tmpT6 = newTemp(ty);
+ DIP("dsbh r%u, r%u\n", rd, rt);
+ assign(tmpT5, mkU64(0xFF00FF00FF00FF00ULL));
+ assign(tmpT6, mkU64(0x00FF00FF00FF00FFULL));
+ assign(tmpT1, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT5)));
+ assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(8)));
+ assign(tmpT3, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT6)));
+ assign(tmpT4, binop(Iop_Shl64, mkexpr(tmpT3), mkU8(8)));
+ assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT4), mkexpr(tmpT2)));
+ putIReg(rd, mkexpr(tmpRd));
+ break;
+ }
+ case 0x05: { /* DSHD */
+ IRTemp tmpT1 = newTemp(ty);
+ IRTemp tmpT2 = newTemp(ty);
+ IRTemp tmpT3 = newTemp(ty);
+ IRTemp tmpT4 = newTemp(ty);
+ IRTemp tmpT5 = newTemp(Ity_I64);
+ IRTemp tmpT6 = newTemp(ty);
+ IRTemp tmpT7 = newTemp(ty);
+ IRTemp tmpT8 = newTemp(ty);
+ IRTemp tmpT9 = newTemp(ty);
+ DIP("dshd r%u, r%u\n", rd, rt);
+ assign(tmpT5, mkU64(0xFFFF0000FFFF0000ULL));
+ assign(tmpT6, mkU64(0x0000FFFF0000FFFFULL));
+ assign(tmpT1, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT5)));
+ assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(16)));
+ assign(tmpT3, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT6)));
+ assign(tmpT4, binop(Iop_Shl64, mkexpr(tmpT3), mkU8(16)));
+ assign(tmpT7, binop(Iop_Or64, mkexpr(tmpT4), mkexpr(tmpT2)));
+ assign(tmpT8, binop(Iop_Shl64, mkexpr(tmpT7), mkU8(32)));
+ assign(tmpT9, binop(Iop_Shr64, mkexpr(tmpT7), mkU8(32)));
+ assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT8), mkexpr(tmpT9)));
+ putIReg(rd, mkexpr(tmpRd));
+ break;
+ }
+ default:
+ vex_printf("\nop6o10 = %d", lsb);
 +                  goto decode_failure;
+ }
+ break;
+ case 0x3B: { /* RDHWR */
+ DIP("rdhwr r%d, r%d\n", rt, rd);
if (rd == 29) {
putIReg(rt, getULR());
+#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
+ } else if (rd == 1) {
+ IRTemp val = newTemp(Ity_I64);
+ IRExpr** args = mkIRExprVec_2 (mkU64(rt), mkU64(rd));
+ IRDirty *d = unsafeIRDirty_1_N(val,
+ 0,
+ "mips64_dirtyhelper_rdhwr",
+ &mips64_dirtyhelper_rdhwr,
+ args);
+ stmt(IRStmt_Dirty(d));
+ putIReg(rt, mkexpr(val));
+#endif
} else
goto decode_failure;
break;
}
- case 0x04:
- /*INS*/ msb = get_msb(cins);
+ case 0x04: /* INS */
+ msb = get_msb(cins);
lsb = get_lsb(cins);
size = msb - lsb + 1;
vassert(lsb + size > 0);
DIP("ins size:%d msb:%d lsb:%d", size, msb, lsb);
- /*put size bits from rs at the pos in temporary */
+ /* put size bits from rs at the pos in temporary */
t0 = newTemp(Ity_I32);
t3 = newTemp(Ity_I32);
- /*shift left for 32 - size to clear leading bits and get zeros
- at the end */
- assign(t0, binop(Iop_Shl32, getIReg(rs), mkU8(32 - size)));
- /*now set it at pos */
+ /* shift left for 32 - size to clear leading bits and get zeros
+ at the end */
+ assign(t0, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rs)),
+ mkU8(32 - size)));
+ /* now set it at pos */
t1 = newTemp(Ity_I32);
assign(t1, binop(Iop_Shr32, mkexpr(t0), mkU8(32 - size - lsb)));
if (lsb > 0) {
t2 = newTemp(Ity_I32);
- /*clear everything but lower pos bits from rt */
- assign(t2, binop(Iop_Shl32, getIReg(rt), mkU8(32 - lsb)));
+ /* clear everything but lower pos bits from rt */
+ assign(t2, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rt)),
+ mkU8(32 - lsb)));
assign(t3, binop(Iop_Shr32, mkexpr(t2), mkU8(32 - lsb)));
}
if (msb < 31) {
t4 = newTemp(Ity_I32);
- /*clear everything but upper msb + 1 bits from rt */
- assign(t4, binop(Iop_Shr32, getIReg(rt), mkU8(msb + 1)));
+ /* clear everything but upper msb + 1 bits from rt */
+ assign(t4, binop(Iop_Shr32, mkNarrowTo32(ty, getIReg(rt)),
+ mkU8(msb + 1)));
t5 = newTemp(Ity_I32);
assign(t5, binop(Iop_Shl32, mkexpr(t4), mkU8(msb + 1)));
- /*now combine these registers */
+ /* now combine these registers */
if (lsb > 0) {
t6 = newTemp(Ity_I32);
assign(t6, binop(Iop_Or32, mkexpr(t5), mkexpr(t1)));
- putIReg(rt, binop(Iop_Or32, mkexpr(t6), mkexpr(t3)));
+ putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t6),
+ mkexpr(t3)), False));
} else {
- putIReg(rt, binop(Iop_Or32, mkexpr(t1), mkexpr(t5)));
+ putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t1),
+ mkexpr(t5)), False));
}
- }
-
- else {
- putIReg(rt, binop(Iop_Or32, mkexpr(t1), mkexpr(t3)));
-
+ } else {
+ putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t1),
+ mkexpr(t3)), False));
}
break;
- case 0x00:
- /*EXT*/ msb = get_msb(cins);
+ case 0x00: /* EXT */
+ msb = get_msb(cins);
lsb = get_lsb(cins);
size = msb + 1;
DIP("ext size:%d msb:%d lsb:%d", size, msb, lsb);
vassert(lsb + size <= 32);
vassert(lsb + size > 0);
- /*put size bits from rs at the top of in temporary */
+ /* put size bits from rs at the top of in temporary */
if (lsb + size < 32) {
t0 = newTemp(Ity_I32);
- assign(t0, binop(Iop_Shl32, getIReg(rs), mkU8(32 - lsb - size)));
- putIReg(rt, binop(Iop_Shr32, mkexpr(t0), mkU8(32 - size)));
- } else {
- putIReg(rt, binop(Iop_Shr32, getIReg(rs), mkU8(32 - size)));
+ assign(t0, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rs)),
+ mkU8(32 - lsb - size)));
+ putIReg(rt, mkWidenFrom32(ty, binop(Iop_Shr32, mkexpr(t0),
+ mkU8(32 - size)), True));
+ } else {
+ putIReg(rt, mkWidenFrom32(ty, binop(Iop_Shr32,
+ mkNarrowTo32(ty, getIReg(rs)),
+ mkU8(32 - size)), True));
}
break;
- case 0x20:
- /*BSHFL*/ switch (sa) {
- case 0x10:
- /*SEB*/ DIP("seb r%d, r%d", rd, rt);
- putIReg(rd, unop(Iop_8Sto32, unop(Iop_32to8, getIReg(rt))));
- break;
+ case 0x03: /* Doubleword Extract Bit Field - DEXT; MIPS64r2 */
+ msb = get_msb(cins);
+ lsb = get_lsb(cins);
+ size = msb + 1;
+ t1 = newTemp(Ity_I64);
+ DIP("dext r%u, r%u, %d, %d\n", rt, rs, lsb, msb + 1);
+ vassert(lsb >= 0 && lsb < 32);
+ vassert(size > 0 && size <= 32);
+ vassert((lsb + size) > 0 && (lsb + size) <= 63);
- case 0x18:
- /*SEH*/ DIP("seh r%d, r%d", rd, rt);
- putIReg(rd, unop(Iop_16Sto32, unop(Iop_32to16, getIReg(rt))));
- break;
+ UChar lsAmt = 63 - (lsb + msb); /* left shift amount; */
+ UChar rsAmt = 63 - msb; /* right shift amount; */
- case 0x02:
- /*WSBH*/ DIP("wsbh r%d, r%d", rd, rt);
- t0 = newTemp(Ity_I32);
- t1 = newTemp(Ity_I32);
- t2 = newTemp(Ity_I32);
- t3 = newTemp(Ity_I32);
- assign(t0, binop(Iop_Shl32, binop(Iop_And32, getIReg(rt),
- mkU32(0x00FF0000)), mkU8(0x8)));
- assign(t1, binop(Iop_Shr32, binop(Iop_And32, getIReg(rt),
- mkU32(0xFF000000)), mkU8(0x8)));
- assign(t2, binop(Iop_Shl32, binop(Iop_And32, getIReg(rt),
- mkU32(0x000000FF)), mkU8(0x8)));
- assign(t3, binop(Iop_Shr32, binop(Iop_And32, getIReg(rt),
- mkU32(0x0000FF00)), mkU8(0x8)));
- putIReg(rd, binop(Iop_Or32, binop(Iop_Or32, mkexpr(t0),
- mkexpr(t1)), binop(Iop_Or32, mkexpr(t2), mkexpr(t3))));
- break;
+ assign(t1, binop(Iop_Shl64, getIReg(rs), mkU8(lsAmt)));
+ putIReg(rt, binop(Iop_Shr64, mkexpr(t1), mkU8(rsAmt)));
- default:
- goto decode_failure;
+ break;
+
+ case 0x20: /* BSHFL */
+ switch (sa) {
+ case 0x02: /* WSBH */
+ DIP("wsbh r%d, r%d", rd, rt);
+ t0 = newTemp(Ity_I32);
+ t1 = newTemp(Ity_I32);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I32);
+ assign(t0, binop(Iop_Shl32, binop(Iop_And32, mkNarrowTo32(ty,
+ getIReg(rt)), mkU32(0x00FF0000)),
+ mkU8(0x8)));
+ assign(t1, binop(Iop_Shr32, binop(Iop_And32, mkNarrowTo32(ty,
+ getIReg(rt)), mkU32(0xFF000000)), mkU8(0x8)));
+ assign(t2, binop(Iop_Shl32, binop(Iop_And32, mkNarrowTo32(ty,
+ getIReg(rt)), mkU32(0x000000FF)), mkU8(0x8)));
+ assign(t3, binop(Iop_Shr32, binop(Iop_And32, mkNarrowTo32(ty,
+ getIReg(rt)), mkU32(0x0000FF00)), mkU8(0x8)));
+ putIReg(rd, mkWidenFrom32(ty, binop(Iop_Or32, binop(Iop_Or32,
+ mkexpr(t0), mkexpr(t1)),
+ binop(Iop_Or32, mkexpr(t2),
+ mkexpr(t3))), True));
+ break;
+
+ case 0x10: /* SEB */
+ DIP("seb");
+ if (mode64)
+ putIReg(rd, unop(Iop_8Sto64, unop(Iop_64to8, getIReg(rt))));
+ else
+ putIReg(rd, unop(Iop_8Sto32, unop(Iop_32to8, getIReg(rt))));
+ break;
+
+ case 0x18: /* SEH */
+ DIP("seh");
+ if (mode64)
+ putIReg(rd, unop(Iop_16Sto64, unop(Iop_64to16, getIReg(rt))));
+ else
+ putIReg(rd, unop(Iop_16Sto32, unop(Iop_32to16, getIReg(rt))));
+ break;
+
+ default:
+ goto decode_failure;
}
- break;
- /*BSHFL*/ default:
- goto decode_failure;
+ break; /* BSHFL */
+ default:
+ goto decode_failure;
}
- break; /*Special3 */
+ break; /* Special3 */
case 0x3B:
if (0x3B == function && (archinfo->hwcaps & VEX_PRID_COMP_BROADCOM)) {
- /*RDHWR*/
- DIP("rdhwr r%d, r%d", rt, rd);
- if (rd == 29) {
- putIReg(rt, getULR());
- } else
- goto decode_failure;
- break;
+ /*RDHWR*/
+ DIP("rdhwr r%d, r%d", rt, rd);
+ if (rd == 29) {
+ putIReg(rt, getULR());
+ } else
+ goto decode_failure;
+ break;
} else {
goto decode_failure;
}
- case 0x00: /*Special */
+ case 0x00: /* Special */
switch (function) {
case 0x1: {
UInt mov_cc = get_mov_cc(cins);
- if (tf == 0) { /* MOVF */
+ if (tf == 0) { /* MOVF */
DIP("movf r%d, r%d, %d", rd, rs, mov_cc);
- {
- t1 = newTemp(Ity_I1);
- t2 = newTemp(Ity_I32);
- t3 = newTemp(Ity_I1);
- t4 = newTemp(Ity_I32);
-
- assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
- assign(t2, IRExpr_ITE(mkexpr(t1),
- binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(),
- mkU8(23)),
- mkU32(0x1)),
- binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(),
- mkU8(24 + mov_cc)),
- mkU32(0x1))
- ));
-
- assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
- assign(t4, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd)));
- putIReg(rd, mkexpr(t4));
- }
- } else if (tf == 1) { /* MOVT */
+ t1 = newTemp(Ity_I1);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I1);
+
+ assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+ assign(t2, IRExpr_ITE(mkexpr(t1),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
+ assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd)));
+ } else if (tf == 1) { /* MOVT */
DIP("movt r%d, r%d, %d", rd, rs, mov_cc);
- {
- t1 = newTemp(Ity_I1);
- t2 = newTemp(Ity_I32);
- t3 = newTemp(Ity_I1);
- t4 = newTemp(Ity_I32);
-
- assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
- assign(t2, IRExpr_ITE(mkexpr(t1),
- binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(),
- mkU8(23)),
- mkU32(0x1)),
- binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(),
- mkU8(24 + mov_cc)),
- mkU32(0x1))
- ));
-
- assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
- assign(t4, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd)));
- putIReg(rd, mkexpr(t4));
- }
+ t1 = newTemp(Ity_I1);
+ t2 = newTemp(Ity_I32);
+ t3 = newTemp(Ity_I1);
+
+ assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+ assign(t2, IRExpr_ITE(mkexpr(t1),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
+ assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
+ putIReg(rd, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd)));
}
break;
}
- case 0x0A: {
- /* MOVZ */
+ case 0x0A: { /* MOVZ */
DIP("movz r%d, r%d, r%d", rd, rs, rt);
t1 = newTemp(ty);
t2 = newTemp(ty);
- {
+ if (mode64) {
+ assign(t1, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpEQ64,
+ getIReg(rt), mkU64(0x0)))));
+ assign(t2, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpNE64,
+ getIReg(rt), mkU64(0x0)))));
+ putIReg(rd, binop(Iop_Add64, binop(Iop_And64, getIReg(rs),
+ mkexpr(t1)), binop(Iop_And64, getIReg(rd),mkexpr(t2))));
+ } else {
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, getIReg(rt),
mkU32(0x0))));
assign(t2, unop(Iop_1Sto32, binop(Iop_CmpNE32, getIReg(rt),
break;
}
- case 0x0B: {
- /* MOVN */
+ case 0x0B: { /* MOVN */
DIP("movn r%d, r%d, r%d", rd, rs, rt);
t1 = newTemp(ty);
t2 = newTemp(ty);
- {
+ if (mode64) {
+ assign(t1, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpEQ64,
+ getIReg(rt), mkU64(0x0)))));
+ assign(t2, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpNE64,
+ getIReg(rt), mkU64(0x0)))));
+ putIReg(rd, binop(Iop_Add64, binop(Iop_And64, getIReg(rs),
+ mkexpr(t2)), binop(Iop_And64, getIReg(rd),
+ mkexpr(t1))));
+ } else {
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, getIReg(rt),
mkU32(0x0))));
assign(t2, unop(Iop_1Sto32, binop(Iop_CmpNE32, getIReg(rt),
putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
break;
- case 0x20: /* ADD */
+ case 0x20: { /* ADD */
DIP("add r%d, r%d, r%d", rd, rs, rt);
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+
+ assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+ assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I32);
t4 = newTemp(Ity_I32);
/* dst = src0 + src1
- * if(sign(src0 ) != sign(src1 ))
- * goto no overflow;
- * if(sign(dst) == sign(src0 ))
- * goto no overflow;
- * # we have overflow! */
-
- assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
- assign(t1, binop(Iop_Xor32, getIReg(rs), getIReg(rt)));
+ if(sign(src0 ) != sign(src1 ))
+ goto no overflow;
+ if(sign(dst) == sign(src0 ))
+ goto no overflow;
+ we have overflow! */
+
+ assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
+ assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
assign(t2, unop(Iop_1Uto32,
binop(Iop_CmpEQ32,
binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)),
mkU32(0x80000000))));
- assign(t3, binop(Iop_Xor32, mkexpr(t0), getIReg(rs)));
+ assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32)));
assign(t4, unop(Iop_1Uto32,
binop(Iop_CmpNE32,
binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)),
mkU32(0x80000000))));
-
+
stmt(IRStmt_Exit(binop(Iop_CmpEQ32,
binop(Iop_Or32, mkexpr(t2), mkexpr(t4)),
mkU32(0)),
Ijk_SigFPE_IntOvf,
- IRConst_U32(guest_PC_curr_instr + 4), OFFB_PC));
+ mode64 ? IRConst_U64(guest_PC_curr_instr + 4) :
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
- putIReg(rd, mkexpr(t0));
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(t0), True));
break;
-
+ }
case 0x1A: /* DIV */
DIP("div r%d, r%d", rs, rt);
- {
+ if (mode64) {
+ t2 = newTemp(Ity_I64);
+
+ assign(t2, binop(Iop_DivModS64to32,
+ getIReg(rs), mkNarrowTo32(ty, getIReg(rt))));
+
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+ } else {
t1 = newTemp(Ity_I64);
t2 = newTemp(Ity_I64);
case 0x1B: /* DIVU */
DIP("divu r%d, r%d", rs, rt);
- {
+ if (mode64) {
+ t2 = newTemp(Ity_I64);
+
+ assign(t2, binop(Iop_DivModU64to32,
+ getIReg(rs), mkNarrowTo32(ty, getIReg(rt))));
+
+ putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+ putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+ } else {
t1 = newTemp(Ity_I64);
t2 = newTemp(Ity_I64);
assign(t1, unop(Iop_32Uto64, getIReg(rs)));
}
break;
+ case 0x1C: /* Doubleword Multiply - DMULT; MIPS64 */
+ DIP("dmult r%u, r%u\n", rs, rt);
+ t0 = newTemp(Ity_I128);
+
+ assign(t0, binop(Iop_MullS64, getIReg(rs), getIReg(rt)));
+
+ putHI(unop(Iop_128HIto64, mkexpr(t0)));
+ putLO(unop(Iop_128to64, mkexpr(t0)));
+ break;
+
+ case 0x1D: /* Doubleword Multiply Unsigned - DMULTU; MIPS64 */
+ DIP("dmultu r%u, r%u\n", rs, rt);
+ t0 = newTemp(Ity_I128);
+
+ assign(t0, binop(Iop_MullU64, getIReg(rs), getIReg(rt)));
+
+ putHI(unop(Iop_128HIto64, mkexpr(t0)));
+ putLO(unop(Iop_128to64, mkexpr(t0)));
+ break;
+
+ case 0x1E: /* Doubleword Divide DDIV; MIPS64 */
+ DIP("ddiv");
+ t1 = newTemp(Ity_I128);
+
+ assign(t1, binop(Iop_DivModS64to64, getIReg(rs), getIReg(rt)));
+
+ putHI(unop(Iop_128HIto64, mkexpr(t1)));
+ putLO(unop(Iop_128to64, mkexpr(t1)));
+ break;
+
+ case 0x1F: /* Doubleword Divide Unsigned DDIVU; MIPS64 check this */
+ DIP("ddivu");
+ t1 = newTemp(Ity_I128);
+ t2 = newTemp(Ity_I128);
+
+ assign(t1, binop(Iop_64HLto128, mkU64(0), getIReg(rs)));
+
+ assign(t2, binop(Iop_DivModU128to64, mkexpr(t1), getIReg(rt)));
+
+ putHI(unop(Iop_128HIto64, mkexpr(t2)));
+ putLO(unop(Iop_128to64, mkexpr(t2)));
+ break;
+
case 0x10: /* MFHI */
DIP("mfhi r%d", rd);
putIReg(rd, getHI());
case 0x21: /* ADDU */
DIP("addu r%d, r%d, r%d", rd, rs, rt);
- ALU_PATTERN(Iop_Add32);
+ if (mode64) {
+ ALU_PATTERN64(Iop_Add32);
+ } else {
+ ALU_PATTERN(Iop_Add32);
+ }
break;
- case 0x22: /* SUB */
+ case 0x22: { /* SUB */
DIP("sub r%d, r%d, r%d", rd, rs, rt);
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+
+ assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+ assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t4 = newTemp(Ity_I32);
t5 = newTemp(Ity_I32);
/* dst = src0 + (-1 * src1)
- * if(sign(src0 ) != sign((-1 * src1) ))
- * goto no overflow;
- * if(sign(dst) == sign(src0 ))
- * goto no overflow;
- * # we have overflow! */
-
- assign(t5, binop(Iop_Mul32, getIReg(rt), mkU32(-1)));
- assign(t0, binop(Iop_Add32, getIReg(rs), mkexpr(t5)));
- assign(t1, binop(Iop_Xor32, getIReg(rs), mkexpr(t5)));
- assign(t2, unop(Iop_1Sto32,
- binop(Iop_CmpEQ32,
- binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)),
- mkU32(0x80000000))));
-
- assign(t3, binop(Iop_Xor32, mkexpr(t0), getIReg(rs)));
- assign(t4, unop(Iop_1Sto32,
- binop(Iop_CmpNE32,
- binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)),
- mkU32(0x80000000))));
-
- stmt(IRStmt_Exit(binop(Iop_CmpEQ32,
- binop(Iop_Or32, mkexpr(t2), mkexpr(t4)),
- mkU32(0)),
- Ijk_SigFPE_IntOvf,
- IRConst_U32(guest_PC_curr_instr + 4), OFFB_PC));
-
- putIReg(rd, mkexpr(t0));
+ if(sign(src0 ) != sign((-1 * src1) ))
+ goto no overflow;
+ if(sign(dst) == sign(src0 ))
+ goto no overflow;
+ we have overflow! */
+
+ assign(t5, binop(Iop_Mul32, mkexpr(tmpRt32), mkU32(-1)));
+ assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), mkexpr(t5)));
+ assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), mkexpr(t5)));
+ assign(t2, unop(Iop_1Sto32, binop(Iop_CmpEQ32, binop(Iop_And32,
+ mkexpr(t1), mkU32(0x80000000)), mkU32(0x80000000))));
+
+ assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32)));
+ assign(t4, unop(Iop_1Sto32, binop(Iop_CmpNE32, binop(Iop_And32,
+ mkexpr(t3), mkU32(0x80000000)), mkU32(0x80000000))));
+
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ32, binop(Iop_Or32, mkexpr(t2),
+ mkexpr(t4)), mkU32(0)), Ijk_SigFPE_IntOvf,
+ mode64 ? IRConst_U64(guest_PC_curr_instr + 4) :
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(t0), True));
break;
-
+ }
case 0x23: /* SUBU */
DIP("subu r%d, r%d, r%d", rd, rs, rt);
- ALU_PATTERN(Iop_Sub32);
+ if (mode64) {
+ ALU_PATTERN64(Iop_Sub32);
+ } else {
+ ALU_PATTERN(Iop_Sub32);
+ }
break;
case 0x24: /* AND */
DIP("and r%d, r%d, r%d", rd, rs, rt);
- ALU_PATTERN(Iop_And32);
+ if (mode64) {
+ ALU_PATTERN(Iop_And64);
+ } else {
+ ALU_PATTERN(Iop_And32);
+ }
break;
case 0x25: /* OR */
DIP("or r%d, r%d, r%d", rd, rs, rt);
- ALU_PATTERN(Iop_Or32);
+ if (mode64) {
+ ALU_PATTERN(Iop_Or64);
+ } else {
+ ALU_PATTERN(Iop_Or32);
+ }
break;
case 0x26: /* XOR */
DIP("xor r%d, r%d, r%d", rd, rs, rt);
- ALU_PATTERN(Iop_Xor32);
+ if (mode64) {
+ ALU_PATTERN(Iop_Xor64);
+ } else {
+ ALU_PATTERN(Iop_Xor32);
+ }
break;
case 0x27: /* NOR */
DIP("nor r%d, r%d, r%d", rd, rs, rt);
- putIReg(rd, unop(Iop_Not32, binop(Iop_Or32, getIReg(rs),getIReg(rt))));
+ if (mode64)
+ putIReg(rd, unop(Iop_Not64, binop(Iop_Or64, getIReg(rs),
+ getIReg(rt))));
+ else
+ putIReg(rd, unop(Iop_Not32, binop(Iop_Or32, getIReg(rs),
+ getIReg(rt))));
break;
case 0x08: /* JR */
case 0x09: /* JALR */
DIP("jalr r%d r%d", rd, rs);
- putIReg(rd, mkU32(guest_PC_curr_instr + 8));
- t0 = newTemp(Ity_I32);
- assign(t0, getIReg(rs));
- lastn = mkexpr(t0);
+ if (mode64) {
+ putIReg(rd, mkU64(guest_PC_curr_instr + 8));
+ t0 = newTemp(Ity_I64);
+ assign(t0, getIReg(rs));
+ lastn = mkexpr(t0);
+ } else {
+ putIReg(rd, mkU32(guest_PC_curr_instr + 8));
+ t0 = newTemp(Ity_I32);
+ assign(t0, getIReg(rs));
+ lastn = mkexpr(t0);
+ }
break;
case 0x0C: /* SYSCALL */
DIP("syscall");
- putPC(mkU32(guest_PC_curr_instr + 4));
+ if (mode64)
+ putPC(mkU64(guest_PC_curr_instr + 4));
+ else
+ putPC(mkU32(guest_PC_curr_instr + 4));
dres.jk_StopHere = Ijk_Sys_syscall;
dres.whatNext = Dis_StopHere;
break;
case 0x2A: /* SLT */
DIP("slt r%d, r%d, r%d", rd, rs, rt);
- putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs),
- getIReg(rt))));
+ if (mode64)
+ putIReg(rd, unop(Iop_1Uto64, binop(Iop_CmpLT64S, getIReg(rs),
+ getIReg(rt))));
+ else
+ putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs),
+ getIReg(rt))));
break;
case 0x2B: /* SLTU */
DIP("sltu r%d, r%d, r%d", rd, rs, rt);
- putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs),
- getIReg(rt))));
+ if (mode64)
+ putIReg(rd, unop(Iop_1Uto64, binop(Iop_CmpLT64U, getIReg(rs),
+ getIReg(rt))));
+ else
+ putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs),
+ getIReg(rt))));
break;
- case 0x00:
- /* SLL */
+ case 0x00: { /* SLL */
DIP("sll r%d, r%d, %d", rd, rt, sa);
- SXX_PATTERN(Iop_Shl32);
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+ IRTemp tmpSh32 = newTemp(Ity_I32);
+ IRTemp tmpRd = newTemp(Ity_I64);
+ if (mode64) {
+ assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+ assign(tmpSh32, binop(Iop_Shl32, mkexpr(tmpRt32), mkU8(sa)));
+ assign(tmpRd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+ putIReg(rd, mkexpr(tmpRd));
+ } else
+ SXX_PATTERN(Iop_Shl32);
break;
+ }
- case 0x04: /* SLLV */
+ case 0x04: { /* SLLV */
DIP("sllv r%d, r%d, r%d", rd, rt, rs);
- SXXV_PATTERN(Iop_Shl32);
+ if (mode64) {
+ IRTemp tmpRs8 = newTemp(Ity_I8);
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+ IRTemp tmpSh32 = newTemp(Ity_I32);
+ IRTemp tmp = newTemp(ty);
+ assign(tmp, binop(mkSzOp(ty, Iop_And8), getIReg(rs),
+ mkSzImm(ty, 31)));
+ assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp)));
+ assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+ assign(tmpSh32, binop(Iop_Shl32, mkexpr(tmpRt32), mkexpr(tmpRs8)));
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+ } else {
+ SXXV_PATTERN(Iop_Shl32);
+ }
break;
+ }
case 0x03: /* SRA */
DIP("sra r%d, r%d, %d", rd, rt, sa);
- SXX_PATTERN(Iop_Sar32);
+ if (mode64) {
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+ IRTemp tmpSh32 = newTemp(Ity_I32);
+
+ t1 = newTemp(Ity_I64);
+ t2 = newTemp(Ity_I64);
+ t3 = newTemp(Ity_I64);
+
+ assign(t1, binop(Iop_And64, getIReg(rt), /* hi */
+ mkU64(0xFFFFFFFF00000000)));
+
+ assign(t2, binop(Iop_Sar64, mkexpr(t1), mkU8(sa)));
+
+ assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+ assign(tmpSh32, binop(Iop_Sar32, mkexpr(tmpRt32), mkU8(sa)));
+
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+ } else {
+ SXX_PATTERN(Iop_Sar32);
+ }
break;
case 0x07: /* SRAV */
DIP("srav r%d, r%d, r%d", rd, rt, rs);
- SXXV_PATTERN(Iop_Sar32);
+ if (mode64) {
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+ IRTemp tmpSh32 = newTemp(Ity_I32);
+
+ t1 = newTemp(Ity_I64);
+ t2 = newTemp(Ity_I64);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I8);
+
+ assign(t4, unop(Iop_32to8, binop(Iop_And32,
+ mkNarrowTo32(ty, getIReg(rs)), mkU32(0x0000001F))));
+
+ assign(t1, binop(Iop_And64, getIReg(rt), /* hi */
+ mkU64(0xFFFFFFFF00000000)));
+
+ assign(t2, binop(Iop_Sar64, mkexpr(t1), mkexpr(t4)));
+
+ assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+ assign(tmpSh32, binop(Iop_Sar32, mkexpr(tmpRt32), mkexpr(t4)));
+
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+ } else {
+ SXXV_PATTERN(Iop_Sar32);
+ }
break;
case 0x02: { /* SRL */
if (rot) {
DIP("rotr r%d, r%d, %d", rd, rt, sa);
putIReg(rd, mkWidenFrom32(ty, genROR32(mkNarrowTo32(ty,
- getIReg(rt)), sa), False));
+ getIReg(rt)), sa), True));
} else {
DIP("srl r%d, r%d, %d", rd, rt, sa);
- SXX_PATTERN(Iop_Shr32);
+ if (mode64) {
+ IRTemp tmpSh32 = newTemp(Ity_I32);
+ IRTemp tmpRt32 = newTemp(Ity_I32);
+
+ assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+ assign(tmpSh32, binop(Iop_Shr32, mkexpr(tmpRt32), mkU8(sa)));
+ putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+ } else {
+ SXX_PATTERN(Iop_Shr32);
+ }
}
break;
}
if (rot) {
DIP("rotrv r%d, r%d, r%d", rd, rt, rs);
putIReg(rd, mkWidenFrom32(ty, genRORV32(mkNarrowTo32(ty,
- getIReg(rt)), mkNarrowTo32(ty, getIReg(rs))),False));
+ getIReg(rt)), mkNarrowTo32(ty, getIReg(rs))), True));
break;
- } else {
- /* SRLV */
+ } else { /* SRLV */
DIP("srlv r%d, r%d, r%d", rd, rt, rs);
- SXXV_PATTERN(Iop_Shr32);
+ if (mode64) {
+ SXXV_PATTERN64(Iop_Shr32);
+ } else {
+ SXXV_PATTERN(Iop_Shr32);
+ }
break;
}
}
case 0x0D: /* BREAK */
DIP("Info: Breakpoint...code = %d", trap_code);
- jmp_lit(&dres, Ijk_SigTRAP, (guest_PC_curr_instr + 4));
+ if (mode64)
+ jmp_lit64(&dres, Ijk_SigTRAP, (guest_PC_curr_instr + 4));
+ else
+ jmp_lit32(&dres, Ijk_SigTRAP, (guest_PC_curr_instr + 4));
vassert(dres.whatNext == Dis_StopHere);
break;
- case 0x30: { /* TGE */
- /*tge */ DIP("tge r%d, r%d %d", rs, rt, trap_code);
- if (trap_code == 7)
- stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rt), getIReg (rs)),
- Ijk_SigFPE_IntDiv,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else if (trap_code == 6)
- stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rt), getIReg (rs)),
- Ijk_SigFPE_IntOvf,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else
- stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rt), getIReg (rs)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- break;
- }
- case 0x31: { /* TGEU */
- /*tgeu */ DIP("tgeu r%d, r%d %d", rs, rt, trap_code);
- if (trap_code == 7)
- stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rt), getIReg (rs)),
- Ijk_SigFPE_IntDiv,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else if (trap_code == 6)
- stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rt), getIReg (rs)),
- Ijk_SigFPE_IntOvf,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else
- stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rt), getIReg (rs)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- break;
- }
- case 0x32: { /* TLT */
- /*tlt */ DIP("tlt r%d, r%d %d", rs, rt, trap_code);
- if (trap_code == 7)
- stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), getIReg (rt)),
- Ijk_SigFPE_IntDiv,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else if (trap_code == 6)
- stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), getIReg (rt)),
- Ijk_SigFPE_IntOvf,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else
- stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), getIReg (rt)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- break;
- }
- case 0x33: { /* TLTU */
- /*tltu */ DIP("tltu r%d, r%d %d", rs, rt, trap_code);
- if (trap_code == 7)
- stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), getIReg (rt)),
- Ijk_SigFPE_IntDiv,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else if (trap_code == 6)
- stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), getIReg (rt)),
- Ijk_SigFPE_IntOvf,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else
- stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), getIReg (rt)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- break;
- }
- case 0x34: { /* TEQ */
- /*teq */ DIP("teq r%d, r%d %d", rs, rt, trap_code);
- if (trap_code == 7)
- stmt (IRStmt_Exit (binop (Iop_CmpEQ32, getIReg (rs), getIReg (rt)),
- Ijk_SigFPE_IntDiv,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else if (trap_code == 6)
- stmt (IRStmt_Exit(binop (Iop_CmpEQ32, getIReg (rs), getIReg (rt)),
- Ijk_SigFPE_IntOvf,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else
- stmt (IRStmt_Exit(binop (Iop_CmpEQ32, getIReg (rs), getIReg (rt)),
- Ijk_SigTRAP, IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- break;
- }
- case 0x36: { /* TNE */
- /*tne */ DIP("tne r%d, r%d %d", rs, rt, trap_code);
- if (trap_code == 7)
- stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), getIReg (rt)),
- Ijk_SigFPE_IntDiv,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else if (trap_code == 6)
- stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), getIReg (rt)),
- Ijk_SigFPE_IntOvf,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
- else
- stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), getIReg (rt)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
+ case 0x30: { /* TGE */
+ DIP("tge r%d, r%d %d", rs, rt, trap_code);
+ if (mode64) {
+ if (trap_code == 7)
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT64S,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigFPE_IntDiv,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT64S,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT64S,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ if (trap_code == 7)
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT32S,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigFPE_IntDiv,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT32S,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigFPE_IntOvf,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT32S,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
break;
}
- case 0x0F: {
- /*SYNC*/ DIP("sync r%d, r%d, %d", rt, rd, sel);
+ case 0x31: { /* TGEU */
+ DIP("tgeu r%d, r%d %d", rs, rt, trap_code);
+ if (mode64) {
+ if (trap_code == 7)
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT64U,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigFPE_IntDiv,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT64U,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT64U,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ if (trap_code == 7)
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT32U,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigFPE_IntDiv,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT32U,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigFPE_IntOvf,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT32U,
+ getIReg (rs),
+ getIReg (rt))),
+ Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
+ break;
+ }
+ case 0x32: { /* TLT */
+ DIP("tlt r%d, r%d %d", rs, rt, trap_code);
+ if (mode64) {
+ if (trap_code == 7)
+ stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntDiv,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs),
+ getIReg(rt)), Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ if (trap_code == 7)
+ stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntDiv,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntOvf,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs),
+ getIReg(rt)), Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
+ break;
+ }
+ case 0x33: { /* TLTU */
+ DIP("tltu r%d, r%d %d", rs, rt, trap_code);
+ if (mode64) {
+ if (trap_code == 7)
+ stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntDiv,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs),
+ getIReg(rt)), Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ if (trap_code == 7)
+ stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntDiv,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntOvf,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs),
+ getIReg (rt)), Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
+ break;
+ }
+ case 0x34: { /* TEQ */
+ DIP("teq r%d, r%d, %d", rs, rt, trap_code);
+ if (mode64) {
+ if (trap_code == 7)
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntDiv,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs),
+ getIReg(rt)), Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ if (trap_code == 7)
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntDiv,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntOvf,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs),
+ getIReg(rt)), Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
+ break;
+ }
+ case 0x36: { /* TNE */
+ DIP("tne r%d, r%d %d", rs, rt, trap_code);
+ if (mode64) {
+ if (trap_code == 7)
+ stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntDiv,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs),
+ getIReg(rt)), Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ if (trap_code == 7)
+ stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntDiv,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else if (trap_code == 6)
+ stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs),
+ getIReg(rt)), Ijk_SigFPE_IntOvf,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ else
+ stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs),
+ getIReg(rt)), Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
+ break;
+ }
+ case 0x14:
+ case 0x16:
+ case 0x17: /* DSLLV, DROTRV:DSRLV, DSRAV */
+ case 0x38:
+ case 0x3A:
+ case 0x3B: /* DSLL, DROTL:DSRL, DSRA */
+ case 0x3C:
+ case 0x3E:
+ case 0x3F: /* DSLL32, DROTR32:DSRL32, DSRA32 */
+ if (dis_instr_shrt(cins))
+ break;
+ goto decode_failure;
+
+ case 0x0F: { /* SYNC */
+ DIP("sync r%d, r%d, %d", rt, rd, sel);
lsb = get_lsb(cins);
IRDirty *d = unsafeIRDirty_0_N(0,
"mips32_dirtyhelper_sync",
break;
}
- default:
- goto decode_failure;
+ case 0x2C: { /* Doubleword Add - DADD; MIPS64 */
+ DIP("dadd r%d, r%d, r%d", rd, rs, rt);
+
+ IRTemp tmpRs64 = newTemp(Ity_I64);
+ IRTemp tmpRt64 = newTemp(Ity_I64);
+
+ assign(tmpRs64, getIReg(rs));
+ assign(tmpRt64, getIReg(rt));
+
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I64);
+ t2 = newTemp(Ity_I64);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I64);
+ /* dst = src0 + src1
+ if(sign(src0 ) != sign(src1 ))
+ goto no overflow;
+ if(sign(dst) == sign(src0 ))
+ goto no overflow;
+ we have overflow! */
+
+ assign(t0, binop(Iop_Add64, mkexpr(tmpRs64), mkexpr(tmpRt64)));
+ assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64), mkexpr(tmpRt64)));
+ assign(t2, unop(Iop_1Uto64,
+ binop(Iop_CmpEQ64,
+ binop(Iop_And64, mkexpr(t1),
+ mkU64(0x8000000000000000)),
+ mkU64(0x8000000000000000))));
+
+ assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
+ assign(t4, unop(Iop_1Uto64,
+ binop(Iop_CmpNE64,
+ binop(Iop_And64, mkexpr(t3),
+ mkU64(0x8000000000000000)),
+ mkU64(0x8000000000000000))));
+
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ64,
+ binop(Iop_Or64, mkexpr(t2), mkexpr(t4)),
+ mkU64(0)),
+ Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+
+ putIReg(rd, mkexpr(t0));
+ break;
}
- break;
- case 0x01: /* Regimm */
+ case 0x2D: /* Doubleword Add Unsigned - DADDU; MIPS64 */
+ DIP("daddu r%d, r%d, r%d", rd, rs, rt);
+ ALU_PATTERN(Iop_Add64);
+ break;
+
+ case 0x2E: { /* Doubleword Subtract - DSUB; MIPS64 */
+ DIP("dsub r%u, r%u,r%u\n", rd, rs, rt);
- switch (rt) {
- case 0x01: /* BGEZ */
- DIP("bgez r%d, %d", rs, imm);
- dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
- mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt);
+ IRTemp tmpRs64 = newTemp(Ity_I64);
+ IRTemp tmpRt64 = newTemp(Ity_I64);
+
+ assign(tmpRs64, getIReg(rs));
+ assign(tmpRt64, getIReg(rt));
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I64);
+ t2 = newTemp(Ity_I64);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I64);
+ t5 = newTemp(Ity_I64);
+ /* dst = src0 + (-1 * src1)
+ if(sign(src0 ) != sign((-1 * src1) ))
+ goto no overflow;
+ if(sign(dst) == sign(src0 ))
+ goto no overflow;
+ we have overflow! */
+
+ assign(t5, binop(Iop_Mul64, mkexpr(tmpRt64), mkU64(0xffffffffffffffff)));
+ assign(t0, binop(Iop_Add64, mkexpr(tmpRs64), mkexpr(t5)));
+ assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64), mkexpr(t5)));
+ assign(t2, unop(Iop_1Sto64, binop(Iop_CmpEQ64, binop(Iop_And64,
+ mkexpr(t1), mkU64(0x8000000000000000)),
+ mkU64(0x8000000000000000))));
+
+ assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
+ assign(t4, unop(Iop_1Sto64, binop(Iop_CmpNE64, binop(Iop_And64,
+ mkexpr(t3), mkU64(0x8000000000000000)),
+ mkU64(0x8000000000000000))));
+
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ64, binop(Iop_Or64, mkexpr(t2),
+ mkexpr(t4)), mkU64(0)), Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+
+ putIReg(rd, binop(Iop_Sub64, getIReg(rs), getIReg(rt)));
break;
+ }
- case 0x03: /* BGEZL */
- DIP("bgezl r%d, %d", rs, imm);
- lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32,
- getIReg(rs), mode64 ?
- mkU64(0x8000000000000000ULL)
- :mkU32(0x80000000)),
- mkU32(0x0)), imm);
+ case 0x2F: /* Doubleword Subtract Unsigned - DSUBU; MIPS64 */
+ DIP("dsub r%u, r%u,r%u\n", rd, rt, rt);
+ ALU_PATTERN(Iop_Sub64);
break;
+ default:
+ goto decode_failure;
+ }
+ break;
+
+ case 0x01: /* Regimm */
+
+ switch (rt) {
case 0x00: /* BLTZ */
DIP("bltz r%d, %d", rs, imm);
- dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
- mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt);
+ if (mode64) {
+ if (!dis_instr_branch(cins, &dres, resteerOkFn,
+ callback_opaque, &bstmt))
+ goto decode_failure;
+ } else
+ dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
+ mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt);
+ break;
+
+ case 0x01: /* BGEZ */
+ DIP("bgez r%d, %d", rs, imm);
+ if (mode64) {
+ if (!dis_instr_branch(cins, &dres, resteerOkFn,
+ callback_opaque, &bstmt))
+ goto decode_failure;
+ } else
+ dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
+ mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt);
break;
case 0x02: /* BLTZL */
DIP("bltzl r%d, %d", rs, imm);
- lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32,
- getIReg(rs), mkU32(0x80000000)),
- mkU32(0x80000000)), imm);
+ lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+ binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+ mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
+ mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
+ imm);
+ break;
+
+ case 0x03: /* BGEZL */
+ DIP("bgezl r%d, %d", rs, imm);
+ lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+ binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+ mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
+ mode64 ? mkU64(0x0) : mkU32(0x0)), imm);
break;
case 0x10: /* BLTZAL */
DIP("bltzal r%d, %d", rs, imm);
- dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
- mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt);
+ if (mode64) {
+ if (!dis_instr_branch(cins, &dres, resteerOkFn,
+ callback_opaque, &bstmt))
+ goto decode_failure;
+ } else
+ dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
+ mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt);
break;
case 0x12: /* BLTZALL */
DIP("bltzall r%d, %d", rs, imm);
- putIReg(31, mkU32(guest_PC_curr_instr + 8));
- lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32,
- getIReg(rs), mkU32(0x80000000)),
- mkU32(0x80000000)), imm);
+ putIReg(31, mode64 ? mkU64(guest_PC_curr_instr + 8) :
+ mkU32(guest_PC_curr_instr + 8));
+ lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+ binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+ mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
+ mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
+ imm);
break;
case 0x11: /* BGEZAL */
DIP("bgezal r%d, %d", rs, imm);
- dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
- mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt);
+ if (mode64) {
+ if (!dis_instr_branch(cins, &dres, resteerOkFn,
+ callback_opaque, &bstmt))
+ goto decode_failure;
+ } else
+ dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
+ mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt);
break;
case 0x13: /* BGEZALL */
DIP("bgezall r%d, %d", rs, imm);
- putIReg(31, mkU32(guest_PC_curr_instr + 8));
- lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32,
- getIReg(rs), mkU32(0x80000000)),
- mkU32(0x0)), imm);
+ if (mode64) {
+ putIReg(31, mkU64(guest_PC_curr_instr + 8));
+ lastn = dis_branch_likely(binop(Iop_CmpNE64, binop(Iop_And64,
+ getIReg(rs), mkU64(0x8000000000000000)),
+ mkU64(0x0)), imm);
+ } else {
+ putIReg(31, mkU32(guest_PC_curr_instr + 8));
+ lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32,
+ getIReg(rs), mkU32(0x80000000)),
+ mkU32(0x0)), imm);
+ }
break;
- case 0x08: { /* TGEI */
- /*tgei */ DIP("tgei r%d, %d %d", rs, imm, trap_code);
- stmt (IRStmt_Exit (binop (Iop_CmpLT32S, mkU32 (imm), getIReg (rs)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
+ case 0x08: /* TGEI */
+ DIP("tgei r%d, %d %d", rs, imm, trap_code);
+ if (mode64) {
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT64S,
+ getIReg (rs),
+ mkU64 (extend_s_16to64 (imm)))),
+ Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT32S,
+ getIReg (rs),
+ mkU32 (extend_s_16to32 (imm)))),
+ Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
break;
- }
- case 0x09: { /* TGEIU */
- /*tqeiu */ DIP("tgeiu r%d, %d %d", rs, imm, trap_code);
- stmt (IRStmt_Exit (binop (Iop_CmpLT32U, mkU32 (imm), getIReg (rs)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
+
+ case 0x09: { /* TGEIU */
+ DIP("tgeiu r%d, %d %d", rs, imm, trap_code);
+ if (mode64) {
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT64U,
+ getIReg (rs),
+ mkU64 (extend_s_16to64 (imm)))),
+ Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ stmt (IRStmt_Exit (unop (Iop_Not1,
+ binop (Iop_CmpLT32U,
+ getIReg (rs),
+ mkU32 (extend_s_16to32 (imm)))),
+ Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
break;
}
case 0x0A: { /* TLTI */
- /*tlti */ DIP("tlti r%d, %d %d", rs, imm, trap_code);
- stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), mkU32 (imm)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
+ DIP("tlti r%d, %d %d", rs, imm, trap_code);
+ if (mode64) {
+ stmt (IRStmt_Exit (binop (Iop_CmpLT64S, getIReg (rs),
+ mkU64 (extend_s_16to64 (imm))),
+ Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs),
+ mkU32 (extend_s_16to32 (imm))),
+ Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
break;
}
- case 0x0B: { /* TLTIU */
- /*tltiu */ DIP("tltiu r%d, %d %d", rs, imm, trap_code);
- stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), mkU32 (imm)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
+ case 0x0B: { /* TLTIU */
+ DIP("tltiu r%d, %d %d", rs, imm, trap_code);
+ if (mode64) {
+ stmt (IRStmt_Exit (binop (Iop_CmpLT64U, getIReg (rs),
+ mkU64 (extend_s_16to64 (imm))),
+ Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs),
+ mkU32 (extend_s_16to32 (imm))),
+ Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
break;
}
- case 0x0C: { /* TEQI */
- /*teqi */ DIP("teqi r%d, %d %d", rs, imm, trap_code);
- stmt (IRStmt_Exit (binop (Iop_CmpEQ32, getIReg (rs), mkU32 (imm)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
+ case 0x0C: { /* TEQI */
+ DIP("teqi r%d, %d %d", rs, imm, trap_code);
+ if (mode64) {
+ stmt (IRStmt_Exit (binop (Iop_CmpEQ64, getIReg (rs),
+ mkU64 (extend_s_16to64 (imm))),
+ Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ stmt (IRStmt_Exit (binop (Iop_CmpEQ32, getIReg (rs),
+ mkU32 (extend_s_16to32 (imm))),
+ Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
break;
}
- case 0x0E: { /* TNEI */
- /*tnei */ DIP("tnei r%d, %d %d", rs, imm, trap_code);
- stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), mkU32 (imm)),
- Ijk_SigTRAP,
- IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC));
+ case 0x0E: { /* TNEI */
+ DIP("tnei r%d, %d %d", rs, imm, trap_code);
+ if (mode64) {
+ stmt (IRStmt_Exit (binop (Iop_CmpNE64, getIReg (rs),
+ mkU64 (extend_s_16to64 (imm))),
+ Ijk_SigTRAP,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ } else {
+ stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs),
+ mkU32 (extend_s_16to32 (imm))),
+ Ijk_SigTRAP,
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+ }
break;
}
case 0x1F:
- /*SYNCI*/
- //Just ignore it
- break;
+ /* SYNCI */
+ /* Just ignore it */
+ break;
default:
goto decode_failure;
case 0x04:
DIP("beq r%d, r%d, %d", rs, rt, imm);
- dis_branch(False, binop(Iop_CmpEQ32, getIReg(rs), getIReg(rt)),
- imm, &bstmt);
+ if (mode64)
+ dis_branch(False, binop(Iop_CmpEQ64, getIReg(rs), getIReg(rt)),
+ imm, &bstmt);
+ else
+ dis_branch(False, binop(Iop_CmpEQ32, getIReg(rs), getIReg(rt)),
+ imm, &bstmt);
break;
case 0x14:
DIP("beql r%d, r%d, %d", rs, rt, imm);
- lastn = dis_branch_likely(binop(Iop_CmpNE32, getIReg(rs), getIReg(rt)),
- imm);
+ lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+ getIReg(rs), getIReg(rt)), imm);
break;
case 0x05:
DIP("bne r%d, r%d, %d", rs, rt, imm);
- dis_branch(False, binop(Iop_CmpNE32, getIReg(rs), getIReg(rt)),
- imm, &bstmt);
+ if (mode64)
+ dis_branch(False, binop(Iop_CmpNE64, getIReg(rs), getIReg(rt)),
+ imm, &bstmt);
+ else
+ dis_branch(False, binop(Iop_CmpNE32, getIReg(rs), getIReg(rt)),
+ imm, &bstmt);
break;
case 0x15:
DIP("bnel r%d, r%d, %d", rs, rt, imm);
- lastn =
- dis_branch_likely(binop(Iop_CmpEQ32, getIReg(rs), getIReg(rt)), imm);
+ lastn = dis_branch_likely(binop(mode64 ? Iop_CmpEQ64 : Iop_CmpEQ32,
+ getIReg(rs), getIReg(rt)), imm);
break;
- case 0x07: /* BGTZ */
+ case 0x07: /* BGTZ */
DIP("bgtz r%d, %d", rs, imm);
- dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE32S, getIReg(rs),
- mkU32(0x00))), imm, &bstmt);
+ if (mode64)
+ dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE64S, getIReg(rs),
+ mkU64(0x00))), imm, &bstmt);
+ else
+ dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE32S, getIReg(rs),
+ mkU32(0x00))), imm, &bstmt);
break;
- case 0x17: /* BGTZL */
+ case 0x17: /* BGTZL */
DIP("bgtzl r%d, %d", rs, imm);
- lastn = dis_branch_likely(binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x00)),
- imm);
+ if (mode64)
+ lastn = dis_branch_likely(binop(Iop_CmpLE64S, getIReg(rs),
+ mkU64(0x00)), imm);
+ else
+ lastn = dis_branch_likely(binop(Iop_CmpLE32S, getIReg(rs),
+ mkU32(0x00)), imm);
break;
- case 0x06: /* BLEZ */
+ case 0x06: /* BLEZ */
DIP("blez r%d, %d", rs, imm);
- dis_branch(False,binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x0)), imm,
- &bstmt);
+ if (mode64)
+ dis_branch(False, binop(Iop_CmpLE64S, getIReg(rs), mkU64(0x0)),
+ imm, &bstmt);
+ else
+ dis_branch(False,binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x0)), imm,
+ &bstmt);
break;
- case 0x16: /* BLEZL */
+ case 0x16: /* BLEZL */
DIP("blezl r%d, %d", rs, imm);
- lastn = dis_branch_likely(unop(Iop_Not1, (binop(Iop_CmpLE32S,
- getIReg(rs), mkU32(0x0)))), imm);
+ lastn = dis_branch_likely(unop(Iop_Not1, (binop(mode64 ? Iop_CmpLE64S :
+ Iop_CmpLE32S, getIReg(rs), mode64 ?
+ mkU64(0x0) : mkU32(0x0)))), imm);
break;
- case 0x08: /* ADDI */
+ case 0x08: { /* ADDI */
DIP("addi r%d, r%d, %d", rt, rs, imm);
+ IRTemp tmpRs32 = newTemp(Ity_I32);
+ assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I32);
t4 = newTemp(Ity_I32);
/* dst = src0 + sign(imm)
- * if(sign(src0 ) != sign(imm ))
- * goto no overflow;
- * if(sign(dst) == sign(src0 ))
- * goto no overflow;
- * # we have overflow! */
-
- assign(t0, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm))));
- assign(t1, binop(Iop_Xor32, getIReg(rs), mkU32(extend_s_16to32(imm))));
- assign(t2, unop(Iop_1Sto32,
- binop(Iop_CmpEQ32,
- binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)),
- mkU32(0x80000000))));
-
- assign(t3, binop(Iop_Xor32, mkexpr(t0), getIReg(rs)));
- assign(t4, unop(Iop_1Sto32,
- binop(Iop_CmpNE32,
- binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)),
- mkU32(0x80000000))));
-
- stmt(IRStmt_Exit(binop(Iop_CmpEQ32,
- binop(Iop_Or32, mkexpr(t2), mkexpr(t4)),
- mkU32(0)),
- Ijk_SigFPE_IntOvf,
- IRConst_U32(guest_PC_curr_instr + 4), OFFB_PC));
-
- putIReg(rt, mkexpr(t0));
+ if(sign(src0 ) != sign(imm ))
+ goto no overflow;
+ if(sign(dst) == sign(src0 ))
+ goto no overflow;
+ we have overflow! */
+
+ assign(t0, binop(Iop_Add32, mkexpr(tmpRs32),
+ mkU32(extend_s_16to32(imm))));
+ assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32),
+ mkU32(extend_s_16to32(imm))));
+ assign(t2, unop(Iop_1Sto32, binop(Iop_CmpEQ32, binop(Iop_And32,
+ mkexpr(t1), mkU32(0x80000000)), mkU32(0x80000000))));
+
+ assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32)));
+ assign(t4, unop(Iop_1Sto32, binop(Iop_CmpNE32, binop(Iop_And32,
+ mkexpr(t3), mkU32(0x80000000)), mkU32(0x80000000))));
+
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ32, binop(Iop_Or32, mkexpr(t2),
+ mkexpr(t4)), mkU32(0)), Ijk_SigFPE_IntOvf,
+ mode64 ? IRConst_U64(guest_PC_curr_instr + 4) :
+ IRConst_U32(guest_PC_curr_instr + 4),
+ OFFB_PC));
+
+ putIReg(rt, mkWidenFrom32(ty, mkexpr(t0), True));
break;
-
- case 0x09: /* ADDIU */
+ }
+ case 0x09: /* ADDIU */
DIP("addiu r%d, r%d, %d", rt, rs, imm);
- putIReg(rt, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm))));
+ if (mode64) {
+ putIReg(rt, mkWidenFrom32(ty, binop(Iop_Add32,
+ mkNarrowTo32(ty, getIReg(rs)),mkU32(extend_s_16to32(imm))),
+ True));
+ } else
+ putIReg(rt, binop(Iop_Add32, getIReg(rs),mkU32(extend_s_16to32(imm))));
break;
- case 0x0C: /* ANDI */
+ case 0x0C: /* ANDI */
DIP("andi r%d, r%d, %d", rt, rs, imm);
- ALUI_PATTERN(Iop_And32);
+ if (mode64) {
+ ALUI_PATTERN64(Iop_And64);
+ } else {
+ ALUI_PATTERN(Iop_And32);
+ }
break;
- case 0x0E: /* XORI */
+ case 0x0E: /* XORI */
DIP("xori r%d, r%d, %d", rt, rs, imm);
- ALUI_PATTERN(Iop_Xor32);
+ if (mode64) {
+ ALUI_PATTERN64(Iop_Xor64);
+ } else {
+ ALUI_PATTERN(Iop_Xor32);
+ }
break;
- case 0x0D: /* ORI */
+ case 0x0D: /* ORI */
DIP("ori r%d, r%d, %d", rt, rs, imm);
- ALUI_PATTERN(Iop_Or32);
+ if (mode64) {
+ ALUI_PATTERN64(Iop_Or64);
+ } else {
+ ALUI_PATTERN(Iop_Or32);
+ }
break;
- case 0x0A: /* SLTI */
+ case 0x0A: /* SLTI */
DIP("slti r%d, r%d, %d", rt, rs, imm);
- putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs),
- mkU32(extend_s_16to32(imm)))));
+ if (mode64)
+ putIReg(rt, unop(Iop_1Uto64, binop(Iop_CmpLT64S, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+ else
+ putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs),
+ mkU32(extend_s_16to32(imm)))));
break;
- case 0x0B: /* SLTIU */
+ case 0x0B: /* SLTIU */
DIP("sltiu r%d, r%d, %d", rt, rs, imm);
- putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs),
- mkU32(extend_s_16to32(imm)))));
+ if (mode64)
+ putIReg(rt, unop(Iop_1Uto64, binop(Iop_CmpLT64U, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+ else
+ putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs),
+ mkU32(extend_s_16to32(imm)))));
break;
- case 0x30: /* LL / LWC0 */
+   case 0x18: {  /* Doubleword Add Immediate - DADDI; MIPS64 */
+ DIP("daddi r%d, r%d, %d", rt, rs, imm);
+ IRTemp tmpRs64 = newTemp(Ity_I64);
+ assign(tmpRs64, getIReg(rs));
+
+ t0 = newTemp(Ity_I64);
+ t1 = newTemp(Ity_I64);
+ t2 = newTemp(Ity_I64);
+ t3 = newTemp(Ity_I64);
+ t4 = newTemp(Ity_I64);
+ /* dst = src0 + sign(imm)
+ if(sign(src0 ) != sign(imm ))
+ goto no overflow;
+ if(sign(dst) == sign(src0 ))
+ goto no overflow;
+ we have overflow! */
+
+ assign(t0, binop(Iop_Add64, mkexpr(tmpRs64),
+ mkU64(extend_s_16to64(imm))));
+ assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64),
+ mkU64(extend_s_16to64(imm))));
+ assign(t2, unop(Iop_1Sto64, binop(Iop_CmpEQ64, binop(Iop_And64,
+ mkexpr(t1), mkU64(0x8000000000000000)),
+ mkU64(0x8000000000000000))));
+
+ assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
+ assign(t4, unop(Iop_1Sto64, binop(Iop_CmpNE64, binop(Iop_And64,
+ mkexpr(t3), mkU64(0x8000000000000000)),
+ mkU64(0x8000000000000000))));
+
+ stmt(IRStmt_Exit(binop(Iop_CmpEQ64, binop(Iop_Or64, mkexpr(t2),
+ mkexpr(t4)), mkU64(0)), Ijk_SigFPE_IntOvf,
+ IRConst_U64(guest_PC_curr_instr + 4),
+ OFFB_PC));
+
+ putIReg(rt, mkexpr(t0));
+ break;
+ }
+
+   case 0x19:  /* Doubleword Add Immediate Unsigned - DADDIU; MIPS64 */
+ DIP("daddiu r%d, r%d, %d", rt, rs, imm);
+ putIReg(rt, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+ break;
+
+ case 0x1A: {
+ /* Load Doubleword Left - LDL; MIPS64 */
+ vassert(mode64);
+ DIP("ldl r%u,%d(r%u)\n", rt, imm, rs);
+ /* t1 = addr */
+#if defined (_MIPSEL)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Xor64, mkU64(0x7), binop(Iop_Add64, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+#endif
+ /* t2 = word addr */
+ /* t4 = addr mod 4 */
+ LWX_SWX_PATTERN64_1;
+
+ /* t3 = word content - shifted */
+ t3 = newTemp(Ity_I64);
+ assign(t3, binop(Iop_Shl64, load(Ity_I64, mkexpr(t2)),
+ narrowTo(Ity_I8, binop(Iop_Shl64, binop(Iop_Sub64, mkU64(0x07),
+ mkexpr(t4)), mkU8(3)))));
+
+ /* rt content - adjusted */
+ t5 = newTemp(Ity_I64);
+ t6 = newTemp(Ity_I64);
+ t7 = newTemp(Ity_I64);
+ t8 = newTemp(Ity_I64);
+
+ assign(t5, binop(Iop_Mul64, mkexpr(t4), mkU64(0x8)));
+
+ assign(t6, binop(Iop_Shr64, mkU64(0x00FFFFFFFFFFFFFF),
+ narrowTo(Ity_I8, mkexpr(t5))));
+
+ assign(t7, binop(Iop_And64, getIReg(rt), mkexpr(t6)));
+
+ putIReg(rt, binop(Iop_Or64, mkexpr(t7), mkexpr(t3)));
+ break;
+ }
+
+ case 0x1B: {
+ /* Load Doubleword Right - LDR; MIPS64 */
+ vassert(mode64);
+ DIP("ldr r%u,%d(r%u)\n", rt, imm, rs);
+ /* t1 = addr */
+#if defined (_MIPSEL)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+ t1 = newTemp(Ity_I64);
+ assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs),
+ mkU64(extend_s_16to64(imm)))));
+#endif
+ /* t2 = word addr */
+ /* t4 = addr mod 4 */
+ LWX_SWX_PATTERN64_1;
+
+ /* t3 = word content - shifted */
+ t3 = newTemp(Ity_I64);
+ assign(t3, binop(Iop_Shr64, load(Ity_I64, mkexpr(t2)),
+ narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(3)))));
+
+ /* rt content - adjusted */
+ t5 = newTemp(Ity_I64);
+ assign(t5, binop(Iop_And64, getIReg(rt), unop(Iop_Not64,
+ binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFF),
+ narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(0x3)))))));
+
+ putIReg(rt, binop(Iop_Or64, mkexpr(t5), mkexpr(t3)));
+ break;
+ }
+
+ case 0x27: /* Load Word unsigned - LWU; MIPS64 */
+ DIP("lwu r%u,%d(r%u)\n", rt, imm, rs);
+ LOAD_STORE_PATTERN;
+
+ putIReg(rt, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), False));
+ break;
+
+ case 0x30: /* LL / LWC0 */
DIP("ll r%d, %d(r%d)", rt, imm, rs);
LOAD_STORE_PATTERN;
#elif defined (_MIPSEB)
stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), NULL /*this is a load */ ));
#endif
+ if (mode64)
+ putIReg(rt, unop(Iop_32Sto64, mkexpr(t2)));
+ else
+ putIReg(rt, mkexpr(t2));
+ break;
+
+ case 0x34: /* Load Linked Doubleword - LLD; MIPS64 */
+ DIP("lld r%d, %d(r%d)", rt, imm, rs);
+ LOAD_STORE_PATTERN;
+
+ t2 = newTemp(Ity_I64);
+#if defined (_MIPSEL)
+ stmt(IRStmt_LLSC
+ (Iend_LE, t2, mkexpr(t1), NULL /*this is a load */ ));
+#elif defined (_MIPSEB)
+ stmt(IRStmt_LLSC
+ (Iend_BE, t2, mkexpr(t1), NULL /*this is a load */ ));
+#endif
putIReg(rt, mkexpr(t2));
break;
stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), mkNarrowTo32(ty, getIReg(rt))));
#endif
- putIReg(rt, unop(Iop_1Uto32, mkexpr(t2)));
+ putIReg(rt, unop(mode64 ? Iop_1Uto64 : Iop_1Uto32, mkexpr(t2)));
+ break;
+
+ case 0x3C: /* Store Conditional Doubleword - SCD; MIPS64 */
+ DIP("sdc r%d, %d(r%d)", rt, imm, rs);
+ LOAD_STORE_PATTERN;
+
+ t2 = newTemp(Ity_I1);
+#if defined (_MIPSEL)
+ stmt(IRStmt_LLSC(Iend_LE, t2, mkexpr(t1), getIReg(rt)));
+#elif defined (_MIPSEB)
+ stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), getIReg(rt)));
+#endif
+
+ putIReg(rt, unop(Iop_1Uto64, mkexpr(t2)));
break;
- decode_failure:
+ case 0x37: /* Load Doubleword - LD; MIPS64 */
+ DIP("ld r%u,%d(r%u)", rt, imm, rs);
+ LOAD_STORE_PATTERN;
+ putIReg(rt, load(Ity_I64, mkexpr(t1)));
+ break;
+
+ case 0x3F: /* Store Doubleword - SD; MIPS64 */
+ DIP("sd r%u,%d(r%u)", rt, imm, rs);
+ LOAD_STORE_PATTERN;
+
+ store(mkexpr(t1), getIReg(rt));
+ break;
+
+ decode_failure:
/* All decode failures end up here. */
if (sigill_diag)
vex_printf("vex mips->IR: unhandled instruction bytes: "
EIP should be up-to-date since it made so at the start bnezof each
insn, but nevertheless be paranoid and update it again right
now. */
- stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_PC),
- mkU32(guest_PC_curr_instr)));
- jmp_lit(&dres, Ijk_NoDecode, guest_PC_curr_instr);
+ if (mode64) {
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_PC),
+ mkU64(guest_PC_curr_instr)));
+ jmp_lit64(&dres, Ijk_NoDecode, guest_PC_curr_instr);
+ } else {
+ stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_PC),
+ mkU32(guest_PC_curr_instr)));
+ jmp_lit32(&dres, Ijk_NoDecode, guest_PC_curr_instr);
+ }
dres.whatNext = Dis_StopHere;
dres.len = 0;
return dres;
- } /* switch (opc) for the main (primary) opcode switch. */
+ } /* switch (opc) for the main (primary) opcode switch. */
/* All MIPS insn have 4 bytes */
delay_slot_branch = False;
stmt(bstmt);
bstmt = NULL;
- putPC(mkU32(guest_PC_curr_instr + 4));
+ if (mode64)
+ putPC(mkU64(guest_PC_curr_instr + 4));
+ else
+ putPC(mkU32(guest_PC_curr_instr + 4));
dres.jk_StopHere = is_Branch_or_Jump_and_Link(guest_code + delta - 4) ?
Ijk_Call : Ijk_Boring;
}
/* All decode successes end up here. */
switch (dres.whatNext) {
case Dis_Continue:
- putPC(mkU32(guest_PC_curr_instr + 4));
+ if (mode64)
+ putPC(mkU64(guest_PC_curr_instr + 4));
+ else
+ putPC(mkU32(guest_PC_curr_instr + 4));
break;
case Dis_ResteerU:
case Dis_ResteerC:
if (branch_or_jump(guest_code + delta + 4)) {
dres.whatNext = Dis_StopHere;
dres.jk_StopHere = Ijk_Boring;
- putPC(mkU32(guest_PC_curr_instr + 4));
+ if (mode64)
+ putPC(mkU64(guest_PC_curr_instr + 4));
+ else
+ putPC(mkU32(guest_PC_curr_instr + 4));
}
dres.len = 4;
/* Disassemble a single instruction into IR. The instruction
is located in host memory at &guest_code[delta]. */
-DisResult
-disInstr_MIPS(IRSB* irsb_IN,
- Bool (*resteerOkFn) (void *, Addr64),
- Bool resteerCisOk,
- void* callback_opaque,
- UChar* guest_code_IN,
- Long delta,
- Addr64 guest_IP,
- VexArch guest_arch,
- VexArchInfo* archinfo,
- VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
- Bool sigill_diag_IN)
+DisResult disInstr_MIPS( IRSB* irsb_IN,
+ Bool (*resteerOkFn) ( void *, Addr64 ),
+ Bool resteerCisOk,
+ void* callback_opaque,
+ UChar* guest_code_IN,
+ Long delta,
+ Addr64 guest_IP,
+ VexArch guest_arch,
+ VexArchInfo* archinfo,
+ VexAbiInfo* abiinfo,
+ Bool host_bigendian_IN,
+ Bool sigill_diag_IN )
{
DisResult dres;
-
/* Set globals (see top of this file) */
- vassert(guest_arch == VexArchMIPS32);
+ vassert(guest_arch == VexArchMIPS32 || guest_arch == VexArchMIPS64);
mode64 = guest_arch != VexArchMIPS32;
guest_code = guest_code_IN;
irsb = irsb_IN;
host_is_bigendian = host_bigendian_IN;
- guest_PC_curr_instr = (Addr32) guest_IP;
- guest_PC_bbstart = (Addr32) toUInt(guest_IP - delta);
+#if defined(VGP_mips32_linux)
+ guest_PC_curr_instr = (Addr32)guest_IP;
+#elif defined(VGP_mips64_linux)
+ guest_PC_curr_instr = (Addr64)guest_IP;
+#endif
dres = disInstr_MIPS_WRK(resteerOkFn, resteerCisOk, callback_opaque,
delta, archinfo, abiinfo, sigill_diag_IN);
This file is part of Valgrind, a dynamic binary instrumentation
framework.
- Copyright (C) 2010-2012 RT-RK
+ Copyright (C) 2010-2013 RT-RK
mips-valgrind@rt-rk.com
This program is free software; you can redistribute it and/or
#include "host_generic_regs.h"
#include "host_mips_defs.h"
+/* guest_COND offset. */
+#define COND_OFFSET(__mode64) (__mode64 ? 612 : 316)
+
+/* Register number for guest state pointer in host code. */
+#define GuestSP 23
+
+#define MkHRegGPR(_n, _mode64) \
+ mkHReg(_n, _mode64 ? HRcInt64 : HRcInt32, False)
+
+#define MkHRegFPR(_n, _mode64) \
+ mkHReg(_n, _mode64 ? HRcFlt64 : HRcFlt32, False)
+
/*---------------- Registers ----------------*/
void ppHRegMIPS(HReg reg, Bool mode64)
hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64);
/* But specific for real regs. */
- {
- switch (hregClass(reg)) {
- case HRcInt32:
- r = hregNumber(reg);
- vassert(r >= 0 && r < 32);
- vex_printf("%s", ireg32_names[r]);
- return;
- case HRcFlt32:
- r = hregNumber(reg);
- vassert(r >= 0 && r < 32);
- vex_printf("%s", freg32_names[r]);
- return;
- case HRcFlt64:
- r = hregNumber(reg);
- vassert(r >= 0 && r < 32);
- vex_printf("%s", freg64_names[r]);
- return;
- default:
- vpanic("ppHRegMIPS");
- break;
- }
+ switch (hregClass(reg)) {
+ case HRcInt32:
+ r = hregNumber(reg);
+ vassert(r >= 0 && r < 32);
+ vex_printf("%s", ireg32_names[r]);
+ return;
+ case HRcInt64:
+ vassert(mode64);
+ r = hregNumber (reg);
+ vassert (r >= 0 && r < 32);
+ vex_printf ("%s", ireg32_names[r]);
+ return;
+ case HRcFlt32:
+ r = hregNumber(reg);
+ vassert(r >= 0 && r < 32);
+ vex_printf("%s", freg32_names[r]);
+ return;
+ case HRcFlt64:
+ r = hregNumber(reg);
+ vassert(r >= 0 && r < 32);
+ vex_printf("%s", freg64_names[r]);
+ return;
+ default:
+ vpanic("ppHRegMIPS");
+ break;
}
return;
}
-#define MkHRegGPR(_n, _mode64) \
- mkHReg(_n, _mode64 ? HRcInt64 : HRcInt32, False)
-
HReg hregMIPS_GPR0(Bool mode64)
{
return MkHRegGPR(0, mode64);
return MkHRegGPR(31, mode64);
}
-#define MkHRegFPR(_n, _mode64) \
- mkHReg(_n, _mode64 ? HRcFlt64 : HRcFlt32, False)
-
HReg hregMIPS_F0(Bool mode64)
{
return MkHRegFPR(0, mode64);
void getAllocableRegs_MIPS(Int * nregs, HReg ** arr, Bool mode64)
{
- /*
- * The list of allocable registers is shorten to fit MIPS32 mode on Loongson.
- * More precisely, we workaround Loongson MIPS32 issues by avoiding usage of
- * odd single precision FP registers.
- */
+ /* The list of allocable registers is shorten to fit MIPS32 mode on Loongson.
+ More precisely, we workaround Loongson MIPS32 issues by avoiding usage of
+ odd single precision FP registers. */
if (mode64)
- *nregs = 24;
+ *nregs = 20;
else
- *nregs = 29;
+ *nregs = 28;
UInt i = 0;
*arr = LibVEX_Alloc(*nregs * sizeof(HReg));
- //ZERO = constant 0
- //AT = assembler temporary
- // callee saves ones are listed first, since we prefer them
- // if they're available
+ /* ZERO = constant 0
+ AT = assembler temporary
+ callee saves ones are listed first, since we prefer them
+ if they're available */
(*arr)[i++] = hregMIPS_GPR16(mode64);
(*arr)[i++] = hregMIPS_GPR17(mode64);
(*arr)[i++] = hregMIPS_GPR18(mode64);
(*arr)[i++] = hregMIPS_GPR20(mode64);
(*arr)[i++] = hregMIPS_GPR21(mode64);
(*arr)[i++] = hregMIPS_GPR22(mode64);
- if (!mode64)
- (*arr)[i++] = hregMIPS_GPR23(mode64);
- // otherwise we'll have to slum it out with caller-saves ones
- if (mode64) {
- (*arr)[i++] = hregMIPS_GPR8(mode64);
- (*arr)[i++] = hregMIPS_GPR9(mode64);
- (*arr)[i++] = hregMIPS_GPR10(mode64);
- (*arr)[i++] = hregMIPS_GPR11(mode64);
- }
(*arr)[i++] = hregMIPS_GPR12(mode64);
(*arr)[i++] = hregMIPS_GPR13(mode64);
(*arr)[i++] = hregMIPS_GPR14(mode64);
(*arr)[i++] = hregMIPS_GPR15(mode64);
(*arr)[i++] = hregMIPS_GPR24(mode64);
- /***********mips32********************/
- // t0 (=dispatch_ctr)
- // t1 spill reg temp
- // t2 (=guest_state)
- // t3 (=PC = next guest address)
- // K0 and K1 are reserved for OS kernel
- // GP = global pointer
- // SP = stack pointer
- // FP = frame pointer
- // RA = link register
- // + PC, HI and LO
+ /* s7 (=guest_state) */
(*arr)[i++] = hregMIPS_F16(mode64);
(*arr)[i++] = hregMIPS_F18(mode64);
(*arr)[i++] = hregMIPS_F20(mode64);
const HChar* ret;
switch (cond) {
case MIPScc_EQ:
- ret = "EQ"; /* equal */
+ ret = "EQ"; /* equal */
break;
case MIPScc_NE:
- ret = "NEQ"; /* not equal */
+ ret = "NEQ"; /* not equal */
break;
case MIPScc_HS:
- ret = "GE"; /* >=u (Greater Than or Equal) */
+ ret = "GE"; /* >=u (Greater Than or Equal) */
break;
case MIPScc_LO:
- ret = "LT"; /* <u (lower) */
+ ret = "LT"; /* <u (lower) */
break;
case MIPScc_MI:
- ret = "mi"; /* minus (negative) */
+ ret = "MI"; /* minus (negative) */
break;
case MIPScc_PL:
- ret = "pl"; /* plus (zero or +ve) */
+ ret = "PL"; /* plus (zero or +ve) */
break;
case MIPScc_VS:
- ret = "vs"; /* overflow */
+ ret = "VS"; /* overflow */
break;
case MIPScc_VC:
- ret = "vc"; /* no overflow */
+ ret = "VC"; /* no overflow */
break;
case MIPScc_HI:
- ret = "hi"; /* >u (higher) */
+ ret = "HI"; /* >u (higher) */
break;
case MIPScc_LS:
- ret = "ls"; /* <=u (lower or same) */
+ ret = "LS"; /* <=u (lower or same) */
break;
case MIPScc_GE:
- ret = "ge"; /* >=s (signed greater or equal) */
+ ret = "GE"; /* >=s (signed greater or equal) */
break;
case MIPScc_LT:
- ret = "lt"; /* <s (signed less than) */
+ ret = "LT"; /* <s (signed less than) */
break;
case MIPScc_GT:
- ret = "gt"; /* >s (signed greater) */
+ ret = "GT"; /* >s (signed greater) */
break;
case MIPScc_LE:
- ret = "le"; /* <=s (signed less or equal) */
+ ret = "LE"; /* <=s (signed less or equal) */
break;
case MIPScc_AL:
- ret = "al"; /* always (unconditional) */
+ ret = "AL"; /* always (unconditional) */
break;
case MIPScc_NV:
- ret = "nv"; /* never (unconditional): */
+ ret = "NV"; /* never (unconditional) */
break;
default:
vpanic("showMIPSCondCode");
case Mfp_MOVD:
ret = "MOV.D";
break;
- case Mfp_RES:
- ret = "RES";
- break;
case Mfp_ROUNDWS:
ret = "ROUND.W.S";
break;
ret = "frsqrte";
break;
case Mfp_CVTDW:
- case Mfp_CVTD:
- ret = "CVT.D";
+ ret = "CVT.D.W";
+ break;
+ case Mfp_CVTDL:
+ ret = "CVT.D.L";
+ break;
+ case Mfp_CVTDS:
+ ret = "CVT.D.S";
break;
case Mfp_CVTSD:
case Mfp_CVTSW:
return ret;
}
+/* Show a move between an FPR and a GPR */
+const HChar* showMIPSFpGpMoveOp ( MIPSFpGpMoveOp op )
+{
+ const HChar *ret;
+ switch (op) {
+ case MFpGpMove_mfc1:
+ ret = "mfc1";
+ break;
+ case MFpGpMove_dmfc1:
+ ret = "dmfc1";
+ break;
+ case MFpGpMove_mtc1:
+ ret = "mtc1";
+ break;
+ case MFpGpMove_dmtc1:
+ ret = "dmtc1";
+ break;
+ default:
+ vpanic("showMIPSFpGpMoveOp");
+ break;
+ }
+ return ret;
+}
+
+/* Show floating point move conditional */
+const HChar* showMIPSMoveCondOp ( MIPSMoveCondOp op )
+{
+ const HChar *ret;
+ switch (op) {
+ case MFpMoveCond_movns:
+ ret = "movn.s";
+ break;
+ case MFpMoveCond_movnd:
+ ret = "movn.d";
+ break;
+ case MMoveCond_movn:
+ ret = "movn";
+ break;
+ default:
+ vpanic("showMIPSFpMoveCondOp");
+ break;
+ }
+ return ret;
+}
+
/* --------- MIPSAMode: memory address expressions. --------- */
MIPSAMode *MIPSAMode_IR(Int idx, HReg base)
case Mun_NOP:
ret = "nop";
break;
+ case Mun_DCLO:
+ ret = "dclo";
+ break;
+ case Mun_DCLZ:
+ ret = "dclz";
+ break;
default:
vpanic("showMIPSUnaryOp");
break;
case Malu_XOR:
ret = immR ? "xori" : "xor";
break;
+ case Malu_DADD:
+ ret = immR ? "daddi" : "dadd";
+ break;
+ case Malu_DSUB:
+ ret = immR ? "dsubi" : "dsub";
+ break;
+ case Malu_SLT:
+ ret = immR ? "slti" : "slt";
+ break;
default:
vpanic("showMIPSAluOp");
break;
return i;
}
-MIPSInstr *MIPSInstr_Call(MIPSCondCode cond, Addr32 target, UInt argiregs,
- HReg src, RetLoc rloc)
+MIPSInstr *MIPSInstr_Call ( MIPSCondCode cond, Addr64 target, UInt argiregs,
+ HReg src, RetLoc rloc )
{
UInt mask;
MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
i->Min.Call.argiregs = argiregs;
i->Min.Call.src = src;
i->Min.Call.rloc = rloc;
- /* Only r4 .. r7 inclusive may be used as arg regs. Hence: */
- mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+ /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
+ mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
+ | (1 << 10) | (1 << 11);
vassert(0 == (argiregs & ~mask));
vassert(rloc != RetLocINVALID);
return i;
}
-MIPSInstr *MIPSInstr_CallAlways(MIPSCondCode cond, Addr32 target, UInt argiregs,
- RetLoc rloc)
+MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode cond, Addr64 target,
+ UInt argiregs, RetLoc rloc )
{
UInt mask;
MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
i->Min.Call.target = target;
i->Min.Call.argiregs = argiregs;
i->Min.Call.rloc = rloc;
- /* Only r4 .. r7 inclusive may be used as arg regs. Hence: */
- mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+ /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
+ mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
+ | (1 << 10) | (1 << 11);
vassert(0 == (argiregs & ~mask));
vassert(rloc != RetLocINVALID);
return i;
}
-MIPSInstr *MIPSInstr_XDirect ( Addr32 dstGA, MIPSAMode* amPC,
+MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC,
MIPSCondCode cond, Bool toFastEP ) {
MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
i->tag = Min_XDirect;
return i;
}
+MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2,
+ HReg src3 )
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_FpTernary;
+ i->Min.FpTernary.op = op;
+ i->Min.FpTernary.dst = dst;
+ i->Min.FpTernary.src1 = src1;
+ i->Min.FpTernary.src2 = src2;
+ i->Min.FpTernary.src3 = src3;
+ return i;
+}
+
MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src)
{
MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
return i;
}
-MIPSInstr *MIPSInstr_MovCond(HReg dst, HReg argL, MIPSRH * argR, HReg condR,
- MIPSCondCode cond)
-{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
- i->tag = Min_MovCond;
- i->Min.MovCond.dst = dst;
- i->Min.MovCond.srcL = argL;
- i->Min.MovCond.srcR = argR;
- i->Min.MovCond.condR = condR;
- i->Min.MovCond.cond = cond;
- return i;
-}
-
MIPSInstr *MIPSInstr_MtFCSR(HReg src)
{
MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
return i;
}
+MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src )
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_FpGpMove;
+ i->Min.FpGpMove.op = op;
+ i->Min.FpGpMove.dst = dst;
+ i->Min.FpGpMove.src = src;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src,
+ HReg cond )
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_MoveCond;
+ i->Min.MoveCond.op = op;
+ i->Min.MoveCond.dst = dst;
+ i->Min.MoveCond.src = src;
+ i->Min.MoveCond.cond = cond;
+ return i;
+}
+
MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter,
MIPSAMode* amFailAddr ) {
MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
if (i->Min.Call.cond != MIPScc_AL) {
vex_printf("if (%s) ", showMIPSCondCode(i->Min.Call.cond));
}
- vex_printf("{ ");
- ppLoadImm(hregMIPS_GPR11(mode64), i->Min.Call.target, mode64);
+ vex_printf(" {");
+ if (!mode64)
+ vex_printf(" addiu $29, $29, -16");
- vex_printf(" ; mtctr r10 ; bctrl [");
+ ppLoadImm(hregMIPS_GPR25(mode64), i->Min.Call.target, mode64);
+
+ vex_printf(" ; jarl $31, $25; # args [");
for (n = 0; n < 32; n++) {
if (i->Min.Call.argiregs & (1 << n)) {
- vex_printf("r%d", n);
+ vex_printf("$%d", n);
if ((i->Min.Call.argiregs >> n) > 1)
vex_printf(",");
}
}
- vex_printf(",");
- ppRetLoc(i->Min.Call.rloc);
- vex_printf("] }");
+ vex_printf("] nop; ");
+ if (!mode64)
+ vex_printf("addiu $29, $29, 16; ]");
+
break;
}
case Min_XDirect:
vex_printf("(xDirect) ");
vex_printf("if (guest_COND.%s) { ",
showMIPSCondCode(i->Min.XDirect.cond));
- vex_printf("move $9, 0x%x,", i->Min.XDirect.dstGA);
+ vex_printf("move $9, 0x%x,", (UInt)i->Min.XDirect.dstGA);
vex_printf("; sw $9, ");
ppMIPSAMode(i->Min.XDirect.amPC, mode64);
vex_printf("; move $9, $disp_cp_chain_me_to_%sEP; jalr $9; nop}",
vex_printf(",");
ppHRegMIPS(i->Min.FpBinary.srcR, mode64);
return;
+ case Min_FpTernary:
+ vex_printf("%s", showMIPSFpOp(i->Min.FpTernary.op));
+ ppHRegMIPS(i->Min.FpTernary.dst, mode64);
+ vex_printf(",");
+ ppHRegMIPS(i->Min.FpTernary.src1, mode64);
+ vex_printf(",");
+ ppHRegMIPS(i->Min.FpTernary.src2, mode64);
+ vex_printf(",");
+ ppHRegMIPS(i->Min.FpTernary.src3, mode64);
+ return;
case Min_FpConvert:
vex_printf("%s", showMIPSFpOp(i->Min.FpConvert.op));
ppHRegMIPS(i->Min.FpConvert.dst, mode64);
}
return;
}
- case Min_MovCond: {
- if (i->Min.MovCond.cond == MIPScc_MI) {
- vex_printf("\ncond move\n");
- return;
-
- }
- break;
- }
case Min_MtFCSR: {
vex_printf("ctc1 ");
ppHRegMIPS(i->Min.MtFCSR.src, mode64);
vex_printf(", $31");
return;
}
-
case Min_MfFCSR: {
vex_printf("ctc1 ");
ppHRegMIPS(i->Min.MfFCSR.dst, mode64);
vex_printf(", $31");
return;
}
+ case Min_FpGpMove: {
+ vex_printf("%s", showMIPSFpGpMoveOp(i->Min.FpGpMove.op));
+ ppHRegMIPS(i->Min.FpGpMove.dst, mode64);
+ vex_printf(", ");
+ ppHRegMIPS(i->Min.FpGpMove.src, mode64);
+ return;
+ }
+ case Min_MoveCond: {
+ vex_printf("%s", showMIPSMoveCondOp(i->Min.MoveCond.op));
+ ppHRegMIPS(i->Min.MoveCond.dst, mode64);
+ vex_printf(", ");
+ ppHRegMIPS(i->Min.MoveCond.src, mode64);
+ vex_printf(", ");
+ ppHRegMIPS(i->Min.MoveCond.cond, mode64);
+ return;
+ }
case Min_EvCheck:
vex_printf("(evCheck) lw $9, ");
ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
vex_printf("; nofail:");
return;
case Min_ProfInc:
- vex_printf("(profInc) move $9, ($NotKnownYet); "
- "lw $8, 0($9); "
- "addiu $8, $8, 1; "
- "sw $8, 0($9); "
- "sltiu $1, $8, 1; "
- "lw $8, 4($9); "
- "addu $8, $8, $1; "
- "sw $8, 4($9); " );
+ if (mode64)
+ vex_printf("(profInc) move $9, ($NotKnownYet); "
+ "ld $8, 0($9); "
+ "daddiu $8, $8, 1; "
+ "sd $8, 0($9); " );
+ else
+ vex_printf("(profInc) move $9, ($NotKnownYet); "
+ "lw $8, 0($9); "
+ "addiu $8, $8, 1; "
+ "sw $8, 0($9); "
+ "sltiu $1, $8, 1; "
+ "lw $8, 4($9); "
+ "addu $8, $8, $1; "
+ "sw $8, 4($9); " );
return;
default:
vpanic("ppMIPSInstr");
addHRegUse(u, HRmRead, i->Min.Div.srcR);
return;
case Min_Call: {
+ /* Logic and comments copied/modified from x86, ppc and arm back end.
+ First off, claim it trashes all the caller-saved regs
+ which fall within the register allocator's jurisdiction. */
if (i->Min.Call.cond != MIPScc_AL)
addHRegUse(u, HRmRead, i->Min.Call.src);
UInt argir;
addHRegUse(u, HRmWrite, hregMIPS_GPR24(mode64));
addHRegUse(u, HRmWrite, hregMIPS_GPR25(mode64));
- addHRegUse(u, HRmWrite, hregMIPS_GPR26(mode64));
- addHRegUse(u, HRmWrite, hregMIPS_GPR27(mode64));
+ addHRegUse(u, HRmWrite, hregMIPS_GPR31(mode64));
/* Now we have to state any parameter-carrying registers
- which might be read. This depends on the argiregs field. */
+ which might be read. This depends on the argiregs field. */
argir = i->Min.Call.argiregs;
- if (argir & (1 << 7))
- addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64));
- if (argir & (1 << 6))
- addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64));
- if (argir & (1 << 5))
- addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64));
- if (argir & (1 << 4))
- addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64));
+ if (argir & (1<<11)) addHRegUse(u, HRmRead, hregMIPS_GPR11(mode64));
+ if (argir & (1<<10)) addHRegUse(u, HRmRead, hregMIPS_GPR10(mode64));
+ if (argir & (1<<9)) addHRegUse(u, HRmRead, hregMIPS_GPR9(mode64));
+ if (argir & (1<<8)) addHRegUse(u, HRmRead, hregMIPS_GPR8(mode64));
+ if (argir & (1<<7)) addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64));
+ if (argir & (1<<6)) addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64));
+ if (argir & (1<<5)) addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64));
+ if (argir & (1<<4)) addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64));
+
+ vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6)
+ | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10)
+ | (1 << 11))));
- vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6) | (1 << 7))));
return;
}
/* XDirect/XIndir/XAssisted are also a bit subtle. They
}
break;
case Min_FpUnary:
- if (i->Min.FpUnary.op == Mfp_CVTD) {
- addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
- addHRegUse(u, HRmRead, i->Min.FpUnary.src);
- return;
- } else {
- addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
- addHRegUse(u, HRmRead, i->Min.FpUnary.src);
- return;
- }
+ addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
+ addHRegUse(u, HRmRead, i->Min.FpUnary.src);
+ return;
case Min_FpBinary:
addHRegUse(u, HRmWrite, i->Min.FpBinary.dst);
addHRegUse(u, HRmRead, i->Min.FpBinary.srcL);
addHRegUse(u, HRmRead, i->Min.FpBinary.srcR);
return;
+ case Min_FpTernary:
+ addHRegUse(u, HRmWrite, i->Min.FpTernary.dst);
+ addHRegUse(u, HRmRead, i->Min.FpTernary.src1);
+ addHRegUse(u, HRmRead, i->Min.FpTernary.src2);
+ addHRegUse(u, HRmRead, i->Min.FpTernary.src3);
+ return;
case Min_FpConvert:
addHRegUse(u, HRmWrite, i->Min.FpConvert.dst);
addHRegUse(u, HRmRead, i->Min.FpConvert.src);
addHRegUse(u, HRmRead, i->Min.FpCompare.srcL);
addHRegUse(u, HRmRead, i->Min.FpCompare.srcR);
return;
- case Min_MovCond:
- if (i->Min.MovCond.srcR->tag == Mrh_Reg) {
- addHRegUse(u, HRmRead, i->Min.MovCond.srcR->Mrh.Reg.reg);
- }
- addHRegUse(u, HRmRead, i->Min.MovCond.srcL);
- addHRegUse(u, HRmRead, i->Min.MovCond.condR);
- addHRegUse(u, HRmWrite, i->Min.MovCond.dst);
+ case Min_FpGpMove:
+ addHRegUse(u, HRmWrite, i->Min.FpGpMove.dst);
+ addHRegUse(u, HRmRead, i->Min.FpGpMove.src);
+ return;
+ case Min_MoveCond:
+ addHRegUse(u, HRmWrite, i->Min.MoveCond.dst);
+ addHRegUse(u, HRmRead, i->Min.MoveCond.src);
+ addHRegUse(u, HRmRead, i->Min.MoveCond.cond);
return;
case Min_EvCheck:
/* We expect both amodes only to mention %ebp, so this is in
}
break;
case Min_FpUnary:
- if (i->Min.FpUnary.op == Mfp_CVTD) {
- mapReg(m, &i->Min.FpUnary.dst);
- mapReg(m, &i->Min.FpUnary.src);
- return;
- } else {
- mapReg(m, &i->Min.FpUnary.dst);
- mapReg(m, &i->Min.FpUnary.src);
- return;
- }
+ mapReg(m, &i->Min.FpUnary.dst);
+ mapReg(m, &i->Min.FpUnary.src);
+ return;
case Min_FpBinary:
mapReg(m, &i->Min.FpBinary.dst);
mapReg(m, &i->Min.FpBinary.srcL);
mapReg(m, &i->Min.FpBinary.srcR);
return;
+ case Min_FpTernary:
+ mapReg(m, &i->Min.FpTernary.dst);
+ mapReg(m, &i->Min.FpTernary.src1);
+ mapReg(m, &i->Min.FpTernary.src2);
+ mapReg(m, &i->Min.FpTernary.src3);
+ return;
case Min_FpConvert:
mapReg(m, &i->Min.FpConvert.dst);
mapReg(m, &i->Min.FpConvert.src);
case Min_MfFCSR:
mapReg(m, &i->Min.MfFCSR.dst);
return;
- case Min_MovCond:
- if (i->Min.MovCond.srcR->tag == Mrh_Reg) {
- mapReg(m, &(i->Min.MovCond.srcR->Mrh.Reg.reg));
- }
- mapReg(m, &i->Min.MovCond.srcL);
- mapReg(m, &i->Min.MovCond.condR);
- mapReg(m, &i->Min.MovCond.dst);
-
+ case Min_FpGpMove:
+ mapReg(m, &i->Min.FpGpMove.dst);
+ mapReg(m, &i->Min.FpGpMove.src);
+ return;
+ case Min_MoveCond:
+ mapReg(m, &i->Min.MoveCond.dst);
+ mapReg(m, &i->Min.MoveCond.src);
+ mapReg(m, &i->Min.MoveCond.cond);
return;
case Min_EvCheck:
/* We expect both amodes only to mention %ebp, so this is in
{
/* Moves between integer regs */
if (i->tag == Min_Alu) {
- // or Rd,Rs,Rs == mr Rd,Rs
+ /* or Rd,Rs,Rs == mr Rd,Rs */
if (i->Min.Alu.op != Malu_OR)
return False;
if (i->Min.Alu.srcR->tag != Mrh_Reg)
}
/* Generate mips spill/reload instructions under the direction of the
- register allocator.
-*/
+ register allocator. */
void genSpill_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
Int offsetB, Bool mode64)
{
r_dst = rSD;
if (opc1 < 40) {
- //load
+ /* load */
if (rSD == 33)
/* mfhi */
p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
p = mkFormI(p, opc1, rA, r_dst, idx);
if (opc1 >= 40) {
- //store
+ /* store */
if (rSD == 33)
/* mthi */
p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
r_dst = rSD;
if (opc1 < 40) {
- //load
+ /* load */
if (rSD == 33)
/* mfhi */
p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
/* mflo */
p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
}
- /* addiu sp, sp, -4
- * sw rA, 0(sp)
- * addu rA, rA, rB
- * sw/lw r_dst, 0(rA)
- * lw rA, 0(sp)
- * addiu sp, sp, 4 */
+
if (mode64) {
- p = mkFormI(p, 25, 29, 29, 0xFFFC);
+ /* addiu sp, sp, -8
+ sd rA, 0(sp)
+ daddu rA, rA, rB
+ sd/ld r_dst, 0(rA)
+ ld rA, 0(sp)
+ daddiu sp, sp, 8 */
+ p = mkFormI(p, 25, 29, 29, 0xFFF8);
p = mkFormI(p, 63, 29, rA, 0);
p = mkFormR(p, 0, rA, rB, rA, 0, 45);
p = mkFormI(p, opc1, rA, r_dst, 0);
p = mkFormI(p, 55, 29, rA, 0);
- p = mkFormI(p, 25, 29, 29, 4);
+ p = mkFormI(p, 25, 29, 29, 8);
} else {
+ /* addiu sp, sp, -4
+ sw rA, 0(sp)
+ addu rA, rA, rB
+ sw/lw r_dst, 0(rA)
+ lw rA, 0(sp)
+ addiu sp, sp, 4 */
p = mkFormI(p, 9, 29, 29, 0xFFFC);
p = mkFormI(p, 43, 29, rA, 0);
p = mkFormR(p, 0, rA, rB, rA, 0, 33);
p = mkFormI(p, 9, 29, 29, 4);
}
if (opc1 >= 40) {
- //store
+ /* store */
if (rSD == 33)
/* mthi */
p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
}
if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
- // sign-extendable from 16 bits
- // addiu r_dst,0,imm => li r_dst,imm
+ /* sign-extendable from 16 bits
+ addiu r_dst, 0, imm => li r_dst, imm */
p = mkFormI(p, 9, 0, r_dst, imm & 0xFFFF);
} else {
if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
- // sign-extendable from 32 bits
- // addiu r_dst,r0,(imm>>16) => lis r_dst, (imm>>16)
- // lui r_dst, (imm>>16)
+ /* sign-extendable from 32 bits
+ addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16)
+ lui r_dst, (imm >> 16) */
p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
- // ori r_dst, r_dst, (imm & 0xFFFF)
+ /* ori r_dst, r_dst, (imm & 0xFFFF) */
p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
} else {
vassert(mode64);
- // lui load in upper half of low word
+ /* lui load in upper half of low word */
p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
- // ori
+ /* ori */
p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
- //shift
+ /* shift */
p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
- // ori
+ /* ori */
p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
- //shift
+ /* shift */
p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
- // ori
+ /* ori */
p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
}
}
return p;
}
-/* A simplified version of mkLoadImm that always generates 2 or 5
+/* A simplified version of mkLoadImm that always generates 2 or 6
instructions (32 or 64 bits respectively) even if it could generate
fewer. This is needed for generating fixed sized patchable
sequences. */
-static UChar* mkLoadImm_EXACTLY2or5 ( UChar* p,
- UInt r_dst, ULong imm, Bool mode64 )
+static UChar* mkLoadImm_EXACTLY2or6 ( UChar* p,
+ UInt r_dst, ULong imm, Bool mode64)
{
vassert(r_dst < 0x20);
if (!mode64) {
/* In 32-bit mode, make sure the top 32 bits of imm are a sign
- extension of the bottom 32 bits. (Probably unnecessary.) */
+ extension of the bottom 32 bits. (Probably unnecessary.) */
UInt u32 = (UInt)imm;
Int s32 = (Int)u32;
Long s64 = (Long)s32;
}
if (!mode64) {
- // sign-extendable from 32 bits
- // addiu r_dst,r0,(imm>>16) => lis r_dst, (imm>>16)
- // lui r_dst, (imm>>16)
+ /* sign-extendable from 32 bits
+ addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16)
+ lui r_dst, (imm >> 16) */
p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
- // ori r_dst, r_dst, (imm & 0xFFFF)
+ /* ori r_dst, r_dst, (imm & 0xFFFF) */
p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
} else {
- vassert(0);
+ /* full 64bit immediate load: 6 (six!) insns. */
+ vassert(mode64);
+ /* lui load in upper half of low word */
+ p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
+ /* ori */
+ p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
+ /* shift */
+ p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+ /* ori */
+ p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
+ /* shift */
+ p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+ /* ori */
+ p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
}
return p;
}
/* Checks whether the sequence of bytes at p was indeed created
- by mkLoadImm_EXACTLY2or5 with the given parameters. */
-static Bool isLoadImm_EXACTLY2or5 ( UChar* p_to_check,
+ by mkLoadImm_EXACTLY2or6 with the given parameters. */
+static Bool isLoadImm_EXACTLY2or6 ( UChar* p_to_check,
UInt r_dst, ULong imm, Bool mode64 )
{
vassert(r_dst < 0x20);
if (!mode64) {
UInt expect[2] = { 0, 0 };
UChar* p = (UChar*)&expect[0];
- // lui r_dst, (imm>>16)
+ /* lui r_dst, (imm >> 16) */
p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
- // ori r_dst, r_dst, (imm & 0xFFFF)
+ /* ori r_dst, r_dst, (imm & 0xFFFF) */
p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
vassert(p == (UChar*)&expect[2]);
ret = fetch32(p_to_check + 0) == expect[0]
- && fetch32(p_to_check + 4) == expect[1];
-
+ && fetch32(p_to_check + 4) == expect[1];
} else {
- vassert(0);
+ UInt expect[6] = { 0, 0, 0, 0, 0, 0};
+ UChar* p = (UChar*)&expect[0];
+ /* lui load in upper half of low word */
+ p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
+ /* ori */
+ p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
+ /* shift */
+ p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+ /* ori */
+ p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
+ /* shift */
+ p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+ /* ori */
+ p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
+ vassert(p == (UChar*)&expect[6]);
+
+ ret = fetch32(p_to_check + 0) == expect[0]
+ && fetch32(p_to_check + 4) == expect[1]
+ && fetch32(p_to_check + 8) == expect[2]
+ && fetch32(p_to_check + 12) == expect[3]
+ && fetch32(p_to_check + 16) == expect[4]
+ && fetch32(p_to_check + 20) == expect[5];
}
return ret;
}
vassert(nbuf >= 32);
switch (i->tag) {
- case Min_MovCond: {
- MIPSRH *srcR = i->Min.MovCond.srcR;
- UInt condR = iregNo(i->Min.MovCond.condR, mode64);
- UInt dst = iregNo(i->Min.MovCond.dst, mode64);
-
- UInt srcL = iregNo(i->Min.MovCond.srcL, mode64);
-
- p = mkMoveReg(p, dst, srcL);
- if (i->Min.MovCond.cond == MIPScc_MI) {
- p = mkFormI(p, 7, condR, 0, 2); //bgtz cond,2
- }
-
- p = mkFormR(p, 0, 0, 0, 0, 0, 0); //nop
-
- if (srcR->tag == Mrh_Reg) {
- //or dst,src,src
- p = mkMoveReg(p, dst, iregNo(srcR->Mrh.Reg.reg, mode64));
- /*p = mkFormR(p, 0, dst, iregNo(src->Mrh.Reg.reg, mode64),
- iregNo(src->Mrh.Reg.reg, mode64), 0, 37);*/
- } else {
- p = mkLoadImm(p, dst, srcR->Mrh.Imm.imm16, mode64);
- }
- }
- goto done;
-
case Min_LI:
p = mkLoadImm(p, iregNo(i->Min.LI.dst, mode64), i->Min.LI.imm, mode64);
goto done;
-
+
case Min_Alu: {
MIPSRH *srcR = i->Min.Alu.srcR;
Bool immR = toBool(srcR->tag == Mrh_Imm);
UInt r_dst = iregNo(i->Min.Alu.dst, mode64);
UInt r_srcL = iregNo(i->Min.Alu.srcL, mode64);
- UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg, mode64);
-
+ UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg,
+ mode64);
switch (i->Min.Alu.op) {
- /*Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR */
+ /* Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR, Malu_SLT */
case Malu_ADD:
if (immR) {
vassert(srcR->Mrh.Imm.imm16 != 0x8000);
} else {
/* or */
if (r_srcL == 33)
- //MFHI
+ /* MFHI */
p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
else if (r_srcL == 34)
- //MFLO
+ /* MFLO */
p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
else if (r_dst == 33)
- //MTHI
+ /* MTHI */
p = mkFormR(p, 0, r_srcL, 0, 0, 0, 17);
else if (r_dst == 34)
- //MTLO
+ /* MTLO */
p = mkFormR(p, 0, r_srcL, 0, 0, 0, 19);
else
p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 37);
p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
}
break;
-
+ case Malu_DADD:
+ if (immR) {
+ vassert(srcR->Mrh.Imm.syned);
+ vassert(srcR->Mrh.Imm.imm16 != 0x8000);
+ p = mkFormI(p, 25, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
+ } else {
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 45);
+ }
+ break;
+ case Malu_DSUB:
+ if (immR) {
+ p = mkFormI(p, 25, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16));
+ } else {
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 47);
+ }
+ break;
+ case Malu_SLT:
+ if (immR) {
+ goto bad;
+ } else {
+ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
+ }
+ break;
+
default:
goto bad;
}
if (sz32) {
if (immR) {
UInt n = srcR->Mrh.Imm.imm16;
- vassert(n >= 0 && n < 32);
+ vassert(n >= 0 && n <= 32);
p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 0);
} else {
/* shift variable */
}
}
break;
-
+
case Mshft_SRL:
if (sz32) {
- // SRL, SRLV
+ /* SRL, SRLV */
if (immR) {
UInt n = srcR->Mrh.Imm.imm16;
vassert(n >= 0 && n < 32);
p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 6);
}
} else {
- // DSRL, DSRL32, DSRLV
+ /* DSRL, DSRL32, DSRLV */
if (immR) {
UInt n = srcR->Mrh.Imm.imm16;
vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
}
}
break;
-
+
case Mshft_SRA:
if (sz32) {
- // SRA, SRAV
+ /* SRA, SRAV */
if (immR) {
UInt n = srcR->Mrh.Imm.imm16;
vassert(n >= 0 && n < 32);
p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 7);
}
} else {
- // DSRA, DSRA32, DSRAV
+ /* DSRA, DSRA32, DSRAV */
if (immR) {
UInt n = srcR->Mrh.Imm.imm16;
vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
goto done;
}
-
+
case Min_Unary: {
UInt r_dst = iregNo(i->Min.Unary.dst, mode64);
UInt r_src = iregNo(i->Min.Unary.src, mode64);
switch (i->Min.Unary.op) {
- /*Mun_CLO, Mun_CLZ, Mun_NOP */
- case Mun_CLO: //clo
+ /* Mun_CLO, Mun_CLZ, Mun_NOP, Mun_DCLO, Mun_DCLZ */
+ case Mun_CLO: /* clo */
p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 33);
break;
- case Mun_CLZ: //clz
+ case Mun_CLZ: /* clz */
p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 32);
break;
- case Mun_NOP: //nop (sll r0,r0,0)
+ case Mun_NOP: /* nop (sll r0,r0,0) */
p = mkFormR(p, 0, 0, 0, 0, 0, 0);
break;
+ case Mun_DCLO: /* dclo */
+ p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 37);
+ break;
+ case Mun_DCLZ: /* dclz */
+ p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 36);
+ break;
}
goto done;
}
-
+
case Min_Cmp: {
UInt r_srcL = iregNo(i->Min.Cmp.srcL, mode64);
UInt r_srcR = iregNo(i->Min.Cmp.srcR, mode64);
switch (i->Min.Cmp.cond) {
case MIPScc_EQ:
- /* addiu r_dst, r0, 1
+ /* addiu r_dst, r0, 1
beq r_srcL, r_srcR, 2
nop
addiu r_dst, r0, 0
p = mkFormI(p, 9, 0, r_dst, 0);
break;
case MIPScc_NE:
- /* addiu r_dst, r0, 1
+ /* addiu r_dst, r0, 1
bne r_srcL, r_srcR, 2
nop
addiu r_dst, r0, 0
p = mkFormI(p, 9, 0, r_dst, 0);
break;
case MIPScc_LT:
- /* slt r_dst, r_srcL, r_srcR */
+ /* slt r_dst, r_srcL, r_srcR */
p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
break;
case MIPScc_LO:
- /* sltu r_dst, r_srcL, r_srcR */
+ /* sltu r_dst, r_srcL, r_srcR */
p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 43);
break;
case MIPScc_LE:
- /* addiu r_dst, r0, 1
+ /* addiu r_dst, r0, 1
beq r_srcL, r_srcR, 2
nop
slt r_dst, r_srcL, r_srcR */
p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
break;
case MIPScc_LS:
- /* addiu r_dst, r0, 1
+ /* addiu r_dst, r0, 1
beq r_srcL, r_srcR, 2
nop
sltu r_dst, r_srcL, r_srcR */
}
goto done;
}
-
+
case Min_Mul: {
Bool syned = i->Min.Mul.syned;
Bool widening = i->Min.Mul.widening;
UInt r_srcL = iregNo(i->Min.Mul.srcL, mode64);
UInt r_srcR = iregNo(i->Min.Mul.srcR, mode64);
UInt r_dst = iregNo(i->Min.Mul.dst, mode64);
-
if (widening) {
if (sz32) {
if (syned)
}
goto done;
}
-
+
case Min_Macc: {
Bool syned = i->Min.Macc.syned;
UInt r_srcL = iregNo(i->Min.Macc.srcL, mode64);
if (syned) {
switch (i->Min.Macc.op) {
case Macc_ADD:
- //madd
+ /* madd */
p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 0);
break;
case Macc_SUB:
- //msub
+ /* msub */
p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
4);
break;
} else {
switch (i->Min.Macc.op) {
case Macc_ADD:
- //maddu
+ /* maddu */
p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
1);
break;
case Macc_SUB:
- //msubu
+ /* msubu */
p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
5);
break;
goto done;
}
}
-
+
case Min_Mthi: {
UInt r_src = iregNo(i->Min.MtHL.src, mode64);
p = mkFormR(p, 0, r_src, 0, 0, 0, 17);
goto done;
}
-
+
case Min_Mtlo: {
UInt r_src = iregNo(i->Min.MtHL.src, mode64);
p = mkFormR(p, 0, r_src, 0, 0, 0, 19);
goto done;
}
-
+
case Min_Mfhi: {
UInt r_dst = iregNo(i->Min.MfHL.dst, mode64);
p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
goto done;
}
-
+
case Min_Mflo: {
UInt r_dst = iregNo(i->Min.MfHL.dst, mode64);
p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
goto done;
}
-
+
case Min_MtFCSR: {
UInt r_src = iregNo(i->Min.MtFCSR.src, mode64);
/* ctc1 */
p = mkFormR(p, 17, 6, r_src, 31, 0, 0);
goto done;
}
-
+
case Min_MfFCSR: {
UInt r_dst = iregNo(i->Min.MfFCSR.dst, mode64);
/* cfc1 */
p = mkFormR(p, 17, 2, r_dst, 31, 0, 0);
goto done;
}
-
+
case Min_Call: {
if (i->Min.Call.cond != MIPScc_AL && i->Min.Call.rloc != RetLocNone) {
/* The call might not happen (it isn't unconditional) and
}
MIPSCondCode cond = i->Min.Call.cond;
UInt r_dst = 25; /* using %r25 as address temporary -
- see getRegUsage_MIPSInstr */
+ see getRegUsage_MIPSInstr */
/* jump over the following insns if condition does not hold */
if (cond != MIPScc_AL) {
/* jmp fwds if !condition */
/* don't know how many bytes to jump over yet...
make space for a jump instruction + nop!!! and fill in later. */
- ptmp = p; /* fill in this bit later */
- p += 8; // p += 8
+ ptmp = p; /* fill in this bit later */
+ p += 8; /* p += 8 */
+ }
+
+ if (!mode64) {
+ /* addiu $29, $29, -16 */
+ p = mkFormI(p, 9, 29, 29, 0xFFF0);
}
- /* load target to r_dst */// p += 4|8
+ /* load target to r_dst; p += 4|8 */
p = mkLoadImm(p, r_dst, i->Min.Call.target, mode64);
- /* jalr %r_dst */
- p = mkFormR(p, 0, r_dst, 0, 31, 0, 9); // p += 4
- p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4
+ /* jalr r_dst */
+ p = mkFormR(p, 0, r_dst, 0, 31, 0, 9); /* p += 4 */
+ p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
+
+ if (!mode64) {
+ /* addiu $29, $29, 16 */
+ p = mkFormI(p, 9, 29, 29, 0x0010);
+ }
/* Fix up the conditional jump, if there was one. */
if (cond != MIPScc_AL) {
Int delta = p - ptmp;
vassert(delta >= 20 && delta <= 32);
- /* bc !ct,cf,delta/4 */
- /* blez r_src, delta/4-1 */
- vassert(cond == MIPScc_EQ);
+ /* blez r_src, delta/4-1
+ nop */
ptmp = mkFormI(ptmp, 6, r_src, 0, delta / 4 - 1);
mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
}
/* Update the guest PC. */
/* move r9, dstGA */
- /* sw r9, amPC */
- p = mkLoadImm_EXACTLY2or5(p, /*r*/9,
- (ULong)i->Min.XDirect.dstGA, mode64);
- p = do_load_or_store_machine_word(p, False/*!isLoad*/,
- /*r*/9, i->Min.XDirect.amPC, mode64);
+ /* sw/sd r9, amPC */
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, (ULong)i->Min.XDirect.dstGA,
+ mode64);
+ p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 9,
+ i->Min.XDirect.amPC, mode64);
/* --- FIRST PATCHABLE BYTE follows --- */
/* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're
void* disp_cp_chain_me
= i->Min.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
- p = mkLoadImm_EXACTLY2or5(p, /*r*/9,
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
Ptr_to_ULong(disp_cp_chain_me), mode64);
/* jalr $9 */
/* nop */
- p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4
- p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4
+ p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
+ p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
/* --- END of PATCHABLE BYTES --- */
/* Fix up the conditional jump, if there was one. */
Int delta = p - ptmp;
delta = delta / 4 - 3;
vassert(delta > 0 && delta < 40);
- /* lw $9, 316($10) // guest_COND
+
+ /* lw $9, COND_OFFSET(GuestSP)
beq $9, $0, 2
- nop*/
- ptmp = mkFormI(ptmp, 35, 10, 9, 316);
+ nop */
+ ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
}
/* Update the guest PC. */
/* sw r-dstGA, amPC */
- p = do_load_or_store_machine_word(p, False/*!isLoad*/,
+ p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
iregNo(i->Min.XIndir.dstGA, mode64),
i->Min.XIndir.amPC, mode64);
/* move r9, VG_(disp_cp_xindir) */
/* jalr r9 */
/* nop */
- p = mkLoadImm_EXACTLY2or5 ( p, /*r*/9,
- Ptr_to_ULong(disp_cp_xindir), mode64);
- p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4
- p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
+ Ptr_to_ULong(disp_cp_xindir), mode64);
+ p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
+ p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
/* Fix up the conditional jump, if there was one. */
if (i->Min.XIndir.cond != MIPScc_AL) {
Int delta = p - ptmp;
delta = delta / 4 - 3;
vassert(delta > 0 && delta < 40);
- /* lw $9, 316($10) // guest_COND
+
+ /* lw $9, COND_OFFSET($GuestSP)
beq $9, $0, 2
- nop*/
- ptmp = mkFormI(ptmp, 35, 10, 9, 316);
+ nop */
+ ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
}
}
/* Update the guest PC. */
- /* sw r-dstGA, amPC */
- p = do_load_or_store_machine_word(p, False/*!isLoad*/,
+ /* sw/sd r-dstGA, amPC */
+ p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
iregNo(i->Min.XIndir.dstGA, mode64),
i->Min.XIndir.amPC, mode64);
switch (i->Min.XAssisted.jk) {
case Ijk_ClientReq: trcval = VEX_TRC_JMP_CLIENTREQ; break;
case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
- //case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128; break;
- //case Ijk_Yield: trcval = VEX_TRC_JMP_YIELD; break;
+ /* case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128; break;
+ case Ijk_Yield: trcval = VEX_TRC_JMP_YIELD; break; */
case Ijk_EmWarn: trcval = VEX_TRC_JMP_EMWARN; break;
case Ijk_EmFail: trcval = VEX_TRC_JMP_EMFAIL; break;
- //case Ijk_MapFail: trcval = VEX_TRC_JMP_MAPFAIL; break;
+ /* case Ijk_MapFail: trcval = VEX_TRC_JMP_MAPFAIL; break; */
case Ijk_NoDecode: trcval = VEX_TRC_JMP_NODECODE; break;
case Ijk_TInval: trcval = VEX_TRC_JMP_TINVAL; break;
case Ijk_NoRedir: trcval = VEX_TRC_JMP_NOREDIR; break;
case Ijk_SigTRAP: trcval = VEX_TRC_JMP_SIGTRAP; break;
- //case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break;
+ /* case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break; */
case Ijk_SigBUS: trcval = VEX_TRC_JMP_SIGBUS; break;
case Ijk_SigFPE_IntDiv: trcval = VEX_TRC_JMP_SIGFPE_INTDIV; break;
case Ijk_SigFPE_IntOvf: trcval = VEX_TRC_JMP_SIGFPE_INTOVF; break;
case Ijk_Boring: trcval = VEX_TRC_JMP_BORING; break;
- /* We don't expect to see the following being assisted. */
- //case Ijk_Ret:
- //case Ijk_Call:
- /* fallthrough */
+ /* We don't expect to see the following being assisted.
+ case Ijk_Ret:
+ case Ijk_Call:
+ fallthrough */
default:
ppIRJumpKind(i->Min.XAssisted.jk);
vpanic("emit_MIPSInstr.Min_XAssisted: unexpected jump kind");
}
vassert(trcval != 0);
- p = mkLoadImm_EXACTLY2or5(p, /*r*/10, trcval, mode64);
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/ GuestSP, trcval, mode64);
/* move r9, VG_(disp_cp_xassisted) */
- p = mkLoadImm_EXACTLY2or5(p, /*r*/9,
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
(ULong)Ptr_to_ULong(disp_cp_xassisted), mode64);
/* jalr $9
nop */
- p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4
- p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4
+ p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
+ p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
/* Fix up the conditional jump, if there was one. */
if (i->Min.XAssisted.cond != MIPScc_AL) {
Int delta = p - ptmp;
delta = delta / 4 - 3;
vassert(delta > 0 && delta < 40);
- /* lw $9, 316($10) // guest_COND
+
+ /* lw $9, COND_OFFSET($GuestSP)
beq $9, $0, 2
- nop*/
- ptmp = mkFormI(ptmp, 35, 10, 9, 316);
+ nop */
+ ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
}
}
break;
}
-
+
case Min_Store: {
MIPSAMode *am_addr = i->Min.Store.dst;
if (am_addr->tag == Mam_IR) {
UInt idx = am_addr->Mam.IR.index;
UInt r_dst = iregNo(i->Min.LoadL.dst, mode64);
- p = mkFormI(p, 0x30, r_src, r_dst, idx);
+ if (i->Min.LoadL.sz == 4)
+ p = mkFormI(p, 0x30, r_src, r_dst, idx);
+ else
+ p = mkFormI(p, 0x34, r_src, r_dst, idx);
goto done;
}
case Min_StoreC: {
UInt idx = am_addr->Mam.IR.index;
UInt r_dst = iregNo(am_addr->Mam.IR.base, mode64);
- p = mkFormI(p, 0x38, r_dst, r_src, idx);
+ if (i->Min.StoreC.sz == 4)
+ p = mkFormI(p, 0x38, r_dst, r_src, idx);
+ else
+ p = mkFormI(p, 0x3C, r_dst, r_src, idx);
goto done;
}
case Min_RdWrLR: {
p = mkMoveReg(p, reg, 31);
goto done;
}
-
- // Floating point
-
+
+ /* Floating point */
case Min_FpLdSt: {
MIPSAMode *am_addr = i->Min.FpLdSt.addr;
UChar sz = i->Min.FpLdSt.sz;
case Min_FpUnary: {
switch (i->Min.FpUnary.op) {
- case Mfp_MOVS: { // FP move
+ case Mfp_MOVS: { /* FP move */
UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x6);
break;
}
- case Mfp_MOVD: { // FP move
+ case Mfp_MOVD: { /* FP move */
UInt fr_dst = dregNo(i->Min.FpUnary.dst);
UInt fr_src = dregNo(i->Min.FpUnary.src);
p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x6);
break;
}
- case Mfp_ABSS: { // ABSS
+ case Mfp_ABSS: { /* ABSS */
UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x5);
break;
}
- case Mfp_ABSD: { // ABSD
+ case Mfp_ABSD: { /* ABSD */
UInt fr_dst = dregNo(i->Min.FpUnary.dst);
UInt fr_src = dregNo(i->Min.FpUnary.src);
p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x5);
break;
}
- case Mfp_NEGS: { // ABSS
+ case Mfp_NEGS: { /* ABSS */
UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x7);
break;
}
- case Mfp_NEGD: { // ABSD
+ case Mfp_NEGD: { /* ABSD */
UInt fr_dst = dregNo(i->Min.FpUnary.dst);
UInt fr_src = dregNo(i->Min.FpUnary.src);
p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x7);
break;
}
- case Mfp_CVTD: { //CVT.D
- UInt fr_dst = dregNo(i->Min.FpUnary.dst);
- UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
- p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x21);
- break;
- }
- case Mfp_SQRTS: { //SQRT.S
+ case Mfp_SQRTS: { /* SQRT.S */
UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x04);
break;
}
- case Mfp_SQRTD: { //SQRT.D
+ case Mfp_SQRTD: { /* SQRT.D */
UInt fr_dst = dregNo(i->Min.FpUnary.dst);
UInt fr_src = dregNo(i->Min.FpUnary.src);
p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x04);
break;
}
- case Mfp_RSQRTS: { //RSQRT.S
+ case Mfp_RSQRTS: { /* RSQRT.S */
UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x16);
break;
}
- case Mfp_RSQRTD: { //RSQRT.D
+ case Mfp_RSQRTD: { /* RSQRT.D */
UInt fr_dst = dregNo(i->Min.FpUnary.dst);
UInt fr_src = dregNo(i->Min.FpUnary.src);
p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x16);
break;
}
- case Mfp_RECIPS: { //RECIP.S
+ case Mfp_RECIPS: { /* RECIP.S */
UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x15);
break;
}
- case Mfp_RECIPD: { //RECIP.D
+ case Mfp_RECIPD: { /* RECIP.D */
UInt fr_dst = dregNo(i->Min.FpUnary.dst);
UInt fr_src = dregNo(i->Min.FpUnary.src);
p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x15);
goto done;
}
+ case Min_FpTernary: {
+ switch (i->Min.FpTernary.op) {
+ case Mfp_MADDS: {
+ UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64);
+ UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
+ UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
+ UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
+ p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x20);
+ break;
+ }
+ case Mfp_MADDD: {
+ UInt fr_dst = dregNo(i->Min.FpTernary.dst);
+ UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
+ UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
+ UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
+ p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x21);
+ break;
+ }
+ case Mfp_MSUBS: {
+ UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64);
+ UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
+ UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
+ UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
+ p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x28);
+ break;
+ }
+ case Mfp_MSUBD: {
+ UInt fr_dst = dregNo(i->Min.FpTernary.dst);
+ UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
+ UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
+ UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
+ p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x29);
+ break;
+ }
+ default:
+ goto bad;
+ }
+ goto done;
+ }
+
case Min_FpConvert: {
switch (i->Min.FpConvert.op) {
UInt fr_dst, fr_src;
fr_src = fregNo(i->Min.FpConvert.src, mode64);
p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x21);
break;
+ case Mfp_CVTDL:
+ fr_dst = dregNo(i->Min.FpConvert.dst);
+ fr_src = dregNo(i->Min.FpConvert.src);
+ p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x21);
+ break;
+ case Mfp_CVTDS:
+ fr_dst = dregNo(i->Min.FpConvert.dst);
+ fr_src = fregNo(i->Min.FpConvert.src, mode64);
+ p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x21);
+ break;
+ case Mfp_CVTSL:
+ fr_dst = dregNo(i->Min.FpConvert.dst);
+ fr_src = fregNo(i->Min.FpConvert.src, mode64);
+ p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x20);
+ break;
+ case Mfp_CVTLS:
+ fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+ fr_src = dregNo(i->Min.FpConvert.src);
+ p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x25);
+ break;
+ case Mfp_CVTLD:
+ fr_dst = dregNo(i->Min.FpConvert.dst);
+ fr_src = dregNo(i->Min.FpConvert.src);
+ p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x25);
+ break;
case Mfp_TRUWS:
fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
fr_src = fregNo(i->Min.FpConvert.src, mode64);
}
goto done;
}
+
+ case Min_FpGpMove: {
+ switch (i->Min.FpGpMove.op) {
+ UInt rt, fs;
+ case MFpGpMove_mfc1: {
+ rt = iregNo(i->Min.FpGpMove.dst, mode64);
+ fs = fregNo(i->Min.FpGpMove.src, mode64);
+ p = mkFormR(p, 0x11, 0x0, rt, fs, 0x0, 0x0);
+ break;
+ }
+ case MFpGpMove_dmfc1: {
+ vassert(mode64);
+ rt = iregNo(i->Min.FpGpMove.dst, mode64);
+ fs = fregNo(i->Min.FpGpMove.src, mode64);
+ p = mkFormR(p, 0x11, 0x1, rt, fs, 0x0, 0x0);
+ break;
+ }
+ case MFpGpMove_mtc1: {
+ rt = iregNo(i->Min.FpGpMove.src, mode64);
+ fs = fregNo(i->Min.FpGpMove.dst, mode64);
+ p = mkFormR(p, 0x11, 0x4, rt, fs, 0x0, 0x0);
+ break;
+ }
+ case MFpGpMove_dmtc1: {
+ vassert(mode64);
+ rt = iregNo(i->Min.FpGpMove.src, mode64);
+ fs = fregNo(i->Min.FpGpMove.dst, mode64);
+ p = mkFormR(p, 0x11, 0x5, rt, fs, 0x0, 0x0);
+ break;
+ }
+ default:
+ goto bad;
+ }
+ goto done;
+ }
+
+ case Min_MoveCond: {
+ switch (i->Min.MoveCond.op) {
+ UInt d, s, t;
+ case MFpMoveCond_movns: {
+ d = fregNo(i->Min.MoveCond.dst, mode64);
+ s = fregNo(i->Min.MoveCond.src, mode64);
+ t = iregNo(i->Min.MoveCond.cond, mode64);
+ p = mkFormR(p, 0x11, 0x10, t, s, d, 0x13);
+ break;
+ }
+ case MFpMoveCond_movnd: {
+ d = dregNo(i->Min.MoveCond.dst);
+ s = dregNo(i->Min.MoveCond.src);
+ t = iregNo(i->Min.MoveCond.cond, mode64);
+ p = mkFormR(p, 0x11, 0x11, t, s, d, 0x13);
+ break;
+ }
+ case MMoveCond_movn: {
+ d = iregNo(i->Min.MoveCond.dst, mode64);
+ s = iregNo(i->Min.MoveCond.src, mode64);
+ t = iregNo(i->Min.MoveCond.cond, mode64);
+ p = mkFormR(p, 0, s, t, d, 0, 0xb);
+ break;
+ }
+ default:
+ goto bad;
+ }
+ goto done;
+ }
+
case Min_EvCheck: {
/* This requires a 32-bit dec/test in 32 mode. */
/* We generate:
*/
UChar* p0 = p;
/* lw r9, amCounter */
- p = do_load_or_store_machine_word(p, True/*isLoad*/, /*r*/9,
+ p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 9,
i->Min.EvCheck.amCounter, mode64);
/* addiu r9,r9,-1 */
p = mkFormI(p, 9, 9, 9, 0xFFFF);
/* sw r30, amCounter */
- p = do_load_or_store_machine_word(p, False/*!isLoad*/, /*r*/9,
+ p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 9,
i->Min.EvCheck.amCounter, mode64);
/* bgez t9, nofail */
p = mkFormI(p, 1, 9, 1, 3);
/* lw r9, amFailAddr */
- p = do_load_or_store_machine_word(p, True/*isLoad*/, /*r*/9,
+ p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 9,
i->Min.EvCheck.amFailAddr, mode64);
/* jalr $9 */
- p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4
- p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4
+ p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
+ p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
/* nofail: */
-
+
/* Crosscheck */
vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0);
goto done;
/* Generate a code template to increment a memory location whose
address will be known later as an immediate value. This code
template will be patched once the memory location is known.
- For now we do this with address == 0x65556555.
- 32-bit:
-
- move r9, 0x65556555
- lw r8, 0(r9)
- addiu r8, r8, 1 # add least significant word
- sw r8, 0(r9)
- sltiu r1, r8, 1 # set carry-in bit
- lw r8, 4(r9)
- addu r8, r8, r1
- sw r8, 4(r9) */
-
+ For now we do this with address == 0x65556555. */
if (mode64) {
- vassert(0);
+ /* 64-bit:
+ move r9, 0x6555655565556555ULL
+ ld r8, 0(r9)
+ daddiu r8, r8, 1
+ sd r8, 0(r9) */
+
+ /* move r9, 0x6555655565556555ULL */
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x6555655565556555ULL,
+ True /*mode64*/);
+ /* ld r8, 0(r9) */
+ p = mkFormI(p, 55, 9, 8, 0);
+
+ /* daddiu r8, r8, 1 */
+ p = mkFormI(p, 25, 8, 8, 1);
+
+ /* sd r8, 0(r9) */
+ p = mkFormI(p, 63, 9, 8, 0);
} else {
- // move r9, 0x65556555
- p = mkLoadImm_EXACTLY2or5(p, /*r*/9, 0x65556555ULL,
- False/*!mode64*/);
- // lw r8, 0(r9)
+ /* 32-bit:
+ move r9, 0x65556555
+ lw r8, 0(r9)
+ addiu r8, r8, 1 # add least significant word
+ sw r8, 0(r9)
+ sltiu r1, r8, 1 # set carry-in bit
+ lw r8, 4(r9)
+ addu r8, r8, r1
+ sw r8, 4(r9) */
+
+ /* move r9, 0x65556555 */
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x65556555ULL,
+ False /*!mode64*/);
+ /* lw r8, 0(r9) */
p = mkFormI(p, 35, 9, 8, 0);
- // addiu r8, r8, 1 # add least significant word
+ /* addiu r8, r8, 1 # add least significant word */
p = mkFormI(p, 9, 8, 8, 1);
- // sw r8, 0(r9)
+ /* sw r8, 0(r9) */
p = mkFormI(p, 43, 9, 8, 0);
- // sltiu r1, r8, 1 # set carry-in bit
+ /* sltiu r1, r8, 1 # set carry-in bit */
p = mkFormI(p, 11, 8, 1, 1);
- // lw r8, 4(r9)
+ /* lw r8, 4(r9) */
p = mkFormI(p, 35, 9, 8, 4);
- // addu r8, r8, r1
+ /* addu r8, r8, r1 */
p = mkFormR(p, 0, 8, 1, 8, 0, 33);
- // sw r8, 4(r9)
+ /* sw r8, 4(r9) */
p = mkFormI(p, 43, 9, 8, 4);
}
*is_profInc = True;
goto done;
}
-
+
default:
goto bad;
vex_printf("\n=> ");
ppMIPSInstr(i, mode64);
vpanic("emit_MIPSInstr");
- /*NOTREACHED*/ done:
- //vassert(p - &buf[0] <= 32);
+ /* NOTREACHED */ done:
+ vassert(p - &buf[0] <= 128);
return p - &buf[0];
}
jalr r9
nop
viz
- <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5>
- 0x120F809 // jalr r9
- 0x00000000 // nop
+ <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
+ 0x120F809 # jalr r9
+ 0x00000000 # nop
*/
UChar* p = (UChar*)place_to_chain;
vassert(0 == (3 & (HWord)p));
- vassert(isLoadImm_EXACTLY2or5(p, /*r*/9,
+ vassert(isLoadImm_EXACTLY2or6(p, /*r*/9,
(UInt)Ptr_to_ULong(disp_cp_chain_me_EXPECTED),
mode64));
- vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x120F809);
- vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x00000000);
+ vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
+ vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
/* And what we want to change it to is either:
move r9, place_to_jump_to
jalr r9
nop
viz
- <8 bytes generated by mkLoadImm_EXACTLY2or5>
- 0x120F809 // jalr r9
- 0x00000000 // nop
+ <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
+ 0x120F809 # jalr r9
+ 0x00000000 # nop
The replacement has the same length as the original.
*/
- p = mkLoadImm_EXACTLY2or5(p, /*r*/9,
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
Ptr_to_ULong(place_to_jump_to), mode64);
p = emit32(p, 0x120F809);
p = emit32(p, 0x00000000);
Int len = p - (UChar*)place_to_chain;
- vassert(len == (mode64 ? 28 : 16)); /* stay sane */
+ vassert(len == (mode64 ? 32 : 16)); /* stay sane */
VexInvalRange vir = {(HWord)place_to_chain, len};
return vir;
}
jalr r9
nop
viz
- <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5>
- 0x120F809 // jalr r9
- 0x00000000 // nop
+ <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
+ 0x120F809 # jalr r9
+ 0x00000000 # nop
*/
UChar* p = (UChar*)place_to_unchain;
vassert(0 == (3 & (HWord)p));
- vassert(isLoadImm_EXACTLY2or5(p, /*r*/9,
+ vassert(isLoadImm_EXACTLY2or6(p, /*r*/ 9,
Ptr_to_ULong(place_to_jump_to_EXPECTED),
mode64));
- vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x120F809);
- vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x00000000);
+ vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
+ vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
/* And what we want to change it to is:
move r9, disp_cp_chain_me
jalr r9
nop
viz
- <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5>
- 0x120F809 // jalr r9
- 0x00000000 // nop
+ <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
+ 0x120F809 # jalr r9
+ 0x00000000 # nop
The replacement has the same length as the original.
*/
- p = mkLoadImm_EXACTLY2or5(p, /*r*/9,
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
Ptr_to_ULong(disp_cp_chain_me), mode64);
p = emit32(p, 0x120F809);
p = emit32(p, 0x00000000);
Int len = p - (UChar*)place_to_unchain;
- vassert(len == (mode64 ? 28 : 16)); /* stay sane */
+ vassert(len == (mode64 ? 32 : 16)); /* stay sane */
VexInvalRange vir = {(HWord)place_to_unchain, len};
return vir;
}
VexInvalRange patchProfInc_MIPS ( void* place_to_patch,
ULong* location_of_counter, Bool mode64 )
{
- vassert(sizeof(ULong*) == 4);
+ if (mode64)
+ vassert(sizeof(ULong*) == 8);
+ else
+ vassert(sizeof(ULong*) == 4);
UChar* p = (UChar*)place_to_patch;
vassert(0 == (3 & (HWord)p));
- vassert(isLoadImm_EXACTLY2or5((UChar *)p, /*r*/9, 0x65556555, mode64));
+ vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
+ mode64 ? 0x6555655565556555ULL : 0x65556555,
+ mode64));
- vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x8D280000);
- vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x25080001);
- vassert(fetch32(p + (mode64 ? 20 : 8) + 8) == 0xAD280000);
- vassert(fetch32(p + (mode64 ? 20 : 8) + 12) == 0x2d010001);
- vassert(fetch32(p + (mode64 ? 20 : 8) + 16) == 0x8d280004);
- vassert(fetch32(p + (mode64 ? 20 : 8) + 20) == 0x01014021);
- vassert(fetch32(p + (mode64 ? 20 : 8) + 24) == 0xad280004);
+ if (mode64) {
+ vassert(fetch32(p + 24 + 0) == 0xDD280000);
+ vassert(fetch32(p + 24 + 4) == 0x65080001);
+ vassert(fetch32(p + 24 + 8) == 0xFD280000);
+ } else {
+ vassert(fetch32(p + 8 + 0) == 0x8D280000);
+ vassert(fetch32(p + 8 + 4) == 0x25080001);
+ vassert(fetch32(p + 8 + 8) == 0xAD280000);
+ vassert(fetch32(p + 8 + 12) == 0x2d010001);
+ vassert(fetch32(p + 8 + 16) == 0x8d280004);
+ vassert(fetch32(p + 8 + 20) == 0x01014021);
+ vassert(fetch32(p + 8 + 24) == 0xad280004);
+ }
- p = mkLoadImm_EXACTLY2or5(p, /*r*/9,
+ p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
Ptr_to_ULong(location_of_counter), mode64);
VexInvalRange vir = {(HWord)p, 8};
This file is part of Valgrind, a dynamic binary instrumentation
framework.
- Copyright (C) 2010-2012 RT-RK
+ Copyright (C) 2010-2013 RT-RK
mips-valgrind@rt-rk.com
This program is free software; you can redistribute it and/or
/*---------------------------------------------------------*/
/*--- Register Usage Conventions ---*/
/*---------------------------------------------------------*/
-/*
-
-Integer Regs
-------------
-ZERO0 Reserved
-GPR1:9 Allocateable
-10 GuestStatePointer
-GPR1:9 Allocateable
-SP StackFramePointer
-RA LinkRegister
-*/
+/* Integer Regs
+ ------------
+ ZERO0 Reserved
+ GPR12:22 Allocateable
+ 23 GuestStatePointer
+ 23 Allocateable
+ SP StackFramePointer
+ RA LinkRegister */
static Bool mode64 = False;
/* FPR register class for mips32/64 */
#define HRcFPR(__mode64) (__mode64 ? HRcFlt64 : HRcFlt32)
+/* guest_COND offset */
+#define COND_OFFSET(__mode64) (__mode64 ? 612 : 316)
+
/*---------------------------------------------------------*/
/*--- ISelEnv ---*/
/*---------------------------------------------------------*/
{
HReg sp = StackPointer(mode64);
vassert(n < 256 && (n % 8) == 0);
- addInstr(env, MIPSInstr_Alu(Malu_ADD, sp, sp, MIPSRH_Imm(True,
- toUShort(n))));
+ if (mode64)
+ addInstr(env, MIPSInstr_Alu(Malu_DADD, sp, sp, MIPSRH_Imm(True,
+ toUShort(n))));
+ else
+ addInstr(env, MIPSInstr_Alu(Malu_ADD, sp, sp, MIPSRH_Imm(True,
+ toUShort(n))));
}
static void sub_from_sp(ISelEnv * env, UInt n)
{
HReg sp = StackPointer(mode64);
vassert(n < 256 && (n % 8) == 0);
- addInstr(env, MIPSInstr_Alu(Malu_SUB, sp, sp,
- MIPSRH_Imm(True, toUShort(n))));
+ if (mode64)
+ addInstr(env, MIPSInstr_Alu(Malu_DSUB, sp, sp,
+ MIPSRH_Imm(True, toUShort(n))));
+ else
+ addInstr(env, MIPSInstr_Alu(Malu_SUB, sp, sp,
+ MIPSRH_Imm(True, toUShort(n))));
}
/*---------------------------------------------------------*/
static MIPSRH *iselWordExpr_RH5u_wrk(ISelEnv * env, IRExpr * e);
static MIPSRH *iselWordExpr_RH5u(ISelEnv * env, IRExpr * e);
+/* In 64-bit mode ONLY */
+static MIPSRH *iselWordExpr_RH6u_wrk(ISelEnv * env, IRExpr * e);
+static MIPSRH *iselWordExpr_RH6u(ISelEnv * env, IRExpr * e);
+
/* compute an I8/I16/I32 into a GPR*/
static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e);
static HReg iselWordExpr_R(ISelEnv * env, IRExpr * e);
to +infinity | 10 | 10
to -infinity | 11 | 01
*/
- // rm_MIPS32 = XOR(rm_IR , (rm_IR << 1)) & 2
+ /* rm_MIPS32 = XOR(rm_IR , (rm_IR << 1)) & 2 */
HReg irrm = iselWordExpr_R(env, mode);
HReg tmp = newVRegI(env);
HReg fcsr_old = newVRegI(env);
addInstr(env, MIPSInstr_Alu(Malu_AND, irrm, tmp, MIPSRH_Imm(False, 3)));
/* save old value of FCSR */
addInstr(env, MIPSInstr_MfFCSR(fcsr_old));
- sub_from_sp(env, 8); // Move SP down 4 bytes
+ sub_from_sp(env, 8); /* Move SP down 8 bytes */
am_addr = MIPSAMode_IR(0, StackPointer(mode64));
- //store old FCSR to stack
+ /* store old FCSR to stack */
addInstr(env, MIPSInstr_Store(4, am_addr, fcsr_old, mode64));
- //set new value of FCSR
+ /* set new value of FCSR */
addInstr(env, MIPSInstr_MtFCSR(irrm));
}
static void set_MIPS_rounding_default(ISelEnv * env)
{
HReg fcsr = newVRegI(env);
- // load as float
+ /* load as float */
MIPSAMode *am_addr;
am_addr = MIPSAMode_IR(0, StackPointer(mode64));
addInstr(env, MIPSInstr_Load(4, fcsr, am_addr, mode64));
- add_to_sp(env, 8); // Reset SP
+ add_to_sp(env, 8); /* Reset SP */
- //set new value of FCSR
+ /* set new value of FCSR*/
addInstr(env, MIPSInstr_MtFCSR(fcsr));
}
vassert(hregClass(r_srcHi) == HRcInt32);
vassert(hregClass(r_srcLo) == HRcInt32);
- sub_from_sp(env, 16); // Move SP down 16 bytes
+ sub_from_sp(env, 16); /* Move SP down 16 bytes */
am_addr0 = MIPSAMode_IR(0, StackPointer(mode64));
am_addr1 = MIPSAMode_IR(4, StackPointer(mode64));
- // store hi,lo as Ity_I32's
+ /* store hi,lo as Ity_I32's */
addInstr(env, MIPSInstr_Store(4, am_addr0, r_srcLo, mode64));
addInstr(env, MIPSInstr_Store(4, am_addr1, r_srcHi, mode64));
- // load as float
+ /* load as float */
addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, fr_dst, am_addr0));
- add_to_sp(env, 16); // Reset SP
+ add_to_sp(env, 16); /* Reset SP */
return fr_dst;
}
n_args++;
if (MIPS_N_REGPARMS < n_args + (passBBP ? 1 : 0)) {
- vpanic("doHelperCall(MIPS): cannot currently handle > 4 args");
+ vpanic("doHelperCall(MIPS): cannot currently handle > 4 or 8 args");
+ }
+ if (mode64) {
+ argregs[0] = hregMIPS_GPR4(mode64);
+ argregs[1] = hregMIPS_GPR5(mode64);
+ argregs[2] = hregMIPS_GPR6(mode64);
+ argregs[3] = hregMIPS_GPR7(mode64);
+ argregs[4] = hregMIPS_GPR8(mode64);
+ argregs[5] = hregMIPS_GPR9(mode64);
+ argregs[6] = hregMIPS_GPR10(mode64);
+ argregs[7] = hregMIPS_GPR11(mode64);
+ argiregs = 0;
+ } else {
+ argregs[0] = hregMIPS_GPR4(mode64);
+ argregs[1] = hregMIPS_GPR5(mode64);
+ argregs[2] = hregMIPS_GPR6(mode64);
+ argregs[3] = hregMIPS_GPR7(mode64);
+ argiregs = 0;
}
- argregs[0] = hregMIPS_GPR4(mode64);
- argregs[1] = hregMIPS_GPR5(mode64);
- argregs[2] = hregMIPS_GPR6(mode64);
- argregs[3] = hregMIPS_GPR7(mode64);
- argiregs = 0;
tmpregs[0] = tmpregs[1] = tmpregs[2] = tmpregs[3] = INVALID_HREG;
- /* First decide which scheme (slow or fast) is to be used. First
- assume the fast scheme, and select slow if any contraindications
- (wow) appear. */
+ /* First decide which scheme (slow or fast) is to be used. First assume the
+ fast scheme, and select slow if any contraindications (wow) appear. */
go_fast = True;
}
}
- /* save GuestStatePointer on the stack */
- sub_from_sp(env, 8); // Move SP down 4 bytes
- addInstr(env, MIPSInstr_Store(4, MIPSAMode_IR(0, StackPointer(mode64)),
- GuestStatePointer(mode64), mode64));
-
/* At this point the scheme to use has been established. Generate
code to get the arg values into the argument rregs. */
if (go_fast) {
argiregs |= (1 << (argreg + 4));
addInstr(env, mk_iMOVds_RR(argregs[argreg], iselWordExpr_R(env,
args[i])));
- } else { // Ity_I64
+ } else { /* Ity_I64 */
if (argreg & 1) {
argreg++;
argiregs |= (1 << (argreg + 4));
|| typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32 || mode64) {
tmpregs[argreg] = iselWordExpr_R(env, args[i]);
- } else { // Ity_I64
+ } else { /* Ity_I64 */
if (argreg & 1)
argreg++;
if (argreg + 1 >= MIPS_N_REGPARMS)
}
/* Move the args to their final destinations. */
for (i = 0; i < argreg; i++) {
- if (hregIsInvalid(tmpregs[i])) // Skip invalid regs
+ if (hregIsInvalid(tmpregs[i])) /* Skip invalid regs */
continue;
/* None of these insns, including any spill code that might
be generated, may alter the condition codes. */
}
}
- target = toUInt(Ptr_to_ULong(cee->addr));
+ target = mode64 ? Ptr_to_ULong(cee->addr) :
+ toUInt(Ptr_to_ULong(cee->addr));
/* Finally, the call itself. */
- if (mode64)
- if (cc == MIPScc_AL) {
- addInstr(env, MIPSInstr_CallAlways(cc, target, argiregs, rloc));
- } else {
- addInstr(env, MIPSInstr_Call(cc, target, argiregs, src, rloc));
- } else if (cc == MIPScc_AL) {
- addInstr(env, MIPSInstr_CallAlways(cc, (Addr32) target, argiregs, rloc));
- } else {
- addInstr(env, MIPSInstr_Call(cc, (Addr32) target, argiregs, src, rloc));
- }
- /* restore GuestStatePointer */
- addInstr(env, MIPSInstr_Load(4, GuestStatePointer(mode64),
- MIPSAMode_IR(0, StackPointer(mode64)), mode64));
- add_to_sp(env, 8); // Reset SP
+ if (cc == MIPScc_AL)
+ addInstr(env, MIPSInstr_CallAlways(cc, (Addr64)target, argiregs, rloc));
+ else
+ addInstr(env, MIPSInstr_Call(cc, (Addr64)target, argiregs, src, rloc));
}
/*---------------------------------------------------------*/
return toBool(u == (UInt) i);
}
+static Bool uLong_fits_in_16_bits ( ULong u )
+{
+ Long i = u & 0xFFFFULL;
+ i <<= 48;
+ i >>= 48;
+ return toBool(u == (ULong) i);
+}
+
+static Bool uLong_is_4_aligned ( ULong u )
+{
+ return toBool((u & 3ULL) == 0);
+}
+
static Bool sane_AMode(ISelEnv * env, MIPSAMode * am)
{
switch (am->tag) {
/* DO NOT CALL THIS DIRECTLY ! */
static MIPSAMode *iselWordExpr_AMode_wrk(ISelEnv * env, IRExpr * e,
- IRType xferTy)
+ IRType xferTy)
{
IRType ty = typeOfIRExpr(env->type_env, e);
- {
+ if (env->mode64) {
+ Bool aligned4imm = toBool(xferTy == Ity_I32 || xferTy == Ity_I64);
+ vassert(ty == Ity_I64);
+
+ /* Add64(expr,i), where i == sign-extend of (i & 0xFFFF) */
+ if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add64
+ && e->Iex.Binop.arg2->tag == Iex_Const
+ && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64
+ && (aligned4imm ?
+ uLong_is_4_aligned(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64) : True)
+ && uLong_fits_in_16_bits(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)) {
+ return MIPSAMode_IR((Int) e->Iex.Binop.arg2->Iex.Const.con->Ico.U64,
+ iselWordExpr_R(env, e->Iex.Binop.arg1));
+ }
+
+ /* Add64(expr,expr) */
+ if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add64) {
+ HReg r_base = iselWordExpr_R(env, e->Iex.Binop.arg1);
+ HReg r_idx = iselWordExpr_R(env, e->Iex.Binop.arg2);
+ return MIPSAMode_RR(r_idx, r_base);
+ }
+ } else {
vassert(ty == Ity_I32);
/* Add32(expr,i), where i == sign-extend of (i & 0xFFFF) */
aluOp = Malu_SUB;
break;
+ case Iop_Sub64:
+ aluOp = Malu_DSUB;
+ break;
+
case Iop_And32:
case Iop_And64:
aluOp = Malu_AND;
aluOp = Malu_XOR;
break;
+ case Iop_Add64:
+ aluOp = Malu_DADD;
+ break;
+
default:
aluOp = Malu_INVALID;
break;
switch (aluOp) {
case Malu_ADD:
case Malu_SUB:
+ case Malu_DADD:
+ case Malu_DSUB:
ri_srcR = iselWordExpr_RH(env, True /*signed */ ,
e->Iex.Binop.arg2);
break;
case Mshft_SLL:
case Mshft_SRL:
case Mshft_SRA:
- ri_srcR = iselWordExpr_RH5u(env, e->Iex.Binop. arg2);
+ if (mode64)
+ ri_srcR = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
+ else
+ ri_srcR = iselWordExpr_RH5u(env, e->Iex.Binop.arg2);
break;
default:
vpanic("iselIntExpr_R_wrk-shftOp-arg2");
}
- /* widen the left arg if needed */
- /*TODO do we need this? */
- if (ty == Ity_I8 || ty == Ity_I16)
- goto irreducible;
if (ty == Ity_I64) {
vassert(mode64);
addInstr(env, MIPSInstr_Shft(shftOp, False/*64bit shift */,
r_dst, r_srcL, ri_srcR));
- } else {
+ } else if (ty == Ity_I32) {
addInstr(env, MIPSInstr_Shft(shftOp, True /*32bit shift */,
r_dst, r_srcL, ri_srcR));
- }
+ } else
+ goto irreducible;
return r_dst;
}
size32 = False;
break;
default:
- vpanic
- ("iselCondCode(mips): CmpXX32 or CmpXX64");
+ vpanic("iselCondCode(mips): CmpXX32 or CmpXX64");
}
addInstr(env, MIPSInstr_Cmp(syned, size32, dst, r1, r2, cc));
}
if (e->Iex.Binop.op == Iop_Max32U) {
- /*
- tmp = argR - argL;
- dst = argL;
- bltz tmp,2;
- dst = argR;
-
- */
+ HReg tmp = newVRegI(env);
+ HReg r_dst = newVRegI(env);
HReg argL = iselWordExpr_R(env, e->Iex.Binop.arg1);
- MIPSRH *argR = iselWordExpr_RH(env, False /*signed */ ,
+ HReg argR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+ MIPSRH *argRH = iselWordExpr_RH(env, False /*signed */ ,
e->Iex.Binop.arg2);
- HReg dst = newVRegI(env);
- HReg tmp = newVRegI(env);
- addInstr(env, MIPSInstr_Alu(Malu_SUB, tmp, argL, argR));
- addInstr(env, MIPSInstr_MovCond(dst, argL, argR, tmp, MIPScc_MI));
-
- return dst;
+ /* max (v0, s0)
+ ------------
+ slt v1, v0, s0
+ movn v0, s0, v1 */
+
+ addInstr(env, MIPSInstr_Alu(Malu_SLT, tmp, argL, argRH));
+ addInstr(env, mk_iMOVds_RR(r_dst, argL));
+ addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dst, argR, tmp));
+ return r_dst;
}
if (e->Iex.Binop.op == Iop_Mul32 || e->Iex.Binop.op == Iop_Mul64) {
if (e->Iex.Binop.op == Iop_CmpF64) {
HReg r_srcL, r_srcR;
- {
+ if (mode64) {
+ r_srcL = iselFltExpr(env, e->Iex.Binop.arg1);
+ r_srcR = iselFltExpr(env, e->Iex.Binop.arg2);
+ } else {
r_srcL = iselDblExpr(env, e->Iex.Binop.arg1);
r_srcR = iselDblExpr(env, e->Iex.Binop.arg2);
}
HReg r_ccIR_b6 = newVRegI(env);
/* Create in dst, the IRCmpF64Result encoded result. */
- // chech for EQ
+ /* check for EQ */
addInstr(env, MIPSInstr_FpCompare(Mfp_CMP, tmp, r_srcL, r_srcR,
toUChar(2)));
addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, r_ccMIPS, tmp,
MIPSRH_Imm(False, 22)));
- // chech for UN
+ /* check for UN */
addInstr(env, MIPSInstr_FpCompare(Mfp_CMP, tmp, r_srcL, r_srcR,
toUChar(1)));
addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tmp, tmp,
MIPSRH_Imm(False, 23)));
addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS,
MIPSRH_Reg(tmp)));
- // chech for LT
+ /* check for LT */
addInstr(env, MIPSInstr_FpCompare(Mfp_CMP, tmp, r_srcL, r_srcR,
toUChar(12)));
addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tmp,
tmp, MIPSRH_Imm(False, 21)));
addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS,
MIPSRH_Reg(tmp)));
- // chech for GT
+ /* check for GT */
addInstr(env, MIPSInstr_FpCompare(Mfp_CMP, tmp, r_srcL, r_srcR,
toUChar(15)));
addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tmp, tmp,
MIPSRH_Imm(False, 8)));
addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS,
MIPSRH_Reg(tmp)));
- /* Map compare result from PPC to IR,
- conforming to CmpF64 definition. */
- /*
+ /* Map compare result from MIPS to IR,
+ conforming to CmpF64 definition.
FP cmp result | MIPS | IR
--------------------------
UN | 0x1 | 0x45
LT | 0x8 | 0x01
*/
- // r_ccIR_b0 = r_ccPPC[0] | r_ccPPC[3]
+ /* r_ccIR_b0 = r_ccMIPS[0] | r_ccMIPS[3] */
addInstr(env, MIPSInstr_Shft(Mshft_SRL, True, r_ccIR_b0, r_ccMIPS,
MIPSRH_Imm(False, 0x3)));
addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR_b0, r_ccMIPS,
addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b0, r_ccIR_b0,
MIPSRH_Imm(False, 0x1)));
- // r_ccIR_b2 = r_ccPPC[0]
+ /* r_ccIR_b2 = r_ccMIPS[0] */
addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, r_ccIR_b2, r_ccMIPS,
MIPSRH_Imm(False, 0x2)));
addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b2, r_ccIR_b2,
MIPSRH_Imm(False, 0x4)));
- // r_ccIR_b6 = r_ccPPC[0] | r_ccPPC[1]
+ /* r_ccIR_b6 = r_ccMIPS[0] | r_ccMIPS[1] */
addInstr(env, MIPSInstr_Shft(Mshft_SRL, True, r_ccIR_b6,
r_ccMIPS, MIPSRH_Imm(False, 0x1)));
addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR_b6, r_ccMIPS,
addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b6, r_ccIR_b6,
MIPSRH_Imm(False, 0x40)));
- // r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6
+ /* r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6 */
addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR, r_ccIR_b0,
MIPSRH_Reg(r_ccIR_b2)));
addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR, r_ccIR,
return r_dst;
}
+ if (e->Iex.Binop.op == Iop_F32toI64S) {
+ vassert(mode64);
+ HReg valS = newVRegI(env);
+ HReg tmpF = newVRegF(env);
+ HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
+
+ /* CVTLS tmpF, valF */
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLS, tmpF, valF));
+ set_MIPS_rounding_default(env);
+
+ /* Doubleword Move from Floating Point
+ dmfc1 valS, tmpF */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmfc1, valS, tmpF));
+
+ return valS;
+ }
+
if (e->Iex.Binop.op == Iop_F64toI32S) {
- HReg valD = iselDblExpr(env, e->Iex.Binop.arg2);
+ HReg valD;
+ if (mode64)
+ valD = iselFltExpr(env, e->Iex.Binop.arg2);
+ else
+ valD = iselDblExpr(env, e->Iex.Binop.arg2);
HReg valS = newVRegF(env);
HReg r_dst = newVRegI(env);
- MIPSAMode *am_addr;
+ /* CVTWD valS, valD */
set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWD, valS, valD));
set_MIPS_rounding_default(env);
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- // store as F32
- addInstr(env, MIPSInstr_FpLdSt(False/*store */ , 4, valS, am_addr));
- // load as I32
- addInstr(env, MIPSInstr_Load(4, r_dst, am_addr, mode64));
-
- add_to_sp(env, 16); // Reset SP
+ /* Move Word From Floating Point
+ mfc1 r_dst, valS */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, valS));
return r_dst;
}
break;
}
- /* --------- UNARY OP --------- */
+ /* --------- UNARY OP --------- */
case Iex_Unop: {
IROp op_unop = e->Iex.Unop.op;
return r_dst;
}
- /*not(x) = nor(x,x) */
+ /* not(x) = nor(x,x) */
case Iop_Not1: {
HReg r_dst = newVRegI(env);
HReg r_srcL = iselWordExpr_R(env, e->Iex.Unop.arg);
}
case Iop_ReinterpF32asI32: {
- MIPSAMode *am_addr;
HReg fr_src = iselFltExpr(env, e->Iex.Unop.arg);
HReg r_dst = newVRegI(env);
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- // store as F32
- addInstr(env, MIPSInstr_FpLdSt(False/*store */ , 4, fr_src,
- am_addr));
- // load as Ity_I32
- addInstr(env, MIPSInstr_Load(4, r_dst, am_addr, mode64));
+ /* Move Word From Floating Point
+ mfc1 r_dst, fr_src */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, fr_src));
- add_to_sp(env, 16); // Reset SP
return r_dst;
}
case Iop_ReinterpF64asI64: {
vassert(mode64);
- MIPSAMode *am_addr;
HReg fr_src = iselFltExpr(env, e->Iex.Unop.arg);
HReg r_dst = newVRegI(env);
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+ /* Doubleword Move from Floating Point
+ mfc1 r_dst, fr_src */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmfc1, r_dst, fr_src));
- // store as F64
- addInstr(env, MIPSInstr_FpLdSt(False/*store */ , 8, fr_src,
- am_addr));
- // load as Ity_I64
- addInstr(env, MIPSInstr_Load(8, r_dst, am_addr, mode64));
+ return r_dst;
+ }
+
+ case Iop_F64toI32S: {
+ HReg valD;
+ if (mode64)
+ valD = iselFltExpr(env, e->Iex.Binop.arg2);
+ else
+ valD = iselDblExpr(env, e->Iex.Binop.arg2);
+ HReg valS = newVRegF(env);
+ HReg r_dst = newVRegI(env);
+
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWD, valS, valD));
+ set_MIPS_rounding_default(env);
+
+ /* Move Word From Floating Point
+ mfc1 r_dst, valS */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, valS));
- add_to_sp(env, 16); // Reset SP
return r_dst;
}
HReg r_dst = newVRegI(env);
HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
vassert(mode64);
- addInstr(env, MIPSInstr_Shft(Mshft_SLL, False/*!32bit shift */,
+ addInstr(env, MIPSInstr_Shft(Mshft_SLL, False /*!32bit shift */,
r_dst, r_src, MIPSRH_Imm(False, 32)));
- addInstr(env, MIPSInstr_Shft(Mshft_SRL, False/*!32bit shift */,
+ addInstr(env, MIPSInstr_Shft(Mshft_SRL, False /*!32bit shift */,
r_dst, r_dst, MIPSRH_Imm(False, 32)));
return r_dst;
}
return iselWordExpr_R(env, e->Iex.Unop.arg);
case Iop_64HIto32: {
- HReg rHi, rLo;
- iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
- return rHi;
+ if (env->mode64) {
+ HReg r_dst = newVRegI(env);
+ HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+ addInstr(env, MIPSInstr_Shft(Mshft_SRA, False /*64bit shift */,
+ r_dst, r_src, MIPSRH_Imm(True, 32)));
+ return r_dst;
+ } else {
+ HReg rHi, rLo;
+ iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
+ return rHi;
+ }
}
case Iop_64to32: {
- HReg rHi, rLo;
- iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
- return rLo;
+ if (env->mode64) {
+ HReg r_dst = newVRegI(env);
+ r_dst = iselWordExpr_R(env, e->Iex.Unop.arg);
+ return r_dst;
+ } else {
+ HReg rHi, rLo;
+ iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
+ return rLo;
+ }
}
-
+
case Iop_64to16: {
vassert(env->mode64);
HReg r_dst = newVRegI(env);
r_dst = iselWordExpr_R(env, e->Iex.Unop.arg);
return r_dst;
}
-
+
case Iop_32Sto64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
vassert(mode64);
- addInstr(env, MIPSInstr_Shft(Mshft_SLL, True/*!32bit shift */,
+ addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /*!32bit shift */,
r_dst, r_src, MIPSRH_Imm(True, 0)));
return r_dst;
}
-
+
case Iop_CmpNEZ8: {
HReg r_dst = newVRegI(env);
HReg tmp = newVRegI(env);
return r_dst;
}
+ case Iop_Clz64: {
+ vassert(mode64);
+ HReg r_dst = newVRegI(env);
+ HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+ addInstr(env, MIPSInstr_Unary(Mun_DCLZ, r_dst, r_src));
+ return r_dst;
+ }
+
case Iop_CmpNEZ64: {
HReg hi, lo;
HReg r_dst = newVRegI(env);
HReg r_src;
- r_src = newVRegI(env);
- iselInt64Expr(&hi, &lo, env, e->Iex.Unop.arg);
- addInstr(env, MIPSInstr_Alu(Malu_OR, r_src, lo, MIPSRH_Reg(hi)));
+ if (env->mode64) {
+ r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+ } else {
+ r_src = newVRegI(env);
+ iselInt64Expr(&hi, &lo, env, e->Iex.Unop.arg);
+ addInstr(env, MIPSInstr_Alu(Malu_OR, r_src, lo, MIPSRH_Reg(hi)));
+ }
MIPSCondCode cc;
cc = MIPScc_NE;
vassert(mode64);
HReg rHi, rLo;
iselInt128Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
- return rHi; /* and abandon rLo .. poor wee thing :-) */
+ return rHi; /* and abandon rLo .. poor wee thing :-) */
}
case Iop_128to64: {
vassert(mode64);
HReg rHi, rLo;
iselInt128Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
- return rLo; /* and abandon rLo .. poor wee thing :-) */
+ return rLo; /* and abandon rLo .. poor wee thing :-) */
}
default:
break;
}
- /* --------- GET --------- */
+ /* --------- GET --------- */
case Iex_Get: {
if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32
|| ((ty == Ity_I64) && mode64)) {
break;
}
- /* --------- MULTIPLEX --------- */
+ /* --------- ITE --------- */
case Iex_ITE: {
if ((ty == Ity_I8 || ty == Ity_I16 ||
ty == Ity_I32 || ((ty == Ity_I64))) &&
typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) {
/*
- * r_dst = cond && rX
+ * r_dst = cond && r1
* cond = not(cond)
* tmp = cond && r0
* r_dst = tmp + r_dst
HReg r_tmp = newVRegI(env);
HReg r_tmp1 = newVRegI(env);
HReg r_cond_neg = newVRegI(env);
-
/* r_cond = 0 - r_cond_1 */
addInstr(env, MIPSInstr_LI(mask, 0x0));
addInstr(env, MIPSInstr_Alu(Malu_SUB, r_cond,
break;
}
- /* --------- LITERAL --------- */
- /* 32/16/8-bit literals */
+ /* --------- LITERAL --------- */
+ /* 32/16/8-bit literals */
case Iex_Const: {
Long l;
HReg r_dst = newVRegI(env);
return r_dst;
}
- /* --------- CCALL --------- */
+ /* --------- CCALL --------- */
case Iex_CCall: {
HReg r_dst = newVRegI(env);
vassert(ty == e->Iex.CCall.retty);
default:
break;
- } /* end switch(e->tag) */
+ } /* end switch(e->tag) */
/* We get here if no pattern matched. */
irreducible:
return MIPSRH_Reg(iselWordExpr_R(env, e));
}
+/* --------------------- RH6u --------------------- */
+
+/* Compute an I8 expression into a reg-or-imm operand usable as a
+   64-bit shift amount (valid immediates are 1..63).  Only used in
+   64-bit mode. */
+static MIPSRH *iselWordExpr_RH6u ( ISelEnv * env, IRExpr * e )
+{
+ MIPSRH *ri;
+ vassert(env->mode64);
+ ri = iselWordExpr_RH6u_wrk(env, e);
+ /* sanity checks ... */
+ switch (ri->tag) {
+ case Mrh_Imm:
+ /* the _wrk routine only emits unsigned immediates in 1..63 */
+ vassert(ri->Mrh.Imm.imm16 >= 1 && ri->Mrh.Imm.imm16 <= 63);
+ vassert(!ri->Mrh.Imm.syned);
+ return ri;
+ case Mrh_Reg:
+ /* anything else comes back as a virtual GPR */
+ vassert(hregClass(ri->Mrh.Reg.reg) == HRcGPR(env->mode64));
+ vassert(hregIsVirtual(ri->Mrh.Reg.reg));
+ return ri;
+ default:
+ vpanic("iselIntExpr_RH6u: unknown mips64 RI tag");
+ }
+}
+
+/* DO NOT CALL THIS DIRECTLY !  Use iselWordExpr_RH6u, which adds the
+   sanity checks on the returned operand. */
+static MIPSRH *iselWordExpr_RH6u_wrk ( ISelEnv * env, IRExpr * e )
+{
+ IRType ty = typeOfIRExpr(env->type_env, e);
+ vassert(ty == Ity_I8);
+
+ /* special case: immediate shift amount in the encodable range
+    1..63 (0 is left to the register path) */
+ if (e->tag == Iex_Const
+ && e->Iex.Const.con->tag == Ico_U8
+ && e->Iex.Const.con->Ico.U8 >= 1 && e->Iex.Const.con->Ico.U8 <= 63)
+ {
+ return MIPSRH_Imm(False /*unsigned */ ,
+ e->Iex.Const.con->Ico.U8);
+ }
+
+ /* default case: calculate into a register and return that */
+ return MIPSRH_Reg(iselWordExpr_R(env, e));
+}
+
/* --------------------- CONDCODE --------------------- */
/* Generate code to evaluated a bit-typed expression, returning the
}
addInstr(env, MIPSInstr_Cmp(syned, size32, dst, r1, r2, cc));
- // Store result to guest_COND
+ /* Store result to guest_COND */
MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64));
addInstr(env, MIPSInstr_Store(4,
- MIPSAMode_IR(am_addr->Mam.IR.index + 316, am_addr->Mam.IR.base),
+ MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64),
+ am_addr->Mam.IR.base),
dst, mode64));
return cc;
}
addInstr(env, MIPSInstr_LI(r_dst, 0x1));
addInstr(env, MIPSInstr_Alu(Malu_SUB, r_dst, r_dst, r_srcR));
- // Store result to guest_COND
+ /* Store result to guest_COND */
MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64));
addInstr(env, MIPSInstr_Store(4,
- MIPSAMode_IR(am_addr->Mam.IR.index + 316, am_addr->Mam.IR.base),
+ MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64),
+ am_addr->Mam.IR.base),
r_dst, mode64));
return MIPScc_NE;
}
if (e->tag == Iex_RdTmp || e->tag == Iex_Unop) {
HReg r_dst = iselWordExpr_R_wrk(env, e);
- // Store result to guest_COND
+ /* Store result to guest_COND */
MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64));
addInstr(env, MIPSInstr_Store(4,
- MIPSAMode_IR(am_addr->Mam.IR.index + 316, am_addr->Mam.IR.base),
+ MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64),
+ am_addr->Mam.IR.base),
r_dst, mode64));
return MIPScc_EQ;
}
/* 64-bit ITE */
if (e->tag == Iex_ITE) {
vassert(typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1);
+ vassert(!mode64);
HReg expr0Lo, expr0Hi;
HReg expr1Lo, expr1Hi;
HReg tmpHi = newVRegI(env);
HReg mask = newVRegI(env);
HReg desLo = newVRegI(env);
HReg desHi = newVRegI(env);
-
/* r_cond = 0 - r_cond_1 */
addInstr(env, MIPSInstr_LI(mask, 0x0));
addInstr(env, MIPSInstr_Alu(Malu_SUB, r_cond,
HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
- addInstr(env, MIPSInstr_Mul(syned/*Unsigned or Signed */ ,
+ addInstr(env, MIPSInstr_Mul(syned /*Unsigned or Signed */,
True /*widen */ , True,
r_dst, r_srcL, r_srcR));
addInstr(env, MIPSInstr_Mfhi(tHi));
return;
}
- /* 32HLto64(e1,e2) */
+ /* 32HLto64(e1,e2) */
case Iop_32HLto64:
*rHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
*rLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
return;
- /* Or64/And64/Xor64 */
+ /* Or64/And64/Xor64 */
case Iop_Or64:
case Iop_And64:
case Iop_Xor64: {
MIPSAMode *am_addr;
HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
- sub_from_sp(env, 16); // Move SP down 16 bytes
+ sub_from_sp(env, 16); /* Move SP down 16 bytes */
am_addr = MIPSAMode_IR(0, StackPointer(mode64));
- // store as F64
+ /* store as F64 */
addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
am_addr));
- // load as 2xI32
+ /* load as 2xI32 */
addInstr(env, MIPSInstr_Load(4, tLo, am_addr, mode64));
addInstr(env, MIPSInstr_Load(4, tHi, nextMIPSAModeFloat(am_addr),
mode64));
- add_to_sp(env, 16); // Reset SP
+ /* Reset SP */
+ add_to_sp(env, 16);
*rHi = tHi;
*rLo = tLo;
return;
}
-
+
default:
vex_printf("UNARY: No such op: ");
ppIROp(e->Iex.Unop.op);
/* Nothing interesting here; really just wrappers for
64-bit stuff. */
-
static HReg iselFltExpr(ISelEnv * env, IRExpr * e)
{
HReg r = iselFltExpr_wrk(env, e);
vassert(e->Iex.Load.ty == Ity_F32
|| (e->Iex.Load.ty == Ity_F64 && mode64));
am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, ty);
- addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, r_dst, am_addr));
+ if (mode64 && e->Iex.Load.ty == Ity_F64)
+ addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, r_dst, am_addr));
+ else
+ addInstr(env, MIPSInstr_FpLdSt(True /*load */, 4, r_dst, am_addr));
return r_dst;
}
HReg r_dst = newVRegF(env);
MIPSAMode *am_addr = MIPSAMode_IR(e->Iex.Get.offset,
GuestStatePointer(mode64));
- addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, r_dst, am_addr));
+ if (mode64)
+ addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, r_dst, am_addr));
+ else
+ addInstr(env, MIPSInstr_FpLdSt(True /*load */, 4, r_dst, am_addr));
return r_dst;
}
if (e->tag == Iex_Unop) {
switch (e->Iex.Unop.op) {
case Iop_ReinterpI32asF32: {
- MIPSAMode *am_addr;
HReg fr_src = iselWordExpr_R(env, e->Iex.Unop.arg);
HReg r_dst = newVRegF(env);
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- // store as I32
- addInstr(env, MIPSInstr_Store(4, am_addr, fr_src, mode64));
-
- // load as Ity_F32
- addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, r_dst, am_addr));
+ /* Move Word to Floating Point
+ mtc1 r_dst, valS */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, r_dst, fr_src));
- add_to_sp(env, 16); // Reset SP
return r_dst;
-
}
case Iop_F32toF64: {
- /* first arg is rounding mode; we ignore it. */
- MIPSAMode *am_addr;
- HReg src = iselFltExpr(env, e->Iex.Unop.arg);
- HReg dst = newVRegF(env);
-
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- addInstr(env, MIPSInstr_Store(4,
- MIPSAMode_IR(am_addr->Mam.IR.index + 4,
- am_addr->Mam.IR.base),
- hregMIPS_GPR0(mode64), mode64));
- addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 4, src, am_addr));
-
- // load as Ity_F32
- addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, dst, am_addr));
- add_to_sp(env, 16); // Reset SP
+ if (mode64) {
+ HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+ HReg dst = newVRegD(env);
- return dst;
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDS, dst, src));
+ return dst;
+ } else {
+ MIPSAMode *am_addr;
+ HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+ HReg dst = newVRegF(env);
+
+ sub_from_sp(env, 16); /* Move SP down 16 bytes */
+ am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+ addInstr(env, MIPSInstr_Store(4,
+ MIPSAMode_IR(am_addr->Mam.IR.index +4,
+ am_addr->Mam.IR.base),
+ hregMIPS_GPR0(mode64), mode64));
+ addInstr(env, MIPSInstr_FpLdSt(False /* store */, 4, src, am_addr));
+
+ /* load as Ity_F64 */
+ addInstr(env, MIPSInstr_FpLdSt(True /* load */, 8, dst, am_addr));
+ /* Reset SP */
+ add_to_sp(env, 16);
+
+ return dst;
+ }
}
- case Iop_ReinterpI64asF64:
- {
+ case Iop_ReinterpI64asF64: {
vassert(mode64);
- MIPSAMode *am_addr;
HReg fr_src = iselWordExpr_R(env, e->Iex.Unop.arg);
HReg r_dst = newVRegF(env);
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- // store as I64
- addInstr(env, MIPSInstr_Store(8, am_addr, fr_src, mode64));
+ /* Move Doubleword to Floating Point
+ dmtc1 r_dst, valS */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmtc1, r_dst, fr_src));
- // load as Ity_F64
- addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, r_dst, am_addr));
-
- add_to_sp(env, 16); // Reset SP
return r_dst;
}
+ case Iop_I32StoF64: {
+ vassert(mode64);
+ HReg dst = newVRegF(env);
+ HReg tmp1 = newVRegF(env);
+ HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+ MIPSAMode *am_addr;
+
+ /* Move Word to Floating Point
+ mtc1 tmp1, r_src */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp1, r_src));
+
+ HReg irrm = newVRegI(env);
+
+ MIPSAMode *am_addr1 = MIPSAMode_IR(552, GuestStatePointer(mode64));
+
+ addInstr(env, MIPSInstr_Load(4, irrm, am_addr1, mode64));
+
+ /* set rounding mode */
+ HReg tmp = newVRegI(env);
+ HReg fcsr_old = newVRegI(env);
+
+ addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tmp,
+ irrm, MIPSRH_Imm(False, 1)));
+ addInstr(env, MIPSInstr_Alu(Malu_XOR, tmp, irrm, MIPSRH_Reg(tmp)));
+ addInstr(env, MIPSInstr_Alu(Malu_AND, irrm, tmp,
+ MIPSRH_Imm(False, 3)));
+ /* save old value of FCSR */
+ addInstr(env, MIPSInstr_MfFCSR(fcsr_old));
+ /* Move SP down 8 bytes */
+ sub_from_sp(env, 8);
+ am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+ /* store old FCSR to stack */
+ addInstr(env, MIPSInstr_Store(4, am_addr, fcsr_old, mode64));
+
+ /* set new value of FCSR */
+ addInstr(env, MIPSInstr_MtFCSR(irrm));
+
+ /* and do convert */
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDW, dst, tmp1));
+ /* set MIPS rounding mode to default and reset sp */
+ set_MIPS_rounding_default(env);
+
+ return dst;
+ }
case Iop_AbsF32:
case Iop_AbsF64: {
Bool sz32 = e->Iex.Unop.op == Iop_AbsF32;
addInstr(env, MIPSInstr_FpUnary(sz32 ? Mfp_NEGS : Mfp_NEGD, dst, src));
return dst;
}
+ case Iop_RoundF64toF64_ZERO: {
+ vassert(mode64);
+ HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+ HReg dst = newVRegF(env);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_TRULD, dst, src));
+ return dst;
+ }
default:
break;
}
case Iop_DivF32:
op = Mfp_DIVS;
break;
+ case Iop_DivF64:
+ vassert(mode64);
+ op = Mfp_DIVD;
+ break;
case Iop_MulF32:
op = Mfp_MULS;
break;
+ case Iop_MulF64:
+ vassert(mode64);
+ op = Mfp_MULD;
+ break;
case Iop_AddF32:
op = Mfp_ADDS;
break;
+ case Iop_AddF64:
+ vassert(mode64);
+ op = Mfp_ADDD;
+ break;
case Iop_SubF32:
op = Mfp_SUBS;
break;
+ case Iop_SubF64:
+ vassert(mode64);
+ op = Mfp_SUBD;
+ break;
default:
vassert(0);
}
+ set_MIPS_rounding_mode(env, e->Iex.Triop.details->arg1);
addInstr(env, MIPSInstr_FpBinary(op, dst, argL, argR));
+ set_MIPS_rounding_default(env);
return dst;
}
default:
if (e->tag == Iex_Binop) {
switch (e->Iex.Binop.op) {
case Iop_F64toF32: {
- HReg valD = iselDblExpr(env, e->Iex.Binop.arg2);
+ HReg valD;
+ if (mode64)
+ valD = iselFltExpr(env, e->Iex.Binop.arg2);
+ else
+ valD = iselDblExpr(env, e->Iex.Binop.arg2);
HReg valS = newVRegF(env);
set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWS, valS, valF));
-
set_MIPS_rounding_default(env);
return valS;
}
+ case Iop_RoundF64toInt: {
+ HReg valS = newVRegF(env);
+ HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
+
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLD, valS, valF));
+ set_MIPS_rounding_default(env);
+ return valS;
+ }
+
case Iop_I32StoF32: {
HReg r_dst = newVRegF(env);
+ HReg fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2);
+ HReg tmp = newVRegF(env);
+
+ /* Move Word to Floating Point
+ mtc1 tmp, fr_src */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp, fr_src));
+
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSW, r_dst, tmp));
+ set_MIPS_rounding_default(env);
+
+ return r_dst;
+ }
+
+ case Iop_I64StoF64: {
+ HReg r_dst = newVRegF(env);
MIPSAMode *am_addr;
HReg fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2);
HReg tmp = newVRegF(env);
- sub_from_sp(env, 16); // Move SP down 16 bytes
+ /* Move SP down 8 bytes */
+ sub_from_sp(env, 8);
am_addr = MIPSAMode_IR(0, StackPointer(mode64));
- // store as I32
- addInstr(env, MIPSInstr_Store(4, am_addr, fr_src, mode64));
+ /* store as I64 */
+ addInstr(env, MIPSInstr_Store(8, am_addr, fr_src, mode64));
- // load as Ity_F32
- addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, tmp, am_addr));
+ /* load as Ity_F64 */
+ addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, tmp, am_addr));
- add_to_sp(env, 16); // Reset SP
+ /* Reset SP */
+ add_to_sp(env, 8);
set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
- addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSW, r_dst, tmp));
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDL, r_dst, tmp));
+ set_MIPS_rounding_default(env);
+
+ return r_dst;
+ }
+
+ case Iop_I64StoF32: {
+ HReg r_dst = newVRegF(env);
+
+ MIPSAMode *am_addr;
+ HReg fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2);
+ HReg tmp = newVRegF(env);
+
+ /* Move SP down 8 bytes */
+ sub_from_sp(env, 8);
+ am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+ /* store as I64 */
+ addInstr(env, MIPSInstr_Store(8, am_addr, fr_src, mode64));
+
+ /* load as Ity_F64 */
+ addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, tmp, am_addr));
+
+ /* Reset SP */
+ add_to_sp(env, 8);
+
+ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSL, r_dst, tmp));
set_MIPS_rounding_default(env);
return r_dst;
set_MIPS_rounding_default(env);
return dst;
}
-
+
default:
break;
}
}
+ if (e->tag == Iex_Qop) {
+ switch (e->Iex.Qop.details->op) {
+ case Iop_MAddF32:
+ case Iop_MAddF64:
+ case Iop_MSubF32:
+ case Iop_MSubF64: {
+ MIPSFpOp op = 0;
+ switch (e->Iex.Qop.details->op) {
+ case Iop_MAddF32:
+ op = Mfp_MADDS;
+ break;
+ case Iop_MAddF64:
+ op = Mfp_MADDD;
+ break;
+ case Iop_MSubF32:
+ op = Mfp_MSUBS;
+ break;
+ case Iop_MSubF64:
+ op = Mfp_MSUBD;
+ break;
+ default:
+ vassert(0);
+ }
+ HReg dst = newVRegF(env);
+ HReg src1 = iselFltExpr(env, e->Iex.Qop.details->arg2);
+ HReg src2 = iselFltExpr(env, e->Iex.Qop.details->arg3);
+ HReg src3 = iselFltExpr(env, e->Iex.Qop.details->arg4);
+ set_MIPS_rounding_mode(env, e->Iex.Qop.details->arg1);
+ addInstr(env, MIPSInstr_FpTernary(op, dst,
+ src1, src2, src3));
+ set_MIPS_rounding_default(env);
+ return dst;
+ }
+
+ default:
+ break;
+ }
+ }
+
if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_TruncF64asF32) {
/* This is quite subtle. The only way to do the relevant
truncation is to do a single-precision store and then a
MIPSAMode *zero_r1 = MIPSAMode_IR(0, StackPointer(mode64));
sub_from_sp(env, 16);
- // store as F32, hence truncating
+ /* store as F32, hence truncating */
addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 4, fsrc, zero_r1));
- // and reload. Good huh?! (sigh)
+ /* and reload. Good huh?! (sigh) */
addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, fdst, zero_r1));
add_to_sp(env, 16);
return fdst;
}
+ /* --------- ITE --------- */
+ if (e->tag == Iex_ITE) {
+ if (ty == Ity_F64
+ && typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) {
+ vassert(mode64);
+ HReg r0 = iselFltExpr(env, e->Iex.ITE.iffalse);
+ HReg r1 = iselFltExpr(env, e->Iex.ITE.iftrue);
+ HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
+ HReg r_dst = newVRegF(env);
+ addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, r_dst, r0));
+ addInstr(env, MIPSInstr_MoveCond(MFpMoveCond_movnd, r_dst, r1,
+ r_cond));
+ return r_dst;
+ }
+ }
+
vex_printf("iselFltExpr(mips): No such tag(0x%x)\n", e->tag);
ppIRExpr(e);
vpanic("iselFltExpr_wrk(mips)");
fpop = Mfp_ABSD;
break;
case Iop_F32toF64: {
+ vassert(!mode64);
HReg src = iselFltExpr(env, e->Iex.Unop.arg);
HReg dst = newVRegD(env);
- HReg irrm = newVRegI(env);
-
- MIPSAMode *am_addr1 = MIPSAMode_IR(284, GuestStatePointer(mode64));
-
- addInstr(env, MIPSInstr_Load(4, irrm, am_addr1, mode64));
-
- // set new FCSR
- HReg tmp = newVRegI(env);
- HReg fcsr_old = newVRegI(env);
- MIPSAMode *am_addr;
-
- addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tmp, irrm,
- MIPSRH_Imm(False, 1)));
- addInstr(env, MIPSInstr_Alu(Malu_XOR, tmp, irrm, MIPSRH_Reg(tmp)));
- addInstr(env, MIPSInstr_Alu(Malu_AND, irrm, tmp,
- MIPSRH_Imm(False, 3)));
- /* save old value of FCSR */
- addInstr(env, MIPSInstr_MfFCSR(fcsr_old));
- sub_from_sp(env, 8); // Move SP down 4 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- //store old FCSR to stack
- addInstr(env, MIPSInstr_Store(4, am_addr, fcsr_old, mode64));
-
- //set new value of FCSR
- addInstr(env, MIPSInstr_MtFCSR(irrm));
-
- //set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
- addInstr(env, MIPSInstr_FpUnary(Mfp_CVTD, dst, src));
- set_MIPS_rounding_default(env);
+ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDS, dst, src));
return dst;
}
case Iop_ReinterpI64asF64: {
iselInt64Expr(&Hi, &Lo, env, e->Iex.Unop.arg);
- dst = mk_LoadRR32toFPR(env, Hi, Lo); // 2*I32 -> F64
+ dst = mk_LoadRR32toFPR(env, Hi, Lo); /* 2*I32 -> F64 */
return dst;
}
case Iop_I32StoF64: {
+ vassert(!mode64);
HReg dst = newVRegD(env);
HReg tmp1 = newVRegF(env);
HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
MIPSAMode *am_addr;
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
- // store as I32
- addInstr(env, MIPSInstr_Store(4, am_addr, r_src, mode64));
-
- // load as Ity_F32
- addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, tmp1, am_addr));
-
- add_to_sp(env, 16); // Reset SP
+ /* Move Word to Floating Point
+ mtc1 tmp1, r_src */
+ addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp1, r_src));
HReg irrm = newVRegI(env);
addInstr(env, MIPSInstr_Load(4, irrm, am_addr1, mode64));
- //set rounding mode
+ /* set rounding mode */
HReg tmp = newVRegI(env);
HReg fcsr_old = newVRegI(env);
MIPSRH_Imm(False, 3)));
/* save old value of FCSR */
addInstr(env, MIPSInstr_MfFCSR(fcsr_old));
- sub_from_sp(env, 8); // Move SP down 4 bytes
+ /* Move SP down 8 bytes */
+ sub_from_sp(env, 8);
am_addr = MIPSAMode_IR(0, StackPointer(mode64));
- //store old FCSR to stack
+ /* store old FCSR to stack */
addInstr(env, MIPSInstr_Store(4, am_addr, fcsr_old, mode64));
- //set new value of FCSR
+ /* set new value of FCSR */
addInstr(env, MIPSInstr_MtFCSR(irrm));
- // and do convert
+ /* and do convert */
addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDW, dst, tmp1));
+
+ /* set MIPS rounding mode to default and reset sp */
set_MIPS_rounding_default(env);
return dst;
}
}
- /* --------- MULTIPLEX --------- */
+ if (e->tag == Iex_Qop) {
+ switch (e->Iex.Qop.details->op) {
+ case Iop_MAddF32:
+ case Iop_MAddF64:
+ case Iop_MSubF32:
+ case Iop_MSubF64: {
+ MIPSFpOp op = 0;
+ switch (e->Iex.Qop.details->op) {
+ case Iop_MAddF32:
+ op = Mfp_MADDS;
+ break;
+ case Iop_MAddF64:
+ op = Mfp_MADDD;
+ break;
+ case Iop_MSubF32:
+ op = Mfp_MSUBS;
+ break;
+ case Iop_MSubF64:
+ op = Mfp_MSUBD;
+ break;
+ default:
+ vassert(0);
+ }
+ HReg dst = newVRegD(env);
+ HReg src1 = iselDblExpr(env, e->Iex.Qop.details->arg2);
+ HReg src2 = iselDblExpr(env, e->Iex.Qop.details->arg3);
+ HReg src3 = iselDblExpr(env, e->Iex.Qop.details->arg4);
+ set_MIPS_rounding_mode(env, e->Iex.Qop.details->arg1);
+ addInstr(env, MIPSInstr_FpTernary(op, dst,
+ src1, src2, src3));
+ set_MIPS_rounding_default(env);
+ return dst;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ /* --------- ITE --------- */
if (e->tag == Iex_ITE) {
if (ty == Ity_F64
&& typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) {
HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse);
HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue);
- HReg r_cond_1 = iselWordExpr_R(env, e->Iex.ITE.cond);
- HReg r_cond = newVRegI(env);
- HReg r_cond_neg = newVRegI(env);
- HReg mask = newVRegI(env);
+ HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
HReg r_dst = newVRegD(env);
- HReg r_tmp_lo = newVRegI(env);
- HReg r_tmp_hi = newVRegI(env);
- HReg r_tmp1_lo = newVRegI(env);
- HReg r_tmp1_hi = newVRegI(env);
- HReg r_r0_lo = newVRegI(env);
- HReg r_r0_hi = newVRegI(env);
- HReg r_r1_lo = newVRegI(env);
- HReg r_r1_hi = newVRegI(env);
- HReg r_dst_lo = newVRegI(env);
- HReg r_dst_hi = newVRegI(env);
-
- /* r_cond = 0 - r_cond_1 */
- addInstr(env, MIPSInstr_LI(mask, 0x0));
- addInstr(env, MIPSInstr_Alu(Malu_SUB, r_cond,
- mask, MIPSRH_Reg(r_cond_1)));
-
- sub_from_sp(env, 16); // Move SP down 16 bytes
- MIPSAMode *am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- // store as Ity_F64
- addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, r0, am_addr));
-
- // load as 2xI32
- addInstr(env, MIPSInstr_Load(4, r_r0_lo, am_addr, mode64));
- addInstr(env, MIPSInstr_Load(4, r_r0_hi, nextMIPSAModeFloat(am_addr),
- mode64));
-
- add_to_sp(env, 16); // Reset SP
-
- addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp_lo, r_cond,
- MIPSRH_Reg(r_r0_lo)));
- addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp_hi, r_cond,
- MIPSRH_Reg(r_r0_hi)));
-
- addInstr(env, MIPSInstr_Alu(Malu_NOR, r_cond_neg, r_cond,
- MIPSRH_Reg(r_cond)));
-
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- // store as Ity_F64
- addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, r1, am_addr));
-
- // load as 2xI32
- addInstr(env, MIPSInstr_Load(4, r_r1_lo, am_addr, mode64));
- addInstr(env, MIPSInstr_Load(4, r_r1_hi, nextMIPSAModeFloat(am_addr),
- mode64));
-
- add_to_sp(env, 16); // Reset SP
-
- addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1_lo, r_cond_neg,
- MIPSRH_Reg(r_r1_lo)));
- addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1_hi, r_cond_neg,
- MIPSRH_Reg(r_r1_hi)));
-
- addInstr(env, MIPSInstr_Alu(Malu_ADD, r_dst_lo, r_tmp_lo,
- MIPSRH_Reg(r_tmp1_lo)));
- addInstr(env, MIPSInstr_Alu(Malu_ADD, r_dst_hi, r_tmp_hi,
- MIPSRH_Reg(r_tmp1_hi)));
-
- sub_from_sp(env, 16); // Move SP down 16 bytes
- am_addr = MIPSAMode_IR(0, StackPointer(mode64));
-
- // store as I32
- addInstr(env, MIPSInstr_Store(4, am_addr, r_dst_lo, mode64));
- addInstr(env, MIPSInstr_Store(4, nextMIPSAModeFloat(am_addr),
- r_dst_hi, mode64));
-
- // load as Ity_F32
- addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, r_dst, am_addr));
-
- add_to_sp(env, 16); // Reset SP
+ addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, r_dst, r0));
+ addInstr(env, MIPSInstr_MoveCond(MFpMoveCond_movnd, r_dst, r1,
+ r_cond));
return r_dst;
}
}
am_addr));
return;
}
-
+ if (tyd == Ity_F64 && mode64) {
+ HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data);
+ addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
+ am_addr));
+ return;
+ }
if (!mode64 && (tyd == Ity_F64)) {
HReg fr_src = iselDblExpr(env, stmt->Ist.Store.data);
addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
if (ty == Ity_F64) {
HReg fr_src;
- fr_src = iselDblExpr(env, stmt->Ist.Put.data);
+ if (mode64) {
+ fr_src = iselFltExpr(env, stmt->Ist.Put.data);
+ } else {
+ fr_src = iselDblExpr(env, stmt->Ist.Put.data);
+ }
MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset,
GuestStatePointer(mode64));
addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
}
if (ty == Ity_I64) {
- HReg rHi, rLo, dstHi, dstLo;
- iselInt64Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data);
- lookupIRTemp64(&dstHi, &dstLo, env, tmp);
- addInstr(env, mk_iMOVds_RR(dstHi, rHi));
- addInstr(env, mk_iMOVds_RR(dstLo, rLo));
- return;
+ if (mode64) {
+ HReg r_dst = lookupIRTemp(env, tmp);
+ HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data);
+ addInstr(env, mk_iMOVds_RR(r_dst, r_src));
+ return;
+ } else {
+ HReg rHi, rLo, dstHi, dstLo;
+ iselInt64Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data);
+ lookupIRTemp64(&dstHi, &dstLo, env, tmp);
+ addInstr(env, mk_iMOVds_RR(dstHi, rHi));
+ addInstr(env, mk_iMOVds_RR(dstLo, rLo));
+ return;
+ }
+ }
+
+ if (mode64 && ty == Ity_I128) {
+ HReg rHi, rLo, dstHi, dstLo;
+ iselInt128Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data);
+ lookupIRTempPair(&dstHi, &dstLo, env, tmp);
+ addInstr(env, mk_iMOVds_RR(dstHi, rHi));
+ addInstr(env, mk_iMOVds_RR(dstLo, rLo));
+ return;
}
if (ty == Ity_F32) {
}
if (ty == Ity_F64) {
- HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data);
- HReg dst = lookupIRTemp(env, tmp);
- addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, dst, src));
- return;
+ if (mode64) {
+ HReg src = iselFltExpr(env, stmt->Ist.WrTmp.data);
+ HReg dst = lookupIRTemp(env, tmp);
+ addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, dst, src));
+ return;
+ } else {
+ HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data);
+ HReg dst = lookupIRTemp(env, tmp);
+ addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, dst, src));
+ return;
+ }
}
break;
}
/* --------- Load Linked or Store Conditional --------- */
case Ist_LLSC: {
- //Temporary solution; this need to be rewritten again for MIPS.
- //On MIPS you can not read from address that is locked with LL before SC.
- // If you read from address that is locked than SC will fall.
+ /* Temporary solution; this needs to be rewritten again for MIPS.
+ On MIPS you cannot read from an address that is locked with LL
+ before SC. If you read from an address that is locked, then SC
+ will fail. */
IRTemp res = stmt->Ist.LLSC.result;
IRType tyRes = typeOfIRTemp(env->type_env, res);
IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr);
if (stmt->Ist.LLSC.storedata == NULL) {
/* LL */
MIPSAMode *r_addr;
- /*constructs addressing mode from address provided */
+ /* constructs addressing mode from address provided */
r_addr = iselWordExpr_AMode(env, stmt->Ist.LLSC.addr, tyAddr);
HReg r_dst = lookupIRTemp(env, res);
/* fallthru */
}
goto stmt_fail;
- /*NOTREACHED*/}
+ /* NOTREACHED */}
- /* --------- INSTR MARK --------- */
- /* Doesn't generate any executable code ... */
+ /* --------- INSTR MARK --------- */
+ /* Doesn't generate any executable code ... */
case Ist_IMark:
return;
- /* --------- ABI HINT --------- */
- /* These have no meaning (denotation in the IR) and so we ignore
- them ... if any actually made it this far. */
+ /* --------- ABI HINT --------- */
+ /* These have no meaning (denotation in the IR) and so we ignore
+ them ... if any actually made it this far. */
case Ist_AbiHint:
return;
- /* --------- NO-OP --------- */
- /* Fairly self-explanatory, wouldn't you say? */
+ /* --------- NO-OP --------- */
+ /* Fairly self-explanatory, wouldn't you say? */
case Ist_NoOp:
return;
MIPSCondCode cc = iselCondCode(env, stmt->Ist.Exit.guard);
MIPSAMode* amPC = MIPSAMode_IR(stmt->Ist.Exit.offsIP,
- hregMIPS_GPR10(mode64));
+ GuestStatePointer(mode64));
/* Case: boring transfer to known address */
if (stmt->Ist.Exit.jk == Ijk_Boring
vassert(cdst->tag == (env->mode64 ? Ico_U64 :Ico_U32));
if (jk == Ijk_Boring || jk == Ijk_Call) {
/* Boring transfer to known address */
- MIPSAMode* amPC = MIPSAMode_IR(offsIP, hregMIPS_GPR10(env->mode64));
+ MIPSAMode* amPC = MIPSAMode_IR(offsIP, GuestStatePointer(env->mode64));
if (env->chainingAllowed) {
/* .. almost always true .. */
/* Skip the event check at the dst if this is a forwards
case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
HReg r = iselWordExpr_R(env, next);
- MIPSAMode* amPC = MIPSAMode_IR(offsIP, hregMIPS_GPR10(env->mode64));
+ MIPSAMode* amPC = MIPSAMode_IR(offsIP, GuestStatePointer(env->mode64));
if (env->chainingAllowed) {
addInstr(env, MIPSInstr_XIndir(r, amPC, MIPScc_AL));
} else {
case Ijk_Sys_syscall:
case Ijk_TInval: {
HReg r = iselWordExpr_R(env, next);
- MIPSAMode* amPC = MIPSAMode_IR(offsIP, hregMIPS_GPR10(env->mode64));
+ MIPSAMode* amPC = MIPSAMode_IR(offsIP, GuestStatePointer(env->mode64));
addInstr(env, MIPSInstr_XAssisted(r, amPC, MIPScc_AL, jk));
return;
}
break;
}
- vex_printf( "\n-- PUT(%d) = ", offsIP);
- ppIRExpr( next );
- vex_printf( "; exit-");
+ vex_printf("\n-- PUT(%d) = ", offsIP);
+ ppIRExpr(next );
+ vex_printf("; exit-");
ppIRJumpKind(jk);
- vex_printf( "\n");
- vassert(0); // are we expecting any other kind?
+ vex_printf("\n");
+ vassert(0); /* are we expecting any other kind? */
}
/*---------------------------------------------------------*/
MIPSAMode *amCounter, *amFailAddr;
/* sanity ... */
- vassert(arch_host == VexArchMIPS32);
+ vassert(arch_host == VexArchMIPS32 || arch_host == VexArchMIPS64);
vassert(VEX_PRID_COMP_MIPS == hwcaps_host
- || VEX_PRID_COMP_BROADCOM == hwcaps_host);
+ || VEX_PRID_COMP_BROADCOM == hwcaps_host
+ || VEX_PRID_COMP_NETLOGIC == hwcaps_host);
mode64 = arch_host != VexArchMIPS32;
case Ity_I1:
case Ity_I8:
case Ity_I16:
- case Ity_I32: {
- hreg = mkHReg(j++, HRcInt32, True);
- break;
- }
- case Ity_I64: {
- hreg = mkHReg(j++, HRcInt32, True);
- hregHI = mkHReg(j++, HRcInt32, True);
- break;
- }
+ case Ity_I32:
+ if (mode64) {
+ hreg = mkHReg(j++, HRcInt64, True);
+ break;
+ } else {
+ hreg = mkHReg(j++, HRcInt32, True);
+ break;
+ }
+ case Ity_I64:
+ if (mode64) {
+ hreg = mkHReg(j++, HRcInt64, True);
+ break;
+ } else {
+ hreg = mkHReg(j++, HRcInt32, True);
+ hregHI = mkHReg(j++, HRcInt32, True);
+ break;
+ }
case Ity_I128:
vassert(mode64);
hreg = mkHReg(j++, HRcInt64, True);
hregHI = mkHReg(j++, HRcInt64, True);
break;
- case Ity_F32: {
- hreg = mkHReg(j++, HRcFlt32, True);
- break;
- }
+ case Ity_F32:
+ if (mode64) {
+ hreg = mkHReg(j++, HRcFlt64, True);
+ break;
+ } else {
+ hreg = mkHReg(j++, HRcFlt32, True);
+ break;
+ }
case Ity_F64:
hreg = mkHReg(j++, HRcFlt64, True);
break;
env->vreg_ctr = j;
/* The very first instruction must be an event check. */
- amCounter = MIPSAMode_IR(offs_Host_EvC_Counter, hregMIPS_GPR10(mode64));
- amFailAddr = MIPSAMode_IR(offs_Host_EvC_FailAddr, hregMIPS_GPR10(mode64));
+ amCounter = MIPSAMode_IR(offs_Host_EvC_Counter, GuestStatePointer(mode64));
+ amFailAddr = MIPSAMode_IR(offs_Host_EvC_FailAddr, GuestStatePointer(mode64));
addInstr(env, MIPSInstr_EvCheck(amCounter, amFailAddr));
/* Possibly a block counter increment (for profiling). At this