From: Petar Jovanovic
Date: Wed, 27 Feb 2013 22:57:17 +0000 (+0000)
Subject: mips: adding MIPS64LE support to VEX
X-Git-Tag: svn/VALGRIND_3_9_0^2~107
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=a430267b625bb9125e2dafd8f96b7cf5d9693261;p=thirdparty%2Fvalgrind.git

mips: adding MIPS64LE support to VEX

Necessary changes to VEX to support MIPS64LE on Linux.
Minor cleanup/style changes embedded in the patch as well.

Patch written by Dejan Jevtic and Petar Jovanovic.

More information about this issue:
https://bugs.kde.org/show_bug.cgi?id=313267

git-svn-id: svn://svn.valgrind.org/vex/trunk@2687
---

diff --git a/VEX/auxprogs/genoffsets.c b/VEX/auxprogs/genoffsets.c
index 48455eb651..e04ba0cecc 100644
--- a/VEX/auxprogs/genoffsets.c
+++ b/VEX/auxprogs/genoffsets.c
@@ -53,6 +53,7 @@
 #include "../pub/libvex_guest_arm.h"
 #include "../pub/libvex_guest_s390x.h"
 #include "../pub/libvex_guest_mips32.h"
+#include "../pub/libvex_guest_mips64.h"
 
 #define VG_STRINGIFZ(__str) #__str
 #define VG_STRINGIFY(__str) VG_STRINGIFZ(__str)
@@ -211,6 +212,43 @@ void foo ( void )
    GENOFFSET(MIPS32,mips32,PC);
    GENOFFSET(MIPS32,mips32,HI);
    GENOFFSET(MIPS32,mips32,LO);
+
+   // MIPS64
+   GENOFFSET(MIPS64,mips64,r0);
+   GENOFFSET(MIPS64,mips64,r1);
+   GENOFFSET(MIPS64,mips64,r2);
+   GENOFFSET(MIPS64,mips64,r3);
+   GENOFFSET(MIPS64,mips64,r4);
+   GENOFFSET(MIPS64,mips64,r5);
+   GENOFFSET(MIPS64,mips64,r6);
+   GENOFFSET(MIPS64,mips64,r7);
+   GENOFFSET(MIPS64,mips64,r8);
+   GENOFFSET(MIPS64,mips64,r9);
+   GENOFFSET(MIPS64,mips64,r10);
+   GENOFFSET(MIPS64,mips64,r11);
+   GENOFFSET(MIPS64,mips64,r12);
+   GENOFFSET(MIPS64,mips64,r13);
+   GENOFFSET(MIPS64,mips64,r14);
+   GENOFFSET(MIPS64,mips64,r15);
+   GENOFFSET(MIPS64,mips64,r16);
+   GENOFFSET(MIPS64,mips64,r17);
+   GENOFFSET(MIPS64,mips64,r18);
+   GENOFFSET(MIPS64,mips64,r19);
+   GENOFFSET(MIPS64,mips64,r20);
+   GENOFFSET(MIPS64,mips64,r21);
+   GENOFFSET(MIPS64,mips64,r22);
+   GENOFFSET(MIPS64,mips64,r23);
+   GENOFFSET(MIPS64,mips64,r24);
+   GENOFFSET(MIPS64,mips64,r25);
+   GENOFFSET(MIPS64,mips64,r26);
+   GENOFFSET(MIPS64,mips64,r27);
+   GENOFFSET(MIPS64,mips64,r28);
+   GENOFFSET(MIPS64,mips64,r29);
+   GENOFFSET(MIPS64,mips64,r30);
+   GENOFFSET(MIPS64,mips64,r31);
+   GENOFFSET(MIPS64,mips64,PC);
+   GENOFFSET(MIPS64,mips64,HI);
+   GENOFFSET(MIPS64,mips64,LO);
 }
 
 /*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_mips_defs.h b/VEX/priv/guest_mips_defs.h
index d38ca5370b..45a91a76d2 100644
--- a/VEX/priv/guest_mips_defs.h
+++ b/VEX/priv/guest_mips_defs.h
@@ -7,7 +7,7 @@
    This file is part of Valgrind, a dynamic binary instrumentation
    framework.
 
-   Copyright (C) 2010-2012 RT-RK
+   Copyright (C) 2010-2013 RT-RK
      mips-valgrind@rt-rk.com
 
   This program is free software; you can redistribute it and/or
@@ -34,14 +34,13 @@
 #define __VEX_GUEST_MIPS_DEFS_H
 
 #include "libvex_basictypes.h"
-#include "guest_generic_bb_to_IR.h" // DisResult
+#include "guest_generic_bb_to_IR.h" /* DisResult */
 
 /*---------------------------------------------------------*/
-/*--- mips to IR conversion ---*/
+/*--- mips to IR conversion ---*/
 /*---------------------------------------------------------*/
 
-/* Convert one MIPS insn to IR. See the type DisOneInstrFn in
-   bb_to_IR.h. */
+/* Convert one MIPS insn to IR. See the type DisOneInstrFn in bb_to_IR.h.
*/ extern DisResult disInstr_MIPS ( IRSB* irbb, Bool (*resteerOkFn) (void *, Addr64), Bool resteerCisOk, @@ -56,46 +55,59 @@ extern DisResult disInstr_MIPS ( IRSB* irbb, Bool sigill_diag ); /* Used by the optimiser to specialise calls to helpers. */ -extern IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args, - IRStmt ** precedingStmts, - Int n_precedingStmts); +extern IRExpr *guest_mips32_spechelper ( const HChar * function_name, + IRExpr ** args, + IRStmt ** precedingStmts, + Int n_precedingStmts ); + +extern IRExpr *guest_mips64_spechelper ( const HChar * function_name, + IRExpr ** args, + IRStmt ** precedingStmts, + Int n_precedingStmts); /* Describes to the optimser which part of the guest state require precise memory exceptions. This is logically part of the guest state description. */ -extern Bool guest_mips32_state_requires_precise_mem_exns(Int, Int); +extern Bool guest_mips32_state_requires_precise_mem_exns ( Int, Int ); + +extern Bool guest_mips64_state_requires_precise_mem_exns ( Int, Int ); extern VexGuestLayout mips32Guest_layout; +extern VexGuestLayout mips64Guest_layout; /*---------------------------------------------------------*/ -/*--- mips guest helpers ---*/ +/*--- mips guest helpers ---*/ /*---------------------------------------------------------*/ -extern UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel); +extern UInt mips32_dirtyhelper_mfc0 ( UInt rd, UInt sel ); + +extern ULong mips64_dirtyhelper_dmfc0 ( UInt rd, UInt sel ); -extern void mips32_dirtyhelper_sync(UInt sync); +extern void mips32_dirtyhelper_sync ( UInt sync ); + +#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2)) +extern ULong mips64_dirtyhelper_rdhwr ( ULong rt, ULong rd ); +#endif /*---------------------------------------------------------*/ -/*--- Condition code stuff ---*/ +/*--- Condition code stuff ---*/ /*---------------------------------------------------------*/ -/* Defines conditions which we can ask for (MIPS MIPS 2e page A3-6) */ - typedef enum { - MIPSCondEQ = 0, /* equal : Z=1 */ - MIPSCondNE = 1, /* not equal : Z=0 */ + MIPSCondEQ = 0, /* equal : Z=1 */ + MIPSCondNE = 1, /* not equal : Z=0 */ - MIPSCondHS = 2, /* >=u (higher or same) : C=1 */ - MIPSCondLO = 3, /* =u (higher or same) : C=1 */ + MIPSCondLO = 3, /* u (higher) : C=1 && Z=0 */ - MIPSCondLS = 9, /* <=u (lower or same) : C=0 || Z=1 */ + MIPSCondHI = 8, /* >u (higher) : C=1 && Z=0 */ + MIPSCondLS = 9, /* <=u (lower or same) : C=0 || Z=1 */ MIPSCondGE = 10, /* >=s (signed greater or equal) : N=V */ MIPSCondLT = 11, /* field) } +#define ALWAYSDEFD64(field) \ + { offsetof(VexGuestMIPS64State, field), \ + (sizeof ((VexGuestMIPS64State*)0)->field) } + IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args, IRStmt ** precedingStmts, Int n_precedingStmts) { return NULL; } +IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args, + IRStmt ** precedingStmts, + Int n_precedingStmts ) +{ + return NULL; +} + /* VISIBLE TO LIBVEX CLIENT */ void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state) { @@ -144,7 +156,108 @@ void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state) vex_state->host_EvC_FAILADDR = 0; /* Used to record the unredirected guest address at the start of - a translation whose start has been redirected. By reading + a translation whose start has been redirected. By reading + this pseudo-register shortly afterwards, the translation can + find out what the corresponding no-redirection address was. 
+ Note, this is only set for wrap-style redirects, not for + replace-style ones. */ + vex_state->guest_NRADDR = 0; + + vex_state->guest_COND = 0; +} + +void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state ) +{ + vex_state->guest_r0 = 0; /* Hardwired to 0 */ + vex_state->guest_r1 = 0; /* Assembler temporary */ + vex_state->guest_r2 = 0; /* Values for function returns ... */ + vex_state->guest_r3 = 0; + vex_state->guest_r4 = 0; /* Function arguments */ + vex_state->guest_r5 = 0; + vex_state->guest_r6 = 0; + vex_state->guest_r7 = 0; + vex_state->guest_r8 = 0; + vex_state->guest_r9 = 0; + vex_state->guest_r10 = 0; + vex_state->guest_r11 = 0; + vex_state->guest_r12 = 0; /* Temporaries */ + vex_state->guest_r13 = 0; + vex_state->guest_r14 = 0; + vex_state->guest_r15 = 0; + vex_state->guest_r16 = 0; /* Saved temporaries */ + vex_state->guest_r17 = 0; + vex_state->guest_r18 = 0; + vex_state->guest_r19 = 0; + vex_state->guest_r20 = 0; + vex_state->guest_r21 = 0; + vex_state->guest_r22 = 0; + vex_state->guest_r23 = 0; + vex_state->guest_r24 = 0; /* Temporaries */ + vex_state->guest_r25 = 0; + vex_state->guest_r26 = 0; /* Reserved for OS kernel */ + vex_state->guest_r27 = 0; + vex_state->guest_r28 = 0; /* Global pointer */ + vex_state->guest_r29 = 0; /* Stack pointer */ + vex_state->guest_r30 = 0; /* Frame pointer */ + vex_state->guest_r31 = 0; /* Return address */ + vex_state->guest_PC = 0; /* Program counter */ + vex_state->guest_HI = 0; /* Multiply and divide register higher result */ + vex_state->guest_LO = 0; /* Multiply and divide register lower result */ + + /* FPU Registers */ + vex_state->guest_f0 = 0xffffffffffffffff; /* Floting point registers */ + vex_state->guest_f1 = 0xffffffffffffffff; + vex_state->guest_f2 = 0xffffffffffffffff; + vex_state->guest_f3 = 0xffffffffffffffff; + vex_state->guest_f4 = 0xffffffffffffffff; + vex_state->guest_f5 = 0xffffffffffffffff; + vex_state->guest_f6 = 0xffffffffffffffff; + vex_state->guest_f7 = 0xffffffffffffffff; + vex_state->guest_f8 = 0xffffffffffffffff; + vex_state->guest_f9 = 0xffffffffffffffff; + vex_state->guest_f10 = 0xffffffffffffffff; + vex_state->guest_f11 = 0xffffffffffffffff; + vex_state->guest_f12 = 0xffffffffffffffff; + vex_state->guest_f13 = 0xffffffffffffffff; + vex_state->guest_f14 = 0xffffffffffffffff; + vex_state->guest_f15 = 0xffffffffffffffff; + vex_state->guest_f16 = 0xffffffffffffffff; + vex_state->guest_f17 = 0xffffffffffffffff; + vex_state->guest_f18 = 0xffffffffffffffff; + vex_state->guest_f19 = 0xffffffffffffffff; + vex_state->guest_f20 = 0xffffffffffffffff; + vex_state->guest_f21 = 0xffffffffffffffff; + vex_state->guest_f22 = 0xffffffffffffffff; + vex_state->guest_f23 = 0xffffffffffffffff; + vex_state->guest_f24 = 0xffffffffffffffff; + vex_state->guest_f25 = 0xffffffffffffffff; + vex_state->guest_f26 = 0xffffffffffffffff; + vex_state->guest_f27 = 0xffffffffffffffff; + vex_state->guest_f28 = 0xffffffffffffffff; + vex_state->guest_f29 = 0xffffffffffffffff; + vex_state->guest_f30 = 0xffffffffffffffff; + vex_state->guest_f31 = 0xffffffffffffffff; + + vex_state->guest_FIR = 0; /* FP implementation and revision register */ + vex_state->guest_FCCR = 0; /* FP condition codes register */ + vex_state->guest_FEXR = 0; /* FP exceptions register */ + vex_state->guest_FENR = 0; /* FP enables register */ + vex_state->guest_FCSR = 0; /* FP control/status register */ + + vex_state->guest_ULR = 0; + + /* Various pseudo-regs mandated by Vex or Valgrind. 
*/ + /* Emulation notes */ + vex_state->guest_EMNOTE = 0; + + /* For clflush: record start and length of area to invalidate */ + vex_state->guest_TISTART = 0; + vex_state->guest_TILEN = 0; + vex_state->host_EvC_COUNTER = 0; + vex_state->host_EvC_FAILADDR = 0; + + /* Used to record the unredirected guest address at the start of + a translation whose start has been redirected. By reading this pseudo-register shortly afterwards, the translation can find out what the corresponding no-redirection address was. Note, this is only set for wrap-style redirects, not for @@ -177,7 +290,7 @@ Bool guest_mips32_state_requires_precise_mem_exns(Int minoff, Int maxoff) if (maxoff < sp_min || minoff > sp_max) { /* no overlap with sp */ if (vex_control.iropt_register_updates == VexRegUpdSpAtMemAccess) - return False; // We only need to check stack pointer. + return False; /* We only need to check stack pointer. */ } else { return True; } @@ -202,6 +315,39 @@ Bool guest_mips32_state_requires_precise_mem_exns(Int minoff, Int maxoff) return False; } +Bool guest_mips64_state_requires_precise_mem_exns ( Int minoff, Int maxoff ) +{ + Int sp_min = offsetof(VexGuestMIPS64State, guest_r29); + Int sp_max = sp_min + 8 - 1; + Int pc_min = offsetof(VexGuestMIPS64State, guest_PC); + Int pc_max = pc_min + 8 - 1; + + if ( maxoff < sp_min || minoff > sp_max ) { + /* no overlap with sp */ + if (vex_control.iropt_register_updates == VexRegUpdSpAtMemAccess) + return False; /* We only need to check stack pointer. */ + } else { + return True; + } + + if ( maxoff < pc_min || minoff > pc_max ) { + /* no overlap with pc */ + } else { + return True; + } + + Int fp_min = offsetof(VexGuestMIPS64State, guest_r30); + Int fp_max = fp_min + 8 - 1; + + if ( maxoff < fp_min || minoff > fp_max ) { + /* no overlap with fp */ + } else { + return True; + } + + return False; +} + VexGuestLayout mips32Guest_layout = { /* Total size of the guest state, in bytes. */ .total_sizeB = sizeof(VexGuestMIPS32State), @@ -230,8 +376,37 @@ VexGuestLayout mips32Guest_layout = { } }; +VexGuestLayout mips64Guest_layout = { + /* Total size of the guest state, in bytes. */ + .total_sizeB = sizeof(VexGuestMIPS64State), + /* Describe the stack pointer. */ + .offset_SP = offsetof(VexGuestMIPS64State, guest_r29), + .sizeof_SP = 8, + /* Describe the frame pointer. */ + .offset_FP = offsetof(VexGuestMIPS64State, guest_r30), + .sizeof_FP = 8, + /* Describe the instruction pointer. */ + .offset_IP = offsetof(VexGuestMIPS64State, guest_PC), + .sizeof_IP = 8, + /* Describe any sections to be regarded by Memcheck as + 'always-defined'. */ + .n_alwaysDefd = 7, + /* ? 
:( */ + .alwaysDefd = { + /* 0 */ ALWAYSDEFD64 (guest_r0), + /* 1 */ ALWAYSDEFD64 (guest_EMNOTE), + /* 2 */ ALWAYSDEFD64 (guest_TISTART), + /* 3 */ ALWAYSDEFD64 (guest_TILEN), + /* 4 */ ALWAYSDEFD64 (guest_r29), + /* 5 */ ALWAYSDEFD64 (guest_r31), + /* 6 */ ALWAYSDEFD64 (guest_ULR) + } +}; + #define ASM_VOLATILE_CASE(rd, sel) \ - case rd: asm volatile ("mfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); break; + case rd: \ + asm volatile ("mfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \ + break; UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) { @@ -239,7 +414,7 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) #if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2)) switch (sel) { case 0: - //__asm__("mfc0 %0, $1, 0" :"=r" (x)); + /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */ switch (rd) { ASM_VOLATILE_CASE(0, 0); ASM_VOLATILE_CASE(1, 0); @@ -278,7 +453,7 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) } break; case 1: - //__asm__("mfc0 %0, $1, 0" :"=r" (x)); + /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */ switch (rd) { ASM_VOLATILE_CASE(0, 1); ASM_VOLATILE_CASE(1, 1); @@ -317,7 +492,7 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) } break; case 2: - //__asm__("mfc0 %0, $1, 0" :"=r" (x)); + /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */ switch (rd) { ASM_VOLATILE_CASE(0, 2); ASM_VOLATILE_CASE(1, 2); @@ -356,7 +531,7 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) } break; case 3: - //__asm__("mfc0 %0, $1, 0" :"=r" (x)); + /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */ switch (rd) { ASM_VOLATILE_CASE(0, 3); ASM_VOLATILE_CASE(1, 3); @@ -395,7 +570,7 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) } break; case 4: - //__asm__("mfc0 %0, $1, 0" :"=r" (x)); + /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */ switch (rd) { ASM_VOLATILE_CASE(0, 4); ASM_VOLATILE_CASE(1, 4); @@ -434,7 +609,7 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) } break; case 5: - //__asm__("mfc0 %0, $1, 0" :"=r" (x)); + /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */ switch (rd) { ASM_VOLATILE_CASE(0, 5); ASM_VOLATILE_CASE(1, 5); @@ -473,7 +648,7 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) } break; case 6: - //__asm__("mfc0 %0, $1, 0" :"=r" (x)); + /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */ switch (rd) { ASM_VOLATILE_CASE(0, 6); ASM_VOLATILE_CASE(1, 6); @@ -512,7 +687,7 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) } break; case 7: - //__asm__("mfc0 %0, $1, 0" :"=r" (x)); + /* __asm__("mfc0 %0, $1, 0" :"=r" (x)); */ switch (rd) { ASM_VOLATILE_CASE(0, 7); ASM_VOLATILE_CASE(1, 7); @@ -560,6 +735,336 @@ UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel) #undef ASM_VOLATILE_CASE +#define ASM_VOLATILE_CASE(rd, sel) \ + case rd: \ + asm volatile ("dmfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \ + break; + +ULong mips64_dirtyhelper_dmfc0 ( UInt rd, UInt sel ) +{ + ULong x = 0; +#if defined(VGP_mips64_linux) + switch (sel) { + case 0: + /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */ + switch (rd) { + ASM_VOLATILE_CASE (0, 0); + ASM_VOLATILE_CASE (1, 0); + ASM_VOLATILE_CASE (2, 0); + ASM_VOLATILE_CASE (3, 0); + ASM_VOLATILE_CASE (4, 0); + ASM_VOLATILE_CASE (5, 0); + ASM_VOLATILE_CASE (6, 0); + ASM_VOLATILE_CASE (7, 0); + ASM_VOLATILE_CASE (8, 0); + ASM_VOLATILE_CASE (9, 0); + ASM_VOLATILE_CASE (10, 0); + ASM_VOLATILE_CASE (11, 0); + ASM_VOLATILE_CASE (12, 0); + ASM_VOLATILE_CASE (13, 0); + ASM_VOLATILE_CASE (14, 0); + ASM_VOLATILE_CASE (15, 0); + ASM_VOLATILE_CASE (16, 0); + ASM_VOLATILE_CASE (17, 0); + ASM_VOLATILE_CASE (18, 0); + ASM_VOLATILE_CASE (19, 0); + ASM_VOLATILE_CASE (20, 0); + ASM_VOLATILE_CASE (21, 0); + 
ASM_VOLATILE_CASE (22, 0);
+            ASM_VOLATILE_CASE (23, 0);
+            ASM_VOLATILE_CASE (24, 0);
+            ASM_VOLATILE_CASE (25, 0);
+            ASM_VOLATILE_CASE (26, 0);
+            ASM_VOLATILE_CASE (27, 0);
+            ASM_VOLATILE_CASE (28, 0);
+            ASM_VOLATILE_CASE (29, 0);
+            ASM_VOLATILE_CASE (30, 0);
+            ASM_VOLATILE_CASE (31, 0);
+            default:
+               break;
+         }
+         break;
+      case 1:
+         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+         switch (rd) {
+            ASM_VOLATILE_CASE (0, 1);
+            ASM_VOLATILE_CASE (1, 1);
+            ASM_VOLATILE_CASE (2, 1);
+            ASM_VOLATILE_CASE (3, 1);
+            ASM_VOLATILE_CASE (4, 1);
+            ASM_VOLATILE_CASE (5, 1);
+            ASM_VOLATILE_CASE (6, 1);
+            ASM_VOLATILE_CASE (7, 1);
+            ASM_VOLATILE_CASE (8, 1);
+            ASM_VOLATILE_CASE (9, 1);
+            ASM_VOLATILE_CASE (10, 1);
+            ASM_VOLATILE_CASE (11, 1);
+            ASM_VOLATILE_CASE (12, 1);
+            ASM_VOLATILE_CASE (13, 1);
+            ASM_VOLATILE_CASE (14, 1);
+            ASM_VOLATILE_CASE (15, 1);
+            ASM_VOLATILE_CASE (16, 1);
+            ASM_VOLATILE_CASE (17, 1);
+            ASM_VOLATILE_CASE (18, 1);
+            ASM_VOLATILE_CASE (19, 1);
+            ASM_VOLATILE_CASE (20, 1);
+            ASM_VOLATILE_CASE (21, 1);
+            ASM_VOLATILE_CASE (22, 1);
+            ASM_VOLATILE_CASE (23, 1);
+            ASM_VOLATILE_CASE (24, 1);
+            ASM_VOLATILE_CASE (25, 1);
+            ASM_VOLATILE_CASE (26, 1);
+            ASM_VOLATILE_CASE (27, 1);
+            ASM_VOLATILE_CASE (28, 1);
+            ASM_VOLATILE_CASE (29, 1);
+            ASM_VOLATILE_CASE (30, 1);
+            ASM_VOLATILE_CASE (31, 1);
+            default:
+               break;
+         }
+         break;
+      case 2:
+         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+         switch (rd) {
+            ASM_VOLATILE_CASE (0, 2);
+            ASM_VOLATILE_CASE (1, 2);
+            ASM_VOLATILE_CASE (2, 2);
+            ASM_VOLATILE_CASE (3, 2);
+            ASM_VOLATILE_CASE (4, 2);
+            ASM_VOLATILE_CASE (5, 2);
+            ASM_VOLATILE_CASE (6, 2);
+            ASM_VOLATILE_CASE (7, 2);
+            ASM_VOLATILE_CASE (8, 2);
+            ASM_VOLATILE_CASE (9, 2);
+            ASM_VOLATILE_CASE (10, 2);
+            ASM_VOLATILE_CASE (11, 2);
+            ASM_VOLATILE_CASE (12, 2);
+            ASM_VOLATILE_CASE (13, 2);
+            ASM_VOLATILE_CASE (14, 2);
+            ASM_VOLATILE_CASE (15, 2);
+            ASM_VOLATILE_CASE (16, 2);
+            ASM_VOLATILE_CASE (17, 2);
+            ASM_VOLATILE_CASE (18, 2);
+            ASM_VOLATILE_CASE (19, 2);
+            ASM_VOLATILE_CASE (20, 2);
+            ASM_VOLATILE_CASE (21, 2);
+            ASM_VOLATILE_CASE (22, 2);
+            ASM_VOLATILE_CASE (23, 2);
+            ASM_VOLATILE_CASE (24, 2);
+            ASM_VOLATILE_CASE (25, 2);
+            ASM_VOLATILE_CASE (26, 2);
+            ASM_VOLATILE_CASE (27, 2);
+            ASM_VOLATILE_CASE (28, 2);
+            ASM_VOLATILE_CASE (29, 2);
+            ASM_VOLATILE_CASE (30, 2);
+            ASM_VOLATILE_CASE (31, 2);
+            default:
+               break;
+         }
+         break;
+      case 3:
+         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+         switch (rd) {
+            ASM_VOLATILE_CASE (0, 3);
+            ASM_VOLATILE_CASE (1, 3);
+            ASM_VOLATILE_CASE (2, 3);
+            ASM_VOLATILE_CASE (3, 3);
+            ASM_VOLATILE_CASE (4, 3);
+            ASM_VOLATILE_CASE (5, 3);
+            ASM_VOLATILE_CASE (6, 3);
+            ASM_VOLATILE_CASE (7, 3);
+            ASM_VOLATILE_CASE (8, 3);
+            ASM_VOLATILE_CASE (9, 3);
+            ASM_VOLATILE_CASE (10, 3);
+            ASM_VOLATILE_CASE (11, 3);
+            ASM_VOLATILE_CASE (12, 3);
+            ASM_VOLATILE_CASE (13, 3);
+            ASM_VOLATILE_CASE (14, 3);
+            ASM_VOLATILE_CASE (15, 3);
+            ASM_VOLATILE_CASE (16, 3);
+            ASM_VOLATILE_CASE (17, 3);
+            ASM_VOLATILE_CASE (18, 3);
+            ASM_VOLATILE_CASE (19, 3);
+            ASM_VOLATILE_CASE (20, 3);
+            ASM_VOLATILE_CASE (21, 3);
+            ASM_VOLATILE_CASE (22, 3);
+            ASM_VOLATILE_CASE (23, 3);
+            ASM_VOLATILE_CASE (24, 3);
+            ASM_VOLATILE_CASE (25, 3);
+            ASM_VOLATILE_CASE (26, 3);
+            ASM_VOLATILE_CASE (27, 3);
+            ASM_VOLATILE_CASE (28, 3);
+            ASM_VOLATILE_CASE (29, 3);
+            ASM_VOLATILE_CASE (30, 3);
+            ASM_VOLATILE_CASE (31, 3);
+            default:
+               break;
+         }
+         break;
+      case 4:
+         /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+         switch (rd) {
+            ASM_VOLATILE_CASE (0, 4);
+            ASM_VOLATILE_CASE (1, 4);
+            ASM_VOLATILE_CASE (2, 4);
+
ASM_VOLATILE_CASE (3, 4); + ASM_VOLATILE_CASE (4, 4); + ASM_VOLATILE_CASE (5, 4); + ASM_VOLATILE_CASE (6, 4); + ASM_VOLATILE_CASE (7, 4); + ASM_VOLATILE_CASE (8, 4); + ASM_VOLATILE_CASE (9, 4); + ASM_VOLATILE_CASE (10, 4); + ASM_VOLATILE_CASE (11, 4); + ASM_VOLATILE_CASE (12, 4); + ASM_VOLATILE_CASE (13, 4); + ASM_VOLATILE_CASE (14, 4); + ASM_VOLATILE_CASE (15, 4); + ASM_VOLATILE_CASE (16, 4); + ASM_VOLATILE_CASE (17, 4); + ASM_VOLATILE_CASE (18, 4); + ASM_VOLATILE_CASE (19, 4); + ASM_VOLATILE_CASE (20, 4); + ASM_VOLATILE_CASE (21, 4); + ASM_VOLATILE_CASE (22, 4); + ASM_VOLATILE_CASE (23, 4); + ASM_VOLATILE_CASE (24, 4); + ASM_VOLATILE_CASE (25, 4); + ASM_VOLATILE_CASE (26, 4); + ASM_VOLATILE_CASE (27, 4); + ASM_VOLATILE_CASE (28, 4); + ASM_VOLATILE_CASE (29, 4); + ASM_VOLATILE_CASE (30, 4); + ASM_VOLATILE_CASE (31, 4); + default: + break; + } + break; + case 5: + /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */ + switch (rd) { + ASM_VOLATILE_CASE (0, 5); + ASM_VOLATILE_CASE (1, 5); + ASM_VOLATILE_CASE (2, 5); + ASM_VOLATILE_CASE (3, 5); + ASM_VOLATILE_CASE (4, 5); + ASM_VOLATILE_CASE (5, 5); + ASM_VOLATILE_CASE (6, 5); + ASM_VOLATILE_CASE (7, 5); + ASM_VOLATILE_CASE (8, 5); + ASM_VOLATILE_CASE (9, 5); + ASM_VOLATILE_CASE (10, 5); + ASM_VOLATILE_CASE (11, 5); + ASM_VOLATILE_CASE (12, 5); + ASM_VOLATILE_CASE (13, 5); + ASM_VOLATILE_CASE (14, 5); + ASM_VOLATILE_CASE (15, 5); + ASM_VOLATILE_CASE (16, 5); + ASM_VOLATILE_CASE (17, 5); + ASM_VOLATILE_CASE (18, 5); + ASM_VOLATILE_CASE (19, 5); + ASM_VOLATILE_CASE (20, 5); + ASM_VOLATILE_CASE (21, 5); + ASM_VOLATILE_CASE (22, 5); + ASM_VOLATILE_CASE (23, 5); + ASM_VOLATILE_CASE (24, 5); + ASM_VOLATILE_CASE (25, 5); + ASM_VOLATILE_CASE (26, 5); + ASM_VOLATILE_CASE (27, 5); + ASM_VOLATILE_CASE (28, 5); + ASM_VOLATILE_CASE (29, 5); + ASM_VOLATILE_CASE (30, 5); + ASM_VOLATILE_CASE (31, 5); + default: + break; + } + break; + case 6: + /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */ + switch (rd) { + ASM_VOLATILE_CASE (0, 6); + ASM_VOLATILE_CASE (1, 6); + ASM_VOLATILE_CASE (2, 6); + ASM_VOLATILE_CASE (3, 6); + ASM_VOLATILE_CASE (4, 6); + ASM_VOLATILE_CASE (5, 6); + ASM_VOLATILE_CASE (6, 6); + ASM_VOLATILE_CASE (7, 6); + ASM_VOLATILE_CASE (8, 6); + ASM_VOLATILE_CASE (9, 6); + ASM_VOLATILE_CASE (10, 6); + ASM_VOLATILE_CASE (11, 6); + ASM_VOLATILE_CASE (12, 6); + ASM_VOLATILE_CASE (13, 6); + ASM_VOLATILE_CASE (14, 6); + ASM_VOLATILE_CASE (15, 6); + ASM_VOLATILE_CASE (16, 6); + ASM_VOLATILE_CASE (17, 6); + ASM_VOLATILE_CASE (18, 6); + ASM_VOLATILE_CASE (19, 6); + ASM_VOLATILE_CASE (20, 6); + ASM_VOLATILE_CASE (21, 6); + ASM_VOLATILE_CASE (22, 6); + ASM_VOLATILE_CASE (23, 6); + ASM_VOLATILE_CASE (24, 6); + ASM_VOLATILE_CASE (25, 6); + ASM_VOLATILE_CASE (26, 6); + ASM_VOLATILE_CASE (27, 6); + ASM_VOLATILE_CASE (28, 6); + ASM_VOLATILE_CASE (29, 6); + ASM_VOLATILE_CASE (30, 6); + ASM_VOLATILE_CASE (31, 6); + default: + break; + } + break; + case 7: + /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */ + switch (rd) { + ASM_VOLATILE_CASE (0, 7); + ASM_VOLATILE_CASE (1, 7); + ASM_VOLATILE_CASE (2, 7); + ASM_VOLATILE_CASE (3, 7); + ASM_VOLATILE_CASE (4, 7); + ASM_VOLATILE_CASE (5, 7); + ASM_VOLATILE_CASE (6, 7); + ASM_VOLATILE_CASE (7, 7); + ASM_VOLATILE_CASE (8, 7); + ASM_VOLATILE_CASE (9, 7); + ASM_VOLATILE_CASE (10, 7); + ASM_VOLATILE_CASE (11, 7); + ASM_VOLATILE_CASE (12, 7); + ASM_VOLATILE_CASE (13, 7); + ASM_VOLATILE_CASE (14, 7); + ASM_VOLATILE_CASE (15, 7); + ASM_VOLATILE_CASE (16, 7); + ASM_VOLATILE_CASE (17, 7); + ASM_VOLATILE_CASE (18, 7); + ASM_VOLATILE_CASE (19, 7); + 
ASM_VOLATILE_CASE (20, 7); + ASM_VOLATILE_CASE (21, 7); + ASM_VOLATILE_CASE (22, 7); + ASM_VOLATILE_CASE (23, 7); + ASM_VOLATILE_CASE (24, 7); + ASM_VOLATILE_CASE (25, 7); + ASM_VOLATILE_CASE (26, 7); + ASM_VOLATILE_CASE (27, 7); + ASM_VOLATILE_CASE (28, 7); + ASM_VOLATILE_CASE (29, 7); + ASM_VOLATILE_CASE (30, 7); + ASM_VOLATILE_CASE (31, 7); + default: + break; + } + break; + + default: + break; + } +#endif + return x; +} + #define ASM_VOLATILE_CASE(rd, sel) \ case rd: asm volatile ("dmfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); break; @@ -573,6 +1078,23 @@ void mips32_dirtyhelper_sync(UInt stype) #endif } +#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2)) +ULong mips64_dirtyhelper_rdhwr ( ULong rt, ULong rd ) +{ + ULong x = 0; + switch (rd) { + case 1: /* x = SYNCI_StepSize() */ + __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) ); + break; + + default: + vassert(0); + break; + } + return x; +} +#endif + /*---------------------------------------------------------------*/ /*--- end guest_mips_helpers.c ---*/ /*---------------------------------------------------------------*/ diff --git a/VEX/priv/guest_mips_toIR.c b/VEX/priv/guest_mips_toIR.c index da43c177ad..0c70d6bf2e 100644 --- a/VEX/priv/guest_mips_toIR.c +++ b/VEX/priv/guest_mips_toIR.c @@ -7,7 +7,7 @@ This file is part of Valgrind, a dynamic binary instrumentation framework. - Copyright (C) 2010-2012 RT-RK + Copyright (C) 2010-2013 RT-RK mips-valgrind@rt-rk.com This program is free software; you can redistribute it and/or @@ -34,6 +34,7 @@ #include "libvex_ir.h" #include "libvex.h" #include "libvex_guest_mips32.h" +#include "libvex_guest_mips64.h" #include "main_util.h" #include "main_globals.h" @@ -41,13 +42,12 @@ #include "guest_mips_defs.h" /*------------------------------------------------------------*/ -/*--- Globals ---*/ +/*--- Globals ---*/ /*------------------------------------------------------------*/ /* These are set at the start of the translation of a instruction, so - that we don't have to pass them around endlessly. CONST means does - not change during translation of the instruction. -*/ + that we don't have to pass them around endlessly. CONST means does + not change during translation of the instruction. */ /* CONST: is the host bigendian? This has to do with float vs double register accesses on VFP, but it's complex and not properly thought @@ -57,22 +57,23 @@ static Bool host_is_bigendian; /* Pointer to the guest code area. */ static UChar *guest_code; -/* The guest address corresponding to guest_code[0]. */ -static Addr32 guest_PC_bbstart; - /* CONST: The guest address for the instruction currently being translated. */ +#if defined(VGP_mips32_linux) static Addr32 guest_PC_curr_instr; +#else +static Addr64 guest_PC_curr_instr; +#endif /* MOD: The IRSB* into which we're generating code. */ static IRSB *irsb; -/* Is our guest binary 32 or 64bit? Set at each call to +/* Is our guest binary 32 or 64bit? Set at each call to disInstr_MIPS below. */ static Bool mode64 = False; /*------------------------------------------------------------*/ -/*--- Debugging output ---*/ +/*--- Debugging output ---*/ /*------------------------------------------------------------*/ #define DIP(format, args...) \ @@ -92,79 +93,154 @@ static UInt integerGuestRegOffset(UInt iregNo) registers are accessed, but I don't think that ever happens on MIPS. 
*/ UInt ret; - switch (iregNo) { - case 0: - ret = offsetof(VexGuestMIPS32State, guest_r0); break; - case 1: - ret = offsetof(VexGuestMIPS32State, guest_r1); break; - case 2: - ret = offsetof(VexGuestMIPS32State, guest_r2); break; - case 3: - ret = offsetof(VexGuestMIPS32State, guest_r3); break; - case 4: - ret = offsetof(VexGuestMIPS32State, guest_r4); break; - case 5: - ret = offsetof(VexGuestMIPS32State, guest_r5); break; - case 6: - ret = offsetof(VexGuestMIPS32State, guest_r6); break; - case 7: - ret = offsetof(VexGuestMIPS32State, guest_r7); break; - case 8: - ret = offsetof(VexGuestMIPS32State, guest_r8); break; - case 9: - ret = offsetof(VexGuestMIPS32State, guest_r9); break; - case 10: - ret = offsetof(VexGuestMIPS32State, guest_r10); break; - case 11: - ret = offsetof(VexGuestMIPS32State, guest_r11); break; - case 12: - ret = offsetof(VexGuestMIPS32State, guest_r12); break; - case 13: - ret = offsetof(VexGuestMIPS32State, guest_r13); break; - case 14: - ret = offsetof(VexGuestMIPS32State, guest_r14); break; - case 15: - ret = offsetof(VexGuestMIPS32State, guest_r15); break; - case 16: - ret = offsetof(VexGuestMIPS32State, guest_r16); break; - case 17: - ret = offsetof(VexGuestMIPS32State, guest_r17); break; - case 18: - ret = offsetof(VexGuestMIPS32State, guest_r18); break; - case 19: - ret = offsetof(VexGuestMIPS32State, guest_r19); break; - case 20: - ret = offsetof(VexGuestMIPS32State, guest_r20); break; - case 21: - ret = offsetof(VexGuestMIPS32State, guest_r21); break; - case 22: - ret = offsetof(VexGuestMIPS32State, guest_r22); break; - case 23: - ret = offsetof(VexGuestMIPS32State, guest_r23); break; - case 24: - ret = offsetof(VexGuestMIPS32State, guest_r24); break; - case 25: - ret = offsetof(VexGuestMIPS32State, guest_r25); break; - case 26: - ret = offsetof(VexGuestMIPS32State, guest_r26); break; - case 27: - ret = offsetof(VexGuestMIPS32State, guest_r27); break; - case 28: - ret = offsetof(VexGuestMIPS32State, guest_r28); break; - case 29: - ret = offsetof(VexGuestMIPS32State, guest_r29); break; - case 30: - ret = offsetof(VexGuestMIPS32State, guest_r30); break; - case 31: - ret = offsetof(VexGuestMIPS32State, guest_r31); break; - default: - vassert(0); - break; - } + if (!mode64) + switch (iregNo) { + case 0: + ret = offsetof(VexGuestMIPS32State, guest_r0); break; + case 1: + ret = offsetof(VexGuestMIPS32State, guest_r1); break; + case 2: + ret = offsetof(VexGuestMIPS32State, guest_r2); break; + case 3: + ret = offsetof(VexGuestMIPS32State, guest_r3); break; + case 4: + ret = offsetof(VexGuestMIPS32State, guest_r4); break; + case 5: + ret = offsetof(VexGuestMIPS32State, guest_r5); break; + case 6: + ret = offsetof(VexGuestMIPS32State, guest_r6); break; + case 7: + ret = offsetof(VexGuestMIPS32State, guest_r7); break; + case 8: + ret = offsetof(VexGuestMIPS32State, guest_r8); break; + case 9: + ret = offsetof(VexGuestMIPS32State, guest_r9); break; + case 10: + ret = offsetof(VexGuestMIPS32State, guest_r10); break; + case 11: + ret = offsetof(VexGuestMIPS32State, guest_r11); break; + case 12: + ret = offsetof(VexGuestMIPS32State, guest_r12); break; + case 13: + ret = offsetof(VexGuestMIPS32State, guest_r13); break; + case 14: + ret = offsetof(VexGuestMIPS32State, guest_r14); break; + case 15: + ret = offsetof(VexGuestMIPS32State, guest_r15); break; + case 16: + ret = offsetof(VexGuestMIPS32State, guest_r16); break; + case 17: + ret = offsetof(VexGuestMIPS32State, guest_r17); break; + case 18: + ret = offsetof(VexGuestMIPS32State, guest_r18); break; + case 19: + ret = 
offsetof(VexGuestMIPS32State, guest_r19); break; + case 20: + ret = offsetof(VexGuestMIPS32State, guest_r20); break; + case 21: + ret = offsetof(VexGuestMIPS32State, guest_r21); break; + case 22: + ret = offsetof(VexGuestMIPS32State, guest_r22); break; + case 23: + ret = offsetof(VexGuestMIPS32State, guest_r23); break; + case 24: + ret = offsetof(VexGuestMIPS32State, guest_r24); break; + case 25: + ret = offsetof(VexGuestMIPS32State, guest_r25); break; + case 26: + ret = offsetof(VexGuestMIPS32State, guest_r26); break; + case 27: + ret = offsetof(VexGuestMIPS32State, guest_r27); break; + case 28: + ret = offsetof(VexGuestMIPS32State, guest_r28); break; + case 29: + ret = offsetof(VexGuestMIPS32State, guest_r29); break; + case 30: + ret = offsetof(VexGuestMIPS32State, guest_r30); break; + case 31: + ret = offsetof(VexGuestMIPS32State, guest_r31); break; + default: + vassert(0); + break; + } + else + switch (iregNo) { + case 0: + ret = offsetof(VexGuestMIPS64State, guest_r0); break; + case 1: + ret = offsetof(VexGuestMIPS64State, guest_r1); break; + case 2: + ret = offsetof(VexGuestMIPS64State, guest_r2); break; + case 3: + ret = offsetof(VexGuestMIPS64State, guest_r3); break; + case 4: + ret = offsetof(VexGuestMIPS64State, guest_r4); break; + case 5: + ret = offsetof(VexGuestMIPS64State, guest_r5); break; + case 6: + ret = offsetof(VexGuestMIPS64State, guest_r6); break; + case 7: + ret = offsetof(VexGuestMIPS64State, guest_r7); break; + case 8: + ret = offsetof(VexGuestMIPS64State, guest_r8); break; + case 9: + ret = offsetof(VexGuestMIPS64State, guest_r9); break; + case 10: + ret = offsetof(VexGuestMIPS64State, guest_r10); break; + case 11: + ret = offsetof(VexGuestMIPS64State, guest_r11); break; + case 12: + ret = offsetof(VexGuestMIPS64State, guest_r12); break; + case 13: + ret = offsetof(VexGuestMIPS64State, guest_r13); break; + case 14: + ret = offsetof(VexGuestMIPS64State, guest_r14); break; + case 15: + ret = offsetof(VexGuestMIPS64State, guest_r15); break; + case 16: + ret = offsetof(VexGuestMIPS64State, guest_r16); break; + case 17: + ret = offsetof(VexGuestMIPS64State, guest_r17); break; + case 18: + ret = offsetof(VexGuestMIPS64State, guest_r18); break; + case 19: + ret = offsetof(VexGuestMIPS64State, guest_r19); break; + case 20: + ret = offsetof(VexGuestMIPS64State, guest_r20); break; + case 21: + ret = offsetof(VexGuestMIPS64State, guest_r21); break; + case 22: + ret = offsetof(VexGuestMIPS64State, guest_r22); break; + case 23: + ret = offsetof(VexGuestMIPS64State, guest_r23); break; + case 24: + ret = offsetof(VexGuestMIPS64State, guest_r24); break; + case 25: + ret = offsetof(VexGuestMIPS64State, guest_r25); break; + case 26: + ret = offsetof(VexGuestMIPS64State, guest_r26); break; + case 27: + ret = offsetof(VexGuestMIPS64State, guest_r27); break; + case 28: + ret = offsetof(VexGuestMIPS64State, guest_r28); break; + case 29: + ret = offsetof(VexGuestMIPS64State, guest_r29); break; + case 30: + ret = offsetof(VexGuestMIPS64State, guest_r30); break; + case 31: + ret = offsetof(VexGuestMIPS64State, guest_r31); break; + default: + vassert(0); + break; + } return ret; } +#if defined(VGP_mips32_linux) #define OFFB_PC offsetof(VexGuestMIPS32State, guest_PC) +#else +#define OFFB_PC offsetof(VexGuestMIPS64State, guest_PC) +#endif /* ---------------- Floating point registers ---------------- */ @@ -172,75 +248,146 @@ static UInt floatGuestRegOffset(UInt fregNo) { vassert(fregNo < 32); UInt ret; - switch (fregNo) { - case 0: - ret = offsetof(VexGuestMIPS32State, guest_f0); break; - 
case 1: - ret = offsetof(VexGuestMIPS32State, guest_f1); break; - case 2: - ret = offsetof(VexGuestMIPS32State, guest_f2); break; - case 3: - ret = offsetof(VexGuestMIPS32State, guest_f3); break; - case 4: - ret = offsetof(VexGuestMIPS32State, guest_f4); break; - case 5: - ret = offsetof(VexGuestMIPS32State, guest_f5); break; - case 6: - ret = offsetof(VexGuestMIPS32State, guest_f6); break; - case 7: - ret = offsetof(VexGuestMIPS32State, guest_f7); break; - case 8: - ret = offsetof(VexGuestMIPS32State, guest_f8); break; - case 9: - ret = offsetof(VexGuestMIPS32State, guest_f9); break; - case 10: - ret = offsetof(VexGuestMIPS32State, guest_f10); break; - case 11: - ret = offsetof(VexGuestMIPS32State, guest_f11); break; - case 12: - ret = offsetof(VexGuestMIPS32State, guest_f12); break; - case 13: - ret = offsetof(VexGuestMIPS32State, guest_f13); break; - case 14: - ret = offsetof(VexGuestMIPS32State, guest_f14); break; - case 15: - ret = offsetof(VexGuestMIPS32State, guest_f15); break; - case 16: - ret = offsetof(VexGuestMIPS32State, guest_f16); break; - case 17: - ret = offsetof(VexGuestMIPS32State, guest_f17); break; - case 18: - ret = offsetof(VexGuestMIPS32State, guest_f18); break; - case 19: - ret = offsetof(VexGuestMIPS32State, guest_f19); break; - case 20: - ret = offsetof(VexGuestMIPS32State, guest_f20); break; - case 21: - ret = offsetof(VexGuestMIPS32State, guest_f21); break; - case 22: - ret = offsetof(VexGuestMIPS32State, guest_f22); break; - case 23: - ret = offsetof(VexGuestMIPS32State, guest_f23); break; - case 24: - ret = offsetof(VexGuestMIPS32State, guest_f24); break; - case 25: - ret = offsetof(VexGuestMIPS32State, guest_f25); break; - case 26: - ret = offsetof(VexGuestMIPS32State, guest_f26); break; - case 27: - ret = offsetof(VexGuestMIPS32State, guest_f27); break; - case 28: - ret = offsetof(VexGuestMIPS32State, guest_f28); break; - case 29: - ret = offsetof(VexGuestMIPS32State, guest_f29); break; - case 30: - ret = offsetof(VexGuestMIPS32State, guest_f30); break; - case 31: - ret = offsetof(VexGuestMIPS32State, guest_f31); break; - default: - vassert(0); - break; - } + if (!mode64) + switch (fregNo) { + case 0: + ret = offsetof(VexGuestMIPS32State, guest_f0); break; + case 1: + ret = offsetof(VexGuestMIPS32State, guest_f1); break; + case 2: + ret = offsetof(VexGuestMIPS32State, guest_f2); break; + case 3: + ret = offsetof(VexGuestMIPS32State, guest_f3); break; + case 4: + ret = offsetof(VexGuestMIPS32State, guest_f4); break; + case 5: + ret = offsetof(VexGuestMIPS32State, guest_f5); break; + case 6: + ret = offsetof(VexGuestMIPS32State, guest_f6); break; + case 7: + ret = offsetof(VexGuestMIPS32State, guest_f7); break; + case 8: + ret = offsetof(VexGuestMIPS32State, guest_f8); break; + case 9: + ret = offsetof(VexGuestMIPS32State, guest_f9); break; + case 10: + ret = offsetof(VexGuestMIPS32State, guest_f10); break; + case 11: + ret = offsetof(VexGuestMIPS32State, guest_f11); break; + case 12: + ret = offsetof(VexGuestMIPS32State, guest_f12); break; + case 13: + ret = offsetof(VexGuestMIPS32State, guest_f13); break; + case 14: + ret = offsetof(VexGuestMIPS32State, guest_f14); break; + case 15: + ret = offsetof(VexGuestMIPS32State, guest_f15); break; + case 16: + ret = offsetof(VexGuestMIPS32State, guest_f16); break; + case 17: + ret = offsetof(VexGuestMIPS32State, guest_f17); break; + case 18: + ret = offsetof(VexGuestMIPS32State, guest_f18); break; + case 19: + ret = offsetof(VexGuestMIPS32State, guest_f19); break; + case 20: + ret = offsetof(VexGuestMIPS32State, 
guest_f20); break; + case 21: + ret = offsetof(VexGuestMIPS32State, guest_f21); break; + case 22: + ret = offsetof(VexGuestMIPS32State, guest_f22); break; + case 23: + ret = offsetof(VexGuestMIPS32State, guest_f23); break; + case 24: + ret = offsetof(VexGuestMIPS32State, guest_f24); break; + case 25: + ret = offsetof(VexGuestMIPS32State, guest_f25); break; + case 26: + ret = offsetof(VexGuestMIPS32State, guest_f26); break; + case 27: + ret = offsetof(VexGuestMIPS32State, guest_f27); break; + case 28: + ret = offsetof(VexGuestMIPS32State, guest_f28); break; + case 29: + ret = offsetof(VexGuestMIPS32State, guest_f29); break; + case 30: + ret = offsetof(VexGuestMIPS32State, guest_f30); break; + case 31: + ret = offsetof(VexGuestMIPS32State, guest_f31); break; + default: + vassert(0); + break; + } + else + switch (fregNo) { + case 0: + ret = offsetof(VexGuestMIPS64State, guest_f0); break; + case 1: + ret = offsetof(VexGuestMIPS64State, guest_f1); break; + case 2: + ret = offsetof(VexGuestMIPS64State, guest_f2); break; + case 3: + ret = offsetof(VexGuestMIPS64State, guest_f3); break; + case 4: + ret = offsetof(VexGuestMIPS64State, guest_f4); break; + case 5: + ret = offsetof(VexGuestMIPS64State, guest_f5); break; + case 6: + ret = offsetof(VexGuestMIPS64State, guest_f6); break; + case 7: + ret = offsetof(VexGuestMIPS64State, guest_f7); break; + case 8: + ret = offsetof(VexGuestMIPS64State, guest_f8); break; + case 9: + ret = offsetof(VexGuestMIPS64State, guest_f9); break; + case 10: + ret = offsetof(VexGuestMIPS64State, guest_f10); break; + case 11: + ret = offsetof(VexGuestMIPS64State, guest_f11); break; + case 12: + ret = offsetof(VexGuestMIPS64State, guest_f12); break; + case 13: + ret = offsetof(VexGuestMIPS64State, guest_f13); break; + case 14: + ret = offsetof(VexGuestMIPS64State, guest_f14); break; + case 15: + ret = offsetof(VexGuestMIPS64State, guest_f15); break; + case 16: + ret = offsetof(VexGuestMIPS64State, guest_f16); break; + case 17: + ret = offsetof(VexGuestMIPS64State, guest_f17); break; + case 18: + ret = offsetof(VexGuestMIPS64State, guest_f18); break; + case 19: + ret = offsetof(VexGuestMIPS64State, guest_f19); break; + case 20: + ret = offsetof(VexGuestMIPS64State, guest_f20); break; + case 21: + ret = offsetof(VexGuestMIPS64State, guest_f21); break; + case 22: + ret = offsetof(VexGuestMIPS64State, guest_f22); break; + case 23: + ret = offsetof(VexGuestMIPS64State, guest_f23); break; + case 24: + ret = offsetof(VexGuestMIPS64State, guest_f24); break; + case 25: + ret = offsetof(VexGuestMIPS64State, guest_f25); break; + case 26: + ret = offsetof(VexGuestMIPS64State, guest_f26); break; + case 27: + ret = offsetof(VexGuestMIPS64State, guest_f27); break; + case 28: + ret = offsetof(VexGuestMIPS64State, guest_f28); break; + case 29: + ret = offsetof(VexGuestMIPS64State, guest_f29); break; + case 30: + ret = offsetof(VexGuestMIPS64State, guest_f30); break; + case 31: + ret = offsetof(VexGuestMIPS64State, guest_f31); break; + default: + vassert(0); + break; + } return ret; } @@ -284,8 +431,26 @@ static inline UInt getUInt(UChar * p) | BITS4((_b3),(_b2),(_b1),(_b0))) #define LOAD_STORE_PATTERN \ - t1 = newTemp(Ity_I32); \ - assign(t1, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm)))); \ + t1 = newTemp(mode64 ? 
Ity_I64 : Ity_I32); \ + if(!mode64) \ + assign(t1, binop(Iop_Add32, getIReg(rs), \ + mkU32(extend_s_16to32(imm)))); \ + else \ + assign(t1, binop(Iop_Add64, getIReg(rs), \ + mkU64(extend_s_16to64(imm)))); \ + +#define LWX_SWX_PATTERN64 \ + t2 = newTemp(Ity_I64); \ + assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFC))); \ + t4 = newTemp(Ity_I32); \ + assign(t4, mkNarrowTo32( ty, binop(Iop_And64, \ + mkexpr(t1), mkU64(0x3)))); + +#define LWX_SWX_PATTERN64_1 \ + t2 = newTemp(Ity_I64); \ + assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8))); \ + t4 = newTemp(Ity_I64); \ + assign(t4, binop(Iop_And64, mkexpr(t1), mkU64(0x7))); #define LWX_SWX_PATTERN \ t2 = newTemp(Ity_I32); \ @@ -305,6 +470,18 @@ static inline UInt getUInt(UChar * p) ) \ ) +#define SXXV_PATTERN64(op) \ + putIReg(rd, mkWidenFrom32(ty, binop(op, \ + mkNarrowTo32(ty, getIReg(rt)), \ + unop(Iop_32to8, \ + binop(Iop_And32, \ + mkNarrowTo32(ty, getIReg(rs)), \ + mkU32(0x0000001F) \ + ) \ + ) \ + ), True \ + )) + #define SXX_PATTERN(op) \ putIReg(rd, binop(op, getIReg(rt), mkU8(sa))); @@ -317,6 +494,11 @@ static inline UInt getUInt(UChar * p) #define ALUI_PATTERN64(op) \ putIReg(rt, binop(op, getIReg(rs), mkU64(imm))); +#define ALU_PATTERN64(op) \ + putIReg(rd, mkWidenFrom32(ty, binop(op, \ + mkNarrowTo32(ty, getIReg(rs)), \ + mkNarrowTo32(ty, getIReg(rt))), True)); + #define FP_CONDITIONAL_CODE \ t3 = newTemp(Ity_I32); \ assign(t3, binop(Iop_And32, \ @@ -326,7 +508,7 @@ static inline UInt getUInt(UChar * p) mkU32(0x1))); /*------------------------------------------------------------*/ -/*--- Field helpers ---*/ +/*--- Field helpers ---*/ /*------------------------------------------------------------*/ static UInt get_opcode(UInt mipsins) @@ -613,6 +795,12 @@ static IRExpr *triop(IROp op, IRExpr * a1, IRExpr * a2, IRExpr * a3) return IRExpr_Triop(op, a1, a2, a3); } +static IRExpr *qop ( IROp op, IRExpr * a1, IRExpr * a2, IRExpr * a3, + IRExpr * a4 ) +{ + return IRExpr_Qop(op, a1, a2, a3, a4); +} + static IRExpr *load(IRType ty, IRExpr * addr) { IRExpr *load1 = NULL; @@ -682,8 +870,22 @@ static UInt extend_s_18to32(UInt x) return (UInt) ((((Int) x) << 14) >> 14); } -static void jmp_lit( /*MOD*/DisResult* dres, - IRJumpKind kind, Addr32 d32 ) +static ULong extend_s_16to64 ( UInt x ) +{ + return (ULong) ((((Long) x) << 48) >> 48); +} + +static ULong extend_s_18to64 ( UInt x ) +{ + return (ULong) ((((Long) x) << 46) >> 46); +} + +static ULong extend_s_32to64 ( UInt x ) +{ + return (ULong) ((((Long) x) << 32) >> 32); +} + +static void jmp_lit32 ( /*MOD*/ DisResult* dres, IRJumpKind kind, Addr32 d32 ) { vassert(dres->whatNext == Dis_Continue); vassert(dres->len == 0); @@ -694,6 +896,17 @@ static void jmp_lit( /*MOD*/DisResult* dres, stmt( IRStmt_Put( OFFB_PC, mkU32(d32) ) ); } +static void jmp_lit64 ( /*MOD*/ DisResult* dres, IRJumpKind kind, Addr64 d64 ) +{ + vassert(dres->whatNext == Dis_Continue); + vassert(dres->len == 0); + vassert(dres->continueAt == 0); + vassert(dres->jk_StopHere == Ijk_INVALID); + dres->whatNext = Dis_StopHere; + dres->jk_StopHere = kind; + stmt(IRStmt_Put(OFFB_PC, mkU64(d64))); +} + /* Fetch a byte from the guest insn stream. 
*/ static UChar getIByte(Int delta) { @@ -713,27 +926,42 @@ static IRExpr *getIReg(UInt iregNo) static IRExpr *getHI(void) { - return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_HI), Ity_I32); + if (mode64) + return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_HI), Ity_I64); + else + return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_HI), Ity_I32); } static IRExpr *getLO(void) { - return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_LO), Ity_I32); + if (mode64) + return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_LO), Ity_I64); + else + return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_LO), Ity_I32); } static IRExpr *getFCSR(void) { - return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_FCSR), Ity_I32); + if (mode64) + return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_FCSR), Ity_I32); + else + return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_FCSR), Ity_I32); } static void putFCSR(IRExpr * e) { - stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_FCSR), e)); + if (mode64) + stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_FCSR), e)); + else + stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_FCSR), e)); } static IRExpr *getULR(void) { - return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_ULR), Ity_I32); + if (mode64) + return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_ULR), Ity_I64); + else + return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_ULR), Ity_I32); } static void putIReg(UInt archreg, IRExpr * e) @@ -747,12 +975,24 @@ static void putIReg(UInt archreg, IRExpr * e) static void putLO(IRExpr * e) { - stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_LO), e)); + if (mode64) + stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_LO), e)); + else + stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_LO), e)); } static void putHI(IRExpr * e) { - stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_HI), e)); + if (mode64) + stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_HI), e)); + else + stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_HI), e)); +} + +static IRExpr *mkNarrowTo8 ( IRType ty, IRExpr * src ) +{ + vassert(ty == Ity_I32 || ty == Ity_I64); + return ty == Ity_I64 ? unop(Iop_64to8, src) : unop(Iop_32to8, src); } static void putPC(IRExpr * e) @@ -823,7 +1063,14 @@ static IRExpr *getLoFromF64(IRType ty, IRExpr * src) static IRExpr *mkWidenFromF32(IRType ty, IRExpr * src) { vassert(ty == Ity_F32 || ty == Ity_F64); - return ty == Ity_F64 ? unop(Iop_F32toF64, src) : src; + if (ty == Ity_F64) { + IRTemp t0 = newTemp(Ity_I32); + IRTemp t1 = newTemp(Ity_I64); + assign(t0, unop(Iop_ReinterpF32asI32, src)); + assign(t1, binop(Iop_32HLto64, mkU32(0x0), mkexpr(t0))); + return unop(Iop_ReinterpI64asF64, mkexpr(t1)); + } else + return src; } static IRExpr *dis_branch_likely(IRExpr * guard, UInt imm) @@ -836,17 +1083,27 @@ static IRExpr *dis_branch_likely(IRExpr * guard, UInt imm) is added to the address of the instruction following the branch (not the branch itself), in the branch delay slot, to form a PC-relative effective target address. 
*/ - branch_offset = extend_s_18to32(imm << 2); + if (mode64) + branch_offset = extend_s_18to64(imm << 2); + else + branch_offset = extend_s_18to32(imm << 2); t0 = newTemp(Ity_I1); assign(t0, guard); - stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring, - IRConst_U32(guest_PC_curr_instr + 8), OFFB_PC)); + if (mode64) + stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring, + IRConst_U64(guest_PC_curr_instr + 8), OFFB_PC)); + else + stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring, + IRConst_U32(guest_PC_curr_instr + 8), OFFB_PC)); irsb->jumpkind = Ijk_Boring; - return mkU32(guest_PC_curr_instr + 4 + branch_offset); + if (mode64) + return mkU64(guest_PC_curr_instr + 4 + branch_offset); + else + return mkU32(guest_PC_curr_instr + 4 + branch_offset); } static void dis_branch(Bool link, IRExpr * guard, UInt imm, IRStmt ** set) @@ -854,8 +1111,11 @@ static void dis_branch(Bool link, IRExpr * guard, UInt imm, IRStmt ** set) ULong branch_offset; IRTemp t0; - if (link) { // LR (GPR31) = addr of the 2nd instr after branch instr - putIReg(31, mkU32(guest_PC_curr_instr + 8)); + if (link) { /* LR (GPR31) = addr of the 2nd instr after branch instr */ + if (mode64) + putIReg(31, mkU64(guest_PC_curr_instr + 8)); + else + putIReg(31, mkU32(guest_PC_curr_instr + 8)); } /* PC = PC + (SignExtend(signed_immed_24) << 2) @@ -864,13 +1124,21 @@ static void dis_branch(Bool link, IRExpr * guard, UInt imm, IRStmt ** set) the branch (not the branch itself), in the branch delay slot, to form a PC-relative effective target address. */ - branch_offset = extend_s_18to32(imm << 2); + if (mode64) + branch_offset = extend_s_18to64(imm << 2); + else + branch_offset = extend_s_18to32(imm << 2); t0 = newTemp(Ity_I1); assign(t0, guard); - *set = IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring, - IRConst_U32(guest_PC_curr_instr + 4 + (UInt) branch_offset), - OFFB_PC); + if (mode64) + *set = IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring, + IRConst_U64(guest_PC_curr_instr + 4 + branch_offset), + OFFB_PC); + else + *set = IRStmt_Exit(mkexpr(t0), link ? 
Ijk_Call : Ijk_Boring, + IRConst_U32(guest_PC_curr_instr + 4 + + (UInt) branch_offset), OFFB_PC); } static IRExpr *getFReg(UInt dregNo) @@ -882,28 +1150,34 @@ static IRExpr *getFReg(UInt dregNo) static IRExpr *getDReg(UInt dregNo) { - vassert(dregNo < 32); - IRTemp t0 = newTemp(Ity_F32); - IRTemp t1 = newTemp(Ity_F32); - IRTemp t2 = newTemp(Ity_F64); - IRTemp t3 = newTemp(Ity_I32); - IRTemp t4 = newTemp(Ity_I32); - IRTemp t5 = newTemp(Ity_I64); + if (mode64) { + vassert(dregNo < 32); + IRType ty = Ity_F64; + return IRExpr_Get(floatGuestRegOffset(dregNo), ty); + } else { + vassert(dregNo < 32); + IRTemp t0 = newTemp(Ity_F32); + IRTemp t1 = newTemp(Ity_F32); + IRTemp t2 = newTemp(Ity_F64); + IRTemp t3 = newTemp(Ity_I32); + IRTemp t4 = newTemp(Ity_I32); + IRTemp t5 = newTemp(Ity_I64); #if defined (_MIPSEL) - assign(t0, getFReg(dregNo)); - assign(t1, getFReg(dregNo + 1)); + assign(t0, getFReg(dregNo)); + assign(t1, getFReg(dregNo + 1)); #elif defined (_MIPSEB) - assign(t0, getFReg(dregNo + 1)); - assign(t1, getFReg(dregNo)); + assign(t0, getFReg(dregNo + 1)); + assign(t1, getFReg(dregNo)); #endif - assign(t3, unop(Iop_ReinterpF32asI32, mkexpr(t0))); - assign(t4, unop(Iop_ReinterpF32asI32, mkexpr(t1))); - assign(t5, binop(Iop_32HLto64, mkexpr(t4), mkexpr(t3))); - assign(t2, unop(Iop_ReinterpI64asF64, mkexpr(t5))); + assign(t3, unop(Iop_ReinterpF32asI32, mkexpr(t0))); + assign(t4, unop(Iop_ReinterpF32asI32, mkexpr(t1))); + assign(t5, binop(Iop_32HLto64, mkexpr(t4), mkexpr(t3))); + assign(t2, unop(Iop_ReinterpI64asF64, mkexpr(t5))); - return mkexpr(t2); + return mkexpr(t2); + } } static void putFReg(UInt dregNo, IRExpr * e) @@ -916,23 +1190,30 @@ static void putFReg(UInt dregNo, IRExpr * e) static void putDReg(UInt dregNo, IRExpr * e) { - vassert(dregNo < 32); - vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64); - IRTemp t1 = newTemp(Ity_F64); - IRTemp t4 = newTemp(Ity_I32); - IRTemp t5 = newTemp(Ity_I32); - IRTemp t6 = newTemp(Ity_I64); - assign(t1, e); - assign(t6, unop(Iop_ReinterpF64asI64, mkexpr(t1))); - assign(t4, unop(Iop_64HIto32, mkexpr(t6))); // hi - assign(t5, unop(Iop_64to32, mkexpr(t6))); //lo + if (mode64) { + vassert(dregNo < 32); + IRType ty = Ity_F64; + vassert(typeOfIRExpr(irsb->tyenv, e) == ty); + stmt(IRStmt_Put(floatGuestRegOffset(dregNo), e)); + } else { + vassert(dregNo < 32); + vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64); + IRTemp t1 = newTemp(Ity_F64); + IRTemp t4 = newTemp(Ity_I32); + IRTemp t5 = newTemp(Ity_I32); + IRTemp t6 = newTemp(Ity_I64); + assign(t1, e); + assign(t6, unop(Iop_ReinterpF64asI64, mkexpr(t1))); + assign(t4, unop(Iop_64HIto32, mkexpr(t6))); /* hi */ + assign(t5, unop(Iop_64to32, mkexpr(t6))); /* lo */ #if defined (_MIPSEL) - putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t5))); - putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t4))); + putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t5))); + putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t4))); #elif defined (_MIPSEB) - putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t5))); - putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t4))); + putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t5))); + putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t4))); #endif + } } static void setFPUCondCode(IRExpr * e, UInt cc) @@ -949,7 +1230,7 @@ static void setFPUCondCode(IRExpr * e, UInt cc) } } -static IRExpr */* :: Ity_I32 */get_IR_roundingmode(void) +static IRExpr* get_IR_roundingmode ( void ) { /* rounding mode | MIPS | IR @@ -962,21 +1243,216 @@ static IRExpr */* :: Ity_I32 
*/get_IR_roundingmode(void) IRTemp rm_MIPS = newTemp(Ity_I32); /* Last two bits in FCSR are rounding mode. */ - assign(rm_MIPS, binop(Iop_And32, IRExpr_Get(offsetof(VexGuestMIPS32State, - guest_FCSR), Ity_I32), mkU32(3))); + if (mode64) + assign(rm_MIPS, binop(Iop_And32, IRExpr_Get(offsetof(VexGuestMIPS64State, + guest_FCSR), Ity_I32), mkU32(3))); + else + assign(rm_MIPS, binop(Iop_And32, IRExpr_Get(offsetof(VexGuestMIPS32State, + guest_FCSR), Ity_I32), mkU32(3))); - // rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2) + /* rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2) */ return binop(Iop_Xor32, mkexpr(rm_MIPS), binop(Iop_And32, binop(Iop_Shl32, mkexpr(rm_MIPS), mkU8(1)), mkU32(2))); } +/* sz, ULong -> IRExpr */ +static IRExpr *mkSzImm ( IRType ty, ULong imm64 ) +{ + vassert(ty == Ity_I32 || ty == Ity_I64); + return ty == Ity_I64 ? mkU64(imm64) : mkU32((UInt) imm64); +} + +static IRConst *mkSzConst ( IRType ty, ULong imm64 ) +{ + vassert(ty == Ity_I32 || ty == Ity_I64); + return (ty == Ity_I64 ? IRConst_U64(imm64) : IRConst_U32((UInt) imm64)); +} + +/* Make sure we get valid 32 and 64bit addresses */ +static Addr64 mkSzAddr ( IRType ty, Addr64 addr ) +{ + vassert(ty == Ity_I32 || ty == Ity_I64); + return (ty == Ity_I64 ? (Addr64) addr : + (Addr64) extend_s_32to64(toUInt(addr))); +} + +/* Shift and Rotate instructions for MIPS64 */ +static Bool dis_instr_shrt ( UInt theInstr ) +{ + UInt opc2 = get_function(theInstr); + UChar regRs = get_rs(theInstr); + UChar regRt = get_rt(theInstr); + UChar regRd = get_rd(theInstr); + UChar uImmsa = get_sa(theInstr); + Long sImmsa = extend_s_16to64(uImmsa); + IRType ty = mode64 ? Ity_I64 : Ity_I32; + IRTemp tmp = newTemp(ty); + IRTemp tmpOr = newTemp(ty); + IRTemp tmpRt = newTemp(ty); + IRTemp tmpRs = newTemp(ty); + IRTemp tmpRd = newTemp(ty); + + assign(tmpRs, getIReg(regRs)); + assign(tmpRt, getIReg(regRt)); + + switch (opc2) { + case 0x3A: + if ((regRs & 0x01) == 0) { + /* Doubleword Shift Right Logical - DSRL; MIPS64 */ + DIP("dsrl r%u, r%u,r%u\n", regRd, regRt, (Int) sImmsa); + assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa))); + putIReg(regRd, mkexpr(tmpRd)); + } else if ((regRs & 0x01) == 1) { + /* Doubleword Rotate Right - DROTR; MIPS64r2 */ + vassert(mode64); + DIP("drotr r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa); + IRTemp tmpL = newTemp(ty); + IRTemp tmpR = newTemp(ty); + assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa))); + assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(63 - uImmsa))); + assign(tmpL, binop(Iop_Shl64, mkexpr(tmp), mkU8(1))); + assign(tmpRd, binop(Iop_Or64, mkexpr(tmpL), mkexpr(tmpR))); + putIReg(regRd, mkexpr(tmpRd)); + } else + return False; + break; + + case 0x3E: + if ((regRs & 0x01) == 0) { + /* Doubleword Shift Right Logical Plus 32 - DSRL32; MIPS64 */ + DIP("dsrl32 r%u, r%u,r%u\n", regRd, regRt, (Int) (sImmsa + 32)); + assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa + 32))); + putIReg(regRd, mkexpr(tmpRd)); + } else if ((regRs & 0x01) == 1) { + /* Doubleword Rotate Right Plus 32 - DROTR32; MIPS64r2 */ + DIP("drotr32 r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa); + vassert(mode64); + IRTemp tmpL = newTemp(ty); + IRTemp tmpR = newTemp(ty); + /* (tmpRt >> sa) | (tmpRt << (64 - sa)) */ + assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa + 32))); + assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt), + mkU8(63 - (uImmsa + 32)))); + assign(tmpL, binop(Iop_Shl64, mkexpr(tmp), mkU8(1))); + assign(tmpRd, binop(Iop_Or64, mkexpr(tmpL), mkexpr(tmpR))); + putIReg(regRd, mkexpr(tmpRd)); + } else + return False; 
+ break; + + case 0x16: + if ((uImmsa & 0x01) == 0) { + /* Doubleword Shift Right Logical Variable - DSRLV; MIPS64 */ + DIP("dsrlv r%u, r%u,r%u\n", regRd, regRt, regRs); + IRTemp tmpRs8 = newTemp(Ity_I8); + /* s = tmpRs[5..0] */ + assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkU64(63))); + assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp))); + assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkexpr(tmpRs8))); + putIReg(regRd, mkexpr(tmpRd)); + } else if ((uImmsa & 0x01) == 1) { + /* Doubleword Rotate Right Variable - DROTRV; MIPS64r2 */ + DIP("drotrv r%u, r%u,r%u\n", regRd, regRt, regRs); + IRTemp tmpL = newTemp(ty); + IRTemp tmpR = newTemp(ty); + IRTemp tmpRs8 = newTemp(Ity_I8); + IRTemp tmpLs8 = newTemp(Ity_I8); + IRTemp tmp64 = newTemp(ty); + /* s = tmpRs[5...0] + m = 64 - s + (tmpRt << s) | (tmpRt >> m) */ + + assign(tmp64, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63))); + assign(tmp, binop(Iop_Sub64, mkU64(63), mkexpr(tmp64))); + + assign(tmpLs8, mkNarrowTo8(ty, mkexpr(tmp))); + assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp64))); + + assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkexpr(tmpRs8))); + assign(tmpL, binop(Iop_Shl64, mkexpr(tmpRt), mkexpr(tmpLs8))); + assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpL), mkU8(1))); + assign(tmpOr, binop(Iop_Or64, mkexpr(tmpRd), mkexpr(tmpR))); + + putIReg(regRd, mkexpr(tmpOr)); + } else + return False; + break; + + case 0x38: /* Doubleword Shift Left Logical - DSLL; MIPS64 */ + DIP("dsll r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa); + vassert(mode64); + assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa))); + putIReg(regRd, mkexpr(tmpRd)); + break; + + case 0x3C: /* Doubleword Shift Left Logical Plus 32 - DSLL32; MIPS64 */ + DIP("dsll32 r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa); + assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa + 32))); + putIReg(regRd, mkexpr(tmpRd)); + break; + + case 0x14: { /* Doubleword Shift Left Logical Variable - DSLLV; MIPS64 */ + DIP("dsllv r%u, r%u,r%u\n", regRd, regRt, regRs); + IRTemp tmpRs8 = newTemp(Ity_I8); + + assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63))); + assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp))); + assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkexpr(tmpRs8))); + putIReg(regRd, mkexpr(tmpRd)); + break; + } + + case 0x3B: /* Doubleword Shift Right Arithmetic - DSRA; MIPS64 */ + DIP("dsra r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa); + assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkU8(uImmsa))); + putIReg(regRd, mkexpr(tmpRd)); + break; + + case 0x3F: /* Doubleword Shift Right Arithmetic Plus 32 - DSRA32; + MIPS64 */ + DIP("dsra32 r%u, r%u,%d\n", regRd, regRt, (Int) sImmsa); + assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkU8(uImmsa + 32))); + putIReg(regRd, mkexpr(tmpRd)); + break; + + case 0x17: { /* Doubleword Shift Right Arithmetic Variable - DSRAV; + MIPS64 */ + DIP("dsrav r%u, r%u,r%u\n", regRd, regRt, regRs); + IRTemp tmpRs8 = newTemp(Ity_I8); + assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63))); + assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp))); + assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkexpr(tmpRs8))); + putIReg(regRd, mkexpr(tmpRd)); + break; + + } + + default: + return False; + + } + return True; +} + +static IROp mkSzOp ( IRType ty, IROp op8 ) +{ + Int adj; + vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ty == Ity_I64); + vassert(op8 == Iop_Add8 || op8 == Iop_Sub8 || op8 == Iop_Mul8 + || op8 == Iop_Or8 || op8 == Iop_And8 || op8 == Iop_Xor8 + || op8 == Iop_Shl8 || op8 == Iop_Shr8 || op8 == Iop_Sar8 + || op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8 || op8 == 
Iop_Not8); + adj = ty == Ity_I8 ? 0 : (ty == Ity_I16 ? 1 : (ty == Ity_I32 ? 2 : 3)); + return adj + op8; +} + /*********************************************************/ /*--- Floating Point Compare ---*/ /*********************************************************/ -static Bool dis_instr_CCondFmt(UInt cins) +static Bool dis_instr_CCondFmt ( UInt cins ) { - IRTemp t0, t1, t2, t3; + IRTemp t0, t1, t2, t3, tmp5, tmp6; IRTemp ccIR = newTemp(Ity_I32); IRTemp ccMIPS = newTemp(Ity_I32); UInt FC = get_FC(cins); @@ -985,116 +1461,226 @@ static Bool dis_instr_CCondFmt(UInt cins) UInt ft = get_ft(cins); UInt cond = get_cond(cins); - if (FC == 0x3) { // C.cond.fmt + if (FC == 0x3) { /* C.cond.fmt */ UInt fpc_cc = get_fpc_cc(cins); switch (fmt) { - case 0x10: { //C.cond.S + case 0x10: { /* C.cond.S */ DIP("C.cond.S %d f%d, f%d\n", fpc_cc, fs, ft); - t0 = newTemp(Ity_I32); - t1 = newTemp(Ity_I32); - t2 = newTemp(Ity_I32); - t3 = newTemp(Ity_I32); - - assign(ccIR, binop(Iop_CmpF64, unop(Iop_F32toF64, getFReg(fs)), - unop(Iop_F32toF64, getFReg(ft)))); - /* Map compare result from IR to MIPS */ - /* - FP cmp result | MIPS | IR - -------------------------- - UN | 0x1 | 0x45 - EQ | 0x2 | 0x40 - GT | 0x4 | 0x00 - LT | 0x8 | 0x01 - */ - - // ccMIPS = Shl(1, (~(ccIR>>5) & 2) - // | ((ccIR ^ (ccIR>>6)) & 1) - assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8, - binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32, - binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))), mkU32(2)), - binop(Iop_And32, binop(Iop_Xor32, mkexpr(ccIR), - binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))), - mkU32(1)))))); - assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1))); // UN - assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS), - mkU8(0x1)), mkU32(0x1))); // EQ - assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32, - mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1))); // NGT - assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS), - mkU8(0x3)), mkU32(0x1))); // LT - - switch (cond) { - case 0x0: - setFPUCondCode(mkU32(0), fpc_cc); - break; - case 0x1: - DIP("unorderd: %d\n", fpc_cc); - setFPUCondCode(mkexpr(t0), fpc_cc); - break; - case 0x2: - setFPUCondCode(mkexpr(t1), fpc_cc); - break; - case 0x3: - setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)), - fpc_cc); - break; - case 0x4: - setFPUCondCode(mkexpr(t3), fpc_cc); - break; - case 0x5: - setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)), - fpc_cc); - break; - case 0x6: - setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)), - fpc_cc); - break; - case 0x7: - setFPUCondCode(mkexpr(t2), fpc_cc); - break; - case 0x8: - setFPUCondCode(mkU32(0), fpc_cc); - break; - case 0x9: - setFPUCondCode(mkexpr(t0), fpc_cc); - break; - case 0xA: - setFPUCondCode(mkexpr(t1), fpc_cc); - break; - case 0xB: - setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)), - fpc_cc); - break; - case 0xC: - setFPUCondCode(mkexpr(t3), fpc_cc); - break; - case 0xD: - setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)), - fpc_cc); - break; - case 0xE: - setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)), - fpc_cc); - break; - case 0xF: - setFPUCondCode(mkexpr(t2), fpc_cc); - break; + if (mode64) { + t0 = newTemp(Ity_I32); + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I32); + + tmp5 = newTemp(Ity_F64); + tmp6 = newTemp(Ity_F64); + + assign(tmp5, unop(Iop_F32toF64, getLoFromF64(Ity_F64, + getFReg(fs)))); + assign(tmp6, unop(Iop_F32toF64, getLoFromF64(Ity_F64, + getFReg(ft)))); + + assign(ccIR, binop(Iop_CmpF64, mkexpr(tmp5), mkexpr(tmp6))); + putHI(mkWidenFrom32(Ity_I64, 
mkexpr(ccIR), True)); + /* Map compare result from IR to MIPS + FP cmp result | MIPS | IR + -------------------------- + UN | 0x1 | 0x45 + EQ | 0x2 | 0x40 + GT | 0x4 | 0x00 + LT | 0x8 | 0x01 + */ + + /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */ + assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8, + binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32, + binop(Iop_Shr32, mkexpr(ccIR),mkU8(5))),mkU32(2)), + binop(Iop_And32, binop(Iop_Xor32, mkexpr(ccIR), + binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))), + mkU32(1)))))); + putLO(mkWidenFrom32(Ity_I64, mkexpr(ccMIPS), True)); + + /* UN */ + assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1))); + /* EQ */ + assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS), + mkU8(0x1)), mkU32(0x1))); + /* NGT */ + assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32, + mkexpr(ccMIPS), mkU8(0x2))),mkU32(0x1))); + /* LT */ + assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS), + mkU8(0x3)), mkU32(0x1))); + switch (cond) { + case 0x0: + setFPUCondCode(mkU32(0), fpc_cc); + break; + case 0x1: + DIP("unorderd: %d\n", fpc_cc); + setFPUCondCode(mkexpr(t0), fpc_cc); + break; + case 0x2: + setFPUCondCode(mkexpr(t1), fpc_cc); + break; + case 0x3: + setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)), + fpc_cc); + break; + case 0x4: + setFPUCondCode(mkexpr(t3), fpc_cc); + break; + case 0x5: + setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)), + fpc_cc); + break; + case 0x6: + setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)), + fpc_cc); + break; + case 0x7: + setFPUCondCode(mkexpr(t2), fpc_cc); + break; + case 0x8: + setFPUCondCode(mkU32(0), fpc_cc); + break; + case 0x9: + setFPUCondCode(mkexpr(t0), fpc_cc); + break; + case 0xA: + setFPUCondCode(mkexpr(t1), fpc_cc); + break; + case 0xB: + setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)), + fpc_cc); + break; + case 0xC: + setFPUCondCode(mkexpr(t3), fpc_cc); + break; + case 0xD: + setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)), + fpc_cc); + break; + case 0xE: + setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)), + fpc_cc); + break; + case 0xF: + setFPUCondCode(mkexpr(t2), fpc_cc); + break; + + default: + return False; + } - default: - return False; + } else { + t0 = newTemp(Ity_I32); + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I32); + + assign(ccIR, binop(Iop_CmpF64, unop(Iop_F32toF64, getFReg(fs)), + unop(Iop_F32toF64, getFReg(ft)))); + /* Map compare result from IR to MIPS + FP cmp result | MIPS | IR + -------------------------- + UN | 0x1 | 0x45 + EQ | 0x2 | 0x40 + GT | 0x4 | 0x00 + LT | 0x8 | 0x01 + */ + + /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */ + assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8, + binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32, + binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))), + mkU32(2)), binop(Iop_And32, + binop(Iop_Xor32, mkexpr(ccIR), + binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))), + mkU32(1)))))); + /* UN */ + assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1))); + /* EQ */ + assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS), + mkU8(0x1)), mkU32(0x1))); + /* NGT */ + assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32, + mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1))); + /* LT */ + assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS), + mkU8(0x3)), mkU32(0x1))); + + switch (cond) { + case 0x0: + setFPUCondCode(mkU32(0), fpc_cc); + break; + case 0x1: + DIP("unorderd: %d\n", fpc_cc); + setFPUCondCode(mkexpr(t0), fpc_cc); + break; + case 0x2: + 
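                  /* Worked example of the ccIR -> ccMIPS mapping above:
                     ccIR 0x45 (UN) gives shift 0 -> ccMIPS 0x1,
                     ccIR 0x40 (EQ) gives shift 1 -> ccMIPS 0x2,
                     ccIR 0x00 (GT) gives shift 2 -> ccMIPS 0x4,
                     ccIR 0x01 (LT) gives shift 3 -> ccMIPS 0x8.
                     For C.EQ (cond 0x2) only the EQ bit (t1) is tested;
                     conds 0x8..0xF repeat 0x0..0x7 as the signalling
                     variants. */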
setFPUCondCode(mkexpr(t1), fpc_cc); + break; + case 0x3: + setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)), + fpc_cc); + break; + case 0x4: + setFPUCondCode(mkexpr(t3), fpc_cc); + break; + case 0x5: + setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)), + fpc_cc); + break; + case 0x6: + setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)), + fpc_cc); + break; + case 0x7: + setFPUCondCode(mkexpr(t2), fpc_cc); + break; + case 0x8: + setFPUCondCode(mkU32(0), fpc_cc); + break; + case 0x9: + setFPUCondCode(mkexpr(t0), fpc_cc); + break; + case 0xA: + setFPUCondCode(mkexpr(t1), fpc_cc); + break; + case 0xB: + setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)), + fpc_cc); + break; + case 0xC: + setFPUCondCode(mkexpr(t3), fpc_cc); + break; + case 0xD: + setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)), + fpc_cc); + break; + case 0xE: + setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)), + fpc_cc); + break; + case 0xF: + setFPUCondCode(mkexpr(t2), fpc_cc); + break; + + default: + return False; + } } } break; - case 0x11: //C.cond.D + case 0x11: { /* C.cond.D */ DIP("C.%d.D %d f%d, f%d\n", cond, fpc_cc, fs, ft); t0 = newTemp(Ity_I32); t1 = newTemp(Ity_I32); t2 = newTemp(Ity_I32); t3 = newTemp(Ity_I32); assign(ccIR, binop(Iop_CmpF64, getDReg(fs), getDReg(ft))); - /* Map compare result from IR to MIPS */ - /* + /* Map compare result from IR to MIPS FP cmp result | MIPS | IR -------------------------- UN | 0x1 | 0x45 @@ -1103,22 +1689,25 @@ static Bool dis_instr_CCondFmt(UInt cins) LT | 0x8 | 0x01 */ - // ccMIPS = Shl(1, (~(ccIR>>5) & 2) - // | ((ccIR ^ (ccIR>>6)) & 1) + /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */ assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8, binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))), mkU32(2)), binop(Iop_And32, binop(Iop_Xor32, mkexpr(ccIR), binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))), mkU32(1)))))); - - assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1))); // UN + + /* UN */ + assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1))); + /* EQ */ assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS), - mkU8(0x1)), mkU32(0x1))); // EQ + mkU8(0x1)), mkU32(0x1))); + /* NGT */ assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32, - mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1))); // NGT + mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1))); + /* LT */ assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS), - mkU8(0x3)), mkU32(0x1))); // LT + mkU8(0x3)), mkU32(0x1))); switch (cond) { case 0x0: @@ -1179,10 +1768,11 @@ static Bool dis_instr_CCondFmt(UInt cins) default: return False; } - break; + } + break; - default: - return False; + default: + return False; } } else { return False; @@ -1191,32 +1781,145 @@ static Bool dis_instr_CCondFmt(UInt cins) return True; } -/*------------------------------------------------------------*/ -/*--- Disassemble a single instruction ---*/ -/*------------------------------------------------------------*/ +/*********************************************************/ +/*--- Branch Instructions for mips64 ---*/ +/*********************************************************/ +static Bool dis_instr_branch ( UInt theInstr, DisResult * dres, + Bool(*resteerOkFn) (void *, Addr64), + void *callback_opaque, IRStmt ** set ) +{ + UInt jmpKind = 0; + UChar opc1 = get_opcode(theInstr); + UChar regRs = get_rs(theInstr); + UChar regRt = get_rt(theInstr); + UInt offset = get_imm(theInstr); + Long sOffset = extend_s_16to64(offset); + IRType ty = mode64 ? 
Ity_I64 : Ity_I32; + IROp opSlt = mode64 ? Iop_CmpLT64S : Iop_CmpLT32S; -/* Disassemble a single instruction into IR. The instruction is - located in host memory at guest_instr, and has guest IP of - guest_PC_curr_instr, which will have been set before the call - here. */ + IRTemp tmp = newTemp(ty); + IRTemp tmpRs = newTemp(ty); + IRTemp tmpRt = newTemp(ty); + IRTemp tmpLt = newTemp(ty); + IRTemp tmpReg0 = newTemp(ty); -static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, - Addr64), - Bool resteerCisOk, - void* callback_opaque, - Long delta64, - VexArchInfo* archinfo, - VexAbiInfo* abiinfo, - Bool sigill_diag ) -{ - IRTemp t0, t1, t2, t3, t4, t5, t6, t7, t8; - UInt opcode, cins, rs, rt, rd, sa, ft, fs, fd, fmt, tf, nd, function, - trap_code, imm, instr_index, p, msb, lsb, size, rot, sel; + UChar regLnk = 31; /* reg 31 is link reg in MIPS */ + Addr64 addrTgt = 0; + Addr64 cia = guest_PC_curr_instr; - DisResult dres; + IRExpr *eConst0 = mkSzImm(ty, (UInt) 0); + IRExpr *eNia = mkSzImm(ty, cia + 8); + IRExpr *eCond = NULL; - static IRExpr *lastn = NULL; /* last jump addr */ - static IRStmt *bstmt = NULL; /* branch (Exit) stmt */ + assign(tmpRs, getIReg(regRs)); + assign(tmpRt, getIReg(regRt)); + assign(tmpReg0, getIReg(0)); + + eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpReg0), mkexpr(tmpReg0)); + + switch (opc1) { + case 0x01: + switch (regRt) { + case 0x00: { /* BLTZ rs, offset */ + addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2)); + IRTemp tmpLtRes = newTemp(Ity_I1); + + assign(tmp, eConst0); + assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp))); + assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) : + unop(Iop_1Uto32, mkexpr(tmpLtRes))); + + eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpLt), + mkexpr(tmpReg0)); + + jmpKind = Ijk_Call; + break; + } + + case 0x01: { /* BGEZ rs, offset */ + IRTemp tmpLtRes = newTemp(Ity_I1); + addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2)); + + assign(tmp, eConst0); + assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp))); + assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) : + unop(Iop_1Uto32, mkexpr(tmpLtRes))); + eCond = binop(mkSzOp(ty, Iop_CmpEQ8), mkexpr(tmpLt), + mkexpr(tmpReg0)); + + jmpKind = Ijk_Call; + break; + } + + case 0x11: { /* BGEZAL rs, offset */ + addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2)); + putIReg(regLnk, eNia); + IRTemp tmpLtRes = newTemp(Ity_I1); + + assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), eConst0)); + assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) : + unop(Iop_1Uto32, mkexpr(tmpLtRes))); + + eCond = binop(mkSzOp(ty, Iop_CmpEQ8), mkexpr(tmpLt), + mkexpr(tmpReg0)); + + jmpKind = Ijk_Call; + break; + } + + case 0x10: { /* BLTZAL rs, offset */ + IRTemp tmpLtRes = newTemp(Ity_I1); + IRTemp tmpRes = newTemp(ty); + + addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2)); + putIReg(regLnk, eNia); + + assign(tmp, eConst0); + assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp))); + assign(tmpRes, mode64 ? unop(Iop_1Uto64, + mkexpr(tmpLtRes)) : unop(Iop_1Uto32, mkexpr(tmpLtRes))); + eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpRes), + mkexpr(tmpReg0)); + + jmpKind = Ijk_Call; + break; + } + + } + break; + default: + return False; + } + *set = IRStmt_Exit(eCond, jmpKind, mkSzConst(ty, addrTgt), OFFB_PC); + return True; +} + +/*------------------------------------------------------------*/ +/*--- Disassemble a single instruction ---*/ +/*------------------------------------------------------------*/ + +/* Disassemble a single instruction into IR. 
The instruction is + located in host memory at guest_instr, and has guest IP of + guest_PC_curr_instr, which will have been set before the call + here. */ + +static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, + Addr64), + Bool resteerCisOk, + void* callback_opaque, + Long delta64, + VexArchInfo* archinfo, + VexAbiInfo* abiinfo, + Bool sigill_diag ) +{ + IRTemp t0, t1, t2, t3, t4, t5, t6, t7, t8; + UInt opcode, cins, rs, rt, rd, sa, ft, fs, fd, fmt, tf, nd, function, + trap_code, imm, instr_index, p, msb, lsb, size, rot, sel; + + DisResult dres; + + static IRExpr *lastn = NULL; /* last jump addr */ + static IRStmt *bstmt = NULL; /* branch (Exit) stmt */ /* The running delta */ Int delta = (Int) delta64; @@ -1272,39 +1975,56 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, "srl $0, $0, 13 "srl $0, $0, 29 "srl $0, $0, 3 - "srl $0, $0, 19 */ - UInt word1 = 0x00000342; - UInt word2 = 0x00000742; - UInt word3 = 0x000000C2; - UInt word4 = 0x000004C2; + "srl $0, $0, 19 + + ****mips64**** + dsll $0, $0, 3 + dsll $0, $0, 13 + dsll $0, $0, 29 + dsll $0, $0, 19 */ + + UInt word1 = mode64 ? 0xF8 : 0x342; + UInt word2 = mode64 ? 0x378 : 0x742; + UInt word3 = mode64 ? 0x778 : 0xC2; + UInt word4 = mode64 ? 0x4F8 : 0x4C2; if (getUInt(code + 0) == word1 && getUInt(code + 4) == word2 && getUInt(code + 8) == word3 && getUInt(code + 12) == word4) { - /* Got a "Special" instruction preamble. Which one is it? */ - if (getUInt(code + 16) == 0x01ad6825 /* or t5, t5, t5 */ ) { - /* v0 = client_request ( t9 ) */ - DIP("v0 = client_request ( t9 )\n"); - putPC(mkU32(guest_PC_curr_instr + 20)); + /* Got a "Special" instruction preamble. Which one is it? */ + if (getUInt(code + 16) == 0x01ad6825 /* or $13, $13, $13 */ ) { + /* $11 = client_request ( $12 ) */ + DIP("$11 = client_request ( $12 )\n"); + if (mode64) + putPC(mkU64(guest_PC_curr_instr + 20)); + else + putPC(mkU32(guest_PC_curr_instr + 20)); dres.jk_StopHere = Ijk_ClientReq; dres.whatNext = Dis_StopHere; goto decode_success; - } else if (getUInt(code + 16) == 0x01ce7025 /* or t6,t6,t6 */ ) { - /* t9 = guest_NRADDR */ - DIP("t9 = guest_NRADDR\n"); + } else if (getUInt(code + 16) == 0x01ce7025 /* or $14, $14, $14 */ ) { + /* $11 = guest_NRADDR */ + DIP("$11 = guest_NRADDR\n"); dres.len = 20; delta += 20; - putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS32State, guest_NRADDR), - Ity_I32)); + if (mode64) + putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS64State, + guest_NRADDR), Ity_I64)); + else + putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS32State, + guest_NRADDR), Ity_I32)); goto decode_success; - } else if (getUInt(code + 16) == 0x01ef7825/* or t7,t7,t7 */ ) { - /* branch-and-link-to-noredir t9 */ - DIP("branch-and-link-to-noredir t9\n"); - putIReg(31, mkU32(guest_PC_curr_instr + 20)); + } else if (getUInt(code + 16) == 0x01ef7825 /* or $15, $15, $15 */ ) { + /* branch-and-link-to-noredir $25 */ + DIP("branch-and-link-to-noredir $25\n"); + if (mode64) + putIReg(31, mkU64(guest_PC_curr_instr + 20)); + else + putIReg(31, mkU32(guest_PC_curr_instr + 20)); putPC(getIReg(25)); dres.jk_StopHere = Ijk_NoRedir; dres.whatNext = Dis_StopHere; goto decode_success; - } else if (getUInt(code + 16) == 0x016b5825/* or t3,t3,t3 */ ) { + } else if (getUInt(code + 16) == 0x016b5825 /* or $11,$11,$11 */ ) { /* IR injection */ DIP("IR injection\n"); #if defined (_MIPSEL) @@ -1312,12 +2032,21 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, #elif defined (_MIPSEB) vex_inject_ir(irsb, Iend_BE); #endif - 
stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_TISTART), - mkU32(guest_PC_curr_instr))); - stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_TILEN), - mkU32(20))); + if (mode64) { + stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_TISTART), + mkU64(guest_PC_curr_instr))); + stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_TILEN), + mkU64(20))); + + putPC(mkU64(guest_PC_curr_instr + 20)); + } else { + stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_TISTART), + mkU32(guest_PC_curr_instr))); + stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_TILEN), + mkU32(20))); - putPC(mkU32(guest_PC_curr_instr + 20)); + putPC(mkU32(guest_PC_curr_instr + 20)); + } dres.whatNext = Dis_StopHere; dres.jk_StopHere = Ijk_TInval; dres.len = 20; @@ -1357,17 +2086,28 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, case 0x03: /* JAL */ DIP("jal 0x%x", instr_index); - putIReg(31, mkU32(guest_PC_curr_instr + 8)); - t0 = newTemp(ty); - assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) | - (instr_index << 2))); + if (mode64) { + putIReg(31, mkU64(guest_PC_curr_instr + 8)); + t0 = newTemp(ty); + assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000) | + (instr_index << 2))); + } else { + putIReg(31, mkU32(guest_PC_curr_instr + 8)); + t0 = newTemp(ty); + assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) | + (instr_index << 2))); + } lastn = mkexpr(t0); break; case 0x02: /* J */ DIP("j 0x%x", instr_index); t0 = newTemp(ty); - assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) | - (instr_index << 2))); + if (mode64) + assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000) | + (instr_index << 2))); + else + assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) | + (instr_index << 2))); lastn = mkexpr(t0); break; @@ -1376,10 +2116,10 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, UInt bc1_cc = get_bc1_cc(cins); if (0x08 == fmt) { switch (fmt) { - case 0x08: //BC + case 0x08: /* BC */ { DIP("tf: %d, nd: %d\n", tf, nd); - //FcConditionalCode(bc1_cc) + /* FcConditionalCode(bc1_cc) */ t1 = newTemp(Ity_I1); t2 = newTemp(Ity_I32); t3 = newTemp(Ity_I1); @@ -1397,13 +2137,13 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, )); if (tf == 1 && nd == 0) { - //branch on true + /* branch on true */ DIP("bc1t %d, %d", bc1_cc, imm); assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2))); dis_branch(False, mkexpr(t3), imm, &bstmt); break; } else if (tf == 0 && nd == 0) { - //branch on false + /* branch on false */ DIP("bc1f %d, %d", bc1_cc, imm); assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2))); dis_branch(False, mkexpr(t3), imm, &bstmt); @@ -1411,7 +2151,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } else if (nd == 1 && tf == 0) { DIP("bc1fl %d, %d", bc1_cc, imm); lastn = dis_branch_likely(binop(Iop_CmpNE32, mkexpr(t2), - mode64 ? 
mkU64(0x0) : mkU32(0x0)), imm); + mkU32(0x0)), imm); break; } else if (nd == 1 && tf == 1) { DIP("bc1tl %d, %d", bc1_cc, imm); @@ -1428,17 +2168,17 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } else { switch (function) { - case 0x4: //SQRT.fmt + case 0x4: /* SQRT.fmt */ { switch (fmt) { - case 0x10: //S + case 0x10: /* S */ { IRExpr *rm = get_IR_roundingmode(); putFReg(fd, mkWidenFromF32(tyF, binop(Iop_SqrtF32, rm, getLoFromF64(tyF, getFReg(fs))))); } break; - case 0x11: //D + case 0x11: /* D */ { IRExpr *rm = get_IR_roundingmode(); putDReg(fd, binop(Iop_SqrtF64, rm, getDReg(fs))); @@ -1447,25 +2187,25 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } } break; - case 0x5: //abs.fmt + case 0x5: /* abs.fmt */ switch (fmt) { - case 0x10: //S + case 0x10: /* S */ DIP("abs.s f%d, f%d\n", fd, fs); putFReg(fd, mkWidenFromF32(tyF, unop(Iop_AbsF32, getLoFromF64(tyF, getFReg(fs))))); break; - case 0x11: //D + case 0x11: /* D */ DIP("abs.d f%d, f%d\n", fd, fs); putDReg(fd, unop(Iop_AbsF64, getDReg(fs))); break; default: goto decode_failure; } - break; //case 0x5 + break; /* case 0x5 */ - case 0x02: // MUL.fmt + case 0x02: /* MUL.fmt */ switch (fmt) { - case 0x11: // D + case 0x11: /* D */ { DIP("mul.d f%d, f%d, f%d", fd, fs, ft); IRExpr *rm = get_IR_roundingmode(); @@ -1473,7 +2213,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, getDReg(ft))); break; } - case 0x10: // S + case 0x10: /* S */ { DIP("mul.s f%d, f%d, f%d", fd, fs, ft); IRExpr *rm = get_IR_roundingmode(); @@ -1485,11 +2225,11 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, default: goto decode_failure; } - break; // MUL.fmt + break; /* MUL.fmt */ - case 0x03: // DIV.fmt + case 0x03: /* DIV.fmt */ switch (fmt) { - case 0x11: // D + case 0x11: /* D */ { DIP("div.d f%d, f%d, f%d", fd, fs, ft); IRExpr *rm = get_IR_roundingmode(); @@ -1497,7 +2237,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, getDReg(ft))); break; } - case 0x10: // S + case 0x10: /* S */ { DIP("div.s f%d, f%d, f%d", fd, fs, ft); IRExpr *rm = get_IR_roundingmode(); @@ -1509,18 +2249,19 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, default: goto decode_failure; } - break; // DIV.fmt + break; /* DIV.fmt */ - case 0x01: // SUB.fmt + case 0x01: /* SUB.fmt */ switch (fmt) { - case 0x11: // D + case 0x11: /* D */ { DIP("sub.d f%d, f%d, f%d", fd, fs, ft); IRExpr *rm = get_IR_roundingmode(); - putDReg(fd, triop(Iop_SubF64, rm, getDReg(fs), getDReg(ft))); + putDReg(fd, triop(Iop_SubF64, rm, getDReg(fs), + getDReg(ft))); break; } - case 0x10: // S + case 0x10: /* S */ { DIP("sub.s f%d, f%d, f%d", fd, fs, ft); IRExpr *rm = get_IR_roundingmode(); @@ -1532,45 +2273,89 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, default: goto decode_failure; } - break; // SUB.fmt + break; /* SUB.fmt */ - case 0x06: // MOV.fmt + case 0x06: /* MOV.fmt */ switch (fmt) { - case 0x11: // D - /* TODO: Check this for 64 bit FPU registers. 
*/ - DIP("mov.d f%d, f%d", fd, fs); - putFReg(fd, getFReg(fs)); - putFReg(fd + 1, getFReg(fs + 1)); - break; - case 0x10: // S - DIP("mov.s f%d, f%d", fd, fs); - putFReg(fd, getFReg(fs)); - break; - default: - goto decode_failure; + case 0x11: /* D */ + DIP("mov.d f%d, f%d", fd, fs); + if (mode64) { + putFReg(fd, getFReg(fs)); + } else { + putFReg(fd, getFReg(fs)); + putFReg(fd + 1, getFReg(fs + 1)); + } + break; + case 0x10: /* S */ + DIP("mov.s f%d, f%d", fd, fs); + putFReg(fd, getFReg(fs)); + break; + default: + goto decode_failure; } - break; // MOV.fmt + break; /* MOV.fmt */ - case 0x7: //neg.fmt + case 0x7: /* neg.fmt */ switch (fmt) { - case 0x10: //S - DIP("neg.s f%d, f%d", fd, fs); - putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, - getLoFromF64(tyF, getFReg(fs))))); - break; - case 0x11: //D - DIP("neg.d f%d, f%d", fd, fs); - putDReg(fd, unop(Iop_NegF64, getDReg(fs))); + case 0x10: /* S */ + DIP("neg.s f%d, f%d", fd, fs); + putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, + getLoFromF64(tyF, getFReg(fs))))); + break; + case 0x11: /* D */ + DIP("neg.d f%d, f%d", fd, fs); + putDReg(fd, unop(Iop_NegF64, getDReg(fs))); + break; + default: + goto decode_failure; + } + break; /* case 0x7 */ + + case 0x08: /* ROUND.L.fmt */ + switch (fmt) { + case 0x10: /* S */ + DIP("round.l.s f%d, f%d\n", fd, fs); + t0 = newTemp(Ity_I64); + + assign(t0, binop(Iop_F32toI64S, mkU32(0x0), + getLoFromF64(Ity_F64, getFReg(fs)))); + + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0))); break; - default: - goto decode_failure; + case 0x11: /* D */ + DIP("round.l.d f%d, f%d\n", fd, fs); + putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x0), + getFReg(fs))); + break; + default: + goto decode_failure; + } - break; //case 0x7 + break; /* ROUND.L.fmt */ + + case 0x09: /* TRUNC.L.fmt */ + switch (fmt) { + case 0x10: /* S */ + DIP("trunc.l.s f%d, f%d\n", fd, fs); + t0 = newTemp(Ity_I64); + assign(t0, binop(Iop_F32toI64S, mkU32(0x3), + getLoFromF64(Ity_F64, getFReg(fs)))); + + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0))); + break; + case 0x11: /* D */ + DIP("trunc.l.d f%d, f%d\n", fd, fs); + putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x3), + getFReg(fs))); + break; + default: + goto decode_failure; + } + break; /* TRUNC.L.fmt */ - case 0x15: //RECIP.fmt + case 0x15: /* RECIP.fmt */ switch (fmt) { - case 0x10: - { //S + case 0x10: { /* S */ DIP("recip.s f%d, f%d\n", fd, fs); IRExpr *rm = get_IR_roundingmode(); putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32, @@ -1579,8 +2364,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, getFReg(fs))))); break; } - case 0x11: - { //D + case 0x11: { /* D */ DIP("recip.d f%d, f%d\n", fd, fs); #if defined (_MIPSEL) IRExpr *rm = get_IR_roundingmode(); @@ -1599,80 +2383,119 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, goto decode_failure; } - break; //case 0x15 + break; /* case 0x15 */ - case 0x13: //MOVN.fmt + case 0x13: /* MOVN.fmt */ switch (fmt) { - case 0x10: // S + case 0x10: /* S */ DIP("movn.s f%d, f%d, r%d", fd, fs, rt); t1 = newTemp(Ity_F64); t2 = newTemp(Ity_F64); t3 = newTemp(Ity_I1); t4 = newTemp(Ity_F64); - - assign(t1, unop(Iop_F32toF64, getFReg(fs))); - assign(t2, unop(Iop_F32toF64, getFReg(fd))); - assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt))); + if (mode64) { + assign(t1, getFReg(fs)); + assign(t2, getFReg(fd)); + assign(t3, binop(Iop_CmpNE64, mkU64(0), getIReg(rt))); + } else { + assign(t1, unop(Iop_F32toF64, getFReg(fs))); + assign(t2, unop(Iop_F32toF64, getFReg(fd))); + assign(t3, 
binop(Iop_CmpNE32, mkU32(0), getIReg(rt))); + } assign(t4, IRExpr_ITE(mkexpr(t3), mkexpr(t1), mkexpr(t2))); - - putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), - mkexpr(t4))); + if (mode64) { + IRTemp f = newTemp(Ity_F64); + IRTemp fd_hi = newTemp(Ity_I32); + t5 = newTemp(Ity_I64); + assign(f, getFReg(fd)); + assign(fd_hi, unop(Iop_64HIto32, unop(Iop_ReinterpF64asI64, + mkexpr(f)))); + + assign(t5, mkWidenFrom32(ty, unop(Iop_64to32, + unop(Iop_ReinterpF64asI64, mkexpr(t4))), True)); + + putFReg(fd, unop (Iop_ReinterpI64asF64, mkexpr(t5))); + } else + putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), + mkexpr(t4))); break; - case 0x11: // D + case 0x11: /* D */ DIP("movn.d f%d, f%d, r%d", fd, fs, rt); t3 = newTemp(Ity_I1); t4 = newTemp(Ity_F64); - assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt))); + if (mode64) + assign(t3, binop(Iop_CmpNE64, mkU64(0), getIReg(rt))); + else + assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt))); + putDReg(fd, IRExpr_ITE(mkexpr(t3), getDReg(fs), getDReg(fd))); break; default: goto decode_failure; } - break; // MOVN.fmt + break; /* MOVN.fmt */ - case 0x12: //MOVZ.fmt + case 0x12: /* MOVZ.fmt */ switch (fmt) { - case 0x10: // S + case 0x10: /* S */ DIP("movz.s f%d, f%d, r%d", fd, fs, rt); t1 = newTemp(Ity_F64); t2 = newTemp(Ity_F64); t3 = newTemp(Ity_I1); t4 = newTemp(Ity_F64); - - assign(t1, unop(Iop_F32toF64, getFReg(fs))); - assign(t2, unop(Iop_F32toF64, getFReg(fd))); - assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt))); + if (mode64) { + assign(t1, getFReg(fs)); + assign(t2, getFReg(fd)); + assign(t3, binop(Iop_CmpEQ64, mkU64(0), getIReg(rt))); + } else { + assign(t1, unop(Iop_F32toF64, getFReg(fs))); + assign(t2, unop(Iop_F32toF64, getFReg(fd))); + assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt))); + } assign(t4, IRExpr_ITE(mkexpr(t3), mkexpr(t1), mkexpr(t2))); - putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), - mkexpr(t4))); + if (mode64) { + IRTemp f = newTemp(Ity_F64); + IRTemp fd_hi = newTemp(Ity_I32); + t7 = newTemp(Ity_I64); + assign(f, getFReg(fd)); + assign(fd_hi, unop(Iop_64HIto32, + unop(Iop_ReinterpF64asI64, mkexpr(f)))); + assign(t7, mkWidenFrom32(ty, unop(Iop_64to32, + unop(Iop_ReinterpF64asI64, mkexpr(t4))), True)); + + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7))); + } else + putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), + mkexpr(t4))); break; - case 0x11: // D + case 0x11: /* D */ DIP("movz.d f%d, f%d, r%d", fd, fs, rt); - t3 = newTemp(Ity_I1); t4 = newTemp(Ity_F64); + if (mode64) + assign(t3, binop(Iop_CmpEQ64, mkU64(0), getIReg(rt))); + else + assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt))); - assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt))); putDReg(fd, IRExpr_ITE(mkexpr(t3), getDReg(fs), getDReg(fd))); break; default: goto decode_failure; } - break; // MOVZ.fmt + break; /* MOVZ.fmt */ - case 0x11: // MOVT.fmt + case 0x11: /* MOVT.fmt */ if (tf == 1) { UInt mov_cc = get_mov_cc(cins); - switch (fmt) // MOVCF = 010001 - { - case 0x11: // D + switch (fmt) { /* MOVCF = 010001 */ + case 0x11: /* D */ DIP("movt.d f%d, f%d, %d", fd, fs, mov_cc); t1 = newTemp(Ity_I1); t2 = newTemp(Ity_I32); @@ -1693,10 +2516,10 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2))); assign(t4, IRExpr_ITE(mkexpr(t3), - getDReg(fd), getDReg(fs))); + getDReg(fs), getDReg(fd))); putDReg(fd, mkexpr(t4)); break; - case 0x10: // S + case 0x10: /* S */ DIP("movt.s f%d, f%d, %d", fd, fs, mov_cc); t1 = newTemp(Ity_I1); t2 = 
newTemp(Ity_I32); @@ -1706,8 +2529,13 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, t6 = newTemp(Ity_F64); t7 = newTemp(Ity_I64); - assign(t5, unop(Iop_F32toF64, getFReg(fs))); - assign(t6, unop(Iop_F32toF64, getFReg(fd))); + if (mode64) { + assign(t5, getFReg(fs)); + assign(t6, getFReg(fd)); + } else { + assign(t5, unop(Iop_F32toF64, getFReg(fs))); + assign(t6, unop(Iop_F32toF64, getFReg(fd))); + } assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc))); assign(t2, IRExpr_ITE(mkexpr(t1), @@ -1723,20 +2551,32 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2))); assign(t4, IRExpr_ITE(mkexpr(t3), - mkexpr(t6), mkexpr(t5))); - - putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), - mkexpr(t4))); + mkexpr(t5), mkexpr(t6))); + + if (mode64) { + IRTemp f = newTemp(Ity_F64); + IRTemp fd_hi = newTemp(Ity_I32); + assign(f, getFReg(fd)); + assign(fd_hi, unop(Iop_64HIto32, + unop(Iop_ReinterpF64asI64, mkexpr(f)))); + assign(t7, mkWidenFrom32(ty, unop(Iop_64to32, + unop(Iop_ReinterpF64asI64, mkexpr(t4))), + True)); + + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7))); + } else + putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), + mkexpr(t4))); break; default: goto decode_failure; } - } else if (tf == 0) //movf.fmt + } else if (tf == 0) /* movf.fmt */ { UInt mov_cc = get_mov_cc(cins); - switch (fmt) // MOVCF = 010001 + switch (fmt) /* MOVCF = 010001 */ { - case 0x11: // D + case 0x11: /* D */ DIP("movf.d f%d, f%d, %d", fd, fs, mov_cc); t1 = newTemp(Ity_I1); t2 = newTemp(Ity_I32); @@ -1755,53 +2595,70 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, mkU32(0x1)) )); - assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2))); + assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2))); assign(t4, IRExpr_ITE(mkexpr(t3), getDReg(fs), getDReg(fd))); putDReg(fd, mkexpr(t4)); break; - case 0x10: // S + case 0x10: /* S */ DIP("movf.s f%d, f%d, %d", fd, fs, mov_cc); - { - t1 = newTemp(Ity_I1); - t2 = newTemp(Ity_I32); - t3 = newTemp(Ity_I1); - t4 = newTemp(Ity_F64); - t5 = newTemp(Ity_F64); - t6 = newTemp(Ity_F64); + t1 = newTemp(Ity_I1); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I1); + t4 = newTemp(Ity_F64); + t5 = newTemp(Ity_F64); + t6 = newTemp(Ity_F64); + if (mode64) { + assign(t5, getFReg(fs)); + assign(t6, getFReg(fd)); + } else { assign(t5, unop(Iop_F32toF64, getFReg(fs))); assign(t6, unop(Iop_F32toF64, getFReg(fd))); + } + + assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc))); + assign(t2, IRExpr_ITE(mkexpr(t1), + binop(Iop_And32, + binop(Iop_Shr32, getFCSR(), + mkU8(23)), + mkU32(0x1)), + binop(Iop_And32, + binop(Iop_Shr32, getFCSR(), + mkU8(24 + mov_cc)), + mkU32(0x1)) + )); - assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc))); - assign(t2, IRExpr_ITE(mkexpr(t1), - binop(Iop_And32, - binop(Iop_Shr32, getFCSR(), - mkU8(23)), - mkU32(0x1)), - binop(Iop_And32, - binop(Iop_Shr32, getFCSR(), - mkU8(24 + mov_cc)), - mkU32(0x1)) - )); - - assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2))); - assign(t4, IRExpr_ITE(mkexpr(t3), - mkexpr(t5), mkexpr(t6))); + assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2))); + assign(t4, IRExpr_ITE(mkexpr(t3), + mkexpr(t5), mkexpr(t6))); + + if (mode64) { + IRTemp f = newTemp(Ity_F64); + IRTemp fd_hi = newTemp(Ity_I32); + t7 = newTemp(Ity_I64); + assign(f, getFReg(fd)); + assign(fd_hi, unop(Iop_64HIto32, + unop(Iop_ReinterpF64asI64, mkexpr(f)))); + assign(t7, mkWidenFrom32(ty, unop(Iop_64to32, + 
unop(Iop_ReinterpF64asI64, mkexpr(t4))), + True)); + + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7))); + } else putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), mkexpr(t4))); - } break; default: goto decode_failure; } } - break; // MOVT.fmt + break; /* MOVT.fmt */ - case 0x0: //add.fmt + case 0x0: /* add.fmt */ switch (fmt) { - case 0x10: //S + case 0x10: /* S */ { DIP("add.s f%d, f%d, f%d\n", fd, fs, ft); IRExpr *rm = get_IR_roundingmode(); @@ -1810,26 +2667,51 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, getLoFromF64(tyF, getFReg(ft))))); break; } - case 0x11: //D - { - DIP("add.d f%d, f%d, f%d\n", fd, fs, ft); - IRExpr *rm = get_IR_roundingmode(); - putDReg(fd, triop(Iop_AddF64, rm, getDReg(fs), - getDReg(ft))); - break; - } + case 0x11: { /* D */ + DIP("add.d f%d, f%d, f%d\n", fd, fs, ft); + IRExpr *rm = get_IR_roundingmode(); + putDReg(fd, triop(Iop_AddF64, rm, getDReg(fs), getDReg(ft))); + break; + } - case 0x4: //MTC1 (Move Word to Floating Point) + case 0x4: /* MTC1 (Move Word to Floating Point) */ DIP("mtc1 r%d, f%d", rt, fs); - putFReg(fs, unop(Iop_ReinterpI32asF32, getIReg(rt))); + if (mode64) { + t0 = newTemp(Ity_I32); + t1 = newTemp(Ity_F32); + assign(t0, unop(Iop_64to32, getIReg(rt))); + assign(t1, unop(Iop_ReinterpI32asF32, mkexpr(t0))); + + putFReg(fs, mkWidenFromF32(tyF, mkexpr(t1))); + } else + putFReg(fs, unop(Iop_ReinterpI32asF32, getIReg(rt))); + break; + + case 0x5: /* Doubleword Move to Floating Point DMTC1; MIPS64 */ + DIP("dmtc1 r%d, f%d", rt, fs); + vassert(mode64); + putFReg(fs, unop(Iop_ReinterpI64asF64, getIReg(rt))); break; - case 0x0: //MFC1 + case 0x0: /* MFC1 */ DIP("mfc1 r%d, f%d", rt, fs); - putIReg(rt, unop(Iop_ReinterpF32asI32, getFReg(fs))); + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); + assign(t1, unop(Iop_64to32, mkexpr(t0))); + putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True)); + } else + putIReg(rt, unop(Iop_ReinterpF32asI32, getFReg(fs))); break; - case 0x6: //CTC1 + case 0x1: /* Doubleword Move from Floating Point DMFC1; + MIPS64 */ + DIP("dmfc1 r%d, f%d", rt, fs); + putIReg(rt, unop(Iop_ReinterpF64asI64, getFReg(fs))); + break; + + case 0x6: /* CTC1 */ DIP("ctc1 r%d, f%d", rt, fs); t0 = newTemp(Ity_I32); t1 = newTemp(Ity_I32); @@ -1839,7 +2721,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, t5 = newTemp(Ity_I32); t6 = newTemp(Ity_I32); assign(t0, mkNarrowTo32(ty, getIReg(rt))); - if (fs == 25) { //FCCR + if (fs == 25) { /* FCCR */ assign(t1, binop(Iop_Shl32, binop(Iop_And32, mkexpr(t0), mkU32(0x000000FE)), mkU8(24))); assign(t2, binop(Iop_And32, mkexpr(t0), @@ -1851,7 +2733,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, putFCSR(binop(Iop_Or32, binop(Iop_Or32, mkexpr(t1), mkexpr(t2)), binop(Iop_Or32, mkexpr(t3), mkexpr(t4)))); - } else if (fs == 26) { //FEXR + } else if (fs == 26) { /* FEXR */ assign(t1, binop(Iop_And32, getFCSR(), mkU32(0xFFFC0000))); assign(t2, binop(Iop_And32, mkexpr(t0), mkU32(0x0003F000))); @@ -1880,7 +2762,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, putFCSR(mkexpr(t0)); } break; - case 0x2: //CFC1 + case 0x2: /* CFC1 */ DIP("cfc1 r%d, f%d", rt, fs); t0 = newTemp(Ity_I32); t1 = newTemp(Ity_I32); @@ -1930,61 +2812,141 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, default: goto decode_failure; } - break; //case 0x0: //add.fmt + break; - case 0x21: //CVT.D + case 0x21: /* 
CVT.D */ switch (fmt) { - case 0x10: //S - DIP("cvt.d.s f%d, f%d", fd, fs); - putDReg(fd, unop(Iop_F32toF64, getFReg(fs))); - break; + case 0x10: /* S */ + DIP("cvt.d.s f%d, f%d", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + t3 = newTemp(Ity_F32); + t4 = newTemp(Ity_F32); + /* get lo half of FPR */ + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); + + assign(t1, unop(Iop_64to32, mkexpr(t0))); + + assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1))); + + putFReg(fd, unop(Iop_F32toF64, mkexpr(t3))); + break; + } else { + putDReg(fd, unop(Iop_F32toF64, getFReg(fs))); + break; + } - case 0x14: - { //W + case 0x14: DIP("cvt.d.w %d, %d\n", fd, fs); - t0 = newTemp(Ity_I32); - assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs))); - putDReg(fd, unop(Iop_I32StoF64, mkexpr(t0))); - } - break; + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + t3 = newTemp(Ity_F32); + t4 = newTemp(Ity_F32); + /* get lo half of FPR */ + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); + + assign(t1, unop(Iop_64to32, mkexpr(t0))); + putDReg(fd,unop(Iop_I32StoF64, mkexpr(t1))); + break; + } else { + t0 = newTemp(Ity_I32); + assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs))); + putDReg(fd, unop(Iop_I32StoF64, mkexpr(t0))); + break; + } - default: - goto decode_failure; + case 0x15: { /* L */ + if (mode64) { + DIP("cvt.d.l %d, %d\n", fd, fs); + t0 = newTemp(Ity_I64); + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); + + putFReg(fd, binop(Iop_I64StoF64, + get_IR_roundingmode(), mkexpr(t0))); + break; + } else + goto decode_failure; + } + default: + goto decode_failure; } - break; //CVT.D + break; /* CVT.D */ - case 0x20: //cvt.s + case 0x20: /* cvt.s */ switch (fmt) { - case 0x14: //W - DIP("cvt.s.w %d, %d\n", fd, fs); - t0 = newTemp(Ity_I32); - assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs))); - putFReg(fd, binop(Iop_I32StoF32, get_IR_roundingmode(), - mkexpr(t0))); - break; + case 0x14: /* W */ + DIP("cvt.s.w %d, %d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + t3 = newTemp(Ity_F32); + t4 = newTemp(Ity_F32); + /* get lo half of FPR */ + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); + + assign(t1, unop(Iop_64to32, mkexpr(t0))); + putFReg(fd, mkWidenFromF32(tyF, binop(Iop_I32StoF32, + get_IR_roundingmode(), mkexpr(t1)))); + break; + } else { + t0 = newTemp(Ity_I32); + assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs))); + putFReg(fd, binop(Iop_I32StoF32, get_IR_roundingmode(), + mkexpr(t0))); + break; + } + + case 0x11: /* D */ + DIP("cvt.s.d %d, %d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_F32); + assign(t0, binop(Iop_F64toF32, get_IR_roundingmode(), + getFReg(fs))); + putFReg(fd, mkWidenFromF32(tyF, mkexpr(t0))); + } else + putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), + getDReg(fs))); + break; - case 0x11: //D - DIP("cvt.s.d %d, %d\n", fd, fs); - putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(), - getDReg(fs))); - break; + case 0x15: /* L */ + DIP("cvt.s.l %d, %d\n", fd, fs); + t0 = newTemp(Ity_I64); + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); - default: - goto decode_failure; + putFReg(fd, mkWidenFromF32(tyF, binop(Iop_I64StoF32, + get_IR_roundingmode(), mkexpr(t0)))); + break; + + default: + goto decode_failure; } - break; //cvt.s + break; /* cvt.s */ - case 0x24: //cvt.w + case 0x24: /* cvt.w */ switch (fmt) { - case 0x10: //S + case 0x10: /* S */ DIP("cvt.w.s %d, %d\n", fd, fs); - putFReg(fd, binop(Iop_RoundF32toInt, get_IR_roundingmode(), - getFReg(fs))); + if (mode64) { + 
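                  /* In 64-bit mode the single-precision operand sits in
                     the lower 32 bits of the 64-bit FPR, so round the
                     low half and widen the 32-bit result back into the
                     register. */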
putFReg(fd, mkWidenFromF32(tyF, binop(Iop_RoundF32toInt, + get_IR_roundingmode(), getLoFromF64(tyF, + getFReg(fs))))); + } else + putFReg(fd, binop(Iop_RoundF32toInt, get_IR_roundingmode(), + getFReg(fs))); break; case 0x11: - { //D - DIP("cvt.w.d %d, %d\n", fd, fs); + DIP("cvt.w.d %d, %d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I32); + t1 = newTemp(Ity_F32); + assign(t0, binop(Iop_F64toI32S, get_IR_roundingmode(), + getFReg(fs))); + assign(t1, unop(Iop_ReinterpI32asF32, mkexpr(t0))); + putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1))); + } else { t0 = newTemp(Ity_I32); assign(t0, binop(Iop_F64toI32S, get_IR_roundingmode(), @@ -2000,133 +2962,280 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } break; - case 0x09: //TRUNC.L + case 0x25: /* cvt.l */ switch (fmt) { - case 0x10: //S - DIP("trunc.l.s %d, %d\n", fd, fs); - goto decode_failure; + case 0x10: /* S */ + DIP("cvt.l.s %d, %d\n", fd, fs); + t0 = newTemp(Ity_I64); - case 0x11: //D - DIP("trunc.l.d %d, %d\n", fd, fs); - goto decode_failure; + assign(t0, binop(Iop_F32toI64S, get_IR_roundingmode(), + getLoFromF64(Ity_F64, getFReg(fs)))); - default: - goto decode_failure; + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0))); + break; + + case 0x11: { /* D */ + DIP("cvt.l.d %d, %d\n", fd, fs); + putFReg(fd, binop(Iop_RoundF64toInt, + get_IR_roundingmode(), getFReg(fs))); + break; + } + default: + goto decode_failure; } - break; //trunc.l + break; - case 0x0C: //ROUND.W.fmt + case 0x0B: /* FLOOR.L.fmt */ switch (fmt) { - case 0x10: //S - DIP("round.w.s f%d, f%d\n", fd, fs); - putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x0), - getFReg(fs))); - break; + case 0x10: /* S */ + DIP("floor.l.s %d, %d\n", fd, fs); + t0 = newTemp(Ity_I64); - case 0x11: //D - DIP("round.w.d f%d, f%d\n", fd, fs); - t0 = newTemp(Ity_I32); - - assign(t0, binop(Iop_F64toI32S, mkU32(0x0), getDReg(fs))); - - putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0))); - break; + assign(t0, binop(Iop_F32toI64S, mkU32(0x1), + getLoFromF64(Ity_F64, getFReg(fs)))); - default: - goto decode_failure; + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0))); + break; + case 0x11: /* D */ + DIP("floor.l.d %d, %d\n", fd, fs); + putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x1), + getFReg(fs))); + break; + default: + goto decode_failure; } - break; //ROUND.W.fmt + break; - case 0x0F: //FLOOR.W.fmt + case 0x0C: /* ROUND.W.fmt */ switch (fmt) { - case 0x10: //S - DIP("floor.w.s f%d, f%d\n", fd, fs); - putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x1), - getFReg(fs))); - break; + case 0x10: /* S */ + DIP("round.w.s f%d, f%d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + t3 = newTemp(Ity_F32); + t4 = newTemp(Ity_F32); + /* get lo half of FPR */ + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); + + assign(t1, unop(Iop_64to32, mkexpr(t0))); + + assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1))); + + assign(t4, binop(Iop_RoundF32toInt, mkU32(0x0), + mkexpr(t3))); + + putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4))); + break; + } else { + putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x0), + getFReg(fs))); + break; + } - case 0x11: //D - DIP("floor.w.d f%d, f%d\n", fd, fs); - t0 = newTemp(Ity_I32); + case 0x11: /* D */ + DIP("round.w.d f%d, f%d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_F64toI32S, mkU32(0x0), + getDReg(fs))); + putFReg(fd, mkWidenFromF32(tyF, + unop(Iop_ReinterpI32asF32, mkexpr(t0)))); + break; + } else { + t0 = newTemp(Ity_I32); + + assign(t0, binop(Iop_F64toI32S, mkU32(0x0), + 
getDReg(fs))); + + putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0))); + break; + } + default: + goto decode_failure; - assign(t0, binop(Iop_F64toI32S, mkU32(0x1), getDReg(fs))); + } + break; /* ROUND.W.fmt */ - putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0))); - break; + case 0x0F: /* FLOOR.W.fmt */ + switch (fmt) { + case 0x10: /* S */ + DIP("floor.w.s f%d, f%d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + t3 = newTemp(Ity_F32); + t4 = newTemp(Ity_F32); + /* get lo half of FPR */ + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); + + assign(t1, unop(Iop_64to32, mkexpr(t0))); + + assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1))); + + assign(t4, binop(Iop_RoundF32toInt, mkU32(0x1), + mkexpr(t3))); + + putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4))); + break; + } else { + putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x1), + getFReg(fs))); + break; + } - default: - goto decode_failure; + case 0x11: /* D */ + DIP("floor.w.d f%d, f%d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_F64toI32S, mkU32(0x1), + getDReg(fs))); + putFReg(fd, mkWidenFromF32(tyF, + unop(Iop_ReinterpI32asF32, mkexpr(t0)))); + break; + } else { + t0 = newTemp(Ity_I32); + + assign(t0, binop(Iop_F64toI32S, mkU32(0x1), + getDReg(fs))); + + putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0))); + break; + } + default: + goto decode_failure; } - break; //FLOOR.W.fmt + break; /* FLOOR.W.fmt */ - case 0x0D: //TRUNC.W + case 0x0D: /* TRUNC.W */ switch (fmt) { - case 0x10: //S - DIP("trunc.w.s %d, %d\n", fd, fs); - putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x3), - getFReg(fs))); - break; + case 0x10: /* S */ + DIP("trunc.w.s %d, %d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + t3 = newTemp(Ity_F32); + t4 = newTemp(Ity_F32); + /* get lo half of FPR */ + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs))); + + assign(t1, unop(Iop_64to32, mkexpr(t0))); + + assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1))); + + assign(t4, binop(Iop_RoundF32toInt, mkU32(0x3), + mkexpr(t3))); + + putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4))); + break; + } else { + putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x3), + getFReg(fs))); + break; + } + case 0x11: /* D */ + DIP("trunc.w.d %d, %d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I32); - case 0x11: //D - DIP("trunc.w.d %d, %d\n", fd, fs); - t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_F64toI32S, mkU32(0x3), + getFReg(fs))); - assign(t0, binop(Iop_F64toI32S, mkU32(0x3), getDReg(fs))); + putFReg(fd, mkWidenFromF32(tyF, + unop(Iop_ReinterpI32asF32, mkexpr(t0)))); + break; + } else { + t0 = newTemp(Ity_I32); - putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0))); - break; + assign(t0, binop(Iop_F64toI32S, mkU32(0x3), + getDReg(fs))); - default: - goto decode_failure; + putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0))); + break; + } + default: + goto decode_failure; } break; - case 0x0E: //CEIL.W.fmt - switch (fmt) { - case 0x10: //S - DIP("ceil.w.s %d, %d\n", fd, fs); - putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x2), - getFReg(fs))); - break; - - case 0x11: //D - DIP("ceil.w.d %d, %d\n", fd, fs); - t0 = newTemp(Ity_I32); - assign(t0, binop(Iop_F64toI32S, mkU32(0x2), getDReg(fs))); - - putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0))); - break; + case 0x0E: /* CEIL.W.fmt */ + switch (fmt) { + case 0x10: /* S */ + DIP("ceil.w.s %d, %d\n", fd, fs); + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + t3 = newTemp(Ity_F32); + t4 = newTemp(Ity_F32); + /* get lo half of FPR */ + assign(t0, 
unop(Iop_ReinterpF64asI64, getFReg(fs))); + + assign(t1, unop(Iop_64to32, mkexpr(t0))); + + assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1))); + + assign(t4, binop(Iop_RoundF32toInt, mkU32(0x2), + mkexpr(t3))); + + putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4))); + } else + putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x2), + getFReg(fs))); + break; - default: - goto decode_failure; + case 0x11: /* D */ + DIP("ceil.w.d %d, %d\n", fd, fs); + if (!mode64) { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_F64toI32S, mkU32(0x2), + getDReg(fs))); + putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0))); + break; + } else { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_F64toI32S, mkU32(0x2), + getDReg(fs))); + putFReg(fd, mkWidenFromF32(tyF, + unop(Iop_ReinterpI32asF32, mkexpr(t0)))); + break; + } + default: + goto decode_failure; } break; - case 0x0A: //CEIL.L.fmt + + case 0x0A: /* CEIL.L.fmt */ switch (fmt) { - case 0x10: //S - DIP("ceil.l.s %d, %d\n", fd, fs); - goto decode_failure; + case 0x10: /* S */ + DIP("ceil.l.s %d, %d\n", fd, fs); + t0 = newTemp(Ity_I64); - case 0x11: //D - DIP("ceil.l.d %d, %d\n", fd, fs); + assign(t0, binop(Iop_F32toI64S, mkU32(0x2), + getLoFromF64(Ity_F64, getFReg(fs)))); - goto decode_failure; + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0))); + break; - default: - goto decode_failure; + case 0x11: /* D */ + DIP("ceil.l.d %d, %d\n", fd, fs); + putFReg(fd, binop(Iop_RoundF64toInt, mkU32(0x2), + getFReg(fs))); + break; + + default: + goto decode_failure; } break; - case 0x16: //RSQRT.fmt + case 0x16: /* RSQRT.fmt */ switch (fmt) { - case 0x10: - { //S + case 0x10: { /* S */ DIP("rsqrt.s %d, %d\n", fd, fs); IRExpr *rm = get_IR_roundingmode(); putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32, rm, @@ -2135,8 +3244,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, getFReg(fs)))))); break; } - case 0x11: - { //D + case 0x11: { /* D */ DIP("rsqrt.d %d, %d\n", fd, fs); IRExpr *rm = get_IR_roundingmode(); putDReg(fd, triop(Iop_DivF64, rm, @@ -2145,8 +3253,8 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, binop(Iop_SqrtF64, rm, getDReg(fs)))); break; } - default: - goto decode_failure; + default: + goto decode_failure; } break; @@ -2160,11 +3268,10 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } } - break; /*COP1 */ - case 0x10: /* COP0 */ - if (rs == 0) { /* MFC0 */ + break; /* COP1 */ + case 0x10: /* COP0 */ + if (rs == 0) { /* MFC0 */ DIP("mfc0 r%d, r%d, %d", rt, rd, sel); - IRTemp val = newTemp(Ity_I32); IRExpr** args = mkIRExprVec_2 (mkU32(rd), mkU32(sel)); IRDirty *d = unsafeIRDirty_1_N(val, @@ -2172,244 +3279,334 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, "mips32_dirtyhelper_mfc0", &mips32_dirtyhelper_mfc0, args); - stmt(IRStmt_Dirty(d)); putIReg(rt, mkexpr(val)); + } else if (rs == 1) { + /* Doubleword Move from Coprocessor 0 - DMFC0; MIPS64 */ + DIP("dmfc0 r%d, r%d, %d", rt, rd, sel); + IRTemp val = newTemp(Ity_I64); + IRExpr** args = mkIRExprVec_2 (mkU64(rd), mkU64(sel)); + IRDirty *d = unsafeIRDirty_1_N(val, + 0, + "mips64_dirtyhelper_dmfc0", + &mips64_dirtyhelper_dmfc0, + args); + stmt(IRStmt_Dirty(d)); + putDReg(rt, mkexpr(val)); } else goto decode_failure; break; - case 0x31: /* LWC1 */ - /* Load Word to Floating Point - LWC1 (MIPS32) */ - LOAD_STORE_PATTERN; - putFReg(ft, load(Ity_F32, mkexpr(t1))); + case 0x31: /* LWC1 */ + /* Load Word to Floating Point - LWC1 (MIPS32) */ DIP("lwc1 f%d, %d(r%d)", ft, imm, rs); + if (mode64) { + t0 
= newTemp(Ity_I64); + t1 = newTemp(Ity_F32); + t2 = newTemp(Ity_I64); + /* new LO */ + assign(t0, binop(Iop_Add64, getIReg(rs), + mkU64(extend_s_16to64(imm)))); + assign(t1, load(Ity_F32, mkexpr(t0))); + assign(t2, mkWidenFrom32(ty, unop(Iop_ReinterpF32asI32, + mkexpr(t1)), True)); + putFReg(ft, unop(Iop_ReinterpI64asF64, mkexpr(t2))); + } else { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_Add32, getIReg(rs), + mkU32(extend_s_16to32(imm)))); + putFReg(ft, load(Ity_F32, mkexpr(t0))); + } break; - case 0x39: /* SWC1 */ - LOAD_STORE_PATTERN; - store(mkexpr(t1), getFReg(ft)); + case 0x39: /* SWC1 */ + if (mode64) { + t0 = newTemp(Ity_I64); + t2 = newTemp(Ity_I32); + LOAD_STORE_PATTERN; + assign(t0, unop(Iop_ReinterpF64asI64, getFReg(ft))); + assign(t2, unop(Iop_64to32, mkexpr(t0))); + store(mkexpr(t1), unop(Iop_ReinterpI32asF32, mkexpr(t2))); + } else { + LOAD_STORE_PATTERN; + store(mkexpr(t1), getFReg(ft)); + } DIP("swc1 f%d, %d(r%d)", ft, imm, rs); break; - case 0x33: /* PREF */ + case 0x33: /* PREF */ DIP("pref"); break; case 0x35: /* Load Doubleword to Floating Point - LDC1 (MIPS32) */ LOAD_STORE_PATTERN; - putDReg(ft, load(Ity_F64, mkexpr(t1))); - DIP("ldc1 f%d, %d(%d) \n", rt, imm, rs); + if (mode64) + putFReg(ft, load(Ity_F64, mkexpr(t1))); + else + putDReg(ft, load(Ity_F64, mkexpr(t1))); + DIP("ldc1 f%d, %d(%d)", rt, imm, rs); break; case 0x3D: /* Store Doubleword from Floating Point - SDC1 */ LOAD_STORE_PATTERN; - store(mkexpr(t1), getDReg(ft)); + if (mode64) + store(mkexpr(t1), getFReg(ft)); + else + store(mkexpr(t1), getDReg(ft)); DIP("sdc1 f%d, %d(%d)", ft, imm, rs); break; - case 0x23: /* LW */ + case 0x23: /* LW */ DIP("lw r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; putIReg(rt, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), True)); break; - case 0x20: /* LB */ + case 0x20: /* LB */ DIP("lb r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; - putIReg(rt, unop(Iop_8Sto32, load(Ity_I8, mkexpr(t1)))); + if (mode64) + putIReg(rt, unop(Iop_8Sto64, load(Ity_I8, mkexpr(t1)))); + else + putIReg(rt, unop(Iop_8Sto32, load(Ity_I8, mkexpr(t1)))); break; - case 0x24: /* LBU */ + case 0x24: /* LBU */ DIP("lbu r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; - putIReg(rt, unop(Iop_8Uto32, load(Ity_I8, mkexpr(t1)))); + if (mode64) + putIReg(rt, unop(Iop_8Uto64, load(Ity_I8, mkexpr(t1)))); + else + putIReg(rt, unop(Iop_8Uto32, load(Ity_I8, mkexpr(t1)))); break; - case 0x21: /* LH */ + case 0x21: /* LH */ DIP("lh r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; - putIReg(rt, unop(Iop_16Sto32, load(Ity_I16, mkexpr(t1)))); + if (mode64) + putIReg(rt, unop(Iop_16Sto64, load(Ity_I16, mkexpr(t1)))); + else + putIReg(rt, unop(Iop_16Sto32, load(Ity_I16, mkexpr(t1)))); break; - case 0x25: /* LHU */ + case 0x25: /* LHU */ DIP("lhu r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; - putIReg(rt, unop(Iop_16Uto32, load(Ity_I16, mkexpr(t1)))); + if (mode64) + putIReg(rt, unop(Iop_16Uto64, load(Ity_I16, mkexpr(t1)))); + else + putIReg(rt, unop(Iop_16Uto32, load(Ity_I16, mkexpr(t1)))); break; - case 0x0F: /* LUI */ + case 0x0F: /* LUI */ p = (imm << 16); DIP("lui rt: %d, imm: %d, imm << 16: %d", rt, imm, p); if ((vex_traceflags & VEX_TRACE_FE) && !mode64) ppIRExpr(mkU32(p)); - putIReg(rt, mkU32(p)); + if (mode64) + putIReg(rt, mkU64(extend_s_32to64(p))); + else + putIReg(rt, mkU32(p)); break; - case 0x13: /* COP1X */ + case 0x13: /* COP1X */ switch (function) { - case 0x0: { /* LWXC1 */ + case 0x0: { /* LWXC1 */ /* Load Word Indexed to Floating Point - LWXC1 (MIPS32r2) */ DIP("lwxc1 f%d, r%d(r%d) \n", fd, rt, 
rs); - t0 = newTemp(Ity_I32); - assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt))); - putFReg(fd, load(Ity_F32, mkexpr(t0))); + if (mode64) { + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I64); + t3 = newTemp(Ity_F32); + t4 = newTemp(Ity_I64); + + /* new LO */ + assign(t2, binop(Iop_Add64, getIReg(rs), getIReg(rt))); + assign(t3, load(Ity_F32, mkexpr(t2))); + + assign(t4, mkWidenFrom32(ty, unop(Iop_ReinterpF32asI32, + mkexpr(t3)), True)); + + putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t4))); + } else { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt))); + putFReg(fd, load(Ity_F32, mkexpr(t0))); + } break; } - case 0x1: { /* LDXC1 */ - /* Load Doubleword Indexed to Floating Point - LDXC1 (MIPS32r2) */ - t0 = newTemp(Ity_I32); - assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt))); + case 0x1: { /* LDXC1 */ + /* Load Doubleword Indexed to Floating Point + LDXC1 (MIPS32r2 and MIPS64) */ + if (mode64) { + DIP("ldxc1 f%d, r%d(r%d) \n", fd, rt, rs); + t0 = newTemp(Ity_I64); + assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt))); + putFReg(fd, load(Ity_F64, mkexpr(t0))); + break; + } else { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt))); - t1 = newTemp(Ity_I32); - assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4))); + t1 = newTemp(Ity_I32); + assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4))); #if defined (_MIPSEL) - putFReg(fd, load(Ity_F32, mkexpr(t0))); - putFReg(fd + 1, load(Ity_F32, mkexpr(t1))); + putFReg(fd, load(Ity_F32, mkexpr(t0))); + putFReg(fd + 1, load(Ity_F32, mkexpr(t1))); #elif defined (_MIPSEB) - putFReg(fd + 1, load(Ity_F32, mkexpr(t0))); - putFReg(fd, load(Ity_F32, mkexpr(t1))); + putFReg(fd + 1, load(Ity_F32, mkexpr(t0))); + putFReg(fd, load(Ity_F32, mkexpr(t1))); #endif - DIP("ldxc1 f%d, r%d(r%d) \n", fd, rt, rs); - break; + break; + } } - case 0x5: // Load Doubleword Indexed Unaligned - // to Floating Point - LUXC1; MIPS32r2 + case 0x5: /* Load Doubleword Indexed Unaligned to Floating Point - LUXC1; + MIPS32r2 */ DIP("luxc1 f%d, r%d(r%d) \n", fd, rt, rs); t0 = newTemp(Ity_I64); t1 = newTemp(Ity_I64); assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt))); - assign(t1, binop(Iop_And64, mkexpr(t0), mkU64(0xfffffffffffffff8ULL))); + assign(t1, binop(Iop_And64, mkexpr(t0), + mkU64(0xfffffffffffffff8ULL))); putFReg(fd, load(Ity_F64, mkexpr(t1))); break; - case 0x8: { /* SWXC1 */ - /* Store Word Indexed from Floating Point - SWXC1 */ - t0 = newTemp(Ity_I32); - assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt))); + case 0x8: { /* Store Word Indexed from Floating Point - SWXC1 */ + DIP("swxc1 f%d, r%d(r%d)", ft, rt, rs); + if (mode64) { + t0 = newTemp(Ity_I64); + assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt))); - store(mkexpr(t0), getFReg(fs)); + store(mkexpr(t0), getLoFromF64(tyF, getFReg(fs))); - DIP("swxc1 f%d, r%d(r%d)", ft, rt, rs); + } else { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt))); + + store(mkexpr(t0), getFReg(fs)); + } break; } - case 0x9: { /* SDXC1 */ - /* Store Doubleword Indexed from Floating Point - SDXC1 */ - t0 = newTemp(Ity_I32); - assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt))); + case 0x9: { /* Store Doubleword Indexed from Floating Point - SDXC1 */ + DIP("sdc1 f%d, %d(%d)", ft, imm, rs); + if (mode64) { + t0 = newTemp(Ity_I64); + assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt))); + store(mkexpr(t0), getFReg(fs)); + } else { + t0 = newTemp(Ity_I32); + assign(t0, binop(Iop_Add32, getIReg(rs), 
getIReg(rt))); - t1 = newTemp(Ity_I32); - assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4))); + t1 = newTemp(Ity_I32); + assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4))); #if defined (_MIPSEL) - store(mkexpr(t0), getFReg(fs)); - store(mkexpr(t1), getFReg(fs + 1)); + store(mkexpr(t0), getFReg(fs)); + store(mkexpr(t1), getFReg(fs + 1)); #elif defined (_MIPSEB) - store(mkexpr(t0), getFReg(fs + 1)); - store(mkexpr(t1), getFReg(fs)); + store(mkexpr(t0), getFReg(fs + 1)); + store(mkexpr(t1), getFReg(fs)); #endif - - DIP("sdxc1 f%d, %d(%d)", ft, imm, rs); + } break; } + case 0xD: /* Store Doubleword Indexed Unaligned from Floating Point - + SUXC1; MIPS64 MIPS32r2 */ + DIP("suxc1 f%d, r%d(r%d) \n", fd, rt, rs); + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I64); + assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt))); + assign(t1, binop(Iop_And64, mkexpr(t0), mkU64(0xfffffffffffffff8))); + store(mkexpr(t1), getFReg(fs)); + break; + case 0x0F: { DIP("prefx"); break; } - case 0x20: { /* MADD.S */ - DIP("madd.s f%d, f%d, f%d, f%d", fmt, ft, fs, fd); + case 0x20: { /* MADD.S */ + DIP("madd.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft); IRExpr *rm = get_IR_roundingmode(); t1 = newTemp(Ity_F32); - assign(t1, triop(Iop_MulF32, rm, getLoFromF64(tyF, getFReg(fs)), - getLoFromF64(tyF, getFReg(ft)))); - - putFReg(fd, mkWidenFromF32(tyF, triop(Iop_AddF32, rm, mkexpr(t1), - getLoFromF64(tyF, getFReg(fmt))))); - break; /* MADD.S */ + assign(t1, qop(Iop_MAddF32, rm, + getLoFromF64(tyF, getFReg(fmt)), + getLoFromF64(tyF, getFReg(fs)), + getLoFromF64(tyF, getFReg(ft)))); + putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1))); + break; /* MADD.S */ } - case 0x21: { /* MADD.D */ - DIP("madd.d f%d, f%d, f%d, f%d", fmt, ft, fs, fd); + case 0x21: { /* MADD.D */ + DIP("madd.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft); IRExpr *rm = get_IR_roundingmode(); - t1 = newTemp(Ity_F64); - assign(t1, triop(Iop_MulF64, rm, getDReg(fs), getDReg(ft))); - - putDReg(fd, triop(Iop_AddF64, rm, mkexpr(t1), getDReg(fmt))); - break; /* MADD.D */ + putDReg(fd, qop(Iop_MAddF64, rm, getDReg(fmt), getDReg(fs), + getDReg(ft))); + break; /* MADD.D */ } - case 0x28: { /* MSUB.S */ - DIP("msub.s f%d, f%d, f%d, f%d", fmt, ft, fs, fd); + case 0x28: { /* MSUB.S */ + DIP("msub.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft); IRExpr *rm = get_IR_roundingmode(); t1 = newTemp(Ity_F32); - assign(t1, triop(Iop_MulF32, rm, getLoFromF64(tyF, getFReg(fs)), - getLoFromF64(tyF, getFReg(ft)))); - - putFReg(fd, mkWidenFromF32(tyF, triop(Iop_SubF32, rm, - mkexpr(t1), getLoFromF64(tyF, getFReg(fmt))))); - break; /* MSUB.S */ + assign(t1, qop(Iop_MSubF32, rm, + getLoFromF64(tyF, getFReg(fmt)), + getLoFromF64(tyF, getFReg(fs)), + getLoFromF64(tyF, getFReg(ft)))); + putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1))); + break; /* MSUB.S */ } - case 0x29: { /* MSUB.D */ - DIP("msub.d f%d, f%d, f%d, f%d", fmt, ft, fs, fd); + case 0x29: { /* MSUB.D */ + DIP("msub.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft); IRExpr *rm = get_IR_roundingmode(); - t1 = newTemp(Ity_F64); - assign(t1, triop(Iop_MulF64, rm, getDReg(fs), getDReg(ft))); - - putDReg(fd, triop(Iop_SubF64, rm, mkexpr(t1), getDReg(fmt))); - break; /* MSUB.D */ + putDReg(fd, qop(Iop_MSubF64, rm, getDReg(fmt), getDReg(fs), + getDReg(ft))); + break; /* MSUB.D */ } - case 0x30: { /* NMADD.S */ - DIP("nmadd.s f%d, f%d, f%d, f%d", fmt, ft, fs, fd); + case 0x30: { /* NMADD.S */ + DIP("nmadd.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft); IRExpr *rm = get_IR_roundingmode(); t1 = newTemp(Ity_F32); - t2 = newTemp(Ity_F32); - assign(t1, triop(Iop_MulF32, rm, 
getLoFromF64(tyF, getFReg(fs)), - getLoFromF64(tyF, getFReg(ft)))); + assign(t1, qop(Iop_MAddF32, rm, + getLoFromF64(tyF, getFReg(fmt)), + getLoFromF64(tyF, getFReg(fs)), + getLoFromF64(tyF, getFReg(ft)))); - assign(t2, triop(Iop_AddF32, rm, mkexpr(t1), - getLoFromF64(tyF, getFReg(fmt)))); - - putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t2)))); - break; /* NMADD.S */ + putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t1)))); + break; /* NMADD.S */ } - case 0x31: { /* NMADD.D */ - DIP("nmadd.d f%d, f%d, f%d, f%d", fmt, ft, fs, fd); + case 0x31: { /* NMADD.D */ + DIP("nmadd.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft); IRExpr *rm = get_IR_roundingmode(); t1 = newTemp(Ity_F64); - t2 = newTemp(Ity_F64); - assign(t1, triop(Iop_MulF64, rm, getDReg(fs), getDReg(ft))); - - assign(t2, triop(Iop_AddF64, rm, mkexpr(t1), getDReg(fmt))); - putDReg(fd, unop(Iop_NegF64, mkexpr(t2))); - break; /* NMADD.D */ + assign(t1, qop(Iop_MAddF64, rm, getDReg(fmt), getDReg(fs), + getDReg(ft))); + putDReg(fd, unop(Iop_NegF64, mkexpr(t1))); + break; /* NMADD.D */ } - case 0x38: { /* NMSUBB.S */ - DIP("nmsub.s f%d, f%d, f%d, f%d", fmt, ft, fs, fd); + case 0x38: { /* NMSUBB.S */ + DIP("nmsub.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft); IRExpr *rm = get_IR_roundingmode(); t1 = newTemp(Ity_F32); - t2 = newTemp(Ity_F32); - assign(t1, triop(Iop_MulF32, rm, getLoFromF64(tyF, getFReg(fs)), - getLoFromF64(tyF, getFReg(ft)))); - - assign(t2, triop(Iop_SubF32, rm, mkexpr(t1), getLoFromF64(tyF, - getFReg(fmt)))); + assign(t1, qop(Iop_MSubF32, rm, + getLoFromF64(tyF, getFReg(fmt)), + getLoFromF64(tyF, getFReg(fs)), + getLoFromF64(tyF, getFReg(ft)))); - putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t2)))); - break; /* NMSUBB.S */ + putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t1)))); + break; /* NMSUBB.S */ } - case 0x39: { /* NMSUBB.D */ - DIP("nmsub.d f%d, f%d, f%d, f%d", fmt, ft, fs, fd); + case 0x39: { /* NMSUBB.D */ + DIP("nmsub.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft); IRExpr *rm = get_IR_roundingmode(); t1 = newTemp(Ity_F64); - t2 = newTemp(Ity_F64); - assign(t1, triop(Iop_MulF64, rm, getDReg(fs), getDReg(ft))); - - assign(t2, triop(Iop_SubF64, rm, mkexpr(t1), getDReg(fmt))); - putDReg(fd, unop(Iop_NegF64, mkexpr(t2))); - break; /* NMSUBB.D */ + assign(t1, qop(Iop_MSubF64, rm, getDReg(fmt), getDReg(fs), + getDReg(ft))); + putDReg(fd, unop(Iop_NegF64, mkexpr(t1))); + break; /* NMSUBB.D */ } default: @@ -2417,10 +3614,40 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } break; - case 0x22: /* LWL */ + case 0x22: /* LWL */ DIP("lwl r%d, %d(r%d)", rt, imm, rs); - { + if (mode64) { + /* t1 = addr */ +#if defined (_MIPSEL) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); +#elif defined (_MIPSEB) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Xor64, mkU64(0x3), + binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))))); +#endif + + /* t2 = word addr */ + /* t4 = addr mod 4 */ + LWX_SWX_PATTERN64; + + /* t3 = word content - shifted */ + t3 = newTemp(Ity_I32); + assign(t3, binop(Iop_Shl32, mkNarrowTo32(ty, load(Ity_I64, + mkexpr(t2))), narrowTo(Ity_I8, binop(Iop_Shl32, + binop(Iop_Sub32, mkU32(0x03), mkexpr(t4)), mkU8(3))))); + + /* rt content - adjusted */ + t5 = newTemp(Ity_I32); + assign(t5, binop(Iop_And32, mkNarrowTo32(ty, getIReg(rt)), + binop(Iop_Shr32, mkU32(0xFFFFFFFF), narrowTo(Ity_I8, + binop(Iop_Shl32, binop(Iop_Add32, mkexpr(t4), mkU32(0x1)), + mkU8(0x3)))))); + + putIReg(rt, mkWidenFrom32(ty, 
binop(Iop_Or32, mkexpr(t5), + mkexpr(t3)), True)); + } else { /* t1 = addr */ t1 = newTemp(Ity_I32); #if defined (_MIPSEL) @@ -2450,10 +3677,38 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } break; - case 0x26: /* LWR */ + case 0x26: /* LWR */ DIP("lwr r%d, %d(r%d)", rt, imm, rs); - { + if (mode64) { + /* t1 = addr */ +#if defined (_MIPSEL) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); +#elif defined (_MIPSEB) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs), + mkU64(extend_s_16to64(imm))))); +#endif + + /* t2 = word addr */ + /* t4 = addr mod 4 */ + LWX_SWX_PATTERN64; + + /* t3 = word content - shifted */ + t3 = newTemp(Ity_I32); + assign(t3, binop(Iop_Shr32, mkNarrowTo32(ty, load(Ity_I64,mkexpr(t2))), + narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(3))))); + + /* rt content - adjusted */ + t5 = newTemp(Ity_I32); + assign(t5, binop(Iop_And32, mkNarrowTo32(ty, getIReg(rt)), + unop(Iop_Not32, binop(Iop_Shr32, mkU32(0xFFFFFFFF), + narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(0x3))))))); + + putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t5), + mkexpr(t3)), True)); + } else { /* t1 = addr */ t1 = newTemp(Ity_I32); #if defined (_MIPSEL) @@ -2483,28 +3738,132 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } break; - case 0x2B: /* SW */ + case 0x2B: /* SW */ DIP("sw r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; store(mkexpr(t1), mkNarrowTo32(ty, getIReg(rt))); break; - case 0x28: /* SB */ + case 0x2C: { /* SDL rt, offset(base) MIPS64 */ + DIP("sdl r%u,%d(r%u)\n", rt, (Int) imm, rs); + /* t1 = addr */ +#if defined (_MIPSEL) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); +#elif defined (_MIPSEB) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Xor64, mkU64(0x7), binop(Iop_Add64, getIReg(rs), + mkU64(extend_s_16to64(imm))))); +#endif + + /* t2 = word addr */ + /* t4 = addr mod 4 */ + LWX_SWX_PATTERN64_1; + + /* t3 = rt content - shifted */ + t3 = newTemp(Ity_I64); + assign(t3, binop(Iop_Shr64, getIReg(rt), narrowTo(Ity_I8, binop(Iop_Shl64, + binop(Iop_Sub64, mkU64(0x07), mkexpr(t4)), mkU8(3))))); + + /* word content - adjusted */ + t5 = newTemp(Ity_I64); + t6 = newTemp(Ity_I64); + t7 = newTemp(Ity_I64); + t8 = newTemp(Ity_I64); + + /* neg(shr(0xFFFFFFFF, mul(sub(7,n), 8))) */ + assign(t5, binop(Iop_Mul64, binop(Iop_Sub64, mkU64(0x7), mkexpr(t4)), + mkU64(0x8))); + + assign(t6, binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFF), + narrowTo(Ity_I8, mkexpr(t5)))); + assign(t7, binop(Iop_Xor64, mkU64(0xFFFFFFFFFFFFFFFF), mkexpr(t6))); + assign(t8, binop(Iop_And64, load(Ity_I64, mkexpr(t2)), mkexpr(t7))); + store(mkexpr(t2), binop(Iop_Or64, mkexpr(t8), mkexpr(t3))); + break; + } + + case 0x2D: { + /* SDR rt, offset(base) - MIPS64 */ + vassert(mode64); + DIP("sdr r%u,%d(r%u)\n", rt, imm, rs); + /* t1 = addr */ +#if defined (_MIPSEL) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); +#elif defined (_MIPSEB) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Xor64, mkU64(0x7), binop(Iop_Add64, getIReg(rs), + mkU64(extend_s_16to64(imm))))); +#endif + + /* t2 = word addr */ + /* t4 = addr mod 4 */ + LWX_SWX_PATTERN64_1; + + /* t3 = rt content - shifted */ + t3 = newTemp(Ity_I64); + assign(t3, binop(Iop_Shl64, getIReg(rt), narrowTo(Ity_I8, + binop(Iop_Shl64, mkexpr(t4), mkU8(3))))); + + /* word content - adjusted */ + t5 = 
newTemp(Ity_I64); + assign(t5, binop(Iop_And64, load(Ity_I64, mkexpr(t2)), unop(Iop_Not64, + binop(Iop_Shl64, mkU64(0xFFFFFFFFFFFFFFFF), + narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(0x3))))))); + + store(mkexpr(t2), binop(Iop_Xor64, mkexpr(t5), mkexpr(t3))); + } + + case 0x28: /* SB */ DIP("sb r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; store(mkexpr(t1), narrowTo(Ity_I8, getIReg(rt))); break; - case 0x29: /* SH */ + case 0x29: /* SH */ DIP("sh r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; store(mkexpr(t1), narrowTo(Ity_I16, getIReg(rt))); break; - case 0x2A: /* SWL */ - + case 0x2A: /* SWL */ DIP("swl r%d, %d(r%d)", rt, imm, rs); - { + if (mode64) { +#if defined (_MIPSEL) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); +#elif defined (_MIPSEB) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs), + mkU64(extend_s_16to64(imm))))); +#endif + /* t2 = word addr */ + /* t4 = addr mod 4 */ + LWX_SWX_PATTERN64; + + /* t3 = rt content - shifted */ + t3 = newTemp(Ity_I32); + assign(t3, binop(Iop_Shr32, mkNarrowTo32(ty, getIReg(rt)), + narrowTo(Ity_I8, binop(Iop_Shl32, binop(Iop_Sub32, + mkU32(0x03), mkexpr(t4)), mkU8(3))))); + + /* word content - adjusted */ + t5 = newTemp(Ity_I32); + t6 = newTemp(Ity_I32); + t7 = newTemp(Ity_I32); + t8 = newTemp(Ity_I32); + + /* neg(shr(0xFFFFFFFF, mul(sub(3,n), 8))) */ + assign(t5, binop(Iop_Mul32, binop(Iop_Sub32, mkU32(0x3), mkexpr(t4)), + mkU32(0x8))); + + assign(t6, binop(Iop_Shr32, mkU32(0xFFFFFFFF), narrowTo(Ity_I8, + mkexpr(t5)))); + assign(t7, binop(Iop_Xor32, mkU32(0xFFFFFFFF), mkexpr(t6))); + assign(t8, binop(Iop_And32, load(Ity_I32, mkexpr(t2)), mkexpr(t7))); + store(mkexpr(t2), binop(Iop_Or32, mkexpr(t8), mkexpr(t3))); + } else { /* t1 = addr */ t1 = newTemp(Ity_I32); #if defined (_MIPSEL) @@ -2513,7 +3872,6 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, assign(t1, binop(Iop_Xor32, mkU32(0x3), binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm))))); #endif - /* t2 = word addr */ /* t4 = addr mod 4 */ LWX_SWX_PATTERN; @@ -2530,7 +3888,7 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, t7 = newTemp(Ity_I32); t8 = newTemp(Ity_I32); - // neg(shr(0xFFFFFFFF, mul(sub(3,n), 8))) + /* neg(shr(0xFFFFFFFF, mul(sub(3,n), 8))) */ assign(t5, binop(Iop_Mul32, binop(Iop_Sub32, mkU32(0x3), mkexpr(t4)), mkU32(0x8))); @@ -2542,10 +3900,37 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } break; - case 0x2E: /* SWR */ + case 0x2E: /* SWR */ DIP("swr r%d, %d(r%d)", rt, imm, rs); - { + if (mode64) { + /* t1 = addr */ +#if defined (_MIPSEL) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); +#elif defined (_MIPSEB) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs), + mkU64(extend_s_16to64(imm))))); +#endif + + /* t2 = word addr */ + /* t4 = addr mod 4 */ + LWX_SWX_PATTERN64; + + /* t3 = rt content - shifted */ + t3 = newTemp(Ity_I32); + assign(t3, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rt)), + narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(3))))); + + /* word content - adjusted */ + t5 = newTemp(Ity_I32); + assign(t5, binop(Iop_And32, load(Ity_I32, mkexpr(t2)), unop(Iop_Not32, + binop(Iop_Shl32, mkU32(0xFFFFFFFF), narrowTo(Ity_I8, + binop(Iop_Shl32, mkexpr(t4), mkU8(0x3))))))); + + store(mkexpr(t2), binop(Iop_Xor32, mkexpr(t5), mkexpr(t3))); + } else { /* t1 = addr */ 
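/* Editor's sketch, not part of this patch: both the mode64 SWL path just
   above and the 32-bit path that follows perform the same partial store,
   merging the high-order bytes of rt into the aligned word that contains the
   effective address.  A little-endian C model of that merge, mirroring the
   IR's (3 - n) * 8 shift and its ~(0xFFFFFFFF >> shift) "bytes to keep"
   mask; swl_le() is a hypothetical helper name. */
#include <stdint.h>
#include <string.h>

static void swl_le(uint8_t *mem, uint32_t addr, uint32_t rt)
{
   uint32_t n     = addr & 3;                 /* byte offset inside the word  */
   uint32_t shift = (3 - n) * 8;              /* same (3 - n) * 8 as the IR   */
   uint32_t keep  = ~(0xFFFFFFFFu >> shift);  /* memory bytes left untouched  */
   uint32_t word;
   memcpy(&word, mem + (addr & ~3u), 4);      /* aligned word holding addr    */
   word = (word & keep) | (rt >> shift);      /* high bytes of rt -> low part */
   memcpy(mem + (addr & ~3u), &word, 4);
}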
t1 = newTemp(Ity_I32); #if defined (_MIPSEL) @@ -2574,157 +3959,617 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } break; - case 0x1C: /*Special2 */ + case 0x1C: /* Special2 */ switch (function) { - case 0x02: { /* MUL */ + case 0x02: { /* MUL */ DIP("mul r%d, r%d, r%d", rd, rs, rt); - putIReg(rd, binop(Iop_Mul32, getIReg(rs), getIReg(rt))); + if (mode64) { + IRTemp tmpRs32 = newTemp(Ity_I32); + IRTemp tmpRt32 = newTemp(Ity_I32); + IRTemp tmpRes = newTemp(Ity_I32); + + assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs))); + assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt))); + assign(tmpRes, binop(Iop_Mul32, mkexpr(tmpRs32), mkexpr(tmpRt32))); + putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpRes), True)); + } else + putIReg(rd, binop(Iop_Mul32, getIReg(rs), getIReg(rt))); break; } - case 0x00: { /* MADD */ + case 0x00: { /* MADD */ DIP("madd r%d, r%d", rs, rt); - t1 = newTemp(Ity_I32); - t2 = newTemp(Ity_I32); - t3 = newTemp(Ity_I64); - t4 = newTemp(Ity_I32); - t5 = newTemp(Ity_I32); - t6 = newTemp(Ity_I32); + if (mode64) { + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I64); + t5 = newTemp(Ity_I64); + t6 = newTemp(Ity_I32); + + assign(t1, mkNarrowTo32(ty, getHI())); + assign(t2, mkNarrowTo32(ty, getLO())); + + assign(t3, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)), + mkNarrowTo32(ty, getIReg(rt)))); + + assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2))); + assign(t5, binop(Iop_Add64, mkexpr(t3), mkexpr(t4))); + + putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True)); + putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True)); + } else { + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I32); + t5 = newTemp(Ity_I32); + t6 = newTemp(Ity_I32); - assign(t1, getHI()); - assign(t2, getLO()); + assign(t1, getHI()); + assign(t2, getLO()); - assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt))); + assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt))); - assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32, - mkexpr(t3)))); + assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32, + mkexpr(t3)))); - assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4), - unop(Iop_64to32, mkexpr(t3))))); - assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1))); + assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4), + unop(Iop_64to32, mkexpr(t3))))); + assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1))); - putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3)))); - putLO(mkexpr(t4)); + putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3)))); + putLO(mkexpr(t4)); + } break; } - case 0x01: { /* MADDU */ + case 0x01: { /* MADDU */ DIP("maddu r%d, r%d", rs, rt); - t1 = newTemp(Ity_I32); - t2 = newTemp(Ity_I32); - t3 = newTemp(Ity_I64); - t4 = newTemp(Ity_I32); - t5 = newTemp(Ity_I32); - t6 = newTemp(Ity_I32); + if (mode64) { + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I64); + t5 = newTemp(Ity_I64); + t6 = newTemp(Ity_I32); + + assign(t1, mkNarrowTo32(ty, getHI())); + assign(t2, mkNarrowTo32(ty, getLO())); + + assign(t3, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)), + mkNarrowTo32(ty, getIReg(rt)))); + + assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2))); + assign(t5, binop(Iop_Add64, mkexpr(t3), mkexpr(t4))); + + putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True)); + putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True)); + } else { + t1 = newTemp(Ity_I32); + t2 = 
newTemp(Ity_I32); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I32); + t5 = newTemp(Ity_I32); + t6 = newTemp(Ity_I32); - assign(t1, getHI()); - assign(t2, getLO()); + assign(t1, getHI()); + assign(t2, getLO()); - assign(t3, binop(Iop_MullU32, getIReg(rs), getIReg(rt))); + assign(t3, binop(Iop_MullU32, getIReg(rs), getIReg(rt))); - assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32, - mkexpr(t3)))); - assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4), - unop(Iop_64to32, mkexpr(t3))))); - assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1))); + assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32, + mkexpr(t3)))); + assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4), + unop(Iop_64to32, mkexpr(t3))))); + assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1))); - putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3)))); - putLO(mkexpr(t4)); + putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3)))); + putLO(mkexpr(t4)); + } break; } - case 0x04: { /* MSUB */ + case 0x04: { /* MSUB */ DIP("msub r%d, r%d", rs, rt); - t1 = newTemp(Ity_I32); - t2 = newTemp(Ity_I32); - t3 = newTemp(Ity_I64); - t4 = newTemp(Ity_I32); - t5 = newTemp(Ity_I1); - t6 = newTemp(Ity_I32); + if (mode64) { + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I64); + t5 = newTemp(Ity_I64); + t6 = newTemp(Ity_I32); + + assign(t1, mkNarrowTo32(ty, getHI())); + assign(t2, mkNarrowTo32(ty, getLO())); + + assign(t3, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)), + mkNarrowTo32(ty, getIReg(rt)))); + + assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2))); + assign(t5, binop(Iop_Sub64, mkexpr(t4), mkexpr(t3))); + + putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True)); + putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True)); + } else { + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I32); + t5 = newTemp(Ity_I1); + t6 = newTemp(Ity_I32); - assign(t1, getHI()); - assign(t2, getLO()); + assign(t1, getHI()); + assign(t2, getLO()); - assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt))); - assign(t4, unop(Iop_64to32, mkexpr(t3))); //new lo + assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt))); + assign(t4, unop(Iop_64to32, mkexpr(t3))); /* new lo */ - //if lo= 32 && srcPos < 64); + vassert(dstSz > 0 && dstSz <= 32); + vassert((srcPos + dstSz) > 32 && (srcPos + dstSz) <= 64); + + UChar lsAmt = 64 - (srcPos + dstSz); /* left shift amount; */ + UChar rsAmt = 64 - dstSz; /* right shift amount; */ + + assign(t1, binop(Iop_Shl64, getIReg(rs), mkU8(lsAmt))); + putIReg(rt, binop(Iop_Shr64, mkexpr(t1), mkU8(rsAmt))); + break; + } + case 0x05: { + /* Doubleword Insert Bit Field Middle - DINSM; MIPS64r2 */ + msb = get_msb(cins); + lsb = get_lsb(cins); + size = msb + 1; + UInt dstPos = lsb; + UInt srcSz = msb - lsb + 33; + t1 = newTemp(ty); + t2 = newTemp(ty); + t3 = newTemp(ty); + t4 = newTemp(ty); + IRTemp tmpT1 = newTemp(ty); + IRTemp tmpT2 = newTemp(ty); + IRTemp tmpT3 = newTemp(ty); + IRTemp tmpT4 = newTemp(ty); + IRTemp tmpT5 = newTemp(ty); + IRTemp tmpT6 = newTemp(ty); + IRTemp tmpT7 = newTemp(ty); + IRTemp tmpRs = newTemp(ty); + IRTemp tmpRt = newTemp(ty); + IRTemp tmpRd = newTemp(ty); + + assign(tmpRs, getIReg(rs)); + assign(tmpRt, getIReg(rt)); + DIP("dinsm r%u, r%u, %d, %d\n", rt, rs, lsb, msb); + + UChar lsAmt = dstPos + srcSz - 1; /* left shift amount; */ + UChar rsAmt = dstPos + srcSz - 1; /* right shift amount; */ + + assign(t1, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt))); + 
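/* Editor's sketch, not part of this patch: the DINSM sequence being assembled
   here, and the DINSU/DINS cases that follow, all implement one bit-field
   insert and differ only in how pos/size are decoded from msb/lsb.  A compact
   C model of that insert; dins_field() is a hypothetical name. */
#include <stdint.h>

static uint64_t dins_field(uint64_t rt, uint64_t rs, unsigned pos, unsigned size)
{
   /* size == 64 would shift by 64 (undefined in C), so special-case it */
   uint64_t mask = (size >= 64) ? ~0ULL : (((1ULL << size) - 1) << pos);
   return (rt & ~mask) | ((rs << pos) & mask);  /* splice rs[size-1:0] at pos */
}
/* Decodings used above/below: DINSM takes pos = lsb, size = msb - lsb + 33;
   DINSU takes pos = lsb + 32, size = msb - lsb + 1; DINS takes pos = lsb,
   size = msb - lsb + 1. */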
assign(tmpT1, binop(Iop_Shr64, mkexpr(t1), mkU8(1))); + assign(t2, binop(Iop_Shl64, mkexpr(tmpT1), mkU8(lsAmt))); + assign(tmpT2, binop(Iop_Shl64, mkexpr(t2), mkU8(1))); + + lsAmt = 63 - dstPos; /* left shift amount; */ + rsAmt = 63 - dstPos; /* right shift amount; */ + + assign(t3, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt))); + assign(tmpT3, binop(Iop_Shl64, mkexpr(t3), mkU8(1))); + assign(t4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(rsAmt))); + assign(tmpT4, binop(Iop_Shr64, mkexpr(t4), mkU8(1))); + + /* extract size from src register */ + lsAmt = 64 - srcSz; /* left shift amount; */ + rsAmt = 64 - (lsb + srcSz); /* right shift amount; */ + + assign(tmpT5, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt))); + assign(tmpT6, binop(Iop_Shr64, mkexpr(tmpT5), mkU8(rsAmt))); + + assign(tmpT7, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT4))); + assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT6), mkexpr(tmpT7))); + putIReg(rt, mkexpr(tmpRd)); + break; + } + case 0x06: { + /* Doubleword Insert Bit Field Upper - DINSU; MIPS64r2 */ + msb = get_msb(cins); + lsb = get_lsb(cins); + size = msb + 1; + UInt dstPos = lsb + 32; + UInt srcSz = msb - lsb + 1; + IRTemp tmpT1 = newTemp(ty); + IRTemp tmpT2 = newTemp(ty); + IRTemp tmpT3 = newTemp(ty); + IRTemp tmpT4 = newTemp(ty); + IRTemp tmpT5 = newTemp(ty); + IRTemp tmpT6 = newTemp(ty); + IRTemp tmpT7 = newTemp(ty); + IRTemp tmpT8 = newTemp(ty); + IRTemp tmpT9 = newTemp(ty); + IRTemp tmpRs = newTemp(ty); + IRTemp tmpRt = newTemp(ty); + IRTemp tmpRd = newTemp(ty); + + assign(tmpRs, getIReg(rs)); + assign(tmpRt, getIReg(rt)); + DIP("dinsu r%u, r%u, %d, %d\n", rt, rs, lsb, msb); + + UChar lsAmt = 64 - srcSz; /* left shift amount; */ + UChar rsAmt = 64 - (dstPos + srcSz); /* right shift amount; */ + assign(tmpT1, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt))); + assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(rsAmt))); + + lsAmt = 64 - dstPos; /* left shift amount; */ + rsAmt = 64 - dstPos; /* right shift amount; */ + assign(tmpT3, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt))); + assign(tmpT4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(rsAmt))); + + lsAmt = dstPos; /* left shift amount; */ + rsAmt = srcSz; /* right shift amount; */ + assign(tmpT5, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt))); + assign(tmpT6, binop(Iop_Shr64, mkexpr(tmpT5), mkU8(lsAmt))); + + assign(tmpT7, binop(Iop_Shl64, mkexpr(tmpT6), mkU8(rsAmt))); + assign(tmpT8, binop(Iop_Shl64, mkexpr(tmpT7), mkU8(lsAmt))); + + assign(tmpT9, binop(Iop_Or64, mkexpr(tmpT8), mkexpr(tmpT4))); + assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT9))); + putIReg(rt, mkexpr(tmpRd)); + break; + } + case 0x07: { + /* Doubleword Insert Bit Field - DINS; MIPS64r2 */ + IRTemp tmp1 = newTemp(ty); + IRTemp tmpT1 = newTemp(ty); + IRTemp tmpT2 = newTemp(ty); + IRTemp tmpT3 = newTemp(ty); + IRTemp tmpT4 = newTemp(ty); + IRTemp tmpT5 = newTemp(ty); + IRTemp tmpT6 = newTemp(ty); + IRTemp tmpT7 = newTemp(ty); + IRTemp tmpT8 = newTemp(ty); + IRTemp tmpT9 = newTemp(ty); + IRTemp tmp = newTemp(ty); + IRTemp tmpRs = newTemp(ty); + IRTemp tmpRt = newTemp(ty); + IRTemp tmpRd = newTemp(ty); + + assign(tmpRs, getIReg(rs)); + assign(tmpRt, getIReg(rt)); + + msb = get_msb(cins); + lsb = get_lsb(cins); + size = msb + 1; + DIP("dins r%u, r%u, %d, %d\n", rt, rs, lsb, + msb - lsb + 1); + UChar lsAmt = 63 - lsb; /* left shift amount; */ + UChar rsAmt = 63 - lsb; /* right shift amount; */ + assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt))); + assign(tmpT1, binop(Iop_Shl64, mkexpr(tmp), mkU8(1))); + assign(tmp1, binop(Iop_Shr64, mkexpr(tmpT1), 
mkU8(rsAmt))); + assign(tmpT2, binop(Iop_Shr64, mkexpr(tmp1), mkU8(1))); + + lsAmt = msb; /* left shift amount; */ + rsAmt = 1; /*right shift amount; */ + assign(tmpT3, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt))); + assign(tmpT4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(lsAmt))); + assign(tmpT5, binop(Iop_Shl64, mkexpr(tmpT4), mkU8(rsAmt))); + assign(tmpT6, binop(Iop_Shl64, mkexpr(tmpT5), mkU8(lsAmt))); + + lsAmt = 64 - (msb - lsb + 1); /* left shift amount; */ + rsAmt = 64 - (msb + 1); /* right shift amount; */ + assign(tmpT7, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt))); + assign(tmpT8, binop(Iop_Shr64, mkexpr(tmpT7), mkU8(rsAmt))); + + assign(tmpT9, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT8))); + assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT6), mkexpr(tmpT9))); + putIReg(rt, mkexpr(tmpRd)); + break; + } + case 0x24: /* DBSHFL */ + lsb = get_lsb(cins); + IRTemp tmpRs = newTemp(ty); + IRTemp tmpRt = newTemp(ty); + IRTemp tmpRd = newTemp(ty); + assign(tmpRs, getIReg(rs)); + assign(tmpRt, getIReg(rt)); + switch (lsb) { + case 0x02: { /* DSBH */ + IRTemp tmpT1 = newTemp(ty); + IRTemp tmpT2 = newTemp(ty); + IRTemp tmpT3 = newTemp(ty); + IRTemp tmpT4 = newTemp(ty); + IRTemp tmpT5 = newTemp(Ity_I64); + IRTemp tmpT6 = newTemp(ty); + DIP("dsbh r%u, r%u\n", rd, rt); + assign(tmpT5, mkU64(0xFF00FF00FF00FF00ULL)); + assign(tmpT6, mkU64(0x00FF00FF00FF00FFULL)); + assign(tmpT1, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT5))); + assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(8))); + assign(tmpT3, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT6))); + assign(tmpT4, binop(Iop_Shl64, mkexpr(tmpT3), mkU8(8))); + assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT4), mkexpr(tmpT2))); + putIReg(rd, mkexpr(tmpRd)); + break; + } + case 0x05: { /* DSHD */ + IRTemp tmpT1 = newTemp(ty); + IRTemp tmpT2 = newTemp(ty); + IRTemp tmpT3 = newTemp(ty); + IRTemp tmpT4 = newTemp(ty); + IRTemp tmpT5 = newTemp(Ity_I64); + IRTemp tmpT6 = newTemp(ty); + IRTemp tmpT7 = newTemp(ty); + IRTemp tmpT8 = newTemp(ty); + IRTemp tmpT9 = newTemp(ty); + DIP("dshd r%u, r%u\n", rd, rt); + assign(tmpT5, mkU64(0xFFFF0000FFFF0000ULL)); + assign(tmpT6, mkU64(0x0000FFFF0000FFFFULL)); + assign(tmpT1, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT5))); + assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(16))); + assign(tmpT3, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT6))); + assign(tmpT4, binop(Iop_Shl64, mkexpr(tmpT3), mkU8(16))); + assign(tmpT7, binop(Iop_Or64, mkexpr(tmpT4), mkexpr(tmpT2))); + assign(tmpT8, binop(Iop_Shl64, mkexpr(tmpT7), mkU8(32))); + assign(tmpT9, binop(Iop_Shr64, mkexpr(tmpT7), mkU8(32))); + assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT8), mkexpr(tmpT9))); + putIReg(rd, mkexpr(tmpRd)); + break; + } + default: + vex_printf("\nop6o10 = %d", lsb); + goto decode_failure;; + } + break; + case 0x3B: { /* RDHWR */ + DIP("rdhwr r%d, r%d\n", rt, rd); if (rd == 29) { putIReg(rt, getULR()); +#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2)) + } else if (rd == 1) { + IRTemp val = newTemp(Ity_I64); + IRExpr** args = mkIRExprVec_2 (mkU64(rt), mkU64(rd)); + IRDirty *d = unsafeIRDirty_1_N(val, + 0, + "mips64_dirtyhelper_rdhwr", + &mips64_dirtyhelper_rdhwr, + args); + stmt(IRStmt_Dirty(d)); + putIReg(rt, mkexpr(val)); +#endif } else goto decode_failure; break; } - case 0x04: - /*INS*/ msb = get_msb(cins); + case 0x04: /* INS */ + msb = get_msb(cins); lsb = get_lsb(cins); size = msb - lsb + 1; @@ -2733,180 +4578,210 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, vassert(lsb + size > 0); DIP("ins 
size:%d msb:%d lsb:%d", size, msb, lsb); - /*put size bits from rs at the pos in temporary */ + /* put size bits from rs at the pos in temporary */ t0 = newTemp(Ity_I32); t3 = newTemp(Ity_I32); - /*shift left for 32 - size to clear leading bits and get zeros - at the end */ - assign(t0, binop(Iop_Shl32, getIReg(rs), mkU8(32 - size))); - /*now set it at pos */ + /* shift left for 32 - size to clear leading bits and get zeros + at the end */ + assign(t0, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rs)), + mkU8(32 - size))); + /* now set it at pos */ t1 = newTemp(Ity_I32); assign(t1, binop(Iop_Shr32, mkexpr(t0), mkU8(32 - size - lsb))); if (lsb > 0) { t2 = newTemp(Ity_I32); - /*clear everything but lower pos bits from rt */ - assign(t2, binop(Iop_Shl32, getIReg(rt), mkU8(32 - lsb))); + /* clear everything but lower pos bits from rt */ + assign(t2, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rt)), + mkU8(32 - lsb))); assign(t3, binop(Iop_Shr32, mkexpr(t2), mkU8(32 - lsb))); } if (msb < 31) { t4 = newTemp(Ity_I32); - /*clear everything but upper msb + 1 bits from rt */ - assign(t4, binop(Iop_Shr32, getIReg(rt), mkU8(msb + 1))); + /* clear everything but upper msb + 1 bits from rt */ + assign(t4, binop(Iop_Shr32, mkNarrowTo32(ty, getIReg(rt)), + mkU8(msb + 1))); t5 = newTemp(Ity_I32); assign(t5, binop(Iop_Shl32, mkexpr(t4), mkU8(msb + 1))); - /*now combine these registers */ + /* now combine these registers */ if (lsb > 0) { t6 = newTemp(Ity_I32); assign(t6, binop(Iop_Or32, mkexpr(t5), mkexpr(t1))); - putIReg(rt, binop(Iop_Or32, mkexpr(t6), mkexpr(t3))); + putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t6), + mkexpr(t3)), False)); } else { - putIReg(rt, binop(Iop_Or32, mkexpr(t1), mkexpr(t5))); + putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t1), + mkexpr(t5)), False)); } - } - - else { - putIReg(rt, binop(Iop_Or32, mkexpr(t1), mkexpr(t3))); - + } else { + putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t1), + mkexpr(t3)), False)); } break; - case 0x00: - /*EXT*/ msb = get_msb(cins); + case 0x00: /* EXT */ + msb = get_msb(cins); lsb = get_lsb(cins); size = msb + 1; DIP("ext size:%d msb:%d lsb:%d", size, msb, lsb); vassert(lsb + size <= 32); vassert(lsb + size > 0); - /*put size bits from rs at the top of in temporary */ + /* put size bits from rs at the top of in temporary */ if (lsb + size < 32) { t0 = newTemp(Ity_I32); - assign(t0, binop(Iop_Shl32, getIReg(rs), mkU8(32 - lsb - size))); - putIReg(rt, binop(Iop_Shr32, mkexpr(t0), mkU8(32 - size))); - } else { - putIReg(rt, binop(Iop_Shr32, getIReg(rs), mkU8(32 - size))); + assign(t0, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rs)), + mkU8(32 - lsb - size))); + putIReg(rt, mkWidenFrom32(ty, binop(Iop_Shr32, mkexpr(t0), + mkU8(32 - size)), True)); + } else { + putIReg(rt, mkWidenFrom32(ty, binop(Iop_Shr32, + mkNarrowTo32(ty, getIReg(rs)), + mkU8(32 - size)), True)); } break; - case 0x20: - /*BSHFL*/ switch (sa) { - case 0x10: - /*SEB*/ DIP("seb r%d, r%d", rd, rt); - putIReg(rd, unop(Iop_8Sto32, unop(Iop_32to8, getIReg(rt)))); - break; + case 0x03: /* Doubleword Extract Bit Field - DEXT; MIPS64r2 */ + msb = get_msb(cins); + lsb = get_lsb(cins); + size = msb + 1; + t1 = newTemp(Ity_I64); + DIP("dext r%u, r%u, %d, %d\n", rt, rs, lsb, msb + 1); + vassert(lsb >= 0 && lsb < 32); + vassert(size > 0 && size <= 32); + vassert((lsb + size) > 0 && (lsb + size) <= 63); - case 0x18: - /*SEH*/ DIP("seh r%d, r%d", rd, rt); - putIReg(rd, unop(Iop_16Sto32, unop(Iop_32to16, getIReg(rt)))); - break; + UChar lsAmt = 63 - (lsb + msb); /* left shift 
amount; */ + UChar rsAmt = 63 - msb; /* right shift amount; */ - case 0x02: - /*WSBH*/ DIP("wsbh r%d, r%d", rd, rt); - t0 = newTemp(Ity_I32); - t1 = newTemp(Ity_I32); - t2 = newTemp(Ity_I32); - t3 = newTemp(Ity_I32); - assign(t0, binop(Iop_Shl32, binop(Iop_And32, getIReg(rt), - mkU32(0x00FF0000)), mkU8(0x8))); - assign(t1, binop(Iop_Shr32, binop(Iop_And32, getIReg(rt), - mkU32(0xFF000000)), mkU8(0x8))); - assign(t2, binop(Iop_Shl32, binop(Iop_And32, getIReg(rt), - mkU32(0x000000FF)), mkU8(0x8))); - assign(t3, binop(Iop_Shr32, binop(Iop_And32, getIReg(rt), - mkU32(0x0000FF00)), mkU8(0x8))); - putIReg(rd, binop(Iop_Or32, binop(Iop_Or32, mkexpr(t0), - mkexpr(t1)), binop(Iop_Or32, mkexpr(t2), mkexpr(t3)))); - break; + assign(t1, binop(Iop_Shl64, getIReg(rs), mkU8(lsAmt))); + putIReg(rt, binop(Iop_Shr64, mkexpr(t1), mkU8(rsAmt))); - default: - goto decode_failure; + break; + + case 0x20: /* BSHFL */ + switch (sa) { + case 0x02: /* WSBH */ + DIP("wsbh r%d, r%d", rd, rt); + t0 = newTemp(Ity_I32); + t1 = newTemp(Ity_I32); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I32); + assign(t0, binop(Iop_Shl32, binop(Iop_And32, mkNarrowTo32(ty, + getIReg(rt)), mkU32(0x00FF0000)), + mkU8(0x8))); + assign(t1, binop(Iop_Shr32, binop(Iop_And32, mkNarrowTo32(ty, + getIReg(rt)), mkU32(0xFF000000)), mkU8(0x8))); + assign(t2, binop(Iop_Shl32, binop(Iop_And32, mkNarrowTo32(ty, + getIReg(rt)), mkU32(0x000000FF)), mkU8(0x8))); + assign(t3, binop(Iop_Shr32, binop(Iop_And32, mkNarrowTo32(ty, + getIReg(rt)), mkU32(0x0000FF00)), mkU8(0x8))); + putIReg(rd, mkWidenFrom32(ty, binop(Iop_Or32, binop(Iop_Or32, + mkexpr(t0), mkexpr(t1)), + binop(Iop_Or32, mkexpr(t2), + mkexpr(t3))), True)); + break; + + case 0x10: /* SEB */ + DIP("seb"); + if (mode64) + putIReg(rd, unop(Iop_8Sto64, unop(Iop_64to8, getIReg(rt)))); + else + putIReg(rd, unop(Iop_8Sto32, unop(Iop_32to8, getIReg(rt)))); + break; + + case 0x18: /* SEH */ + DIP("seh"); + if (mode64) + putIReg(rd, unop(Iop_16Sto64, unop(Iop_64to16, getIReg(rt)))); + else + putIReg(rd, unop(Iop_16Sto32, unop(Iop_32to16, getIReg(rt)))); + break; + + default: + goto decode_failure; } - break; - /*BSHFL*/ default: - goto decode_failure; + break; /* BSHFL */ + default: + goto decode_failure; } - break; /*Special3 */ + break; /* Special3 */ case 0x3B: if (0x3B == function && (archinfo->hwcaps & VEX_PRID_COMP_BROADCOM)) { - /*RDHWR*/ - DIP("rdhwr r%d, r%d", rt, rd); - if (rd == 29) { - putIReg(rt, getULR()); - } else - goto decode_failure; - break; + /*RDHWR*/ + DIP("rdhwr r%d, r%d", rt, rd); + if (rd == 29) { + putIReg(rt, getULR()); + } else + goto decode_failure; + break; } else { goto decode_failure; } - case 0x00: /*Special */ + case 0x00: /* Special */ switch (function) { case 0x1: { UInt mov_cc = get_mov_cc(cins); - if (tf == 0) { /* MOVF */ + if (tf == 0) { /* MOVF */ DIP("movf r%d, r%d, %d", rd, rs, mov_cc); - { - t1 = newTemp(Ity_I1); - t2 = newTemp(Ity_I32); - t3 = newTemp(Ity_I1); - t4 = newTemp(Ity_I32); - - assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc))); - assign(t2, IRExpr_ITE(mkexpr(t1), - binop(Iop_And32, - binop(Iop_Shr32, getFCSR(), - mkU8(23)), - mkU32(0x1)), - binop(Iop_And32, - binop(Iop_Shr32, getFCSR(), - mkU8(24 + mov_cc)), - mkU32(0x1)) - )); - - assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2))); - assign(t4, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd))); - putIReg(rd, mkexpr(t4)); - } - } else if (tf == 1) { /* MOVT */ + t1 = newTemp(Ity_I1); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I1); + + assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc))); + 
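/* Editor's sketch, not part of this patch: MOVF/MOVT test an FP condition
   code read out of FCSR - cc 0 lives at bit 23, cc 1..7 at bit 24+cc - which
   is what the two Iop_Shr32 arms of the IRExpr_ITE built just below select
   between.  A plain C model; fp_cond() is a hypothetical helper name. */
static int fp_cond(unsigned fcsr, unsigned cc)
{
   unsigned bit = (cc == 0) ? 23u : 24u + cc;  /* FCSR condition-code bit */
   return (fcsr >> bit) & 1u;
}
/* movf: rd = fp_cond ? rd : rs;   movt: rd = fp_cond ? rs : rd; */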
assign(t2, IRExpr_ITE(mkexpr(t1), + binop(Iop_And32, + binop(Iop_Shr32, getFCSR(), + mkU8(23)), + mkU32(0x1)), + binop(Iop_And32, + binop(Iop_Shr32, getFCSR(), + mkU8(24 + mov_cc)), + mkU32(0x1)) + )); + assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2))); + putIReg(rd, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd))); + } else if (tf == 1) { /* MOVT */ DIP("movt r%d, r%d, %d", rd, rs, mov_cc); - { - t1 = newTemp(Ity_I1); - t2 = newTemp(Ity_I32); - t3 = newTemp(Ity_I1); - t4 = newTemp(Ity_I32); - - assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc))); - assign(t2, IRExpr_ITE(mkexpr(t1), - binop(Iop_And32, - binop(Iop_Shr32, getFCSR(), - mkU8(23)), - mkU32(0x1)), - binop(Iop_And32, - binop(Iop_Shr32, getFCSR(), - mkU8(24 + mov_cc)), - mkU32(0x1)) - )); - - assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2))); - assign(t4, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd))); - putIReg(rd, mkexpr(t4)); - } + t1 = newTemp(Ity_I1); + t2 = newTemp(Ity_I32); + t3 = newTemp(Ity_I1); + + assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc))); + assign(t2, IRExpr_ITE(mkexpr(t1), + binop(Iop_And32, + binop(Iop_Shr32, getFCSR(), + mkU8(23)), + mkU32(0x1)), + binop(Iop_And32, + binop(Iop_Shr32, getFCSR(), + mkU8(24 + mov_cc)), + mkU32(0x1)) + )); + assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2))); + putIReg(rd, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd))); } break; } - case 0x0A: { - /* MOVZ */ + case 0x0A: { /* MOVZ */ DIP("movz r%d, r%d, r%d", rd, rs, rt); t1 = newTemp(ty); t2 = newTemp(ty); - { + if (mode64) { + assign(t1, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpEQ64, + getIReg(rt), mkU64(0x0))))); + assign(t2, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpNE64, + getIReg(rt), mkU64(0x0))))); + putIReg(rd, binop(Iop_Add64, binop(Iop_And64, getIReg(rs), + mkexpr(t1)), binop(Iop_And64, getIReg(rd),mkexpr(t2)))); + } else { assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, getIReg(rt), mkU32(0x0)))); assign(t2, unop(Iop_1Sto32, binop(Iop_CmpNE32, getIReg(rt), @@ -2918,12 +4793,19 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, break; } - case 0x0B: { - /* MOVN */ + case 0x0B: { /* MOVN */ DIP("movn r%d, r%d, r%d", rd, rs, rt); t1 = newTemp(ty); t2 = newTemp(ty); - { + if (mode64) { + assign(t1, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpEQ64, + getIReg(rt), mkU64(0x0))))); + assign(t2, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpNE64, + getIReg(rt), mkU64(0x0))))); + putIReg(rd, binop(Iop_Add64, binop(Iop_And64, getIReg(rs), + mkexpr(t2)), binop(Iop_And64, getIReg(rd), + mkexpr(t1)))); + } else { assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, getIReg(rt), mkU32(0x0)))); assign(t2, unop(Iop_1Sto32, binop(Iop_CmpNE32, getIReg(rt), @@ -2957,45 +4839,61 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True)); break; - case 0x20: /* ADD */ + case 0x20: { /* ADD */ DIP("add r%d, r%d, r%d", rd, rs, rt); + IRTemp tmpRs32 = newTemp(Ity_I32); + IRTemp tmpRt32 = newTemp(Ity_I32); + + assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs))); + assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt))); + t0 = newTemp(Ity_I32); t1 = newTemp(Ity_I32); t2 = newTemp(Ity_I32); t3 = newTemp(Ity_I32); t4 = newTemp(Ity_I32); /* dst = src0 + src1 - * if(sign(src0 ) != sign(src1 )) - * goto no overflow; - * if(sign(dst) == sign(src0 )) - * goto no overflow; - * # we have overflow! 
*/ - - assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt))); - assign(t1, binop(Iop_Xor32, getIReg(rs), getIReg(rt))); + if(sign(src0 ) != sign(src1 )) + goto no overflow; + if(sign(dst) == sign(src0 )) + goto no overflow; + we have overflow! */ + + assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), mkexpr(tmpRt32))); + assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), mkexpr(tmpRt32))); assign(t2, unop(Iop_1Uto32, binop(Iop_CmpEQ32, binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)), mkU32(0x80000000)))); - assign(t3, binop(Iop_Xor32, mkexpr(t0), getIReg(rs))); + assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32))); assign(t4, unop(Iop_1Uto32, binop(Iop_CmpNE32, binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)), mkU32(0x80000000)))); - + stmt(IRStmt_Exit(binop(Iop_CmpEQ32, binop(Iop_Or32, mkexpr(t2), mkexpr(t4)), mkU32(0)), Ijk_SigFPE_IntOvf, - IRConst_U32(guest_PC_curr_instr + 4), OFFB_PC)); + mode64 ? IRConst_U64(guest_PC_curr_instr + 4) : + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); - putIReg(rd, mkexpr(t0)); + putIReg(rd, mkWidenFrom32(ty, mkexpr(t0), True)); break; - + } case 0x1A: /* DIV */ DIP("div r%d, r%d", rs, rt); - { + if (mode64) { + t2 = newTemp(Ity_I64); + + assign(t2, binop(Iop_DivModS64to32, + getIReg(rs), mkNarrowTo32(ty, getIReg(rt)))); + + putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True)); + putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True)); + } else { t1 = newTemp(Ity_I64); t2 = newTemp(Ity_I64); @@ -3009,7 +4907,15 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, case 0x1B: /* DIVU */ DIP("divu r%d, r%d", rs, rt); - { + if (mode64) { + t2 = newTemp(Ity_I64); + + assign(t2, binop(Iop_DivModU64to32, + getIReg(rs), mkNarrowTo32(ty, getIReg(rt)))); + + putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True)); + putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True)); + } else { t1 = newTemp(Ity_I64); t2 = newTemp(Ity_I64); assign(t1, unop(Iop_32Uto64, getIReg(rs))); @@ -3019,6 +4925,49 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, } break; + case 0x1C: /* Doubleword Multiply - DMULT; MIPS64 */ + DIP("dmult r%u, r%u\n", rs, rt); + t0 = newTemp(Ity_I128); + + assign(t0, binop(Iop_MullS64, getIReg(rs), getIReg(rt))); + + putHI(unop(Iop_128HIto64, mkexpr(t0))); + putLO(unop(Iop_128to64, mkexpr(t0))); + break; + + case 0x1D: /* Doubleword Multiply Unsigned - DMULTU; MIPS64 */ + DIP("dmultu r%u, r%u\n", rs, rt); + t0 = newTemp(Ity_I128); + + assign(t0, binop(Iop_MullU64, getIReg(rs), getIReg(rt))); + + putHI(unop(Iop_128HIto64, mkexpr(t0))); + putLO(unop(Iop_128to64, mkexpr(t0))); + break; + + case 0x1E: /* Doubleword Divide DDIV; MIPS64 */ + DIP("ddiv"); + t1 = newTemp(Ity_I128); + + assign(t1, binop(Iop_DivModS64to64, getIReg(rs), getIReg(rt))); + + putHI(unop(Iop_128HIto64, mkexpr(t1))); + putLO(unop(Iop_128to64, mkexpr(t1))); + break; + + case 0x1F: /* Doubleword Divide Unsigned DDIVU; MIPS64 check this */ + DIP("ddivu"); + t1 = newTemp(Ity_I128); + t2 = newTemp(Ity_I128); + + assign(t1, binop(Iop_64HLto128, mkU64(0), getIReg(rs))); + + assign(t2, binop(Iop_DivModU128to64, mkexpr(t1), getIReg(rt))); + + putHI(unop(Iop_128HIto64, mkexpr(t2))); + putLO(unop(Iop_128to64, mkexpr(t2))); + break; + case 0x10: /* MFHI */ DIP("mfhi r%d", rd); putIReg(rd, getHI()); @@ -3041,11 +4990,20 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, case 0x21: /* ADDU */ DIP("addu r%d, r%d, r%d", rd, rs, rt); - ALU_PATTERN(Iop_Add32); + if (mode64) { + 
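/* Editor's sketch, not part of this patch: the trapping ADD handled just
   above (and SUB here, plus the DADD/DSUB cases added later in this patch)
   encodes the classic sign-rule overflow test in the t0..t4 temporaries:
   trap iff the operands share a sign and the result's sign differs.  A
   C equivalent; add_overflows32() is a hypothetical name. */
#include <stdint.h>

static int add_overflows32(uint32_t a, uint32_t b)
{
   uint32_t sum = a + b;                      /* wraparound sum            */
   return (~(a ^ b) & (a ^ sum)) >> 31;       /* 1 iff signed overflow     */
}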
ALU_PATTERN64(Iop_Add32); + } else { + ALU_PATTERN(Iop_Add32); + } break; - case 0x22: /* SUB */ + case 0x22: { /* SUB */ DIP("sub r%d, r%d, r%d", rd, rs, rt); + IRTemp tmpRs32 = newTemp(Ity_I32); + IRTemp tmpRt32 = newTemp(Ity_I32); + + assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs))); + assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt))); t0 = newTemp(Ity_I32); t1 = newTemp(Ity_I32); t2 = newTemp(Ity_I32); @@ -3053,58 +5011,75 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, t4 = newTemp(Ity_I32); t5 = newTemp(Ity_I32); /* dst = src0 + (-1 * src1) - * if(sign(src0 ) != sign((-1 * src1) )) - * goto no overflow; - * if(sign(dst) == sign(src0 )) - * goto no overflow; - * # we have overflow! */ - - assign(t5, binop(Iop_Mul32, getIReg(rt), mkU32(-1))); - assign(t0, binop(Iop_Add32, getIReg(rs), mkexpr(t5))); - assign(t1, binop(Iop_Xor32, getIReg(rs), mkexpr(t5))); - assign(t2, unop(Iop_1Sto32, - binop(Iop_CmpEQ32, - binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)), - mkU32(0x80000000)))); - - assign(t3, binop(Iop_Xor32, mkexpr(t0), getIReg(rs))); - assign(t4, unop(Iop_1Sto32, - binop(Iop_CmpNE32, - binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)), - mkU32(0x80000000)))); - - stmt(IRStmt_Exit(binop(Iop_CmpEQ32, - binop(Iop_Or32, mkexpr(t2), mkexpr(t4)), - mkU32(0)), - Ijk_SigFPE_IntOvf, - IRConst_U32(guest_PC_curr_instr + 4), OFFB_PC)); - - putIReg(rd, mkexpr(t0)); + if(sign(src0 ) != sign((-1 * src1) )) + goto no overflow; + if(sign(dst) == sign(src0 )) + goto no overflow; + we have overflow! */ + + assign(t5, binop(Iop_Mul32, mkexpr(tmpRt32), mkU32(-1))); + assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), mkexpr(t5))); + assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), mkexpr(t5))); + assign(t2, unop(Iop_1Sto32, binop(Iop_CmpEQ32, binop(Iop_And32, + mkexpr(t1), mkU32(0x80000000)), mkU32(0x80000000)))); + + assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32))); + assign(t4, unop(Iop_1Sto32, binop(Iop_CmpNE32, binop(Iop_And32, + mkexpr(t3), mkU32(0x80000000)), mkU32(0x80000000)))); + + stmt(IRStmt_Exit(binop(Iop_CmpEQ32, binop(Iop_Or32, mkexpr(t2), + mkexpr(t4)), mkU32(0)), Ijk_SigFPE_IntOvf, + mode64 ? 
IRConst_U64(guest_PC_curr_instr + 4) : + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + + putIReg(rd, mkWidenFrom32(ty, mkexpr(t0), True)); break; - + } case 0x23: /* SUBU */ DIP("subu r%d, r%d, r%d", rd, rs, rt); - ALU_PATTERN(Iop_Sub32); + if (mode64) { + ALU_PATTERN64(Iop_Sub32); + } else { + ALU_PATTERN(Iop_Sub32); + } break; case 0x24: /* AND */ DIP("and r%d, r%d, r%d", rd, rs, rt); - ALU_PATTERN(Iop_And32); + if (mode64) { + ALU_PATTERN(Iop_And64); + } else { + ALU_PATTERN(Iop_And32); + } break; case 0x25: /* OR */ DIP("or r%d, r%d, r%d", rd, rs, rt); - ALU_PATTERN(Iop_Or32); + if (mode64) { + ALU_PATTERN(Iop_Or64); + } else { + ALU_PATTERN(Iop_Or32); + } break; case 0x26: /* XOR */ DIP("xor r%d, r%d, r%d", rd, rs, rt); - ALU_PATTERN(Iop_Xor32); + if (mode64) { + ALU_PATTERN(Iop_Xor64); + } else { + ALU_PATTERN(Iop_Xor32); + } break; case 0x27: /* NOR */ DIP("nor r%d, r%d, r%d", rd, rs, rt); - putIReg(rd, unop(Iop_Not32, binop(Iop_Or32, getIReg(rs),getIReg(rt)))); + if (mode64) + putIReg(rd, unop(Iop_Not64, binop(Iop_Or64, getIReg(rs), + getIReg(rt)))); + else + putIReg(rd, unop(Iop_Not32, binop(Iop_Or32, getIReg(rs), + getIReg(rt)))); break; case 0x08: /* JR */ @@ -3116,50 +5091,133 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, case 0x09: /* JALR */ DIP("jalr r%d r%d", rd, rs); - putIReg(rd, mkU32(guest_PC_curr_instr + 8)); - t0 = newTemp(Ity_I32); - assign(t0, getIReg(rs)); - lastn = mkexpr(t0); + if (mode64) { + putIReg(rd, mkU64(guest_PC_curr_instr + 8)); + t0 = newTemp(Ity_I64); + assign(t0, getIReg(rs)); + lastn = mkexpr(t0); + } else { + putIReg(rd, mkU32(guest_PC_curr_instr + 8)); + t0 = newTemp(Ity_I32); + assign(t0, getIReg(rs)); + lastn = mkexpr(t0); + } break; case 0x0C: /* SYSCALL */ DIP("syscall"); - putPC(mkU32(guest_PC_curr_instr + 4)); + if (mode64) + putPC(mkU64(guest_PC_curr_instr + 4)); + else + putPC(mkU32(guest_PC_curr_instr + 4)); dres.jk_StopHere = Ijk_Sys_syscall; dres.whatNext = Dis_StopHere; break; case 0x2A: /* SLT */ DIP("slt r%d, r%d, r%d", rd, rs, rt); - putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs), - getIReg(rt)))); + if (mode64) + putIReg(rd, unop(Iop_1Uto64, binop(Iop_CmpLT64S, getIReg(rs), + getIReg(rt)))); + else + putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs), + getIReg(rt)))); break; case 0x2B: /* SLTU */ DIP("sltu r%d, r%d, r%d", rd, rs, rt); - putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs), - getIReg(rt)))); + if (mode64) + putIReg(rd, unop(Iop_1Uto64, binop(Iop_CmpLT64U, getIReg(rs), + getIReg(rt)))); + else + putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs), + getIReg(rt)))); break; - case 0x00: - /* SLL */ + case 0x00: { /* SLL */ DIP("sll r%d, r%d, %d", rd, rt, sa); - SXX_PATTERN(Iop_Shl32); + IRTemp tmpRt32 = newTemp(Ity_I32); + IRTemp tmpSh32 = newTemp(Ity_I32); + IRTemp tmpRd = newTemp(Ity_I64); + if (mode64) { + assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt))); + assign(tmpSh32, binop(Iop_Shl32, mkexpr(tmpRt32), mkU8(sa))); + assign(tmpRd, mkWidenFrom32(ty, mkexpr(tmpSh32), True)); + putIReg(rd, mkexpr(tmpRd)); + } else + SXX_PATTERN(Iop_Shl32); break; + } - case 0x04: /* SLLV */ + case 0x04: { /* SLLV */ DIP("sllv r%d, r%d, r%d", rd, rt, rs); - SXXV_PATTERN(Iop_Shl32); + if (mode64) { + IRTemp tmpRs8 = newTemp(Ity_I8); + IRTemp tmpRt32 = newTemp(Ity_I32); + IRTemp tmpSh32 = newTemp(Ity_I32); + IRTemp tmp = newTemp(ty); + assign(tmp, binop(mkSzOp(ty, Iop_And8), getIReg(rs), + mkSzImm(ty, 31))); + assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp))); + 
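/* Editor's sketch, not part of this patch: on MIPS64 the 32-bit shift forms
   (SLL, SLLV, SRA, SRL, ...) operate on the low 32 bits of the register and
   then sign-extend the 32-bit result to 64 bits, which is why the mode64
   paths here narrow to I32, shift, and widen with the signed flag set.  A C
   model of SLLV in that mode; sllv64() is a hypothetical name. */
#include <stdint.h>

static uint64_t sllv64(uint64_t rt, uint64_t rs)
{
   unsigned sh  = (unsigned)rs & 31;              /* only rs[4:0] is used    */
   int32_t  res = (int32_t)((uint32_t)rt << sh);  /* 32-bit shift            */
   return (uint64_t)(int64_t)res;                 /* sign-extend to 64 bits  */
}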
assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt))); + assign(tmpSh32, binop(Iop_Shl32, mkexpr(tmpRt32), mkexpr(tmpRs8))); + putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True)); + } else { + SXXV_PATTERN(Iop_Shl32); + } break; + } case 0x03: /* SRA */ DIP("sra r%d, r%d, %d", rd, rt, sa); - SXX_PATTERN(Iop_Sar32); + if (mode64) { + IRTemp tmpRt32 = newTemp(Ity_I32); + IRTemp tmpSh32 = newTemp(Ity_I32); + + t1 = newTemp(Ity_I64); + t2 = newTemp(Ity_I64); + t3 = newTemp(Ity_I64); + + assign(t1, binop(Iop_And64, getIReg(rt), /* hi */ + mkU64(0xFFFFFFFF00000000))); + + assign(t2, binop(Iop_Sar64, mkexpr(t1), mkU8(sa))); + + assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt))); + assign(tmpSh32, binop(Iop_Sar32, mkexpr(tmpRt32), mkU8(sa))); + + putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True)); + } else { + SXX_PATTERN(Iop_Sar32); + } break; case 0x07: /* SRAV */ DIP("srav r%d, r%d, r%d", rd, rt, rs); - SXXV_PATTERN(Iop_Sar32); + if (mode64) { + IRTemp tmpRt32 = newTemp(Ity_I32); + IRTemp tmpSh32 = newTemp(Ity_I32); + + t1 = newTemp(Ity_I64); + t2 = newTemp(Ity_I64); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I8); + + assign(t4, unop(Iop_32to8, binop(Iop_And32, + mkNarrowTo32(ty, getIReg(rs)), mkU32(0x0000001F)))); + + assign(t1, binop(Iop_And64, getIReg(rt), /* hi */ + mkU64(0xFFFFFFFF00000000))); + + assign(t2, binop(Iop_Sar64, mkexpr(t1), mkexpr(t4))); + + assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt))); + assign(tmpSh32, binop(Iop_Sar32, mkexpr(tmpRt32), mkexpr(t4))); + + putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True)); + } else { + SXXV_PATTERN(Iop_Sar32); + } break; case 0x02: { /* SRL */ @@ -3167,10 +5225,19 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, if (rot) { DIP("rotr r%d, r%d, %d", rd, rt, sa); putIReg(rd, mkWidenFrom32(ty, genROR32(mkNarrowTo32(ty, - getIReg(rt)), sa), False)); + getIReg(rt)), sa), True)); } else { DIP("srl r%d, r%d, %d", rd, rt, sa); - SXX_PATTERN(Iop_Shr32); + if (mode64) { + IRTemp tmpSh32 = newTemp(Ity_I32); + IRTemp tmpRt32 = newTemp(Ity_I32); + + assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt))); + assign(tmpSh32, binop(Iop_Shr32, mkexpr(tmpRt32), mkU8(sa))); + putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True)); + } else { + SXX_PATTERN(Iop_Shr32); + } } break; } @@ -3180,119 +5247,301 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, if (rot) { DIP("rotrv r%d, r%d, r%d", rd, rt, rs); putIReg(rd, mkWidenFrom32(ty, genRORV32(mkNarrowTo32(ty, - getIReg(rt)), mkNarrowTo32(ty, getIReg(rs))),False)); + getIReg(rt)), mkNarrowTo32(ty, getIReg(rs))), True)); break; - } else { - /* SRLV */ + } else { /* SRLV */ DIP("srlv r%d, r%d, r%d", rd, rt, rs); - SXXV_PATTERN(Iop_Shr32); + if (mode64) { + SXXV_PATTERN64(Iop_Shr32); + } else { + SXXV_PATTERN(Iop_Shr32); + } break; } } case 0x0D: /* BREAK */ DIP("Info: Breakpoint...code = %d", trap_code); - jmp_lit(&dres, Ijk_SigTRAP, (guest_PC_curr_instr + 4)); + if (mode64) + jmp_lit64(&dres, Ijk_SigTRAP, (guest_PC_curr_instr + 4)); + else + jmp_lit32(&dres, Ijk_SigTRAP, (guest_PC_curr_instr + 4)); vassert(dres.whatNext == Dis_StopHere); break; - case 0x30: { /* TGE */ - /*tge */ DIP("tge r%d, r%d %d", rs, rt, trap_code); - if (trap_code == 7) - stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rt), getIReg (rs)), - Ijk_SigFPE_IntDiv, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else if (trap_code == 6) - stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rt), getIReg (rs)), - Ijk_SigFPE_IntOvf, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else - stmt 
(IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rt), getIReg (rs)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - break; - } - case 0x31: { /* TGEU */ - /*tgeu */ DIP("tgeu r%d, r%d %d", rs, rt, trap_code); - if (trap_code == 7) - stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rt), getIReg (rs)), - Ijk_SigFPE_IntDiv, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else if (trap_code == 6) - stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rt), getIReg (rs)), - Ijk_SigFPE_IntOvf, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else - stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rt), getIReg (rs)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - break; - } - case 0x32: { /* TLT */ - /*tlt */ DIP("tlt r%d, r%d %d", rs, rt, trap_code); - if (trap_code == 7) - stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), getIReg (rt)), - Ijk_SigFPE_IntDiv, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else if (trap_code == 6) - stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), getIReg (rt)), - Ijk_SigFPE_IntOvf, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else - stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), getIReg (rt)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - break; - } - case 0x33: { /* TLTU */ - /*tltu */ DIP("tltu r%d, r%d %d", rs, rt, trap_code); - if (trap_code == 7) - stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), getIReg (rt)), - Ijk_SigFPE_IntDiv, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else if (trap_code == 6) - stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), getIReg (rt)), - Ijk_SigFPE_IntOvf, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else - stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), getIReg (rt)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - break; - } - case 0x34: { /* TEQ */ - /*teq */ DIP("teq r%d, r%d %d", rs, rt, trap_code); - if (trap_code == 7) - stmt (IRStmt_Exit (binop (Iop_CmpEQ32, getIReg (rs), getIReg (rt)), - Ijk_SigFPE_IntDiv, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else if (trap_code == 6) - stmt (IRStmt_Exit(binop (Iop_CmpEQ32, getIReg (rs), getIReg (rt)), - Ijk_SigFPE_IntOvf, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else - stmt (IRStmt_Exit(binop (Iop_CmpEQ32, getIReg (rs), getIReg (rt)), - Ijk_SigTRAP, IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - break; - } - case 0x36: { /* TNE */ - /*tne */ DIP("tne r%d, r%d %d", rs, rt, trap_code); - if (trap_code == 7) - stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), getIReg (rt)), - Ijk_SigFPE_IntDiv, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else if (trap_code == 6) - stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), getIReg (rt)), - Ijk_SigFPE_IntOvf, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); - else - stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), getIReg (rt)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); + case 0x30: { /* TGE */ + DIP("tge r%d, r%d %d", rs, rt, trap_code); + if (mode64) { + if (trap_code == 7) + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT64S, + getIReg (rs), + getIReg (rt))), + Ijk_SigFPE_IntDiv, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT64S, + getIReg (rs), + getIReg (rt))), + Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT64S, + getIReg (rs), + 
getIReg (rt))), + Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + if (trap_code == 7) + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT32S, + getIReg (rs), + getIReg (rt))), + Ijk_SigFPE_IntDiv, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT32S, + getIReg (rs), + getIReg (rt))), + Ijk_SigFPE_IntOvf, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT32S, + getIReg (rs), + getIReg (rt))), + Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } break; } - case 0x0F: { - /*SYNC*/ DIP("sync r%d, r%d, %d", rt, rd, sel); + case 0x31: { /* TGEU */ + DIP("tgeu r%d, r%d %d", rs, rt, trap_code); + if (mode64) { + if (trap_code == 7) + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT64U, + getIReg (rs), + getIReg (rt))), + Ijk_SigFPE_IntDiv, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT64U, + getIReg (rs), + getIReg (rt))), + Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT64U, + getIReg (rs), + getIReg (rt))), + Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + if (trap_code == 7) + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT32U, + getIReg (rs), + getIReg (rt))), + Ijk_SigFPE_IntDiv, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT32U, + getIReg (rs), + getIReg (rt))), + Ijk_SigFPE_IntOvf, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT32U, + getIReg (rs), + getIReg (rt))), + Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } + break; + } + case 0x32: { /* TLT */ + DIP("tlt r%d, r%d %d", rs, rt, trap_code); + if (mode64) { + if (trap_code == 7) + stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntDiv, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs), + getIReg(rt)), Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + if (trap_code == 7) + stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntDiv, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntOvf, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs), + getIReg(rt)), Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } + break; + } + case 0x33: { /* TLTU */ + DIP("tltu r%d, r%d %d", rs, rt, trap_code); + if (mode64) { + if (trap_code == 7) + stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntDiv, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs), + getIReg(rt)), Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + 
OFFB_PC)); + } else { + if (trap_code == 7) + stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntDiv, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntOvf, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs), + getIReg (rt)), Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } + break; + } + case 0x34: { /* TEQ */ + DIP("teq r%d, r%d, %d", rs, rt, trap_code); + if (mode64) { + if (trap_code == 7) + stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntDiv, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs), + getIReg(rt)), Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + if (trap_code == 7) + stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntDiv, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntOvf, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs), + getIReg(rt)), Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } + break; + } + case 0x36: { /* TNE */ + DIP("tne r%d, r%d %d", rs, rt, trap_code); + if (mode64) { + if (trap_code == 7) + stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntDiv, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs), + getIReg(rt)), Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + if (trap_code == 7) + stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntDiv, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else if (trap_code == 6) + stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs), + getIReg(rt)), Ijk_SigFPE_IntOvf, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + else + stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs), + getIReg(rt)), Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } + break; + } + case 0x14: + case 0x16: + case 0x17: /* DSLLV, DROTRV:DSRLV, DSRAV */ + case 0x38: + case 0x3A: + case 0x3B: /* DSLL, DROTL:DSRL, DSRA */ + case 0x3C: + case 0x3E: + case 0x3F: /* DSLL32, DROTR32:DSRL32, DSRA32 */ + if (dis_instr_shrt(cins)) + break; + goto decode_failure; + + case 0x0F: { /* SYNC */ + DIP("sync r%d, r%d, %d", rt, rd, sel); lsb = get_lsb(cins); IRDirty *d = unsafeIRDirty_0_N(0, "mips32_dirtyhelper_sync", @@ -3307,116 +5556,314 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, break; } - default: - goto decode_failure; + case 0x2C: { /* Doubleword Add - DADD; MIPS64 */ + DIP("dadd r%d, r%d, r%d", rd, rs, rt); + + IRTemp tmpRs64 = newTemp(Ity_I64); + IRTemp tmpRt64 = newTemp(Ity_I64); + + assign(tmpRs64, getIReg(rs)); + assign(tmpRt64, getIReg(rt)); + + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I64); + t2 = newTemp(Ity_I64); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I64); + /* dst = 
src0 + src1 + if(sign(src0 ) != sign(src1 )) + goto no overflow; + if(sign(dst) == sign(src0 )) + goto no overflow; + we have overflow! */ + + assign(t0, binop(Iop_Add64, mkexpr(tmpRs64), mkexpr(tmpRt64))); + assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64), mkexpr(tmpRt64))); + assign(t2, unop(Iop_1Uto64, + binop(Iop_CmpEQ64, + binop(Iop_And64, mkexpr(t1), + mkU64(0x8000000000000000)), + mkU64(0x8000000000000000)))); + + assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64))); + assign(t4, unop(Iop_1Uto64, + binop(Iop_CmpNE64, + binop(Iop_And64, mkexpr(t3), + mkU64(0x8000000000000000)), + mkU64(0x8000000000000000)))); + + stmt(IRStmt_Exit(binop(Iop_CmpEQ64, + binop(Iop_Or64, mkexpr(t2), mkexpr(t4)), + mkU64(0)), + Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + + putIReg(rd, mkexpr(t0)); + break; } - break; - case 0x01: /* Regimm */ + case 0x2D: /* Doubleword Add Unsigned - DADDU; MIPS64 */ + DIP("daddu r%d, r%d, r%d", rd, rs, rt); + ALU_PATTERN(Iop_Add64); + break; + + case 0x2E: { /* Doubleword Subtract - DSUB; MIPS64 */ + DIP("dsub r%u, r%u,r%u\n", rd, rs, rt); - switch (rt) { - case 0x01: /* BGEZ */ - DIP("bgez r%d, %d", rs, imm); - dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs), - mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt); + IRTemp tmpRs64 = newTemp(Ity_I64); + IRTemp tmpRt64 = newTemp(Ity_I64); + + assign(tmpRs64, getIReg(rs)); + assign(tmpRt64, getIReg(rt)); + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I64); + t2 = newTemp(Ity_I64); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I64); + t5 = newTemp(Ity_I64); + /* dst = src0 + (-1 * src1) + if(sign(src0 ) != sign((-1 * src1) )) + goto no overflow; + if(sign(dst) == sign(src0 )) + goto no overflow; + we have overflow! */ + + assign(t5, binop(Iop_Mul64, mkexpr(tmpRt64), mkU64(0xffffffffffffffff))); + assign(t0, binop(Iop_Add64, mkexpr(tmpRs64), mkexpr(t5))); + assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64), mkexpr(t5))); + assign(t2, unop(Iop_1Sto64, binop(Iop_CmpEQ64, binop(Iop_And64, + mkexpr(t1), mkU64(0x8000000000000000)), + mkU64(0x8000000000000000)))); + + assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64))); + assign(t4, unop(Iop_1Sto64, binop(Iop_CmpNE64, binop(Iop_And64, + mkexpr(t3), mkU64(0x8000000000000000)), + mkU64(0x8000000000000000)))); + + stmt(IRStmt_Exit(binop(Iop_CmpEQ64, binop(Iop_Or64, mkexpr(t2), + mkexpr(t4)), mkU64(0)), Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + + putIReg(rd, binop(Iop_Sub64, getIReg(rs), getIReg(rt))); break; + } - case 0x03: /* BGEZL */ - DIP("bgezl r%d, %d", rs, imm); - lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32, - getIReg(rs), mode64 ? 
- mkU64(0x8000000000000000ULL) - :mkU32(0x80000000)), - mkU32(0x0)), imm); + case 0x2F: /* Doubleword Subtract Unsigned - DSUBU; MIPS64 */ + DIP("dsub r%u, r%u,r%u\n", rd, rt, rt); + ALU_PATTERN(Iop_Sub64); break; + default: + goto decode_failure; + } + break; + + case 0x01: /* Regimm */ + + switch (rt) { case 0x00: /* BLTZ */ DIP("bltz r%d, %d", rs, imm); - dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs), - mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt); + if (mode64) { + if (!dis_instr_branch(cins, &dres, resteerOkFn, + callback_opaque, &bstmt)) + goto decode_failure; + } else + dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs), + mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt); + break; + + case 0x01: /* BGEZ */ + DIP("bgez r%d, %d", rs, imm); + if (mode64) { + if (!dis_instr_branch(cins, &dres, resteerOkFn, + callback_opaque, &bstmt)) + goto decode_failure; + } else + dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs), + mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt); break; case 0x02: /* BLTZL */ DIP("bltzl r%d, %d", rs, imm); - lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32, - getIReg(rs), mkU32(0x80000000)), - mkU32(0x80000000)), imm); + lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32, + binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs), + mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)), + mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)), + imm); + break; + + case 0x03: /* BGEZL */ + DIP("bgezl r%d, %d", rs, imm); + lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32, + binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs), + mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)), + mode64 ? mkU64(0x0) : mkU32(0x0)), imm); break; case 0x10: /* BLTZAL */ DIP("bltzal r%d, %d", rs, imm); - dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs), - mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt); + if (mode64) { + if (!dis_instr_branch(cins, &dres, resteerOkFn, + callback_opaque, &bstmt)) + goto decode_failure; + } else + dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs), + mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt); break; case 0x12: /* BLTZALL */ DIP("bltzall r%d, %d", rs, imm); - putIReg(31, mkU32(guest_PC_curr_instr + 8)); - lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32, - getIReg(rs), mkU32(0x80000000)), - mkU32(0x80000000)), imm); + putIReg(31, mode64 ? mkU64(guest_PC_curr_instr + 8) : + mkU32(guest_PC_curr_instr + 8)); + lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32, + binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs), + mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)), + mode64 ? 
mkU64(0x8000000000000000) : mkU32(0x80000000)), + imm); break; case 0x11: /* BGEZAL */ DIP("bgezal r%d, %d", rs, imm); - dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs), - mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt); + if (mode64) { + if (!dis_instr_branch(cins, &dres, resteerOkFn, + callback_opaque, &bstmt)) + goto decode_failure; + } else + dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs), + mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt); break; case 0x13: /* BGEZALL */ DIP("bgezall r%d, %d", rs, imm); - putIReg(31, mkU32(guest_PC_curr_instr + 8)); - lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32, - getIReg(rs), mkU32(0x80000000)), - mkU32(0x0)), imm); + if (mode64) { + putIReg(31, mkU64(guest_PC_curr_instr + 8)); + lastn = dis_branch_likely(binop(Iop_CmpNE64, binop(Iop_And64, + getIReg(rs), mkU64(0x8000000000000000)), + mkU64(0x0)), imm); + } else { + putIReg(31, mkU32(guest_PC_curr_instr + 8)); + lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32, + getIReg(rs), mkU32(0x80000000)), + mkU32(0x0)), imm); + } break; - case 0x08: { /* TGEI */ - /*tgei */ DIP("tgei r%d, %d %d", rs, imm, trap_code); - stmt (IRStmt_Exit (binop (Iop_CmpLT32S, mkU32 (imm), getIReg (rs)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); + case 0x08: /* TGEI */ + DIP("tgei r%d, %d %d", rs, imm, trap_code); + if (mode64) { + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT64S, + getIReg (rs), + mkU64 (extend_s_16to64 (imm)))), + Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT32S, + getIReg (rs), + mkU32 (extend_s_16to32 (imm)))), + Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } break; - } - case 0x09: { /* TGEIU */ - /*tqeiu */ DIP("tgeiu r%d, %d %d", rs, imm, trap_code); - stmt (IRStmt_Exit (binop (Iop_CmpLT32U, mkU32 (imm), getIReg (rs)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); + + case 0x09: { /* TGEIU */ + DIP("tgeiu r%d, %d %d", rs, imm, trap_code); + if (mode64) { + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT64U, + getIReg (rs), + mkU64 (extend_s_16to64 (imm)))), + Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + stmt (IRStmt_Exit (unop (Iop_Not1, + binop (Iop_CmpLT32U, + getIReg (rs), + mkU32 (extend_s_16to32 (imm)))), + Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } break; } case 0x0A: { /* TLTI */ - /*tlti */ DIP("tlti r%d, %d %d", rs, imm, trap_code); - stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), mkU32 (imm)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); + DIP("tlti r%d, %d %d", rs, imm, trap_code); + if (mode64) { + stmt (IRStmt_Exit (binop (Iop_CmpLT64S, getIReg (rs), + mkU64 (extend_s_16to64 (imm))), + Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs), + mkU32 (extend_s_16to32 (imm))), + Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } break; } - case 0x0B: { /* TLTIU */ - /*tltiu */ DIP("tltiu r%d, %d %d", rs, imm, trap_code); - stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), mkU32 (imm)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); + case 0x0B: { /* TLTIU */ + DIP("tltiu r%d, %d %d", rs, imm, trap_code); + if (mode64) { + stmt (IRStmt_Exit (binop (Iop_CmpLT64U, getIReg (rs), + mkU64 (extend_s_16to64 (imm))), + Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + 
OFFB_PC)); + } else { + stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs), + mkU32 (extend_s_16to32 (imm))), + Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } break; } - case 0x0C: { /* TEQI */ - /*teqi */ DIP("teqi r%d, %d %d", rs, imm, trap_code); - stmt (IRStmt_Exit (binop (Iop_CmpEQ32, getIReg (rs), mkU32 (imm)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); + case 0x0C: { /* TEQI */ + DIP("teqi r%d, %d %d", rs, imm, trap_code); + if (mode64) { + stmt (IRStmt_Exit (binop (Iop_CmpEQ64, getIReg (rs), + mkU64 (extend_s_16to64 (imm))), + Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + stmt (IRStmt_Exit (binop (Iop_CmpEQ32, getIReg (rs), + mkU32 (extend_s_16to32 (imm))), + Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } break; } - case 0x0E: { /* TNEI */ - /*tnei */ DIP("tnei r%d, %d %d", rs, imm, trap_code); - stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), mkU32 (imm)), - Ijk_SigTRAP, - IRConst_U32 (guest_PC_curr_instr + 4), OFFB_PC)); + case 0x0E: { /* TNEI */ + DIP("tnei r%d, %d %d", rs, imm, trap_code); + if (mode64) { + stmt (IRStmt_Exit (binop (Iop_CmpNE64, getIReg (rs), + mkU64 (extend_s_16to64 (imm))), + Ijk_SigTRAP, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + } else { + stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs), + mkU32 (extend_s_16to32 (imm))), + Ijk_SigTRAP, + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + } break; } case 0x1F: - /*SYNCI*/ - //Just ignore it - break; + /* SYNCI */ + /* Just ignore it */ + break; default: goto decode_failure; @@ -3425,121 +5872,291 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, case 0x04: DIP("beq r%d, r%d, %d", rs, rt, imm); - dis_branch(False, binop(Iop_CmpEQ32, getIReg(rs), getIReg(rt)), - imm, &bstmt); + if (mode64) + dis_branch(False, binop(Iop_CmpEQ64, getIReg(rs), getIReg(rt)), + imm, &bstmt); + else + dis_branch(False, binop(Iop_CmpEQ32, getIReg(rs), getIReg(rt)), + imm, &bstmt); break; case 0x14: DIP("beql r%d, r%d, %d", rs, rt, imm); - lastn = dis_branch_likely(binop(Iop_CmpNE32, getIReg(rs), getIReg(rt)), - imm); + lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32, + getIReg(rs), getIReg(rt)), imm); break; case 0x05: DIP("bne r%d, r%d, %d", rs, rt, imm); - dis_branch(False, binop(Iop_CmpNE32, getIReg(rs), getIReg(rt)), - imm, &bstmt); + if (mode64) + dis_branch(False, binop(Iop_CmpNE64, getIReg(rs), getIReg(rt)), + imm, &bstmt); + else + dis_branch(False, binop(Iop_CmpNE32, getIReg(rs), getIReg(rt)), + imm, &bstmt); break; case 0x15: DIP("bnel r%d, r%d, %d", rs, rt, imm); - lastn = - dis_branch_likely(binop(Iop_CmpEQ32, getIReg(rs), getIReg(rt)), imm); + lastn = dis_branch_likely(binop(mode64 ? 
Iop_CmpEQ64 : Iop_CmpEQ32, + getIReg(rs), getIReg(rt)), imm); break; - case 0x07: /* BGTZ */ + case 0x07: /* BGTZ */ DIP("bgtz r%d, %d", rs, imm); - dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE32S, getIReg(rs), - mkU32(0x00))), imm, &bstmt); + if (mode64) + dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE64S, getIReg(rs), + mkU64(0x00))), imm, &bstmt); + else + dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE32S, getIReg(rs), + mkU32(0x00))), imm, &bstmt); break; - case 0x17: /* BGTZL */ + case 0x17: /* BGTZL */ DIP("bgtzl r%d, %d", rs, imm); - lastn = dis_branch_likely(binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x00)), - imm); + if (mode64) + lastn = dis_branch_likely(binop(Iop_CmpLE64S, getIReg(rs), + mkU64(0x00)), imm); + else + lastn = dis_branch_likely(binop(Iop_CmpLE32S, getIReg(rs), + mkU32(0x00)), imm); break; - case 0x06: /* BLEZ */ + case 0x06: /* BLEZ */ DIP("blez r%d, %d", rs, imm); - dis_branch(False,binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x0)), imm, - &bstmt); + if (mode64) + dis_branch(False, binop(Iop_CmpLE64S, getIReg(rs), mkU64(0x0)), + imm, &bstmt); + else + dis_branch(False,binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x0)), imm, + &bstmt); break; - case 0x16: /* BLEZL */ + case 0x16: /* BLEZL */ DIP("blezl r%d, %d", rs, imm); - lastn = dis_branch_likely(unop(Iop_Not1, (binop(Iop_CmpLE32S, - getIReg(rs), mkU32(0x0)))), imm); + lastn = dis_branch_likely(unop(Iop_Not1, (binop(mode64 ? Iop_CmpLE64S : + Iop_CmpLE32S, getIReg(rs), mode64 ? + mkU64(0x0) : mkU32(0x0)))), imm); break; - case 0x08: /* ADDI */ + case 0x08: { /* ADDI */ DIP("addi r%d, r%d, %d", rt, rs, imm); + IRTemp tmpRs32 = newTemp(Ity_I32); + assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs))); + t0 = newTemp(Ity_I32); t1 = newTemp(Ity_I32); t2 = newTemp(Ity_I32); t3 = newTemp(Ity_I32); t4 = newTemp(Ity_I32); /* dst = src0 + sign(imm) - * if(sign(src0 ) != sign(imm )) - * goto no overflow; - * if(sign(dst) == sign(src0 )) - * goto no overflow; - * # we have overflow! */ - - assign(t0, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm)))); - assign(t1, binop(Iop_Xor32, getIReg(rs), mkU32(extend_s_16to32(imm)))); - assign(t2, unop(Iop_1Sto32, - binop(Iop_CmpEQ32, - binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)), - mkU32(0x80000000)))); - - assign(t3, binop(Iop_Xor32, mkexpr(t0), getIReg(rs))); - assign(t4, unop(Iop_1Sto32, - binop(Iop_CmpNE32, - binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)), - mkU32(0x80000000)))); - - stmt(IRStmt_Exit(binop(Iop_CmpEQ32, - binop(Iop_Or32, mkexpr(t2), mkexpr(t4)), - mkU32(0)), - Ijk_SigFPE_IntOvf, - IRConst_U32(guest_PC_curr_instr + 4), OFFB_PC)); - - putIReg(rt, mkexpr(t0)); + if(sign(src0 ) != sign(imm )) + goto no overflow; + if(sign(dst) == sign(src0 )) + goto no overflow; + we have overflow! */ + + assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), + mkU32(extend_s_16to32(imm)))); + assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), + mkU32(extend_s_16to32(imm)))); + assign(t2, unop(Iop_1Sto32, binop(Iop_CmpEQ32, binop(Iop_And32, + mkexpr(t1), mkU32(0x80000000)), mkU32(0x80000000)))); + + assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32))); + assign(t4, unop(Iop_1Sto32, binop(Iop_CmpNE32, binop(Iop_And32, + mkexpr(t3), mkU32(0x80000000)), mkU32(0x80000000)))); + + stmt(IRStmt_Exit(binop(Iop_CmpEQ32, binop(Iop_Or32, mkexpr(t2), + mkexpr(t4)), mkU32(0)), Ijk_SigFPE_IntOvf, + mode64 ? 
IRConst_U64(guest_PC_curr_instr + 4) : + IRConst_U32(guest_PC_curr_instr + 4), + OFFB_PC)); + + putIReg(rt, mkWidenFrom32(ty, mkexpr(t0), True)); break; - - case 0x09: /* ADDIU */ + } + case 0x09: /* ADDIU */ DIP("addiu r%d, r%d, %d", rt, rs, imm); - putIReg(rt, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm)))); + if (mode64) { + putIReg(rt, mkWidenFrom32(ty, binop(Iop_Add32, + mkNarrowTo32(ty, getIReg(rs)),mkU32(extend_s_16to32(imm))), + True)); + } else + putIReg(rt, binop(Iop_Add32, getIReg(rs),mkU32(extend_s_16to32(imm)))); break; - case 0x0C: /* ANDI */ + case 0x0C: /* ANDI */ DIP("andi r%d, r%d, %d", rt, rs, imm); - ALUI_PATTERN(Iop_And32); + if (mode64) { + ALUI_PATTERN64(Iop_And64); + } else { + ALUI_PATTERN(Iop_And32); + } break; - case 0x0E: /* XORI */ + case 0x0E: /* XORI */ DIP("xori r%d, r%d, %d", rt, rs, imm); - ALUI_PATTERN(Iop_Xor32); + if (mode64) { + ALUI_PATTERN64(Iop_Xor64); + } else { + ALUI_PATTERN(Iop_Xor32); + } break; - case 0x0D: /* ORI */ + case 0x0D: /* ORI */ DIP("ori r%d, r%d, %d", rt, rs, imm); - ALUI_PATTERN(Iop_Or32); + if (mode64) { + ALUI_PATTERN64(Iop_Or64); + } else { + ALUI_PATTERN(Iop_Or32); + } break; - case 0x0A: /* SLTI */ + case 0x0A: /* SLTI */ DIP("slti r%d, r%d, %d", rt, rs, imm); - putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs), - mkU32(extend_s_16to32(imm))))); + if (mode64) + putIReg(rt, unop(Iop_1Uto64, binop(Iop_CmpLT64S, getIReg(rs), + mkU64(extend_s_16to64(imm))))); + else + putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs), + mkU32(extend_s_16to32(imm))))); break; - case 0x0B: /* SLTIU */ + case 0x0B: /* SLTIU */ DIP("sltiu r%d, r%d, %d", rt, rs, imm); - putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs), - mkU32(extend_s_16to32(imm))))); + if (mode64) + putIReg(rt, unop(Iop_1Uto64, binop(Iop_CmpLT64U, getIReg(rs), + mkU64(extend_s_16to64(imm))))); + else + putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs), + mkU32(extend_s_16to32(imm))))); break; - case 0x30: /* LL / LWC0 */ + case 0x18: { /* Doubleword Add Immidiate - DADD; MIPS64 */ + DIP("daddi r%d, r%d, %d", rt, rs, imm); + IRTemp tmpRs64 = newTemp(Ity_I64); + assign(tmpRs64, getIReg(rs)); + + t0 = newTemp(Ity_I64); + t1 = newTemp(Ity_I64); + t2 = newTemp(Ity_I64); + t3 = newTemp(Ity_I64); + t4 = newTemp(Ity_I64); + /* dst = src0 + sign(imm) + if(sign(src0 ) != sign(imm )) + goto no overflow; + if(sign(dst) == sign(src0 )) + goto no overflow; + we have overflow! 
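         Editor's note (not part of the patch): the same two-XOR overflow
         test is used in the DADD, DSUB and ADDI cases above.  As a hedged
         sketch in plain C, assuming 64-bit two's-complement arithmetic,
         the predicate the IR builds here is equivalent to:

            static int add64_overflows(unsigned long long a,
                                       unsigned long long b)
            {
               unsigned long long r = a + b;
               return (long long)(~(a ^ b) & (r ^ a)) < 0;
            }

         that is, overflow happened exactly when the operands share a sign
         bit and the result's sign bit differs from theirs; the
         IRStmt_Exit below then exits with Ijk_SigFPE_IntOvf in that case.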
*/ + + assign(t0, binop(Iop_Add64, mkexpr(tmpRs64), + mkU64(extend_s_16to64(imm)))); + assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64), + mkU64(extend_s_16to64(imm)))); + assign(t2, unop(Iop_1Sto64, binop(Iop_CmpEQ64, binop(Iop_And64, + mkexpr(t1), mkU64(0x8000000000000000)), + mkU64(0x8000000000000000)))); + + assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64))); + assign(t4, unop(Iop_1Sto64, binop(Iop_CmpNE64, binop(Iop_And64, + mkexpr(t3), mkU64(0x8000000000000000)), + mkU64(0x8000000000000000)))); + + stmt(IRStmt_Exit(binop(Iop_CmpEQ64, binop(Iop_Or64, mkexpr(t2), + mkexpr(t4)), mkU64(0)), Ijk_SigFPE_IntOvf, + IRConst_U64(guest_PC_curr_instr + 4), + OFFB_PC)); + + putIReg(rt, mkexpr(t0)); + break; + } + + case 0x19: /* Doubleword Add Immidiate Unsigned - DADDIU; MIPS64 */ + DIP("daddiu r%d, r%d, %d", rt, rs, imm); + putIReg(rt, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); + break; + + case 0x1A: { + /* Load Doubleword Left - LDL; MIPS64 */ + vassert(mode64); + DIP("ldl r%u,%d(r%u)\n", rt, imm, rs); + /* t1 = addr */ +#if defined (_MIPSEL) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); +#elif defined (_MIPSEB) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Xor64, mkU64(0x7), binop(Iop_Add64, getIReg(rs), + mkU64(extend_s_16to64(imm))))); +#endif + /* t2 = word addr */ + /* t4 = addr mod 4 */ + LWX_SWX_PATTERN64_1; + + /* t3 = word content - shifted */ + t3 = newTemp(Ity_I64); + assign(t3, binop(Iop_Shl64, load(Ity_I64, mkexpr(t2)), + narrowTo(Ity_I8, binop(Iop_Shl64, binop(Iop_Sub64, mkU64(0x07), + mkexpr(t4)), mkU8(3))))); + + /* rt content - adjusted */ + t5 = newTemp(Ity_I64); + t6 = newTemp(Ity_I64); + t7 = newTemp(Ity_I64); + t8 = newTemp(Ity_I64); + + assign(t5, binop(Iop_Mul64, mkexpr(t4), mkU64(0x8))); + + assign(t6, binop(Iop_Shr64, mkU64(0x00FFFFFFFFFFFFFF), + narrowTo(Ity_I8, mkexpr(t5)))); + + assign(t7, binop(Iop_And64, getIReg(rt), mkexpr(t6))); + + putIReg(rt, binop(Iop_Or64, mkexpr(t7), mkexpr(t3))); + break; + } + + case 0x1B: { + /* Load Doubleword Right - LDR; MIPS64 */ + vassert(mode64); + DIP("ldr r%u,%d(r%u)\n", rt, imm, rs); + /* t1 = addr */ +#if defined (_MIPSEL) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))); +#elif defined (_MIPSEB) + t1 = newTemp(Ity_I64); + assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs), + mkU64(extend_s_16to64(imm))))); +#endif + /* t2 = word addr */ + /* t4 = addr mod 4 */ + LWX_SWX_PATTERN64_1; + + /* t3 = word content - shifted */ + t3 = newTemp(Ity_I64); + assign(t3, binop(Iop_Shr64, load(Ity_I64, mkexpr(t2)), + narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(3))))); + + /* rt content - adjusted */ + t5 = newTemp(Ity_I64); + assign(t5, binop(Iop_And64, getIReg(rt), unop(Iop_Not64, + binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFF), + narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(0x3))))))); + + putIReg(rt, binop(Iop_Or64, mkexpr(t5), mkexpr(t3))); + break; + } + + case 0x27: /* Load Word unsigned - LWU; MIPS64 */ + DIP("lwu r%u,%d(r%u)\n", rt, imm, rs); + LOAD_STORE_PATTERN; + + putIReg(rt, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), False)); + break; + + case 0x30: /* LL / LWC0 */ DIP("ll r%d, %d(r%d)", rt, imm, rs); LOAD_STORE_PATTERN; @@ -3549,6 +6166,24 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, #elif defined (_MIPSEB) stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), NULL /*this is a load */ )); #endif + if (mode64) + putIReg(rt, unop(Iop_32Sto64, mkexpr(t2))); 
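      /* Editor's note: the Iop_32Sto64 above sign-extends the 32-bit LL
         result, matching the MIPS64 convention that 32-bit values are
         kept sign-extended in 64-bit registers; the LLD case below loads
         a full doubleword and needs no widening. */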
+ else + putIReg(rt, mkexpr(t2)); + break; + + case 0x34: /* Load Linked Doubleword - LLD; MIPS64 */ + DIP("lld r%d, %d(r%d)", rt, imm, rs); + LOAD_STORE_PATTERN; + + t2 = newTemp(Ity_I64); +#if defined (_MIPSEL) + stmt(IRStmt_LLSC + (Iend_LE, t2, mkexpr(t1), NULL /*this is a load */ )); +#elif defined (_MIPSEB) + stmt(IRStmt_LLSC + (Iend_BE, t2, mkexpr(t1), NULL /*this is a load */ )); +#endif putIReg(rt, mkexpr(t2)); break; @@ -3564,10 +6199,37 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), mkNarrowTo32(ty, getIReg(rt)))); #endif - putIReg(rt, unop(Iop_1Uto32, mkexpr(t2))); + putIReg(rt, unop(mode64 ? Iop_1Uto64 : Iop_1Uto32, mkexpr(t2))); + break; + + case 0x3C: /* Store Conditional Doubleword - SCD; MIPS64 */ + DIP("sdc r%d, %d(r%d)", rt, imm, rs); + LOAD_STORE_PATTERN; + + t2 = newTemp(Ity_I1); +#if defined (_MIPSEL) + stmt(IRStmt_LLSC(Iend_LE, t2, mkexpr(t1), getIReg(rt))); +#elif defined (_MIPSEB) + stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), getIReg(rt))); +#endif + + putIReg(rt, unop(Iop_1Uto64, mkexpr(t2))); break; - decode_failure: + case 0x37: /* Load Doubleword - LD; MIPS64 */ + DIP("ld r%u,%d(r%u)", rt, imm, rs); + LOAD_STORE_PATTERN; + putIReg(rt, load(Ity_I64, mkexpr(t1))); + break; + + case 0x3F: /* Store Doubleword - SD; MIPS64 */ + DIP("sd r%u,%d(r%u)", rt, imm, rs); + LOAD_STORE_PATTERN; + + store(mkexpr(t1), getIReg(rt)); + break; + + decode_failure: /* All decode failures end up here. */ if (sigill_diag) vex_printf("vex mips->IR: unhandled instruction bytes: " @@ -3582,13 +6244,19 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, EIP should be up-to-date since it made so at the start bnezof each insn, but nevertheless be paranoid and update it again right now. */ - stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_PC), - mkU32(guest_PC_curr_instr))); - jmp_lit(&dres, Ijk_NoDecode, guest_PC_curr_instr); + if (mode64) { + stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_PC), + mkU64(guest_PC_curr_instr))); + jmp_lit64(&dres, Ijk_NoDecode, guest_PC_curr_instr); + } else { + stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_PC), + mkU32(guest_PC_curr_instr))); + jmp_lit32(&dres, Ijk_NoDecode, guest_PC_curr_instr); + } dres.whatNext = Dis_StopHere; dres.len = 0; return dres; - } /* switch (opc) for the main (primary) opcode switch. */ + } /* switch (opc) for the main (primary) opcode switch. */ /* All MIPS insn have 4 bytes */ @@ -3596,7 +6264,10 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, delay_slot_branch = False; stmt(bstmt); bstmt = NULL; - putPC(mkU32(guest_PC_curr_instr + 4)); + if (mode64) + putPC(mkU64(guest_PC_curr_instr + 4)); + else + putPC(mkU32(guest_PC_curr_instr + 4)); dres.jk_StopHere = is_Branch_or_Jump_and_Link(guest_code + delta - 4) ? Ijk_Call : Ijk_Boring; } @@ -3618,7 +6289,10 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, /* All decode successes end up here. 
*/ switch (dres.whatNext) { case Dis_Continue: - putPC(mkU32(guest_PC_curr_instr + 4)); + if (mode64) + putPC(mkU64(guest_PC_curr_instr + 4)); + else + putPC(mkU32(guest_PC_curr_instr + 4)); break; case Dis_ResteerU: case Dis_ResteerC: @@ -3638,7 +6312,10 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, if (branch_or_jump(guest_code + delta + 4)) { dres.whatNext = Dis_StopHere; dres.jk_StopHere = Ijk_Boring; - putPC(mkU32(guest_PC_curr_instr + 4)); + if (mode64) + putPC(mkU64(guest_PC_curr_instr + 4)); + else + putPC(mkU32(guest_PC_curr_instr + 4)); } dres.len = 4; @@ -3655,32 +6332,33 @@ static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *, /* Disassemble a single instruction into IR. The instruction is located in host memory at &guest_code[delta]. */ -DisResult -disInstr_MIPS(IRSB* irsb_IN, - Bool (*resteerOkFn) (void *, Addr64), - Bool resteerCisOk, - void* callback_opaque, - UChar* guest_code_IN, - Long delta, - Addr64 guest_IP, - VexArch guest_arch, - VexArchInfo* archinfo, - VexAbiInfo* abiinfo, - Bool host_bigendian_IN, - Bool sigill_diag_IN) +DisResult disInstr_MIPS( IRSB* irsb_IN, + Bool (*resteerOkFn) ( void *, Addr64 ), + Bool resteerCisOk, + void* callback_opaque, + UChar* guest_code_IN, + Long delta, + Addr64 guest_IP, + VexArch guest_arch, + VexArchInfo* archinfo, + VexAbiInfo* abiinfo, + Bool host_bigendian_IN, + Bool sigill_diag_IN ) { DisResult dres; - /* Set globals (see top of this file) */ - vassert(guest_arch == VexArchMIPS32); + vassert(guest_arch == VexArchMIPS32 || guest_arch == VexArchMIPS64); mode64 = guest_arch != VexArchMIPS32; guest_code = guest_code_IN; irsb = irsb_IN; host_is_bigendian = host_bigendian_IN; - guest_PC_curr_instr = (Addr32) guest_IP; - guest_PC_bbstart = (Addr32) toUInt(guest_IP - delta); +#if defined(VGP_mips32_linux) + guest_PC_curr_instr = (Addr32)guest_IP; +#elif defined(VGP_mips64_linux) + guest_PC_curr_instr = (Addr64)guest_IP; +#endif dres = disInstr_MIPS_WRK(resteerOkFn, resteerCisOk, callback_opaque, delta, archinfo, abiinfo, sigill_diag_IN); diff --git a/VEX/priv/host_mips_defs.c b/VEX/priv/host_mips_defs.c index 2e144c2c41..51d41a2acb 100644 --- a/VEX/priv/host_mips_defs.c +++ b/VEX/priv/host_mips_defs.c @@ -7,7 +7,7 @@ This file is part of Valgrind, a dynamic binary instrumentation framework. - Copyright (C) 2010-2012 RT-RK + Copyright (C) 2010-2013 RT-RK mips-valgrind@rt-rk.com This program is free software; you can redistribute it and/or @@ -36,6 +36,18 @@ #include "host_generic_regs.h" #include "host_mips_defs.h" +/* guest_COND offset. */ +#define COND_OFFSET(__mode64) (__mode64 ? 612 : 316) + +/* Register number for guest state pointer in host code. */ +#define GuestSP 23 + +#define MkHRegGPR(_n, _mode64) \ + mkHReg(_n, _mode64 ? HRcInt64 : HRcInt32, False) + +#define MkHRegFPR(_n, _mode64) \ + mkHReg(_n, _mode64 ? HRcFlt64 : HRcFlt32, False) + /*---------------- Registers ----------------*/ void ppHRegMIPS(HReg reg, Bool mode64) @@ -72,35 +84,36 @@ void ppHRegMIPS(HReg reg, Bool mode64) hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64); /* But specific for real regs. 
*/ - { - switch (hregClass(reg)) { - case HRcInt32: - r = hregNumber(reg); - vassert(r >= 0 && r < 32); - vex_printf("%s", ireg32_names[r]); - return; - case HRcFlt32: - r = hregNumber(reg); - vassert(r >= 0 && r < 32); - vex_printf("%s", freg32_names[r]); - return; - case HRcFlt64: - r = hregNumber(reg); - vassert(r >= 0 && r < 32); - vex_printf("%s", freg64_names[r]); - return; - default: - vpanic("ppHRegMIPS"); - break; - } + switch (hregClass(reg)) { + case HRcInt32: + r = hregNumber(reg); + vassert(r >= 0 && r < 32); + vex_printf("%s", ireg32_names[r]); + return; + case HRcInt64: + vassert(mode64); + r = hregNumber (reg); + vassert (r >= 0 && r < 32); + vex_printf ("%s", ireg32_names[r]); + return; + case HRcFlt32: + r = hregNumber(reg); + vassert(r >= 0 && r < 32); + vex_printf("%s", freg32_names[r]); + return; + case HRcFlt64: + r = hregNumber(reg); + vassert(r >= 0 && r < 32); + vex_printf("%s", freg64_names[r]); + return; + default: + vpanic("ppHRegMIPS"); + break; } return; } -#define MkHRegGPR(_n, _mode64) \ - mkHReg(_n, _mode64 ? HRcInt64 : HRcInt32, False) - HReg hregMIPS_GPR0(Bool mode64) { return MkHRegGPR(0, mode64); @@ -261,9 +274,6 @@ HReg hregMIPS_GPR31(Bool mode64) return MkHRegGPR(31, mode64); } -#define MkHRegFPR(_n, _mode64) \ - mkHReg(_n, _mode64 ? HRcFlt64 : HRcFlt32, False) - HReg hregMIPS_F0(Bool mode64) { return MkHRegFPR(0, mode64); @@ -551,22 +561,20 @@ HReg hregMIPS_COND(void) void getAllocableRegs_MIPS(Int * nregs, HReg ** arr, Bool mode64) { - /* - * The list of allocable registers is shorten to fit MIPS32 mode on Loongson. - * More precisely, we workaround Loongson MIPS32 issues by avoiding usage of - * odd single precision FP registers. - */ + /* The list of allocable registers is shorten to fit MIPS32 mode on Loongson. + More precisely, we workaround Loongson MIPS32 issues by avoiding usage of + odd single precision FP registers. 
*/ if (mode64) - *nregs = 24; + *nregs = 20; else - *nregs = 29; + *nregs = 28; UInt i = 0; *arr = LibVEX_Alloc(*nregs * sizeof(HReg)); - //ZERO = constant 0 - //AT = assembler temporary - // callee saves ones are listed first, since we prefer them - // if they're available + /* ZERO = constant 0 + AT = assembler temporary + callee saves ones are listed first, since we prefer them + if they're available */ (*arr)[i++] = hregMIPS_GPR16(mode64); (*arr)[i++] = hregMIPS_GPR17(mode64); (*arr)[i++] = hregMIPS_GPR18(mode64); @@ -574,32 +582,13 @@ void getAllocableRegs_MIPS(Int * nregs, HReg ** arr, Bool mode64) (*arr)[i++] = hregMIPS_GPR20(mode64); (*arr)[i++] = hregMIPS_GPR21(mode64); (*arr)[i++] = hregMIPS_GPR22(mode64); - if (!mode64) - (*arr)[i++] = hregMIPS_GPR23(mode64); - // otherwise we'll have to slum it out with caller-saves ones - if (mode64) { - (*arr)[i++] = hregMIPS_GPR8(mode64); - (*arr)[i++] = hregMIPS_GPR9(mode64); - (*arr)[i++] = hregMIPS_GPR10(mode64); - (*arr)[i++] = hregMIPS_GPR11(mode64); - } (*arr)[i++] = hregMIPS_GPR12(mode64); (*arr)[i++] = hregMIPS_GPR13(mode64); (*arr)[i++] = hregMIPS_GPR14(mode64); (*arr)[i++] = hregMIPS_GPR15(mode64); (*arr)[i++] = hregMIPS_GPR24(mode64); - /***********mips32********************/ - // t0 (=dispatch_ctr) - // t1 spill reg temp - // t2 (=guest_state) - // t3 (=PC = next guest address) - // K0 and K1 are reserved for OS kernel - // GP = global pointer - // SP = stack pointer - // FP = frame pointer - // RA = link register - // + PC, HI and LO + /* s7 (=guest_state) */ (*arr)[i++] = hregMIPS_F16(mode64); (*arr)[i++] = hregMIPS_F18(mode64); (*arr)[i++] = hregMIPS_F20(mode64); @@ -630,52 +619,52 @@ const HChar *showMIPSCondCode(MIPSCondCode cond) const HChar* ret; switch (cond) { case MIPScc_EQ: - ret = "EQ"; /* equal */ + ret = "EQ"; /* equal */ break; case MIPScc_NE: - ret = "NEQ"; /* not equal */ + ret = "NEQ"; /* not equal */ break; case MIPScc_HS: - ret = "GE"; /* >=u (Greater Than or Equal) */ + ret = "GE"; /* >=u (Greater Than or Equal) */ break; case MIPScc_LO: - ret = "LT"; /* u (higher) */ + ret = "HI"; /* >u (higher) */ break; case MIPScc_LS: - ret = "ls"; /* <=u (lower or same) */ + ret = "LS"; /* <=u (lower or same) */ break; case MIPScc_GE: - ret = "ge"; /* >=s (signed greater or equal) */ + ret = "GE"; /* >=s (signed greater or equal) */ break; case MIPScc_LT: - ret = "lt"; /* s (signed greater) */ + ret = "GT"; /* >s (signed greater) */ break; case MIPScc_LE: - ret = "le"; /* <=s (signed less or equal) */ + ret = "LE"; /* <=s (signed less or equal) */ break; case MIPScc_AL: - ret = "al"; /* always (unconditional) */ + ret = "AL"; /* always (unconditional) */ break; case MIPScc_NV: - ret = "nv"; /* never (unconditional): */ + ret = "NV"; /* never (unconditional): */ break; default: vpanic("showMIPSCondCode"); @@ -760,9 +749,6 @@ const HChar *showMIPSFpOp(MIPSFpOp op) case Mfp_MOVD: ret = "MOV.D"; break; - case Mfp_RES: - ret = "RES"; - break; case Mfp_ROUNDWS: ret = "ROUND.W.S"; break; @@ -779,8 +765,13 @@ const HChar *showMIPSFpOp(MIPSFpOp op) ret = "frsqrte"; break; case Mfp_CVTDW: - case Mfp_CVTD: - ret = "CVT.D"; + ret = "CVT.D.W"; + break; + case Mfp_CVTDL: + ret = "CVT.D.L"; + break; + case Mfp_CVTDS: + ret = "CVT.D.S"; break; case Mfp_CVTSD: case Mfp_CVTSW: @@ -816,6 +807,51 @@ const HChar *showMIPSFpOp(MIPSFpOp op) return ret; } +/* Show move from/to fpr to/from gpr */ +const HChar* showMIPSFpGpMoveOp ( MIPSFpGpMoveOp op ) +{ + const HChar *ret; + switch (op) { + case MFpGpMove_mfc1: + ret = "mfc1"; + break; + case 
MFpGpMove_dmfc1: + ret = "dmfc1"; + break; + case MFpGpMove_mtc1: + ret = "mtc1"; + break; + case MFpGpMove_dmtc1: + ret = "dmtc1"; + break; + default: + vpanic("showMIPSFpGpMoveOp"); + break; + } + return ret; +} + +/* Show floating point move conditional */ +const HChar* showMIPSMoveCondOp ( MIPSMoveCondOp op ) +{ + const HChar *ret; + switch (op) { + case MFpMoveCond_movns: + ret = "movn.s"; + break; + case MFpMoveCond_movnd: + ret = "movn.d"; + break; + case MMoveCond_movn: + ret = "movn"; + break; + default: + vpanic("showMIPSFpMoveCondOp"); + break; + } + return ret; +} + /* --------- MIPSAMode: memory address expressions. --------- */ MIPSAMode *MIPSAMode_IR(Int idx, HReg base) @@ -1038,6 +1074,12 @@ const HChar *showMIPSUnaryOp(MIPSUnaryOp op) case Mun_NOP: ret = "nop"; break; + case Mun_DCLO: + ret = "dclo"; + break; + case Mun_DCLZ: + ret = "dclz"; + break; default: vpanic("showMIPSUnaryOp"); break; @@ -1068,6 +1110,15 @@ const HChar *showMIPSAluOp(MIPSAluOp op, Bool immR) case Malu_XOR: ret = immR ? "xori" : "xor"; break; + case Malu_DADD: + ret = immR ? "daddi" : "dadd"; + break; + case Malu_DSUB: + ret = immR ? "dsubi" : "dsub"; + break; + case Malu_SLT: + ret = immR ? "slti" : "slt"; + break; default: vpanic("showMIPSAluOp"); break; @@ -1222,8 +1273,8 @@ MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR) return i; } -MIPSInstr *MIPSInstr_Call(MIPSCondCode cond, Addr32 target, UInt argiregs, - HReg src, RetLoc rloc) +MIPSInstr *MIPSInstr_Call ( MIPSCondCode cond, Addr64 target, UInt argiregs, + HReg src, RetLoc rloc ) { UInt mask; MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); @@ -1233,15 +1284,16 @@ MIPSInstr *MIPSInstr_Call(MIPSCondCode cond, Addr32 target, UInt argiregs, i->Min.Call.argiregs = argiregs; i->Min.Call.src = src; i->Min.Call.rloc = rloc; - /* Only r4 .. r7 inclusive may be used as arg regs. Hence: */ - mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); + /* Only $4 .. $7/$11 inclusive may be used as arg regs. */ + mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) + | (1 << 10) | (1 << 11); vassert(0 == (argiregs & ~mask)); vassert(rloc != RetLocINVALID); return i; } -MIPSInstr *MIPSInstr_CallAlways(MIPSCondCode cond, Addr32 target, UInt argiregs, - RetLoc rloc) +MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode cond, Addr64 target, + UInt argiregs, RetLoc rloc ) { UInt mask; MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); @@ -1250,14 +1302,15 @@ MIPSInstr *MIPSInstr_CallAlways(MIPSCondCode cond, Addr32 target, UInt argiregs, i->Min.Call.target = target; i->Min.Call.argiregs = argiregs; i->Min.Call.rloc = rloc; - /* Only r4 .. r7 inclusive may be used as arg regs. Hence: */ - mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); + /* Only $4 .. $7/$11 inclusive may be used as arg regs. 
*/ + mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) + | (1 << 10) | (1 << 11); vassert(0 == (argiregs & ~mask)); vassert(rloc != RetLocINVALID); return i; } -MIPSInstr *MIPSInstr_XDirect ( Addr32 dstGA, MIPSAMode* amPC, +MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC, MIPSCondCode cond, Bool toFastEP ) { MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr)); i->tag = Min_XDirect; @@ -1420,6 +1473,19 @@ MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR) return i; } +MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2, + HReg src3 ) +{ + MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + i->tag = Min_FpTernary; + i->Min.FpTernary.op = op; + i->Min.FpTernary.dst = dst; + i->Min.FpTernary.src1 = src1; + i->Min.FpTernary.src2 = src2; + i->Min.FpTernary.src3 = src3; + return i; +} + MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src) { MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); @@ -1444,19 +1510,6 @@ MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR, return i; } -MIPSInstr *MIPSInstr_MovCond(HReg dst, HReg argL, MIPSRH * argR, HReg condR, - MIPSCondCode cond) -{ - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); - i->tag = Min_MovCond; - i->Min.MovCond.dst = dst; - i->Min.MovCond.srcL = argL; - i->Min.MovCond.srcR = argR; - i->Min.MovCond.condR = condR; - i->Min.MovCond.cond = cond; - return i; -} - MIPSInstr *MIPSInstr_MtFCSR(HReg src) { MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); @@ -1473,6 +1526,28 @@ MIPSInstr *MIPSInstr_MfFCSR(HReg dst) return i; } +MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src ) +{ + MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + i->tag = Min_FpGpMove; + i->Min.FpGpMove.op = op; + i->Min.FpGpMove.dst = dst; + i->Min.FpGpMove.src = src; + return i; +} + +MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src, + HReg cond ) +{ + MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + i->tag = Min_MoveCond; + i->Min.MoveCond.op = op; + i->Min.MoveCond.dst = dst; + i->Min.MoveCond.src = src; + i->Min.MoveCond.cond = cond; + return i; +} + MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter, MIPSAMode* amFailAddr ) { MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr)); @@ -1611,27 +1686,31 @@ void ppMIPSInstr(MIPSInstr * i, Bool mode64) if (i->Min.Call.cond != MIPScc_AL) { vex_printf("if (%s) ", showMIPSCondCode(i->Min.Call.cond)); } - vex_printf("{ "); - ppLoadImm(hregMIPS_GPR11(mode64), i->Min.Call.target, mode64); + vex_printf(" {"); + if (!mode64) + vex_printf(" addiu $29, $29, -16"); - vex_printf(" ; mtctr r10 ; bctrl ["); + ppLoadImm(hregMIPS_GPR25(mode64), i->Min.Call.target, mode64); + + vex_printf(" ; jarl $31, $25; # args ["); for (n = 0; n < 32; n++) { if (i->Min.Call.argiregs & (1 << n)) { - vex_printf("r%d", n); + vex_printf("$%d", n); if ((i->Min.Call.argiregs >> n) > 1) vex_printf(","); } } - vex_printf(","); - ppRetLoc(i->Min.Call.rloc); - vex_printf("] }"); + vex_printf("] nop; "); + if (!mode64) + vex_printf("addiu $29, $29, 16; ]"); + break; } case Min_XDirect: vex_printf("(xDirect) "); vex_printf("if (guest_COND.%s) { ", showMIPSCondCode(i->Min.XDirect.cond)); - vex_printf("move $9, 0x%x,", i->Min.XDirect.dstGA); + vex_printf("move $9, 0x%x,", (UInt)i->Min.XDirect.dstGA); vex_printf("; sw $9, "); ppMIPSAMode(i->Min.XDirect.amPC, mode64); vex_printf("; move $9, $disp_cp_chain_me_to_%sEP; jalr $9; nop}", @@ -1711,6 +1790,16 @@ void ppMIPSInstr(MIPSInstr * i, Bool mode64) vex_printf(","); 
ppHRegMIPS(i->Min.FpBinary.srcR, mode64); return; + case Min_FpTernary: + vex_printf("%s", showMIPSFpOp(i->Min.FpTernary.op)); + ppHRegMIPS(i->Min.FpTernary.dst, mode64); + vex_printf(","); + ppHRegMIPS(i->Min.FpTernary.src1, mode64); + vex_printf(","); + ppHRegMIPS(i->Min.FpTernary.src2, mode64); + vex_printf(","); + ppHRegMIPS(i->Min.FpTernary.src3, mode64); + return; case Min_FpConvert: vex_printf("%s", showMIPSFpOp(i->Min.FpConvert.op)); ppHRegMIPS(i->Min.FpConvert.dst, mode64); @@ -1762,27 +1851,34 @@ void ppMIPSInstr(MIPSInstr * i, Bool mode64) } return; } - case Min_MovCond: { - if (i->Min.MovCond.cond == MIPScc_MI) { - vex_printf("\ncond move\n"); - return; - - } - break; - } case Min_MtFCSR: { vex_printf("ctc1 "); ppHRegMIPS(i->Min.MtFCSR.src, mode64); vex_printf(", $31"); return; } - case Min_MfFCSR: { vex_printf("ctc1 "); ppHRegMIPS(i->Min.MfFCSR.dst, mode64); vex_printf(", $31"); return; } + case Min_FpGpMove: { + vex_printf("%s", showMIPSFpGpMoveOp(i->Min.FpGpMove.op)); + ppHRegMIPS(i->Min.FpGpMove.dst, mode64); + vex_printf(", "); + ppHRegMIPS(i->Min.FpGpMove.src, mode64); + return; + } + case Min_MoveCond: { + vex_printf("%s", showMIPSMoveCondOp(i->Min.MoveCond.op)); + ppHRegMIPS(i->Min.MoveCond.dst, mode64); + vex_printf(", "); + ppHRegMIPS(i->Min.MoveCond.src, mode64); + vex_printf(", "); + ppHRegMIPS(i->Min.MoveCond.cond, mode64); + return; + } case Min_EvCheck: vex_printf("(evCheck) lw $9, "); ppMIPSAMode(i->Min.EvCheck.amCounter, mode64); @@ -1794,14 +1890,20 @@ void ppMIPSInstr(MIPSInstr * i, Bool mode64) vex_printf("; nofail:"); return; case Min_ProfInc: - vex_printf("(profInc) move $9, ($NotKnownYet); " - "lw $8, 0($9); " - "addiu $8, $8, 1; " - "sw $8, 0($9); " - "sltiu $1, $8, 1; " - "lw $8, 4($9); " - "addu $8, $8, $1; " - "sw $8, 4($9); " ); + if (mode64) + vex_printf("(profInc) move $9, ($NotKnownYet); " + "ld $8, 0($9); " + "daddiu $8, $8, 1; " + "sd $8, 0($9); " ); + else + vex_printf("(profInc) move $9, ($NotKnownYet); " + "lw $8, 0($9); " + "addiu $8, $8, 1; " + "sw $8, 0($9); " + "sltiu $1, $8, 1; " + "lw $8, 4($9); " + "addu $8, $8, $1; " + "sw $8, 4($9); " ); return; default: vpanic("ppMIPSInstr"); @@ -1873,6 +1975,9 @@ void getRegUsage_MIPSInstr(HRegUsage * u, MIPSInstr * i, Bool mode64) addHRegUse(u, HRmRead, i->Min.Div.srcR); return; case Min_Call: { + /* Logic and comments copied/modified from x86, ppc and arm back end. + First off, claim it trashes all the caller-saved regs + which fall within the register allocator's jurisdiction. */ if (i->Min.Call.cond != MIPScc_AL) addHRegUse(u, HRmRead, i->Min.Call.src); UInt argir; @@ -1897,22 +2002,24 @@ void getRegUsage_MIPSInstr(HRegUsage * u, MIPSInstr * i, Bool mode64) addHRegUse(u, HRmWrite, hregMIPS_GPR24(mode64)); addHRegUse(u, HRmWrite, hregMIPS_GPR25(mode64)); - addHRegUse(u, HRmWrite, hregMIPS_GPR26(mode64)); - addHRegUse(u, HRmWrite, hregMIPS_GPR27(mode64)); + addHRegUse(u, HRmWrite, hregMIPS_GPR31(mode64)); /* Now we have to state any parameter-carrying registers - which might be read. This depends on the argiregs field. */ + which might be read. This depends on the argiregs field. 
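         Editor's sketch (not from the patch): a call passing three
         integer arguments in $4..$6 would have been built with

            argiregs = (1 << 4) | (1 << 5) | (1 << 6);

         and the vassert below checks that no bits outside $4..$11 (the
         a0..a7 argument registers of the n64 ABI) are ever claimed.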
*/ argir = i->Min.Call.argiregs; - if (argir & (1 << 7)) - addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64)); - if (argir & (1 << 6)) - addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64)); - if (argir & (1 << 5)) - addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64)); - if (argir & (1 << 4)) - addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64)); + if (argir & (1<<11)) addHRegUse(u, HRmRead, hregMIPS_GPR11(mode64)); + if (argir & (1<<10)) addHRegUse(u, HRmRead, hregMIPS_GPR10(mode64)); + if (argir & (1<<9)) addHRegUse(u, HRmRead, hregMIPS_GPR9(mode64)); + if (argir & (1<<8)) addHRegUse(u, HRmRead, hregMIPS_GPR8(mode64)); + if (argir & (1<<7)) addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64)); + if (argir & (1<<6)) addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64)); + if (argir & (1<<5)) addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64)); + if (argir & (1<<4)) addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64)); + + vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6) + | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10) + | (1 << 11)))); - vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6) | (1 << 7)))); return; } /* XDirect/XIndir/XAssisted are also a bit subtle. They @@ -1966,20 +2073,20 @@ void getRegUsage_MIPSInstr(HRegUsage * u, MIPSInstr * i, Bool mode64) } break; case Min_FpUnary: - if (i->Min.FpUnary.op == Mfp_CVTD) { - addHRegUse(u, HRmWrite, i->Min.FpUnary.dst); - addHRegUse(u, HRmRead, i->Min.FpUnary.src); - return; - } else { - addHRegUse(u, HRmWrite, i->Min.FpUnary.dst); - addHRegUse(u, HRmRead, i->Min.FpUnary.src); - return; - } + addHRegUse(u, HRmWrite, i->Min.FpUnary.dst); + addHRegUse(u, HRmRead, i->Min.FpUnary.src); + return; case Min_FpBinary: addHRegUse(u, HRmWrite, i->Min.FpBinary.dst); addHRegUse(u, HRmRead, i->Min.FpBinary.srcL); addHRegUse(u, HRmRead, i->Min.FpBinary.srcR); return; + case Min_FpTernary: + addHRegUse(u, HRmWrite, i->Min.FpTernary.dst); + addHRegUse(u, HRmRead, i->Min.FpTernary.src1); + addHRegUse(u, HRmRead, i->Min.FpTernary.src2); + addHRegUse(u, HRmRead, i->Min.FpTernary.src3); + return; case Min_FpConvert: addHRegUse(u, HRmWrite, i->Min.FpConvert.dst); addHRegUse(u, HRmRead, i->Min.FpConvert.src); @@ -1989,13 +2096,14 @@ void getRegUsage_MIPSInstr(HRegUsage * u, MIPSInstr * i, Bool mode64) addHRegUse(u, HRmRead, i->Min.FpCompare.srcL); addHRegUse(u, HRmRead, i->Min.FpCompare.srcR); return; - case Min_MovCond: - if (i->Min.MovCond.srcR->tag == Mrh_Reg) { - addHRegUse(u, HRmRead, i->Min.MovCond.srcR->Mrh.Reg.reg); - } - addHRegUse(u, HRmRead, i->Min.MovCond.srcL); - addHRegUse(u, HRmRead, i->Min.MovCond.condR); - addHRegUse(u, HRmWrite, i->Min.MovCond.dst); + case Min_FpGpMove: + addHRegUse(u, HRmWrite, i->Min.FpGpMove.dst); + addHRegUse(u, HRmRead, i->Min.FpGpMove.src); + return; + case Min_MoveCond: + addHRegUse(u, HRmWrite, i->Min.MoveCond.dst); + addHRegUse(u, HRmRead, i->Min.MoveCond.src); + addHRegUse(u, HRmRead, i->Min.MoveCond.cond); return; case Min_EvCheck: /* We expect both amodes only to mention %ebp, so this is in @@ -2113,20 +2221,20 @@ void mapRegs_MIPSInstr(HRegRemap * m, MIPSInstr * i, Bool mode64) } break; case Min_FpUnary: - if (i->Min.FpUnary.op == Mfp_CVTD) { - mapReg(m, &i->Min.FpUnary.dst); - mapReg(m, &i->Min.FpUnary.src); - return; - } else { - mapReg(m, &i->Min.FpUnary.dst); - mapReg(m, &i->Min.FpUnary.src); - return; - } + mapReg(m, &i->Min.FpUnary.dst); + mapReg(m, &i->Min.FpUnary.src); + return; case Min_FpBinary: mapReg(m, &i->Min.FpBinary.dst); mapReg(m, &i->Min.FpBinary.srcL); mapReg(m, &i->Min.FpBinary.srcR); return; + case Min_FpTernary: + mapReg(m, 
&i->Min.FpTernary.dst); + mapReg(m, &i->Min.FpTernary.src1); + mapReg(m, &i->Min.FpTernary.src2); + mapReg(m, &i->Min.FpTernary.src3); + return; case Min_FpConvert: mapReg(m, &i->Min.FpConvert.dst); mapReg(m, &i->Min.FpConvert.src); @@ -2142,14 +2250,14 @@ void mapRegs_MIPSInstr(HRegRemap * m, MIPSInstr * i, Bool mode64) case Min_MfFCSR: mapReg(m, &i->Min.MfFCSR.dst); return; - case Min_MovCond: - if (i->Min.MovCond.srcR->tag == Mrh_Reg) { - mapReg(m, &(i->Min.MovCond.srcR->Mrh.Reg.reg)); - } - mapReg(m, &i->Min.MovCond.srcL); - mapReg(m, &i->Min.MovCond.condR); - mapReg(m, &i->Min.MovCond.dst); - + case Min_FpGpMove: + mapReg(m, &i->Min.FpGpMove.dst); + mapReg(m, &i->Min.FpGpMove.src); + return; + case Min_MoveCond: + mapReg(m, &i->Min.MoveCond.dst); + mapReg(m, &i->Min.MoveCond.src); + mapReg(m, &i->Min.MoveCond.cond); return; case Min_EvCheck: /* We expect both amodes only to mention %ebp, so this is in @@ -2176,7 +2284,7 @@ Bool isMove_MIPSInstr(MIPSInstr * i, HReg * src, HReg * dst) { /* Moves between integer regs */ if (i->tag == Min_Alu) { - // or Rd,Rs,Rs == mr Rd,Rs + /* or Rd,Rs,Rs == mr Rd,Rs */ if (i->Min.Alu.op != Malu_OR) return False; if (i->Min.Alu.srcR->tag != Mrh_Reg) @@ -2192,8 +2300,7 @@ Bool isMove_MIPSInstr(MIPSInstr * i, HReg * src, HReg * dst) } /* Generate mips spill/reload instructions under the direction of the - register allocator. -*/ + register allocator. */ void genSpill_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg, Int offsetB, Bool mode64) { @@ -2399,7 +2506,7 @@ static UChar *doAMode_IR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am, r_dst = rSD; if (opc1 < 40) { - //load + /* load */ if (rSD == 33) /* mfhi */ p = mkFormR(p, 0, 0, 0, r_dst, 0, 16); @@ -2411,7 +2518,7 @@ static UChar *doAMode_IR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am, p = mkFormI(p, opc1, rA, r_dst, idx); if (opc1 >= 40) { - //store + /* store */ if (rSD == 33) /* mthi */ p = mkFormR(p, 0, r_dst, 0, 0, 0, 17); @@ -2438,7 +2545,7 @@ static UChar *doAMode_RR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am, r_dst = rSD; if (opc1 < 40) { - //load + /* load */ if (rSD == 33) /* mfhi */ p = mkFormR(p, 0, 0, 0, r_dst, 0, 16); @@ -2446,20 +2553,27 @@ static UChar *doAMode_RR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am, /* mflo */ p = mkFormR(p, 0, 0, 0, r_dst, 0, 18); } - /* addiu sp, sp, -4 - * sw rA, 0(sp) - * addu rA, rA, rB - * sw/lw r_dst, 0(rA) - * lw rA, 0(sp) - * addiu sp, sp, 4 */ + if (mode64) { - p = mkFormI(p, 25, 29, 29, 0xFFFC); + /* addiu sp, sp, -8 + sd rA, 0(sp) + daddu rA, rA, rB + sd/ld r_dst, 0(rA) + ld rA, 0(sp) + daddiu sp, sp, 8 */ + p = mkFormI(p, 25, 29, 29, 0xFFF8); p = mkFormI(p, 63, 29, rA, 0); p = mkFormR(p, 0, rA, rB, rA, 0, 45); p = mkFormI(p, opc1, rA, r_dst, 0); p = mkFormI(p, 55, 29, rA, 0); - p = mkFormI(p, 25, 29, 29, 4); + p = mkFormI(p, 25, 29, 29, 8); } else { + /* addiu sp, sp, -4 + sw rA, 0(sp) + addu rA, rA, rB + sw/lw r_dst, 0(rA) + lw rA, 0(sp) + addiu sp, sp, 4 */ p = mkFormI(p, 9, 29, 29, 0xFFFC); p = mkFormI(p, 43, 29, rA, 0); p = mkFormR(p, 0, rA, rB, rA, 0, 33); @@ -2468,7 +2582,7 @@ static UChar *doAMode_RR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am, p = mkFormI(p, 9, 29, 29, 4); } if (opc1 >= 40) { - //store + /* store */ if (rSD == 33) /* mthi */ p = mkFormR(p, 0, r_dst, 0, 0, 0, 17); @@ -2492,48 +2606,48 @@ static UChar *mkLoadImm(UChar * p, UInt r_dst, ULong imm, Bool mode64) } if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) { - // sign-extendable from 16 bits - // addiu r_dst,0,imm => li r_dst,imm + /* 
sign-extendable from 16 bits + addiu r_dst, 0, imm => li r_dst, imm */ p = mkFormI(p, 9, 0, r_dst, imm & 0xFFFF); } else { if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) { - // sign-extendable from 32 bits - // addiu r_dst,r0,(imm>>16) => lis r_dst, (imm>>16) - // lui r_dst, (imm>>16) + /* sign-extendable from 32 bits + addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16) + lui r_dst, (imm >> 16) */ p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF); - // ori r_dst, r_dst, (imm & 0xFFFF) + /* ori r_dst, r_dst, (imm & 0xFFFF) */ p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); } else { vassert(mode64); - // lui load in upper half of low word + /* lui load in upper half of low word */ p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF); - // ori + /* ori */ p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF); - //shift + /* shift */ p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56); - // ori + /* ori */ p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF); - //shift + /* shift */ p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56); - // ori + /* ori */ p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); } } return p; } -/* A simplified version of mkLoadImm that always generates 2 or 5 +/* A simplified version of mkLoadImm that always generates 2 or 6 instructions (32 or 64 bits respectively) even if it could generate fewer. This is needed for generating fixed sized patchable sequences. */ -static UChar* mkLoadImm_EXACTLY2or5 ( UChar* p, - UInt r_dst, ULong imm, Bool mode64 ) +static UChar* mkLoadImm_EXACTLY2or6 ( UChar* p, + UInt r_dst, ULong imm, Bool mode64) { vassert(r_dst < 0x20); if (!mode64) { /* In 32-bit mode, make sure the top 32 bits of imm are a sign - extension of the bottom 32 bits. (Probably unnecessary.) */ + extension of the bottom 32 bits. (Probably unnecessary.) */ UInt u32 = (UInt)imm; Int s32 = (Int)u32; Long s64 = (Long)s32; @@ -2541,21 +2655,34 @@ static UChar* mkLoadImm_EXACTLY2or5 ( UChar* p, } if (!mode64) { - // sign-extendable from 32 bits - // addiu r_dst,r0,(imm>>16) => lis r_dst, (imm>>16) - // lui r_dst, (imm>>16) + /* sign-extendable from 32 bits + addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16) + lui r_dst, (imm >> 16) */ p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF); - // ori r_dst, r_dst, (imm & 0xFFFF) + /* ori r_dst, r_dst, (imm & 0xFFFF) */ p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); } else { - vassert(0); + /* full 64bit immediate load: 6 (six!) insns. */ + vassert(mode64); + /* lui load in upper half of low word */ + p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF); + /* ori */ + p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF); + /* shift */ + p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56); + /* ori */ + p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF); + /* shift */ + p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56); + /* ori */ + p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); } return p; } /* Checks whether the sequence of bytes at p was indeed created - by mkLoadImm_EXACTLY2or5 with the given parameters. */ -static Bool isLoadImm_EXACTLY2or5 ( UChar* p_to_check, + by mkLoadImm_EXACTLY2or6 with the given parameters. 
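      Editor's note: in 64-bit mode the fixed six-instruction sequence
      checked for here is the one emitted above, namely (sketch, with $t
      standing for r_dst):

         lui   $t, imm[63:48]
         ori   $t, $t, imm[47:32]
         dsll  $t, $t, 16
         ori   $t, $t, imm[31:16]
         dsll  $t, $t, 16
         ori   $t, $t, imm[15:0]

      which is why the patchable sequences are fixed at two instructions
      on MIPS32 and six on MIPS64.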
*/ +static Bool isLoadImm_EXACTLY2or6 ( UChar* p_to_check, UInt r_dst, ULong imm, Bool mode64 ) { vassert(r_dst < 0x20); @@ -2572,17 +2699,37 @@ static Bool isLoadImm_EXACTLY2or5 ( UChar* p_to_check, if (!mode64) { UInt expect[2] = { 0, 0 }; UChar* p = (UChar*)&expect[0]; - // lui r_dst, (imm>>16) + /* lui r_dst, (immi >> 16) */ p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF); - // ori r_dst, r_dst, (imm & 0xFFFF) + /* ori r_dst, r_dst, (imm & 0xFFFF) */ p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); vassert(p == (UChar*)&expect[2]); ret = fetch32(p_to_check + 0) == expect[0] - && fetch32(p_to_check + 4) == expect[1]; - + && fetch32(p_to_check + 4) == expect[1]; } else { - vassert(0); + UInt expect[6] = { 0, 0, 0, 0, 0, 0}; + UChar* p = (UChar*)&expect[0]; + /* lui load in upper half of low word */ + p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF); + /* ori */ + p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF); + /* shift */ + p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56); + /* ori */ + p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF); + /* shift */ + p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56); + /* ori */ + p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF); + vassert(p == (UChar*)&expect[6]); + + ret = fetch32(p_to_check + 0) == expect[0] + && fetch32(p_to_check + 4) == expect[1] + && fetch32(p_to_check + 8) == expect[2] + && fetch32(p_to_check + 12) == expect[3] + && fetch32(p_to_check + 16) == expect[4] + && fetch32(p_to_check + 20) == expect[5]; } return ret; } @@ -2662,44 +2809,19 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, vassert(nbuf >= 32); switch (i->tag) { - case Min_MovCond: { - MIPSRH *srcR = i->Min.MovCond.srcR; - UInt condR = iregNo(i->Min.MovCond.condR, mode64); - UInt dst = iregNo(i->Min.MovCond.dst, mode64); - - UInt srcL = iregNo(i->Min.MovCond.srcL, mode64); - - p = mkMoveReg(p, dst, srcL); - if (i->Min.MovCond.cond == MIPScc_MI) { - p = mkFormI(p, 7, condR, 0, 2); //bgtz cond,2 - } - - p = mkFormR(p, 0, 0, 0, 0, 0, 0); //nop - - if (srcR->tag == Mrh_Reg) { - //or dst,src,src - p = mkMoveReg(p, dst, iregNo(srcR->Mrh.Reg.reg, mode64)); - /*p = mkFormR(p, 0, dst, iregNo(src->Mrh.Reg.reg, mode64), - iregNo(src->Mrh.Reg.reg, mode64), 0, 37);*/ - } else { - p = mkLoadImm(p, dst, srcR->Mrh.Imm.imm16, mode64); - } - } - goto done; - case Min_LI: p = mkLoadImm(p, iregNo(i->Min.LI.dst, mode64), i->Min.LI.imm, mode64); goto done; - + case Min_Alu: { MIPSRH *srcR = i->Min.Alu.srcR; Bool immR = toBool(srcR->tag == Mrh_Imm); UInt r_dst = iregNo(i->Min.Alu.dst, mode64); UInt r_srcL = iregNo(i->Min.Alu.srcL, mode64); - UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg, mode64); - + UInt r_srcR = immR ? 
(-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg, + mode64); switch (i->Min.Alu.op) { - /*Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR */ + /* Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR, Malu_SLT */ case Malu_ADD: if (immR) { vassert(srcR->Mrh.Imm.imm16 != 0x8000); @@ -2743,16 +2865,16 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } else { /* or */ if (r_srcL == 33) - //MFHI + /* MFHI */ p = mkFormR(p, 0, 0, 0, r_dst, 0, 16); else if (r_srcL == 34) - //MFLO + /* MFLO */ p = mkFormR(p, 0, 0, 0, r_dst, 0, 18); else if (r_dst == 33) - //MTHI + /* MTHI */ p = mkFormR(p, 0, r_srcL, 0, 0, 0, 17); else if (r_dst == 34) - //MTLO + /* MTLO */ p = mkFormR(p, 0, r_srcL, 0, 0, 0, 19); else p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 37); @@ -2773,7 +2895,30 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38); } break; - + case Malu_DADD: + if (immR) { + vassert(srcR->Mrh.Imm.syned); + vassert(srcR->Mrh.Imm.imm16 != 0x8000); + p = mkFormI(p, 25, r_srcL, r_dst, srcR->Mrh.Imm.imm16); + } else { + p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 45); + } + break; + case Malu_DSUB: + if (immR) { + p = mkFormI(p, 25, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16)); + } else { + p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 47); + } + break; + case Malu_SLT: + if (immR) { + goto bad; + } else { + p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42); + } + break; + default: goto bad; } @@ -2795,7 +2940,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, if (sz32) { if (immR) { UInt n = srcR->Mrh.Imm.imm16; - vassert(n >= 0 && n < 32); + vassert(n >= 0 && n <= 32); p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 0); } else { /* shift variable */ @@ -2815,10 +2960,10 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } } break; - + case Mshft_SRL: if (sz32) { - // SRL, SRLV + /* SRL, SRLV */ if (immR) { UInt n = srcR->Mrh.Imm.imm16; vassert(n >= 0 && n < 32); @@ -2828,7 +2973,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 6); } } else { - // DSRL, DSRL32, DSRLV + /* DSRL, DSRL32, DSRLV */ if (immR) { UInt n = srcR->Mrh.Imm.imm16; vassert((n >= 0 && n < 32) || (n > 31 && n < 64)); @@ -2842,10 +2987,10 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } } break; - + case Mshft_SRA: if (sz32) { - // SRA, SRAV + /* SRA, SRAV */ if (immR) { UInt n = srcR->Mrh.Imm.imm16; vassert(n >= 0 && n < 32); @@ -2855,7 +3000,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 7); } } else { - // DSRA, DSRA32, DSRAV + /* DSRA, DSRA32, DSRAV */ if (immR) { UInt n = srcR->Mrh.Imm.imm16; vassert((n >= 0 && n < 32) || (n > 31 && n < 64)); @@ -2876,26 +3021,32 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, goto done; } - + case Min_Unary: { UInt r_dst = iregNo(i->Min.Unary.dst, mode64); UInt r_src = iregNo(i->Min.Unary.src, mode64); switch (i->Min.Unary.op) { - /*Mun_CLO, Mun_CLZ, Mun_NOP */ - case Mun_CLO: //clo + /* Mun_CLO, Mun_CLZ, Mun_NOP, Mun_DCLO, Mun_DCLZ */ + case Mun_CLO: /* clo */ p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 33); break; - case Mun_CLZ: //clz + case Mun_CLZ: /* clz */ p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 32); break; - case Mun_NOP: //nop (sll r0,r0,0) + case Mun_NOP: /* nop (sll r0,r0,0) */ p = mkFormR(p, 0, 0, 0, 0, 0, 0); break; + case Mun_DCLO: /* clo */ + p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 37); + break; + case Mun_DCLZ: /* clz */ + p = mkFormR(p, 28, r_src, 0 /*whatever */ , r_dst, 0, 36); + break; } goto 
done; } - + case Min_Cmp: { UInt r_srcL = iregNo(i->Min.Cmp.srcL, mode64); UInt r_srcR = iregNo(i->Min.Cmp.srcR, mode64); @@ -2903,7 +3054,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, switch (i->Min.Cmp.cond) { case MIPScc_EQ: - /* addiu r_dst, r0, 1 + /* addiu r_dst, r0, 1 beq r_srcL, r_srcR, 2 nop addiu r_dst, r0, 0 @@ -2914,7 +3065,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, p = mkFormI(p, 9, 0, r_dst, 0); break; case MIPScc_NE: - /* addiu r_dst, r0, 1 + /* addiu r_dst, r0, 1 bne r_srcL, r_srcR, 2 nop addiu r_dst, r0, 0 @@ -2925,15 +3076,15 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, p = mkFormI(p, 9, 0, r_dst, 0); break; case MIPScc_LT: - /* slt r_dst, r_srcL, r_srcR */ + /* slt r_dst, r_srcL, r_srcR */ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42); break; case MIPScc_LO: - /* sltu r_dst, r_srcL, r_srcR */ + /* sltu r_dst, r_srcL, r_srcR */ p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 43); break; case MIPScc_LE: - /* addiu r_dst, r0, 1 + /* addiu r_dst, r0, 1 beq r_srcL, r_srcR, 2 nop slt r_dst, r_srcL, r_srcR */ @@ -2943,7 +3094,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42); break; case MIPScc_LS: - /* addiu r_dst, r0, 1 + /* addiu r_dst, r0, 1 beq r_srcL, r_srcR, 2 nop sltu r_dst, r_srcL, r_srcR */ @@ -2957,7 +3108,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } goto done; } - + case Min_Mul: { Bool syned = i->Min.Mul.syned; Bool widening = i->Min.Mul.widening; @@ -2965,7 +3116,6 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, UInt r_srcL = iregNo(i->Min.Mul.srcL, mode64); UInt r_srcR = iregNo(i->Min.Mul.srcR, mode64); UInt r_dst = iregNo(i->Min.Mul.dst, mode64); - if (widening) { if (sz32) { if (syned) @@ -2991,7 +3141,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } goto done; } - + case Min_Macc: { Bool syned = i->Min.Macc.syned; UInt r_srcL = iregNo(i->Min.Macc.srcL, mode64); @@ -3000,11 +3150,11 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, if (syned) { switch (i->Min.Macc.op) { case Macc_ADD: - //madd + /* madd */ p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 0); break; case Macc_SUB: - //msub + /* msub */ p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 4); break; @@ -3014,12 +3164,12 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } else { switch (i->Min.Macc.op) { case Macc_ADD: - //maddu + /* maddu */ p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 1); break; case Macc_SUB: - //msubu + /* msubu */ p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 5); break; @@ -3054,45 +3204,45 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, goto done; } } - + case Min_Mthi: { UInt r_src = iregNo(i->Min.MtHL.src, mode64); p = mkFormR(p, 0, r_src, 0, 0, 0, 17); goto done; } - + case Min_Mtlo: { UInt r_src = iregNo(i->Min.MtHL.src, mode64); p = mkFormR(p, 0, r_src, 0, 0, 0, 19); goto done; } - + case Min_Mfhi: { UInt r_dst = iregNo(i->Min.MfHL.dst, mode64); p = mkFormR(p, 0, 0, 0, r_dst, 0, 16); goto done; } - + case Min_Mflo: { UInt r_dst = iregNo(i->Min.MfHL.dst, mode64); p = mkFormR(p, 0, 0, 0, r_dst, 0, 18); goto done; } - + case Min_MtFCSR: { UInt r_src = iregNo(i->Min.MtFCSR.src, mode64); /* ctc1 */ p = mkFormR(p, 17, 6, r_src, 31, 0, 0); goto done; } - + case Min_MfFCSR: { UInt r_dst = iregNo(i->Min.MfFCSR.dst, mode64); /* cfc1 */ p = mkFormR(p, 17, 2, r_dst, 31, 0, 0); goto done; } - + case Min_Call: { if (i->Min.Call.cond != MIPScc_AL && i->Min.Call.rloc != RetLocNone) { /* The call might not happen (it isn't unconditional) and @@ -3106,23 +3256,33 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* 
is_profInc, } MIPSCondCode cond = i->Min.Call.cond; UInt r_dst = 25; /* using %r25 as address temporary - - see getRegUsage_MIPSInstr */ + see getRegUsage_MIPSInstr */ /* jump over the following insns if condition does not hold */ if (cond != MIPScc_AL) { /* jmp fwds if !condition */ /* don't know how many bytes to jump over yet... make space for a jump instruction + nop!!! and fill in later. */ - ptmp = p; /* fill in this bit later */ - p += 8; // p += 8 + ptmp = p; /* fill in this bit later */ + p += 8; /* p += 8 */ + } + + if (!mode64) { + /* addiu $29, $29, -16 */ + p = mkFormI(p, 9, 29, 29, 0xFFF0); } - /* load target to r_dst */// p += 4|8 + /* load target to r_dst; p += 4|8 */ p = mkLoadImm(p, r_dst, i->Min.Call.target, mode64); - /* jalr %r_dst */ - p = mkFormR(p, 0, r_dst, 0, 31, 0, 9); // p += 4 - p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 + /* jalr r_dst */ + p = mkFormR(p, 0, r_dst, 0, 31, 0, 9); /* p += 4 */ + p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */ + + if (!mode64) { + /* addiu $29, $29, 16 */ + p = mkFormI(p, 9, 29, 29, 0x0010); + } /* Fix up the conditional jump, if there was one. */ if (cond != MIPScc_AL) { @@ -3130,9 +3290,8 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, Int delta = p - ptmp; vassert(delta >= 20 && delta <= 32); - /* bc !ct,cf,delta/4 */ - /* blez r_src, delta/4-1 */ - vassert(cond == MIPScc_EQ); + /* blez r_src, delta/4-1 + nop */ ptmp = mkFormI(ptmp, 6, r_src, 0, delta / 4 - 1); mkFormR(ptmp, 0, 0, 0, 0, 0, 0); } @@ -3162,11 +3321,11 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, /* Update the guest PC. */ /* move r9, dstGA */ - /* sw r9, amPC */ - p = mkLoadImm_EXACTLY2or5(p, /*r*/9, - (ULong)i->Min.XDirect.dstGA, mode64); - p = do_load_or_store_machine_word(p, False/*!isLoad*/, - /*r*/9, i->Min.XDirect.amPC, mode64); + /* sw/sd r9, amPC */ + p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, (ULong)i->Min.XDirect.dstGA, + mode64); + p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 9, + i->Min.XDirect.amPC, mode64); /* --- FIRST PATCHABLE BYTE follows --- */ /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're @@ -3178,12 +3337,12 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, void* disp_cp_chain_me = i->Min.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP : disp_cp_chain_me_to_slowEP; - p = mkLoadImm_EXACTLY2or5(p, /*r*/9, + p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, Ptr_to_ULong(disp_cp_chain_me), mode64); /* jalr $9 */ /* nop */ - p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4 - p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 + p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */ + p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */ /* --- END of PATCHABLE BYTES --- */ /* Fix up the conditional jump, if there was one. */ @@ -3191,10 +3350,11 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, Int delta = p - ptmp; delta = delta / 4 - 3; vassert(delta > 0 && delta < 40); - /* lw $9, 316($10) // guest_COND + + /* lw $9, COND_OFFSET(GuestSP) beq $9, $0, 2 - nop*/ - ptmp = mkFormI(ptmp, 35, 10, 9, 316); + nop */ + ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64)); ptmp = mkFormI(ptmp, 4, 0, 9, (delta)); mkFormR(ptmp, 0, 0, 0, 0, 0, 0); } @@ -3223,27 +3383,28 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, /* Update the guest PC. 
*/ /* sw r-dstGA, amPC */ - p = do_load_or_store_machine_word(p, False/*!isLoad*/, + p = do_load_or_store_machine_word(p, False /*!isLoad*/ , iregNo(i->Min.XIndir.dstGA, mode64), i->Min.XIndir.amPC, mode64); /* move r9, VG_(disp_cp_xindir) */ /* jalr r9 */ /* nop */ - p = mkLoadImm_EXACTLY2or5 ( p, /*r*/9, - Ptr_to_ULong(disp_cp_xindir), mode64); - p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4 - p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 + p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, + Ptr_to_ULong(disp_cp_xindir), mode64); + p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */ + p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */ /* Fix up the conditional jump, if there was one. */ if (i->Min.XIndir.cond != MIPScc_AL) { Int delta = p - ptmp; delta = delta / 4 - 3; vassert(delta > 0 && delta < 40); - /* lw $9, 316($10) // guest_COND + + /* lw $9, COND_OFFSET($GuestSP) beq $9, $0, 2 - nop*/ - ptmp = mkFormI(ptmp, 35, 10, 9, 316); + nop */ + ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64)); ptmp = mkFormI(ptmp, 4, 0, 9, (delta)); mkFormR(ptmp, 0, 0, 0, 0, 0, 0); } @@ -3262,8 +3423,8 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } /* Update the guest PC. */ - /* sw r-dstGA, amPC */ - p = do_load_or_store_machine_word(p, False/*!isLoad*/, + /* sw/sd r-dstGA, amPC */ + p = do_load_or_store_machine_word(p, False /*!isLoad*/ , iregNo(i->Min.XIndir.dstGA, mode64), i->Min.XIndir.amPC, mode64); @@ -3272,48 +3433,49 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, switch (i->Min.XAssisted.jk) { case Ijk_ClientReq: trcval = VEX_TRC_JMP_CLIENTREQ; break; case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break; - //case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128; break; - //case Ijk_Yield: trcval = VEX_TRC_JMP_YIELD; break; + /* case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128; break; + case Ijk_Yield: trcval = VEX_TRC_JMP_YIELD; break; */ case Ijk_EmWarn: trcval = VEX_TRC_JMP_EMWARN; break; case Ijk_EmFail: trcval = VEX_TRC_JMP_EMFAIL; break; - //case Ijk_MapFail: trcval = VEX_TRC_JMP_MAPFAIL; break; + /* case Ijk_MapFail: trcval = VEX_TRC_JMP_MAPFAIL; break; */ case Ijk_NoDecode: trcval = VEX_TRC_JMP_NODECODE; break; case Ijk_TInval: trcval = VEX_TRC_JMP_TINVAL; break; case Ijk_NoRedir: trcval = VEX_TRC_JMP_NOREDIR; break; case Ijk_SigTRAP: trcval = VEX_TRC_JMP_SIGTRAP; break; - //case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break; + /* case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break; */ case Ijk_SigBUS: trcval = VEX_TRC_JMP_SIGBUS; break; case Ijk_SigFPE_IntDiv: trcval = VEX_TRC_JMP_SIGFPE_INTDIV; break; case Ijk_SigFPE_IntOvf: trcval = VEX_TRC_JMP_SIGFPE_INTOVF; break; case Ijk_Boring: trcval = VEX_TRC_JMP_BORING; break; - /* We don't expect to see the following being assisted. */ - //case Ijk_Ret: - //case Ijk_Call: - /* fallthrough */ + /* We don't expect to see the following being assisted. 
+ case Ijk_Ret: + case Ijk_Call: + fallthrough */ default: ppIRJumpKind(i->Min.XAssisted.jk); vpanic("emit_MIPSInstr.Min_XAssisted: unexpected jump kind"); } vassert(trcval != 0); - p = mkLoadImm_EXACTLY2or5(p, /*r*/10, trcval, mode64); + p = mkLoadImm_EXACTLY2or6(p, /*r*/ GuestSP, trcval, mode64); /* move r9, VG_(disp_cp_xassisted) */ - p = mkLoadImm_EXACTLY2or5(p, /*r*/9, + p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, (ULong)Ptr_to_ULong(disp_cp_xassisted), mode64); /* jalr $9 nop */ - p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4 - p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 + p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */ + p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */ /* Fix up the conditional jump, if there was one. */ if (i->Min.XAssisted.cond != MIPScc_AL) { Int delta = p - ptmp; delta = delta / 4 - 3; vassert(delta > 0 && delta < 40); - /* lw $9, 316($10) // guest_COND + + /* lw $9, COND_OFFSET($GuestSP) beq $9, $0, 2 - nop*/ - ptmp = mkFormI(ptmp, 35, 10, 9, 316); + nop */ + ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64)); ptmp = mkFormI(ptmp, 4, 0, 9, (delta)); mkFormR(ptmp, 0, 0, 0, 0, 0, 0); } @@ -3376,7 +3538,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } break; } - + case Min_Store: { MIPSAMode *am_addr = i->Min.Store.dst; if (am_addr->tag == Mam_IR) { @@ -3439,7 +3601,10 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, UInt idx = am_addr->Mam.IR.index; UInt r_dst = iregNo(i->Min.LoadL.dst, mode64); - p = mkFormI(p, 0x30, r_src, r_dst, idx); + if (i->Min.LoadL.sz == 4) + p = mkFormI(p, 0x30, r_src, r_dst, idx); + else + p = mkFormI(p, 0x34, r_src, r_dst, idx); goto done; } case Min_StoreC: { @@ -3448,7 +3613,10 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, UInt idx = am_addr->Mam.IR.index; UInt r_dst = iregNo(am_addr->Mam.IR.base, mode64); - p = mkFormI(p, 0x38, r_dst, r_src, idx); + if (i->Min.StoreC.sz == 4) + p = mkFormI(p, 0x38, r_dst, r_src, idx); + else + p = mkFormI(p, 0x3C, r_dst, r_src, idx); goto done; } case Min_RdWrLR: { @@ -3460,9 +3628,8 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, p = mkMoveReg(p, reg, 31); goto done; } - - // Floating point - + + /* Floating point */ case Min_FpLdSt: { MIPSAMode *am_addr = i->Min.FpLdSt.addr; UChar sz = i->Min.FpLdSt.sz; @@ -3501,79 +3668,73 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, case Min_FpUnary: { switch (i->Min.FpUnary.op) { - case Mfp_MOVS: { // FP move + case Mfp_MOVS: { /* FP move */ UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64); UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x6); break; } - case Mfp_MOVD: { // FP move + case Mfp_MOVD: { /* FP move */ UInt fr_dst = dregNo(i->Min.FpUnary.dst); UInt fr_src = dregNo(i->Min.FpUnary.src); p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x6); break; } - case Mfp_ABSS: { // ABSS + case Mfp_ABSS: { /* ABSS */ UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64); UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x5); break; } - case Mfp_ABSD: { // ABSD + case Mfp_ABSD: { /* ABSD */ UInt fr_dst = dregNo(i->Min.FpUnary.dst); UInt fr_src = dregNo(i->Min.FpUnary.src); p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x5); break; } - case Mfp_NEGS: { // ABSS + case Mfp_NEGS: { /* ABSS */ UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64); UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x7); break; } - case Mfp_NEGD: { // ABSD + case Mfp_NEGD: { /* ABSD */ UInt fr_dst = dregNo(i->Min.FpUnary.dst); UInt 
fr_src = dregNo(i->Min.FpUnary.src); p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x7); break; } - case Mfp_CVTD: { //CVT.D - UInt fr_dst = dregNo(i->Min.FpUnary.dst); - UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); - p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x21); - break; - } - case Mfp_SQRTS: { //SQRT.S + case Mfp_SQRTS: { /* SQRT.S */ UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64); UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x04); break; } - case Mfp_SQRTD: { //SQRT.D + case Mfp_SQRTD: { /* SQRT.D */ UInt fr_dst = dregNo(i->Min.FpUnary.dst); UInt fr_src = dregNo(i->Min.FpUnary.src); p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x04); break; } - case Mfp_RSQRTS: { //RSQRT.S + case Mfp_RSQRTS: { /* RSQRT.S */ UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64); UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x16); break; } - case Mfp_RSQRTD: { //RSQRT.D + case Mfp_RSQRTD: { /* RSQRT.D */ UInt fr_dst = dregNo(i->Min.FpUnary.dst); UInt fr_src = dregNo(i->Min.FpUnary.src); p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x16); break; } - case Mfp_RECIPS: { //RECIP.S + case Mfp_RECIPS: { /* RECIP.S */ UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64); UInt fr_src = fregNo(i->Min.FpUnary.src, mode64); p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x15); break; } - case Mfp_RECIPD: { //RECIP.D + case Mfp_RECIPD: { /* RECIP.D */ UInt fr_dst = dregNo(i->Min.FpUnary.dst); UInt fr_src = dregNo(i->Min.FpUnary.src); p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x15); @@ -3649,6 +3810,46 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, goto done; } + case Min_FpTernary: { + switch (i->Min.FpTernary.op) { + case Mfp_MADDS: { + UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64); + UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64); + UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64); + UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64); + p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x20); + break; + } + case Mfp_MADDD: { + UInt fr_dst = dregNo(i->Min.FpTernary.dst); + UInt fr_src1 = dregNo(i->Min.FpTernary.src1); + UInt fr_src2 = dregNo(i->Min.FpTernary.src2); + UInt fr_src3 = dregNo(i->Min.FpTernary.src3); + p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x21); + break; + } + case Mfp_MSUBS: { + UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64); + UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64); + UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64); + UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64); + p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x28); + break; + } + case Mfp_MSUBD: { + UInt fr_dst = dregNo(i->Min.FpTernary.dst); + UInt fr_src1 = dregNo(i->Min.FpTernary.src1); + UInt fr_src2 = dregNo(i->Min.FpTernary.src2); + UInt fr_src3 = dregNo(i->Min.FpTernary.src3); + p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x29); + break; + } + default: + goto bad; + } + goto done; + } + case Min_FpConvert: { switch (i->Min.FpConvert.op) { UInt fr_dst, fr_src; @@ -3677,6 +3878,31 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, fr_src = fregNo(i->Min.FpConvert.src, mode64); p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x21); break; + case Mfp_CVTDL: + fr_dst = dregNo(i->Min.FpConvert.dst); + fr_src = dregNo(i->Min.FpConvert.src); + p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x21); + break; + case Mfp_CVTDS: + fr_dst = dregNo(i->Min.FpConvert.dst); + fr_src = fregNo(i->Min.FpConvert.src, mode64); + p = mkFormR(p, 0x11, 0x10, 0, 
fr_src, fr_dst, 0x21); + break; + case Mfp_CVTSL: + fr_dst = dregNo(i->Min.FpConvert.dst); + fr_src = fregNo(i->Min.FpConvert.src, mode64); + p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x20); + break; + case Mfp_CVTLS: + fr_dst = fregNo(i->Min.FpConvert.dst, mode64); + fr_src = dregNo(i->Min.FpConvert.src); + p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x25); + break; + case Mfp_CVTLD: + fr_dst = dregNo(i->Min.FpConvert.dst); + fr_src = dregNo(i->Min.FpConvert.src); + p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x25); + break; case Mfp_TRUWS: fr_dst = fregNo(i->Min.FpConvert.dst, mode64); fr_src = fregNo(i->Min.FpConvert.src, mode64); @@ -3760,6 +3986,72 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, } goto done; } + + case Min_FpGpMove: { + switch (i->Min.FpGpMove.op) { + UInt rt, fs; + case MFpGpMove_mfc1: { + rt = iregNo(i->Min.FpGpMove.dst, mode64); + fs = fregNo(i->Min.FpGpMove.src, mode64); + p = mkFormR(p, 0x11, 0x0, rt, fs, 0x0, 0x0); + break; + } + case MFpGpMove_dmfc1: { + vassert(mode64); + rt = iregNo(i->Min.FpGpMove.dst, mode64); + fs = fregNo(i->Min.FpGpMove.src, mode64); + p = mkFormR(p, 0x11, 0x1, rt, fs, 0x0, 0x0); + break; + } + case MFpGpMove_mtc1: { + rt = iregNo(i->Min.FpGpMove.src, mode64); + fs = fregNo(i->Min.FpGpMove.dst, mode64); + p = mkFormR(p, 0x11, 0x4, rt, fs, 0x0, 0x0); + break; + } + case MFpGpMove_dmtc1: { + vassert(mode64); + rt = iregNo(i->Min.FpGpMove.src, mode64); + fs = fregNo(i->Min.FpGpMove.dst, mode64); + p = mkFormR(p, 0x11, 0x5, rt, fs, 0x0, 0x0); + break; + } + default: + goto bad; + } + goto done; + } + + case Min_MoveCond: { + switch (i->Min.MoveCond.op) { + UInt d, s, t; + case MFpMoveCond_movns: { + d = fregNo(i->Min.MoveCond.dst, mode64); + s = fregNo(i->Min.MoveCond.src, mode64); + t = iregNo(i->Min.MoveCond.cond, mode64); + p = mkFormR(p, 0x11, 0x10, t, s, d, 0x13); + break; + } + case MFpMoveCond_movnd: { + d = dregNo(i->Min.MoveCond.dst); + s = dregNo(i->Min.MoveCond.src); + t = iregNo(i->Min.MoveCond.cond, mode64); + p = mkFormR(p, 0x11, 0x11, t, s, d, 0x13); + break; + } + case MMoveCond_movn: { + d = iregNo(i->Min.MoveCond.dst, mode64); + s = iregNo(i->Min.MoveCond.src, mode64); + t = iregNo(i->Min.MoveCond.cond, mode64); + p = mkFormR(p, 0, s, t, d, 0, 0xb); + break; + } + default: + goto bad; + } + goto done; + } + case Min_EvCheck: { /* This requires a 32-bit dec/test in 32 mode. 
*/ /* We generate: @@ -3774,23 +4066,23 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, */ UChar* p0 = p; /* lw r9, amCounter */ - p = do_load_or_store_machine_word(p, True/*isLoad*/, /*r*/9, + p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 9, i->Min.EvCheck.amCounter, mode64); /* addiu r9,r9,-1 */ p = mkFormI(p, 9, 9, 9, 0xFFFF); /* sw r30, amCounter */ - p = do_load_or_store_machine_word(p, False/*!isLoad*/, /*r*/9, + p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 9, i->Min.EvCheck.amCounter, mode64); /* bgez t9, nofail */ p = mkFormI(p, 1, 9, 1, 3); /* lw r9, amFailAddr */ - p = do_load_or_store_machine_word(p, True/*isLoad*/, /*r*/9, + p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 9, i->Min.EvCheck.amFailAddr, mode64); /* jalr $9 */ - p = mkFormR(p, 0, 9, 0, 31, 0, 9); // p += 4 - p = mkFormR(p, 0, 0, 0, 0, 0, 0); // p += 4 + p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */ + p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */ /* nofail: */ - + /* Crosscheck */ vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0); goto done; @@ -3800,43 +4092,58 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, /* Generate a code template to increment a memory location whose address will be known later as an immediate value. This code template will be patched once the memory location is known. - For now we do this with address == 0x65556555. - 32-bit: - - move r9, 0x65556555 - lw r8, 0(r9) - addiu r8, r8, 1 # add least significant word - sw r8, 0(r9) - sltiu r1, r8, 1 # set carry-in bit - lw r8, 4(r9) - addu r8, r8, r1 - sw r8, 4(r9) */ - + For now we do this with address == 0x65556555. */ if (mode64) { - vassert(0); + /* 64-bit: + move r9, 0x6555655565556555ULL + ld r8, 0(r9) + daddiu r8, r8, 1 + sd r8, 0(r9) */ + + /* move r9, 0x6555655565556555ULL */ + p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x6555655565556555ULL, + True /*mode64*/); + /* ld r8, 0(r9) */ + p = mkFormI(p, 55, 9, 8, 0); + + /* daddiu r8, r8, 1 */ + p = mkFormI(p, 25, 8, 8, 1); + + /* sd r8, 0(r9) */ + p = mkFormI(p, 63, 9, 8, 0); } else { - // move r9, 0x65556555 - p = mkLoadImm_EXACTLY2or5(p, /*r*/9, 0x65556555ULL, - False/*!mode64*/); - // lw r8, 0(r9) + /* 32-bit: + move r9, 0x65556555 + lw r8, 0(r9) + addiu r8, r8, 1 # add least significant word + sw r8, 0(r9) + sltiu r1, r8, 1 # set carry-in bit + lw r8, 4(r9) + addu r8, r8, r1 + sw r8, 4(r9) */ + + /* move r9, 0x65556555 */ + p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x65556555ULL, + False /*!mode64*/); + /* lw r8, 0(r9) */ p = mkFormI(p, 35, 9, 8, 0); - // addiu r8, r8, 1 # add least significant word + /* addiu r8, r8, 1 # add least significant word */ p = mkFormI(p, 9, 8, 8, 1); - // sw r8, 0(r9) + /* sw r8, 0(r9) */ p = mkFormI(p, 43, 9, 8, 0); - // sltiu r1, r8, 1 # set carry-in bit + /* sltiu r1, r8, 1 # set carry-in bit */ p = mkFormI(p, 11, 8, 1, 1); - // lw r8, 4(r9) + /* lw r8, 4(r9) */ p = mkFormI(p, 35, 9, 8, 4); - // addu r8, r8, r1 + /* addu r8, r8, r1 */ p = mkFormR(p, 0, 8, 1, 8, 0, 33); - // sw r8, 4(r9) + /* sw r8, 4(r9) */ p = mkFormI(p, 43, 9, 8, 4); } @@ -3845,7 +4152,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, *is_profInc = True; goto done; } - + default: goto bad; @@ -3855,8 +4162,8 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc, vex_printf("\n=> "); ppMIPSInstr(i, mode64); vpanic("emit_MIPSInstr"); - /*NOTREACHED*/ done: - //vassert(p - &buf[0] <= 32); + /* NOTREACHED */ done: + vassert(p - &buf[0] <= 128); return p - &buf[0]; } @@ -3881,36 +4188,36 @@ VexInvalRange chainXDirect_MIPS ( void* 
place_to_chain, jalr r9 nop viz - <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5> - 0x120F809 // jalr r9 - 0x00000000 // nop + <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6> + 0x120F809 # jalr r9 + 0x00000000 # nop */ UChar* p = (UChar*)place_to_chain; vassert(0 == (3 & (HWord)p)); - vassert(isLoadImm_EXACTLY2or5(p, /*r*/9, + vassert(isLoadImm_EXACTLY2or6(p, /*r*/9, (UInt)Ptr_to_ULong(disp_cp_chain_me_EXPECTED), mode64)); - vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x120F809); - vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x00000000); + vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809); + vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000); /* And what we want to change it to is either: move r9, place_to_jump_to jalr r9 nop viz - <8 bytes generated by mkLoadImm_EXACTLY2or5> - 0x120F809 // jalr r9 - 0x00000000 // nop + <8 bytes generated by mkLoadImm_EXACTLY2or6> + 0x120F809 # jalr r9 + 0x00000000 # nop The replacement has the same length as the original. */ - p = mkLoadImm_EXACTLY2or5(p, /*r*/9, + p = mkLoadImm_EXACTLY2or6(p, /*r*/9, Ptr_to_ULong(place_to_jump_to), mode64); p = emit32(p, 0x120F809); p = emit32(p, 0x00000000); Int len = p - (UChar*)place_to_chain; - vassert(len == (mode64 ? 28 : 16)); /* stay sane */ + vassert(len == (mode64 ? 32 : 16)); /* stay sane */ VexInvalRange vir = {(HWord)place_to_chain, len}; return vir; } @@ -3927,34 +4234,34 @@ VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain, jalr r9 nop viz - <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5> - 0x120F809 // jalr r9 - 0x00000000 // nop + <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6> + 0x120F809 # jalr r9 + 0x00000000 # nop */ UChar* p = (UChar*)place_to_unchain; vassert(0 == (3 & (HWord)p)); - vassert(isLoadImm_EXACTLY2or5(p, /*r*/9, + vassert(isLoadImm_EXACTLY2or6(p, /*r*/ 9, Ptr_to_ULong(place_to_jump_to_EXPECTED), mode64)); - vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x120F809); - vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x00000000); + vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809); + vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000); /* And what we want to change it to is: move r9, disp_cp_chain_me jalr r9 nop viz - <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5> - 0x120F809 // jalr r9 - 0x00000000 // nop + <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6> + 0x120F809 # jalr r9 + 0x00000000 # nop The replacement has the same length as the original. */ - p = mkLoadImm_EXACTLY2or5(p, /*r*/9, + p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, Ptr_to_ULong(disp_cp_chain_me), mode64); p = emit32(p, 0x120F809); p = emit32(p, 0x00000000); Int len = p - (UChar*)place_to_unchain; - vassert(len == (mode64 ? 28 : 16)); /* stay sane */ + vassert(len == (mode64 ? 32 : 16)); /* stay sane */ VexInvalRange vir = {(HWord)place_to_unchain, len}; return vir; } @@ -3964,20 +4271,31 @@ VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain, VexInvalRange patchProfInc_MIPS ( void* place_to_patch, ULong* location_of_counter, Bool mode64 ) { - vassert(sizeof(ULong*) == 4); + if (mode64) + vassert(sizeof(ULong*) == 8); + else + vassert(sizeof(ULong*) == 4); UChar* p = (UChar*)place_to_patch; vassert(0 == (3 & (HWord)p)); - vassert(isLoadImm_EXACTLY2or5((UChar *)p, /*r*/9, 0x65556555, mode64)); + vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9, + mode64 ? 0x6555655565556555ULL : 0x65556555, + mode64)); - vassert(fetch32(p + (mode64 ? 20 : 8) + 0) == 0x8D280000); - vassert(fetch32(p + (mode64 ? 20 : 8) + 4) == 0x25080001); - vassert(fetch32(p + (mode64 ? 
20 : 8) + 8) == 0xAD280000); - vassert(fetch32(p + (mode64 ? 20 : 8) + 12) == 0x2d010001); - vassert(fetch32(p + (mode64 ? 20 : 8) + 16) == 0x8d280004); - vassert(fetch32(p + (mode64 ? 20 : 8) + 20) == 0x01014021); - vassert(fetch32(p + (mode64 ? 20 : 8) + 24) == 0xad280004); + if (mode64) { + vassert(fetch32(p + 24 + 0) == 0xDD280000); + vassert(fetch32(p + 24 + 4) == 0x65080001); + vassert(fetch32(p + 24 + 8) == 0xFD280000); + } else { + vassert(fetch32(p + 8 + 0) == 0x8D280000); + vassert(fetch32(p + 8 + 4) == 0x25080001); + vassert(fetch32(p + 8 + 8) == 0xAD280000); + vassert(fetch32(p + 8 + 12) == 0x2d010001); + vassert(fetch32(p + 8 + 16) == 0x8d280004); + vassert(fetch32(p + 8 + 20) == 0x01014021); + vassert(fetch32(p + 8 + 24) == 0xad280004); + } - p = mkLoadImm_EXACTLY2or5(p, /*r*/9, + p = mkLoadImm_EXACTLY2or6(p, /*r*/9, Ptr_to_ULong(location_of_counter), mode64); VexInvalRange vir = {(HWord)p, 8}; diff --git a/VEX/priv/host_mips_defs.h b/VEX/priv/host_mips_defs.h index fbcf3fb337..a7f6d77032 100644 --- a/VEX/priv/host_mips_defs.h +++ b/VEX/priv/host_mips_defs.h @@ -7,7 +7,7 @@ This file is part of Valgrind, a dynamic binary instrumentation framework. - Copyright (C) 2010-2012 RT-RK + Copyright (C) 2010-2013 RT-RK mips-valgrind@rt-rk.com This program is free software; you can redistribute it and/or @@ -32,12 +32,17 @@ #define __VEX_HOST_MIPS_DEFS_H #include "libvex_basictypes.h" -#include "libvex.h" // VexArch -#include "host_generic_regs.h" // HReg +#include "libvex.h" /* VexArch */ +#include "host_generic_regs.h" /* HReg */ /* Num registers used for function calls */ +#if defined(VGP_mips64_linux) +/* a0, a1, a2, a3, a4, a5, a6, a7 */ +#define MIPS_N_REGPARMS 8 +#else +/* a0, a1, a2, a3 */ #define MIPS_N_REGPARMS 4 - +#endif /* --------- Registers. --------- */ /* The usual HReg abstraction. 
@@ -46,9 +51,9 @@ extern void ppHRegMIPS(HReg, Bool); -extern HReg hregMIPS_GPR0(Bool mode64); // scratch reg / zero reg -extern HReg hregMIPS_GPR1(Bool mode64); // reserved for trap handling -extern HReg hregMIPS_GPR2(Bool mode64); // reserved for trap handling +extern HReg hregMIPS_GPR0(Bool mode64); /* scratch reg / zero reg */ +extern HReg hregMIPS_GPR1(Bool mode64); +extern HReg hregMIPS_GPR2(Bool mode64); extern HReg hregMIPS_GPR3(Bool mode64); extern HReg hregMIPS_GPR4(Bool mode64); extern HReg hregMIPS_GPR5(Bool mode64); @@ -69,8 +74,8 @@ extern HReg hregMIPS_GPR19(Bool mode64); extern HReg hregMIPS_GPR20(Bool mode64); extern HReg hregMIPS_GPR21(Bool mode64); extern HReg hregMIPS_GPR22(Bool mode64); -extern HReg hregMIPS_GPR23(Bool mode64); // GuestStatePtr -extern HReg hregMIPS_GPR24(Bool mode64); // reserved for dispatcher +extern HReg hregMIPS_GPR23(Bool mode64); /* GuestStatePtr */ +extern HReg hregMIPS_GPR24(Bool mode64); extern HReg hregMIPS_GPR25(Bool mode64); extern HReg hregMIPS_GPR26(Bool mode64); extern HReg hregMIPS_GPR27(Bool mode64); @@ -139,7 +144,7 @@ extern HReg hregMIPS_D13(void); extern HReg hregMIPS_D14(void); extern HReg hregMIPS_D15(void); -#define GuestStatePointer(_mode64) hregMIPS_GPR10(_mode64) +#define GuestStatePointer(_mode64) hregMIPS_GPR23(_mode64) #define StackFramePointer(_mode64) hregMIPS_GPR30(_mode64) #define LinkRegister(_mode64) hregMIPS_GPR31(_mode64) @@ -150,34 +155,39 @@ extern HReg hregMIPS_D15(void); #define HIRegister(_mode64) hregMIPS_HI(_mode64) #define LORegister(_mode64) hregMIPS_LO(_mode64) +#if defined(VGP_mips64_linux) +/* a0, a1, a2, a3, a4, a5, a6, a7 */ +#define MIPS_N_ARGREGS 8 +#elif defined(VGP_mips32_linux) /* a0, a1, a2, a3 */ #define MIPS_N_ARGREGS 4 +#endif /* --------- Condition codes, Intel encoding. --------- */ typedef enum { - MIPScc_EQ = 0, /* equal */ - MIPScc_NE = 1, /* not equal */ + MIPScc_EQ = 0, /* equal */ + MIPScc_NE = 1, /* not equal */ - MIPScc_HS = 2, /* >=u (higher or same) */ - MIPScc_LO = 3, /* =u (higher or same) */ + MIPScc_LO = 3, /* u (higher) */ - MIPScc_LS = 9, /* <=u (lower or same) */ + MIPScc_HI = 8, /* >u (higher) */ + MIPScc_LS = 9, /* <=u (lower or same) */ - MIPScc_GE = 10, /* >=s (signed greater or equal) */ - MIPScc_LT = 11, /* =s (signed greater or equal) */ + MIPScc_LT = 11, /* s (signed greater) */ - MIPScc_LE = 13, /* <=s (signed less or equal) */ + MIPScc_GT = 12, /* >s (signed greater) */ + MIPScc_LE = 13, /* <=s (signed less or equal) */ - MIPScc_AL = 14, /* always (unconditional) */ - MIPScc_NV = 15 /* never (unconditional): */ + MIPScc_AL = 14, /* always (unconditional) */ + MIPScc_NV = 15 /* never (unconditional): */ } MIPSCondCode; extern const HChar *showMIPSCondCode(MIPSCondCode); @@ -236,66 +246,6 @@ extern void ppMIPSRH(MIPSRH *, Bool); extern MIPSRH *MIPSRH_Imm(Bool, UShort); extern MIPSRH *MIPSRH_Reg(HReg); -/* --- Addressing Mode suitable for VFP --- */ -typedef struct { - HReg reg; - Int simm11; -} MIPSAModeV; - -extern MIPSAModeV *mkMIPSAModeV(HReg reg, Int simm11); - -extern void ppMIPSAModeV(MIPSAModeV *); - -/* --------- Reg or imm-8x4 operands --------- */ -/* a.k.a (a very restricted form of) Shifter Operand, - in the MIPS parlance. 
*/ - -typedef enum { - MIPSri84_I84 = 5, /* imm8 `ror` (2 * imm4) */ - MIPSri84_R /* reg */ -} MIPSRI84Tag; - -typedef struct { - MIPSRI84Tag tag; - union { - struct { - UShort imm8; - UShort imm4; - } I84; - struct { - HReg reg; - } R; - } MIPSri84; -} MIPSRI84; - -extern MIPSRI84 *MIPSRI84_I84(UShort imm8, UShort imm4); -extern MIPSRI84 *MIPSRI84_R(HReg); - -extern void ppMIPSRI84(MIPSRI84 *); - -/* --------- Reg or imm5 operands --------- */ -typedef enum { - MIPSri5_I5 = 7, /* imm5, 1 .. 31 only (no zero!) */ - MIPSri5_R /* reg */ -} MIPSRI5Tag; - -typedef struct { - MIPSRI5Tag tag; - union { - struct { - UInt imm5; - } I5; - struct { - HReg reg; - } R; - } MIPSri5; -} MIPSRI5; - -extern MIPSRI5 *MIPSRI5_I5(UInt imm5); -extern MIPSRI5 *MIPSRI5_R(HReg); - -extern void ppMIPSRI5(MIPSRI5 *); - /* --------- Instructions. --------- */ /*Tags for operations*/ @@ -304,6 +254,8 @@ extern void ppMIPSRI5(MIPSRI5 *); typedef enum { Mun_CLO, Mun_CLZ, + Mun_DCLO, + Mun_DCLZ, Mun_NOP, } MIPSUnaryOp; @@ -316,6 +268,8 @@ typedef enum { Malu_INVALID, Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR, + Malu_DADD, Malu_DSUB, + Malu_SLT } MIPSAluOp; extern const HChar *showMIPSAluOp(MIPSAluOp, @@ -343,50 +297,53 @@ extern const HChar *showMIPSMaccOp(MIPSMaccOp, Bool); /* ----- Instruction tags ----- */ typedef enum { - Min_LI, /* load word (32/64-bit) immediate (fake insn) */ - Min_Alu, /* word add/sub/and/or/xor/nor/others? */ - Min_Shft, /* word sll/srl/sra */ - Min_Unary, /* clo, clz, nop, neg */ + Min_LI, /* load word (32/64-bit) immediate (fake insn) */ + Min_Alu, /* word add/sub/and/or/xor/nor/others? */ + Min_Shft, /* word sll/srl/sra */ + Min_Unary, /* clo, clz, nop, neg */ - Min_Cmp, /* word compare (fake insn) */ + Min_Cmp, /* word compare (fake insn) */ - Min_Mul, /* widening/non-widening multiply */ - Min_Div, /* div */ + Min_Mul, /* widening/non-widening multiply */ + Min_Div, /* div */ - Min_Call, /* call to address in register */ + Min_Call, /* call to address in register */ /* The following 5 insns are mandated by translation chaining */ - Min_XDirect, /* direct transfer to GA */ - Min_XIndir, /* indirect transfer to GA */ - Min_XAssisted, /* assisted transfer to GA */ - Min_EvCheck, /* Event check */ - Min_ProfInc, /* 64-bit profile counter increment */ - - Min_RdWrLR, /* Read/Write Link Register */ - Min_Mthi, /* Move to HI from GP register */ - Min_Mtlo, /* Move to LO from GP register */ - Min_Mfhi, /* Move from HI to GP register */ - Min_Mflo, /* Move from LO to GP register */ - Min_Macc, /* Multiply and accumulate */ - - Min_Load, /* zero-extending load a 8|16|32 bit value from mem */ - Min_Store, /* store a 8|16|32 bit value to mem */ - Min_LoadL, /* mips Load Linked Word */ - Min_StoreC, /* mips Store Conditional Word */ - - Min_FpUnary, /* FP unary op */ - Min_FpBinary, /* FP binary op */ - Min_FpConvert, /* FP conversion op */ - Min_FpMulAcc, /* FP multipy-accumulate style op */ - Min_FpLdSt, /* FP load/store */ - Min_FpSTFIW, /* stfiwx */ - Min_FpRSP, /* FP round IEEE754 double to IEEE754 single */ - Min_FpCftI, /* fcfid/fctid/fctiw */ - Min_FpCMov, /* FP floating point conditional move */ - Min_MtFCSR, /* set FCSR register */ - Min_MfFCSR, /* get FCSR register */ - Min_FpCompare, /* FP compare, generating value into int reg */ - Min_MovCond + Min_XDirect, /* direct transfer to GA */ + Min_XIndir, /* indirect transfer to GA */ + Min_XAssisted, /* assisted transfer to GA */ + Min_EvCheck, /* Event check */ + Min_ProfInc, /* 64-bit profile counter increment */ + + 
Min_RdWrLR, /* Read/Write Link Register */ + Min_Mthi, /* Move to HI from GP register */ + Min_Mtlo, /* Move to LO from GP register */ + Min_Mfhi, /* Move from HI to GP register */ + Min_Mflo, /* Move from LO to GP register */ + Min_Macc, /* Multiply and accumulate */ + + Min_Load, /* zero-extending load a 8|16|32 bit value from mem */ + Min_Store, /* store a 8|16|32 bit value to mem */ + Min_LoadL, /* mips Load Linked Word - LL */ + Min_StoreC, /* mips Store Conditional Word - SC */ + + Min_FpUnary, /* FP unary op */ + Min_FpBinary, /* FP binary op */ + Min_FpTernary, /* FP ternary op */ + Min_FpConvert, /* FP conversion op */ + Min_FpMulAcc, /* FP multiply-accumulate style op */ + Min_FpLdSt, /* FP load/store */ + Min_FpSTFIW, /* stfiwx */ + Min_FpRSP, /* FP round IEEE754 double to IEEE754 single */ + Min_FpCftI, /* fcfid/fctid/fctiw */ + Min_FpCMov, /* FP floating point conditional move */ + Min_MtFCSR, /* set FCSR register */ + Min_MfFCSR, /* get FCSR register */ + Min_FpCompare, /* FP compare, generating value into int reg */ + + Min_FpGpMove, /* Move from/to fpr to/from gpr */ + Min_MoveCond /* Move Conditional */ } MIPSInstrTag; /* --------- */ @@ -399,19 +356,42 @@ typedef enum { /* Binary */ Mfp_ADDD, Mfp_SUBD, Mfp_MULD, Mfp_DIVD, - Mfp_ADDS, Mfp_SUBS, Mfp_MULS, Mfp_DIVS, Mfp_CVTSD, Mfp_CVTSW, Mfp_CVTWD, - Mfp_CVTWS, Mfp_TRULS, Mfp_TRULD, Mfp_TRUWS, Mfp_TRUWD, Mfp_FLOORWS, - Mfp_FLOORWD, Mfp_ROUNDWS, Mfp_ROUNDWD, Mfp_CVTDW, Mfp_CMP, - Mfp_CEILWS, Mfp_CEILWD, Mfp_CEILLS, Mfp_CEILLD, + Mfp_ADDS, Mfp_SUBS, Mfp_MULS, Mfp_DIVS, /* Unary */ Mfp_SQRTS, Mfp_SQRTD, Mfp_RSQRTS, Mfp_RSQRTD, Mfp_RECIPS, Mfp_RECIPD, Mfp_ABSS, Mfp_ABSD, Mfp_NEGS, Mfp_NEGD, Mfp_MOVS, Mfp_MOVD, - Mfp_RES, Mfp_RSQRTE, Mfp_FRIN, Mfp_FRIM, Mfp_FRIP, Mfp_FRIZ, Mfp_CVTD + Mfp_RSQRTE, + + /* FP convert */ + Mfp_CVTSD, Mfp_CVTSW, Mfp_CVTWD, + Mfp_CVTWS, Mfp_CVTDL, Mfp_CVTSL, Mfp_CVTLS, Mfp_CVTLD, Mfp_TRULS, Mfp_TRULD, + Mfp_TRUWS, Mfp_TRUWD, Mfp_FLOORWS, Mfp_FLOORWD, Mfp_ROUNDWS, Mfp_ROUNDWD, + Mfp_CVTDW, Mfp_CMP, Mfp_CEILWS, Mfp_CEILWD, Mfp_CEILLS, Mfp_CEILLD, Mfp_CVTDS + } MIPSFpOp; extern const HChar *showMIPSFpOp(MIPSFpOp); +/* Move from/to fpr to/from gpr */ +typedef enum { + MFpGpMove_mfc1, /* Move Word From Floating Point - MIPS32 */ + MFpGpMove_dmfc1, /* Doubleword Move from Floating Point - MIPS64 */ + MFpGpMove_mtc1, /* Move Word to Floating Point - MIPS32 */ + MFpGpMove_dmtc1 /* Doubleword Move to Floating Point - MIPS64 */ +} MIPSFpGpMoveOp; + +extern const HChar *showMIPSFpGpMoveOp ( MIPSFpGpMoveOp ); + +/* Move Conditional */ +typedef enum { + MFpMoveCond_movns, /* FP Move Conditional on Not Zero - MIPS32 */ + MFpMoveCond_movnd, + MMoveCond_movn /* Move Conditional on Not Zero */ +} MIPSMoveCondOp; + +extern const HChar *showMIPSMoveCondOp ( MIPSMoveCondOp ); + /*--------- Structure for instructions ----------*/ /* Destinations are on the LEFT (first operand) */ @@ -467,30 +447,30 @@ typedef struct { MIPSCondCode cond; } Cmp; struct { - Bool widening; //True => widening, False => non-widening - Bool syned; //signed/unsigned - meaningless if widenind = False + Bool widening; /* True => widening, False => non-widening */ + Bool syned; /* signed/unsigned - meaningless if widening = False */ Bool sz32; HReg dst; HReg srcL; HReg srcR; } Mul; struct { - Bool syned; //signed/unsigned - meaningless if widenind = False + Bool syned; /* signed/unsigned - meaningless if widening = False */ Bool sz32; HReg srcL; HReg srcR; } Div; /* Pseudo-insn.
Call target (an absolute address), on given condition (which could be Mcc_ALWAYS). argiregs indicates - which of r3 .. r10 + which of $4 .. $7 (mips32) or $4 .. $11 (mips64) carries argument values for this call, - using a bit mask (1< 4 args"); + vpanic("doHelperCall(MIPS): cannot currently handle > 4 or 8 args"); + } + if (mode64) { + argregs[0] = hregMIPS_GPR4(mode64); + argregs[1] = hregMIPS_GPR5(mode64); + argregs[2] = hregMIPS_GPR6(mode64); + argregs[3] = hregMIPS_GPR7(mode64); + argregs[4] = hregMIPS_GPR8(mode64); + argregs[5] = hregMIPS_GPR9(mode64); + argregs[6] = hregMIPS_GPR10(mode64); + argregs[7] = hregMIPS_GPR11(mode64); + argiregs = 0; + } else { + argregs[0] = hregMIPS_GPR4(mode64); + argregs[1] = hregMIPS_GPR5(mode64); + argregs[2] = hregMIPS_GPR6(mode64); + argregs[3] = hregMIPS_GPR7(mode64); + argiregs = 0; } - argregs[0] = hregMIPS_GPR4(mode64); - argregs[1] = hregMIPS_GPR5(mode64); - argregs[2] = hregMIPS_GPR6(mode64); - argregs[3] = hregMIPS_GPR7(mode64); - argiregs = 0; tmpregs[0] = tmpregs[1] = tmpregs[2] = tmpregs[3] = INVALID_HREG; - /* First decide which scheme (slow or fast) is to be used. First - assume the fast scheme, and select slow if any contraindications - (wow) appear. */ + /* First decide which scheme (slow or fast) is to be used. First assume the + fast scheme, and select slow if any contraindications (wow) appear. */ go_fast = True; @@ -424,11 +447,6 @@ static void doHelperCall(ISelEnv * env, Bool passBBP, IRExpr * guard, } } - /* save GuestStatePointer on the stack */ - sub_from_sp(env, 8); // Move SP down 4 bytes - addInstr(env, MIPSInstr_Store(4, MIPSAMode_IR(0, StackPointer(mode64)), - GuestStatePointer(mode64), mode64)); - /* At this point the scheme to use has been established. Generate code to get the arg values into the argument rregs. */ if (go_fast) { @@ -449,7 +467,7 @@ static void doHelperCall(ISelEnv * env, Bool passBBP, IRExpr * guard, argiregs |= (1 << (argreg + 4)); addInstr(env, mk_iMOVds_RR(argregs[argreg], iselWordExpr_R(env, args[i]))); - } else { // Ity_I64 + } else { /* Ity_I64 */ if (argreg & 1) { argreg++; argiregs |= (1 << (argreg + 4)); @@ -482,7 +500,7 @@ static void doHelperCall(ISelEnv * env, Bool passBBP, IRExpr * guard, || typeOfIRExpr(env->type_env, args[i]) == Ity_I64); if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32 || mode64) { tmpregs[argreg] = iselWordExpr_R(env, args[i]); - } else { // Ity_I64 + } else { /* Ity_I64 */ if (argreg & 1) argreg++; if (argreg + 1 >= MIPS_N_REGPARMS) @@ -512,7 +530,7 @@ static void doHelperCall(ISelEnv * env, Bool passBBP, IRExpr * guard, } /* Move the args to their final destinations. */ for (i = 0; i < argreg; i++) { - if (hregIsInvalid(tmpregs[i])) // Skip invalid regs + if (hregIsInvalid(tmpregs[i])) /* Skip invalid regs */ continue; /* None of these insns, including any spill code that might be generated, may alter the condition codes. */ @@ -521,23 +539,14 @@ static void doHelperCall(ISelEnv * env, Bool passBBP, IRExpr * guard, } } - target = toUInt(Ptr_to_ULong(cee->addr)); + target = mode64 ? Ptr_to_ULong(cee->addr) : + toUInt(Ptr_to_ULong(cee->addr)); /* Finally, the call itself. 
*/ - if (mode64) - if (cc == MIPScc_AL) { - addInstr(env, MIPSInstr_CallAlways(cc, target, argiregs, rloc)); - } else { - addInstr(env, MIPSInstr_Call(cc, target, argiregs, src, rloc)); - } else if (cc == MIPScc_AL) { - addInstr(env, MIPSInstr_CallAlways(cc, (Addr32) target, argiregs, rloc)); - } else { - addInstr(env, MIPSInstr_Call(cc, (Addr32) target, argiregs, src, rloc)); - } - /* restore GuestStatePointer */ - addInstr(env, MIPSInstr_Load(4, GuestStatePointer(mode64), - MIPSAMode_IR(0, StackPointer(mode64)), mode64)); - add_to_sp(env, 8); // Reset SP + if (cc == MIPScc_AL) + addInstr(env, MIPSInstr_CallAlways(cc, (Addr64)target, argiregs, rloc)); + else + addInstr(env, MIPSInstr_Call(cc, (Addr64)target, argiregs, src, rloc)); } /*---------------------------------------------------------*/ @@ -559,6 +568,19 @@ static Bool uInt_fits_in_16_bits(UInt u) return toBool(u == (UInt) i); } +static Bool uLong_fits_in_16_bits ( ULong u ) +{ + Long i = u & 0xFFFFULL; + i <<= 48; + i >>= 48; + return toBool(u == (ULong) i); +} + +static Bool uLong_is_4_aligned ( ULong u ) +{ + return toBool((u & 3ULL) == 0); +} + static Bool sane_AMode(ISelEnv * env, MIPSAMode * am) { switch (am->tag) { @@ -585,10 +607,31 @@ static MIPSAMode *iselWordExpr_AMode(ISelEnv * env, IRExpr * e, IRType xferTy) /* DO NOT CALL THIS DIRECTLY ! */ static MIPSAMode *iselWordExpr_AMode_wrk(ISelEnv * env, IRExpr * e, - IRType xferTy) + IRType xferTy) { IRType ty = typeOfIRExpr(env->type_env, e); - { + if (env->mode64) { + Bool aligned4imm = toBool(xferTy == Ity_I32 || xferTy == Ity_I64); + vassert(ty == Ity_I64); + + /* Add64(expr,i), where i == sign-extend of (i & 0xFFFF) */ + if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add64 + && e->Iex.Binop.arg2->tag == Iex_Const + && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64 + && (aligned4imm ? + uLong_is_4_aligned(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64) : True) + && uLong_fits_in_16_bits(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)) { + return MIPSAMode_IR((Int) e->Iex.Binop.arg2->Iex.Const.con->Ico.U64, + iselWordExpr_R(env, e->Iex.Binop.arg1)); + } + + /* Add64(expr,expr) */ + if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add64) { + HReg r_base = iselWordExpr_R(env, e->Iex.Binop.arg1); + HReg r_idx = iselWordExpr_R(env, e->Iex.Binop.arg2); + return MIPSAMode_RR(r_idx, r_base); + } + } else { vassert(ty == Ity_I32); /* Add32(expr,i), where i == sign-extend of (i & 0xFFFF) */ @@ -687,6 +730,10 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) aluOp = Malu_SUB; break; + case Iop_Sub64: + aluOp = Malu_DSUB; + break; + case Iop_And32: case Iop_And64: aluOp = Malu_AND; @@ -702,6 +749,10 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) aluOp = Malu_XOR; break; + case Iop_Add64: + aluOp = Malu_DADD; + break; + default: aluOp = Malu_INVALID; break; @@ -717,6 +768,8 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) switch (aluOp) { case Malu_ADD: case Malu_SUB: + case Malu_DADD: + case Malu_DSUB: ri_srcR = iselWordExpr_RH(env, True /*signed */ , e->Iex.Binop.arg2); break; @@ -762,23 +815,23 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) case Mshft_SLL: case Mshft_SRL: case Mshft_SRA: - ri_srcR = iselWordExpr_RH5u(env, e->Iex.Binop. arg2); + if (mode64) + ri_srcR = iselWordExpr_RH6u(env, e->Iex.Binop.arg2); + else + ri_srcR = iselWordExpr_RH5u(env, e->Iex.Binop.arg2); break; default: vpanic("iselIntExpr_R_wrk-shftOp-arg2"); } - /* widen the left arg if needed */ - /*TODO do we need this? 
*/ - if (ty == Ity_I8 || ty == Ity_I16) - goto irreducible; if (ty == Ity_I64) { vassert(mode64); addInstr(env, MIPSInstr_Shft(shftOp, False/*64bit shift */, r_dst, r_srcL, ri_srcR)); - } else { + } else if (ty == Ity_I32) { addInstr(env, MIPSInstr_Shft(shftOp, True /*32bit shift */, r_dst, r_srcL, ri_srcR)); - } + } else + goto irreducible; return r_dst; } @@ -847,8 +900,7 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) size32 = False; break; default: - vpanic - ("iselCondCode(mips): CmpXX32 or CmpXX64"); + vpanic("iselCondCode(mips): CmpXX32 or CmpXX64"); } addInstr(env, MIPSInstr_Cmp(syned, size32, dst, r1, r2, cc)); @@ -856,22 +908,21 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) if (e->Iex.Binop.op == Iop_Max32U) { - /* - tmp = argR - argL; - dst = argL; - bltz tmp,2; - dst = argR; - - */ + HReg tmp = newVRegI(env); + HReg r_dst = newVRegI(env); HReg argL = iselWordExpr_R(env, e->Iex.Binop.arg1); - MIPSRH *argR = iselWordExpr_RH(env, False /*signed */ , + HReg argR = iselWordExpr_R(env, e->Iex.Binop.arg2); + MIPSRH *argRH = iselWordExpr_RH(env, False /*signed */ , e->Iex.Binop.arg2); - HReg dst = newVRegI(env); - HReg tmp = newVRegI(env); - addInstr(env, MIPSInstr_Alu(Malu_SUB, tmp, argL, argR)); - addInstr(env, MIPSInstr_MovCond(dst, argL, argR, tmp, MIPScc_MI)); - - return dst; + /* max (v0, s0) + ------------ + slt v1, v0, s0 + movn v0, s0, v1 */ + + addInstr(env, MIPSInstr_Alu(Malu_SLT, tmp, argL, argRH)); + addInstr(env, mk_iMOVds_RR(r_dst, argL)); + addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dst, argR, tmp)); + return r_dst; } if (e->Iex.Binop.op == Iop_Mul32 || e->Iex.Binop.op == Iop_Mul64) { @@ -922,7 +973,10 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) if (e->Iex.Binop.op == Iop_CmpF64) { HReg r_srcL, r_srcR; - { + if (mode64) { + r_srcL = iselFltExpr(env, e->Iex.Binop.arg1); + r_srcR = iselFltExpr(env, e->Iex.Binop.arg2); + } else { r_srcL = iselDblExpr(env, e->Iex.Binop.arg1); r_srcR = iselDblExpr(env, e->Iex.Binop.arg2); } @@ -934,26 +988,26 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) HReg r_ccIR_b6 = newVRegI(env); /* Create in dst, the IRCmpF64Result encoded result. */ - // chech for EQ + /* check for EQ */ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP, tmp, r_srcL, r_srcR, toUChar(2))); addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, r_ccMIPS, tmp, MIPSRH_Imm(False, 22))); - // chech for UN + /* check for UN */ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP, tmp, r_srcL, r_srcR, toUChar(1))); addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tmp, tmp, MIPSRH_Imm(False, 23))); addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS, MIPSRH_Reg(tmp))); - // chech for LT + /* check for LT */ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP, tmp, r_srcL, r_srcR, toUChar(12))); addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tmp, tmp, MIPSRH_Imm(False, 21))); addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS, MIPSRH_Reg(tmp))); - // chech for GT + /* check for GT */ addInstr(env, MIPSInstr_FpCompare(Mfp_CMP, tmp, r_srcL, r_srcR, toUChar(15))); addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tmp, tmp, @@ -964,9 +1018,8 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) MIPSRH_Imm(False, 8))); addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS, MIPSRH_Reg(tmp))); - /* Map compare result from PPC to IR, - conforming to CmpF64 definition. */ - /* + /* Map compare result from MIPS to IR, + conforming to CmpF64 definition.
FP cmp result | MIPS | IR -------------------------- UN | 0x1 | 0x45 @@ -975,7 +1028,7 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) LT | 0x8 | 0x01 */ - // r_ccIR_b0 = r_ccPPC[0] | r_ccPPC[3] + /* r_ccIR_b0 = r_ccMIPS[0] | r_ccMIPS[3] */ addInstr(env, MIPSInstr_Shft(Mshft_SRL, True, r_ccIR_b0, r_ccMIPS, MIPSRH_Imm(False, 0x3))); addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR_b0, r_ccMIPS, @@ -983,13 +1036,13 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b0, r_ccIR_b0, MIPSRH_Imm(False, 0x1))); - // r_ccIR_b2 = r_ccPPC[0] + /* r_ccIR_b2 = r_ccMIPS[0] */ addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, r_ccIR_b2, r_ccMIPS, MIPSRH_Imm(False, 0x2))); addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b2, r_ccIR_b2, MIPSRH_Imm(False, 0x4))); - // r_ccIR_b6 = r_ccPPC[0] | r_ccPPC[1] + /* r_ccIR_b6 = r_ccMIPS[0] | r_ccMIPS[1] */ addInstr(env, MIPSInstr_Shft(Mshft_SRL, True, r_ccIR_b6, r_ccMIPS, MIPSRH_Imm(False, 0x1))); addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR_b6, r_ccMIPS, @@ -999,7 +1052,7 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b6, r_ccIR_b6, MIPSRH_Imm(False, 0x40))); - // r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6 + /* r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6 */ addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR, r_ccIR_b0, MIPSRH_Reg(r_ccIR_b2))); addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR, r_ccIR, @@ -1058,25 +1111,41 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) return r_dst; } + if (e->Iex.Binop.op == Iop_F32toI64S) { + vassert(mode64); + HReg valS = newVRegI(env); + HReg tmpF = newVRegF(env); + HReg valF = iselFltExpr(env, e->Iex.Binop.arg2); + + /* CVTLS tmpF, valF */ + set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLS, tmpF, valF)); + set_MIPS_rounding_default(env); + + /* Doubleword Move from Floating Point + dmfc1 valS, tmpF */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmfc1, valS, tmpF)); + + return valS; + } + if (e->Iex.Binop.op == Iop_F64toI32S) { - HReg valD = iselDblExpr(env, e->Iex.Binop.arg2); + HReg valD; + if (mode64) + valD = iselFltExpr(env, e->Iex.Binop.arg2); + else + valD = iselDblExpr(env, e->Iex.Binop.arg2); HReg valS = newVRegF(env); HReg r_dst = newVRegI(env); - MIPSAMode *am_addr; + /* CVTWD valS, valD */ set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWD, valS, valD)); set_MIPS_rounding_default(env); - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - // store as F32 - addInstr(env, MIPSInstr_FpLdSt(False/*store */ , 4, valS, am_addr)); - // load as I32 - addInstr(env, MIPSInstr_Load(4, r_dst, am_addr, mode64)); - - add_to_sp(env, 16); // Reset SP + /* Move Word From Floating Point + mfc1 r_dst, valS */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, valS)); return r_dst; } @@ -1084,7 +1153,7 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) break; } - /* --------- UNARY OP --------- */ + /* --------- UNARY OP --------- */ case Iex_Unop: { IROp op_unop = e->Iex.Unop.op; @@ -1135,7 +1204,7 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) return r_dst; } - /*not(x) = nor(x,x) */ + /* not(x) = nor(x,x) */ case Iop_Not1: { HReg r_dst = newVRegI(env); HReg r_srcL = iselWordExpr_R(env, e->Iex.Unop.arg); @@ -1157,39 +1226,45 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) } case Iop_ReinterpF32asI32: { - MIPSAMode *am_addr; HReg fr_src = 
iselFltExpr(env, e->Iex.Unop.arg); HReg r_dst = newVRegI(env); - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - // store as F32 - addInstr(env, MIPSInstr_FpLdSt(False/*store */ , 4, fr_src, - am_addr)); - // load as Ity_I32 - addInstr(env, MIPSInstr_Load(4, r_dst, am_addr, mode64)); + /* Move Word From Floating Point + mfc1 r_dst, fr_src */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, fr_src)); - add_to_sp(env, 16); // Reset SP return r_dst; } case Iop_ReinterpF64asI64: { vassert(mode64); - MIPSAMode *am_addr; HReg fr_src = iselFltExpr(env, e->Iex.Unop.arg); HReg r_dst = newVRegI(env); - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); + /* Doubleword Move from Floating Point + mfc1 r_dst, fr_src */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmfc1, r_dst, fr_src)); - // store as F64 - addInstr(env, MIPSInstr_FpLdSt(False/*store */ , 8, fr_src, - am_addr)); - // load as Ity_I64 - addInstr(env, MIPSInstr_Load(8, r_dst, am_addr, mode64)); + return r_dst; + } + + case Iop_F64toI32S: { + HReg valD; + if (mode64) + valD = iselFltExpr(env, e->Iex.Binop.arg2); + else + valD = iselDblExpr(env, e->Iex.Binop.arg2); + HReg valS = newVRegF(env); + HReg r_dst = newVRegI(env); + + set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWD, valS, valD)); + set_MIPS_rounding_default(env); + + /* Move Word From Floating Point + mfc1 r_dst, valS */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, valS)); - add_to_sp(env, 16); // Reset SP return r_dst; } @@ -1251,9 +1326,9 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) HReg r_dst = newVRegI(env); HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); vassert(mode64); - addInstr(env, MIPSInstr_Shft(Mshft_SLL, False/*!32bit shift */, + addInstr(env, MIPSInstr_Shft(Mshft_SLL, False /*!32bit shift */, r_dst, r_src, MIPSRH_Imm(False, 32))); - addInstr(env, MIPSInstr_Shft(Mshft_SRL, False/*!32bit shift */, + addInstr(env, MIPSInstr_Shft(Mshft_SRL, False /*!32bit shift */, r_dst, r_dst, MIPSRH_Imm(False, 32))); return r_dst; } @@ -1263,33 +1338,47 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) return iselWordExpr_R(env, e->Iex.Unop.arg); case Iop_64HIto32: { - HReg rHi, rLo; - iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg); - return rHi; + if (env->mode64) { + HReg r_dst = newVRegI(env); + HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); + addInstr(env, MIPSInstr_Shft(Mshft_SRA, False /*64bit shift */, + r_dst, r_src, MIPSRH_Imm(True, 32))); + return r_dst; + } else { + HReg rHi, rLo; + iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg); + return rHi; + } } case Iop_64to32: { - HReg rHi, rLo; - iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg); - return rLo; + if (env->mode64) { + HReg r_dst = newVRegI(env); + r_dst = iselWordExpr_R(env, e->Iex.Unop.arg); + return r_dst; + } else { + HReg rHi, rLo; + iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg); + return rLo; + } } - + case Iop_64to16: { vassert(env->mode64); HReg r_dst = newVRegI(env); r_dst = iselWordExpr_R(env, e->Iex.Unop.arg); return r_dst; } - + case Iop_32Sto64: { HReg r_dst = newVRegI(env); HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); vassert(mode64); - addInstr(env, MIPSInstr_Shft(Mshft_SLL, True/*!32bit shift */, + addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /*!32bit shift */, r_dst, r_src, MIPSRH_Imm(True, 0))); return r_dst; } - + case Iop_CmpNEZ8: { HReg r_dst = newVRegI(env); HReg tmp = newVRegI(env); @@ 
-1353,13 +1442,25 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) return r_dst; } + case Iop_Clz64: { + vassert(mode64); + HReg r_dst = newVRegI(env); + HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); + addInstr(env, MIPSInstr_Unary(Mun_DCLZ, r_dst, r_src)); + return r_dst; + } + case Iop_CmpNEZ64: { HReg hi, lo; HReg r_dst = newVRegI(env); HReg r_src; - r_src = newVRegI(env); - iselInt64Expr(&hi, &lo, env, e->Iex.Unop.arg); - addInstr(env, MIPSInstr_Alu(Malu_OR, r_src, lo, MIPSRH_Reg(hi))); + if (env->mode64) { + r_src = iselWordExpr_R(env, e->Iex.Unop.arg); + } else { + r_src = newVRegI(env); + iselInt64Expr(&hi, &lo, env, e->Iex.Unop.arg); + addInstr(env, MIPSInstr_Alu(Malu_OR, r_src, lo, MIPSRH_Reg(hi))); + } MIPSCondCode cc; cc = MIPScc_NE; @@ -1388,14 +1489,14 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) vassert(mode64); HReg rHi, rLo; iselInt128Expr(&rHi, &rLo, env, e->Iex.Unop.arg); - return rHi; /* and abandon rLo .. poor wee thing :-) */ + return rHi; /* and abandon rLo .. poor wee thing :-) */ } case Iop_128to64: { vassert(mode64); HReg rHi, rLo; iselInt128Expr(&rHi, &rLo, env, e->Iex.Unop.arg); - return rLo; /* and abandon rLo .. poor wee thing :-) */ + return rLo; /* and abandon rLo .. poor wee thing :-) */ } default: @@ -1404,7 +1505,7 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) break; } - /* --------- GET --------- */ + /* --------- GET --------- */ case Iex_Get: { if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ((ty == Ity_I64) && mode64)) { @@ -1419,13 +1520,13 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) break; } - /* --------- MULTIPLEX --------- */ + /* --------- ITE --------- */ case Iex_ITE: { if ((ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ((ty == Ity_I64))) && typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) { /* - * r_dst = cond && rX + * r_dst = cond && r1 * cond = not(cond) * tmp = cond && r0 * r_dst = tmp + r_dst @@ -1439,7 +1540,6 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) HReg r_tmp = newVRegI(env); HReg r_tmp1 = newVRegI(env); HReg r_cond_neg = newVRegI(env); - /* r_cond = 0 - r_cond_1 */ addInstr(env, MIPSInstr_LI(mask, 0x0)); addInstr(env, MIPSInstr_Alu(Malu_SUB, r_cond, @@ -1458,8 +1558,8 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) break; } - /* --------- LITERAL --------- */ - /* 32/16/8-bit literals */ + /* --------- LITERAL --------- */ + /* 32/16/8-bit literals */ case Iex_Const: { Long l; HReg r_dst = newVRegI(env); @@ -1486,7 +1586,7 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) return r_dst; } - /* --------- CCALL --------- */ + /* --------- CCALL --------- */ case Iex_CCall: { HReg r_dst = newVRegI(env); vassert(ty == e->Iex.CCall.retty); @@ -1517,7 +1617,7 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) default: break; - } /* end switch(e->tag) */ + } /* end switch(e->tag) */ /* We get here if no pattern matched. */ irreducible: @@ -1644,6 +1744,48 @@ static MIPSRH *iselWordExpr_RH5u_wrk(ISelEnv * env, IRExpr * e) return MIPSRH_Reg(iselWordExpr_R(env, e)); } +/* --------------------- RH6u --------------------- */ + +/* Only used in 64-bit mode. */ +static MIPSRH *iselWordExpr_RH6u ( ISelEnv * env, IRExpr * e ) +{ + MIPSRH *ri; + vassert(env->mode64); + ri = iselWordExpr_RH6u_wrk(env, e); + /* sanity checks ... 
*/ + switch (ri->tag) { + case Mrh_Imm: + vassert(ri->Mrh.Imm.imm16 >= 1 && ri->Mrh.Imm.imm16 <= 63); + vassert(!ri->Mrh.Imm.syned); + return ri; + case Mrh_Reg: + vassert(hregClass(ri->Mrh.Reg.reg) == HRcGPR(env->mode64)); + vassert(hregIsVirtual(ri->Mrh.Reg.reg)); + return ri; + default: + vpanic("iselIntExpr_RH6u: unknown mips64 RI tag"); + } +} + +/* DO NOT CALL THIS DIRECTLY ! */ +static MIPSRH *iselWordExpr_RH6u_wrk ( ISelEnv * env, IRExpr * e ) +{ + IRType ty = typeOfIRExpr(env->type_env, e); + vassert(ty == Ity_I8); + + /* special case: immediate */ + if (e->tag == Iex_Const + && e->Iex.Const.con->tag == Ico_U8 + && e->Iex.Const.con->Ico.U8 >= 1 && e->Iex.Const.con->Ico.U8 <= 63) + { + return MIPSRH_Imm(False /*unsigned */ , + e->Iex.Const.con->Ico.U8); + } + + /* default case: calculate into a register and return that */ + return MIPSRH_Reg(iselWordExpr_R(env, e)); +} + /* --------------------- CONDCODE --------------------- */ /* Generate code to evaluated a bit-typed expression, returning the @@ -1732,11 +1874,12 @@ static MIPSCondCode iselCondCode_wrk(ISelEnv * env, IRExpr * e) } addInstr(env, MIPSInstr_Cmp(syned, size32, dst, r1, r2, cc)); - // Store result to guest_COND + /* Store result to guest_COND */ MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64)); addInstr(env, MIPSInstr_Store(4, - MIPSAMode_IR(am_addr->Mam.IR.index + 316, am_addr->Mam.IR.base), + MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64), + am_addr->Mam.IR.base), dst, mode64)); return cc; } @@ -1747,21 +1890,23 @@ static MIPSCondCode iselCondCode_wrk(ISelEnv * env, IRExpr * e) addInstr(env, MIPSInstr_LI(r_dst, 0x1)); addInstr(env, MIPSInstr_Alu(Malu_SUB, r_dst, r_dst, r_srcR)); - // Store result to guest_COND + /* Store result to guest_COND */ MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64)); addInstr(env, MIPSInstr_Store(4, - MIPSAMode_IR(am_addr->Mam.IR.index + 316, am_addr->Mam.IR.base), + MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64), + am_addr->Mam.IR.base), r_dst, mode64)); return MIPScc_NE; } if (e->tag == Iex_RdTmp || e->tag == Iex_Unop) { HReg r_dst = iselWordExpr_R_wrk(env, e); - // Store result to guest_COND + /* Store result to guest_COND */ MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64)); addInstr(env, MIPSInstr_Store(4, - MIPSAMode_IR(am_addr->Mam.IR.index + 316, am_addr->Mam.IR.base), + MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64), + am_addr->Mam.IR.base), r_dst, mode64)); return MIPScc_EQ; } @@ -1961,6 +2106,7 @@ static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e) /* 64-bit ITE */ if (e->tag == Iex_ITE) { vassert(typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1); + vassert(!mode64); HReg expr0Lo, expr0Hi; HReg expr1Lo, expr1Hi; HReg tmpHi = newVRegI(env); @@ -1973,7 +2119,6 @@ static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e) HReg mask = newVRegI(env); HReg desLo = newVRegI(env); HReg desHi = newVRegI(env); - /* r_cond = 0 - r_cond_1 */ addInstr(env, MIPSInstr_LI(mask, 0x0)); addInstr(env, MIPSInstr_Alu(Malu_SUB, r_cond, @@ -2030,7 +2175,7 @@ static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e) HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2); - addInstr(env, MIPSInstr_Mul(syned/*Unsigned or Signed */ , + addInstr(env, MIPSInstr_Mul(syned /*Unsigned or Signed */, True /*widen */ , True, r_dst, r_srcL, r_srcR)); addInstr(env, MIPSInstr_Mfhi(tHi)); @@ -2058,13 +2203,13 @@ 
static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e) return; } - /* 32HLto64(e1,e2) */ + /* 32HLto64(e1,e2) */ case Iop_32HLto64: *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1); *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2); return; - /* Or64/And64/Xor64 */ + /* Or64/And64/Xor64 */ case Iop_Or64: case Iop_And64: case Iop_Xor64: { @@ -2165,24 +2310,25 @@ static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e) MIPSAMode *am_addr; HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg); - sub_from_sp(env, 16); // Move SP down 16 bytes + sub_from_sp(env, 16); /* Move SP down 16 bytes */ am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - // store as F64 + /* store as F64 */ addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src, am_addr)); - // load as 2xI32 + /* load as 2xI32 */ addInstr(env, MIPSInstr_Load(4, tLo, am_addr, mode64)); addInstr(env, MIPSInstr_Load(4, tHi, nextMIPSAModeFloat(am_addr), mode64)); - add_to_sp(env, 16); // Reset SP + /* Reset SP */ + add_to_sp(env, 16); *rHi = tHi; *rLo = tLo; return; } - + default: vex_printf("UNARY: No such op: "); ppIROp(e->Iex.Unop.op); @@ -2202,7 +2348,6 @@ static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e) /* Nothing interesting here; really just wrappers for 64-bit stuff. */ - static HReg iselFltExpr(ISelEnv * env, IRExpr * e) { HReg r = iselFltExpr_wrk(env, e); @@ -2226,7 +2371,10 @@ static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e) vassert(e->Iex.Load.ty == Ity_F32 || (e->Iex.Load.ty == Ity_F64 && mode64)); am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, ty); - addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, r_dst, am_addr)); + if (mode64 && e->Iex.Load.ty == Ity_F64) + addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, r_dst, am_addr)); + else + addInstr(env, MIPSInstr_FpLdSt(True /*load */, 4, r_dst, am_addr)); return r_dst; } @@ -2234,70 +2382,110 @@ static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e) HReg r_dst = newVRegF(env); MIPSAMode *am_addr = MIPSAMode_IR(e->Iex.Get.offset, GuestStatePointer(mode64)); - addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, r_dst, am_addr)); + if (mode64) + addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, r_dst, am_addr)); + else + addInstr(env, MIPSInstr_FpLdSt(True /*load */, 4, r_dst, am_addr)); return r_dst; } if (e->tag == Iex_Unop) { switch (e->Iex.Unop.op) { case Iop_ReinterpI32asF32: { - MIPSAMode *am_addr; HReg fr_src = iselWordExpr_R(env, e->Iex.Unop.arg); HReg r_dst = newVRegF(env); - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - // store as I32 - addInstr(env, MIPSInstr_Store(4, am_addr, fr_src, mode64)); - - // load as Ity_F32 - addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, r_dst, am_addr)); + /* Move Word to Floating Point + mtc1 r_dst, valS */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, r_dst, fr_src)); - add_to_sp(env, 16); // Reset SP return r_dst; - } case Iop_F32toF64: { - /* first arg is rounding mode; we ignore it. 
*/ - MIPSAMode *am_addr; - HReg src = iselFltExpr(env, e->Iex.Unop.arg); - HReg dst = newVRegF(env); - - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - addInstr(env, MIPSInstr_Store(4, - MIPSAMode_IR(am_addr->Mam.IR.index + 4, - am_addr->Mam.IR.base), - hregMIPS_GPR0(mode64), mode64)); - addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 4, src, am_addr)); - - // load as Ity_F32 - addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, dst, am_addr)); - add_to_sp(env, 16); // Reset SP + if (mode64) { + HReg src = iselFltExpr(env, e->Iex.Unop.arg); + HReg dst = newVRegD(env); - return dst; + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDS, dst, src)); + return dst; + } else { + MIPSAMode *am_addr; + HReg src = iselFltExpr(env, e->Iex.Unop.arg); + HReg dst = newVRegF(env); + + sub_from_sp(env, 16); /* Move SP down 16 bytes */ + am_addr = MIPSAMode_IR(0, StackPointer(mode64)); + + addInstr(env, MIPSInstr_Store(4, + MIPSAMode_IR(am_addr->Mam.IR.index +4, + am_addr->Mam.IR.base), + hregMIPS_GPR0(mode64), mode64)); + addInstr(env, MIPSInstr_FpLdSt(False /* store */, 4, src, am_addr)); + + /* load as Ity_F64 */ + addInstr(env, MIPSInstr_FpLdSt(True /* load */, 8, dst, am_addr)); + /* Reset SP */ + add_to_sp(env, 16); + + return dst; + } } - case Iop_ReinterpI64asF64: - { + case Iop_ReinterpI64asF64: { vassert(mode64); - MIPSAMode *am_addr; HReg fr_src = iselWordExpr_R(env, e->Iex.Unop.arg); HReg r_dst = newVRegF(env); - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - // store as I64 - addInstr(env, MIPSInstr_Store(8, am_addr, fr_src, mode64)); + /* Move Doubleword to Floating Point + dmtc1 r_dst, valS */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmtc1, r_dst, fr_src)); - // load as Ity_F64 - addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, r_dst, am_addr)); - - add_to_sp(env, 16); // Reset SP return r_dst; } + case Iop_I32StoF64: { + vassert(mode64); + HReg dst = newVRegF(env); + HReg tmp1 = newVRegF(env); + HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); + MIPSAMode *am_addr; + + /* Move Word to Floating Point + mtc1 tmp1, r_src */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp1, r_src)); + + HReg irrm = newVRegI(env); + + MIPSAMode *am_addr1 = MIPSAMode_IR(552, GuestStatePointer(mode64)); + + addInstr(env, MIPSInstr_Load(4, irrm, am_addr1, mode64)); + + /* set rounding mode */ + HReg tmp = newVRegI(env); + HReg fcsr_old = newVRegI(env); + + addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tmp, + irrm, MIPSRH_Imm(False, 1))); + addInstr(env, MIPSInstr_Alu(Malu_XOR, tmp, irrm, MIPSRH_Reg(tmp))); + addInstr(env, MIPSInstr_Alu(Malu_AND, irrm, tmp, + MIPSRH_Imm(False, 3))); + /* save old value of FCSR */ + addInstr(env, MIPSInstr_MfFCSR(fcsr_old)); + /* Move SP down 8 bytes */ + sub_from_sp(env, 8); + am_addr = MIPSAMode_IR(0, StackPointer(mode64)); + + /* store old FCSR to stack */ + addInstr(env, MIPSInstr_Store(4, am_addr, fcsr_old, mode64)); + + /* set new value of FCSR */ + addInstr(env, MIPSInstr_MtFCSR(irrm)); + + /* and do convert */ + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDW, dst, tmp1)); + /* set MIPS roundig mode to default and reset sp */ + set_MIPS_rounding_default(env); + + return dst; + } case Iop_AbsF32: case Iop_AbsF64: { Bool sz32 = e->Iex.Unop.op == Iop_AbsF32; @@ -2314,6 +2502,13 @@ static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e) addInstr(env, MIPSInstr_FpUnary(sz32 ? 
Mfp_NEGS : Mfp_NEGD, dst, src)); return dst; } + case Iop_RoundF64toF64_ZERO: { + vassert(mode64); + HReg src = iselFltExpr(env, e->Iex.Unop.arg); + HReg dst = newVRegF(env); + addInstr(env, MIPSInstr_FpConvert(Mfp_TRULD, dst, src)); + return dst; + } default: break; } @@ -2337,19 +2532,37 @@ static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e) case Iop_DivF32: op = Mfp_DIVS; break; + case Iop_DivF64: + vassert(mode64); + op = Mfp_DIVD; + break; case Iop_MulF32: op = Mfp_MULS; break; + case Iop_MulF64: + vassert(mode64); + op = Mfp_MULD; + break; case Iop_AddF32: op = Mfp_ADDS; break; + case Iop_AddF64: + vassert(mode64); + op = Mfp_ADDD; + break; case Iop_SubF32: op = Mfp_SUBS; break; + case Iop_SubF64: + vassert(mode64); + op = Mfp_SUBD; + break; default: vassert(0); } + set_MIPS_rounding_mode(env, e->Iex.Triop.details->arg1); addInstr(env, MIPSInstr_FpBinary(op, dst, argL, argR)); + set_MIPS_rounding_default(env); return dst; } default: @@ -2360,7 +2573,11 @@ static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e) if (e->tag == Iex_Binop) { switch (e->Iex.Binop.op) { case Iop_F64toF32: { - HReg valD = iselDblExpr(env, e->Iex.Binop.arg2); + HReg valD; + if (mode64) + valD = iselFltExpr(env, e->Iex.Binop.arg2); + else + valD = iselDblExpr(env, e->Iex.Binop.arg2); HReg valS = newVRegF(env); set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); @@ -2375,31 +2592,85 @@ static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e) set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWS, valS, valF)); - set_MIPS_rounding_default(env); return valS; } + case Iop_RoundF64toInt: { + HReg valS = newVRegF(env); + HReg valF = iselFltExpr(env, e->Iex.Binop.arg2); + + set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLD, valS, valF)); + set_MIPS_rounding_default(env); + return valS; + } + case Iop_I32StoF32: { HReg r_dst = newVRegF(env); + HReg fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2); + HReg tmp = newVRegF(env); + + /* Move Word to Floating Point + mtc1 tmp, fr_src */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp, fr_src)); + + set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSW, r_dst, tmp)); + set_MIPS_rounding_default(env); + + return r_dst; + } + + case Iop_I64StoF64: { + HReg r_dst = newVRegF(env); MIPSAMode *am_addr; HReg fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2); HReg tmp = newVRegF(env); - sub_from_sp(env, 16); // Move SP down 16 bytes + /* Move SP down 8 bytes */ + sub_from_sp(env, 8); am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - // store as I32 - addInstr(env, MIPSInstr_Store(4, am_addr, fr_src, mode64)); + /* store as I64 */ + addInstr(env, MIPSInstr_Store(8, am_addr, fr_src, mode64)); - // load as Ity_F32 - addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, tmp, am_addr)); + /* load as Ity_F64 */ + addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, tmp, am_addr)); - add_to_sp(env, 16); // Reset SP + /* Reset SP */ + add_to_sp(env, 8); set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); - addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSW, r_dst, tmp)); + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDL, r_dst, tmp)); + set_MIPS_rounding_default(env); + + return r_dst; + } + + case Iop_I64StoF32: { + HReg r_dst = newVRegF(env); + + MIPSAMode *am_addr; + HReg fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2); + HReg tmp = newVRegF(env); + + /* Move SP down 8 bytes */ + sub_from_sp(env, 8); + am_addr = MIPSAMode_IR(0, StackPointer(mode64)); + + /* store as I64 */ + addInstr(env, 
MIPSInstr_Store(8, am_addr, fr_src, mode64)); + + /* load as Ity_F64 */ + addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, tmp, am_addr)); + + /* Reset SP */ + add_to_sp(env, 8); + + set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSL, r_dst, tmp)); set_MIPS_rounding_default(env); return r_dst; @@ -2417,12 +2688,51 @@ static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e) set_MIPS_rounding_default(env); return dst; } - + default: break; } } + if (e->tag == Iex_Qop) { + switch (e->Iex.Qop.details->op) { + case Iop_MAddF32: + case Iop_MAddF64: + case Iop_MSubF32: + case Iop_MSubF64: { + MIPSFpOp op = 0; + switch (e->Iex.Qop.details->op) { + case Iop_MAddF32: + op = Mfp_MADDS; + break; + case Iop_MAddF64: + op = Mfp_MADDD; + break; + case Iop_MSubF32: + op = Mfp_MSUBS; + break; + case Iop_MSubF64: + op = Mfp_MSUBD; + break; + default: + vassert(0); + } + HReg dst = newVRegF(env); + HReg src1 = iselFltExpr(env, e->Iex.Qop.details->arg2); + HReg src2 = iselFltExpr(env, e->Iex.Qop.details->arg3); + HReg src3 = iselFltExpr(env, e->Iex.Qop.details->arg4); + set_MIPS_rounding_mode(env, e->Iex.Qop.details->arg1); + addInstr(env, MIPSInstr_FpTernary(op, dst, + src1, src2, src3)); + set_MIPS_rounding_default(env); + return dst; + } + + default: + break; + } + } + if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_TruncF64asF32) { /* This is quite subtle. The only way to do the relevant truncation is to do a single-precision store and then a @@ -2457,14 +2767,30 @@ static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e) MIPSAMode *zero_r1 = MIPSAMode_IR(0, StackPointer(mode64)); sub_from_sp(env, 16); - // store as F32, hence truncating + /* store as F32, hence truncating */ addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 4, fsrc, zero_r1)); - // and reload. Good huh?! (sigh) + /* and reload. Good huh?! 
(sigh) */ addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, fdst, zero_r1)); add_to_sp(env, 16); return fdst; } + /* --------- ITE --------- */ + if (e->tag == Iex_ITE) { + if (ty == Ity_F64 + && typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) { + vassert(mode64); + HReg r0 = iselFltExpr(env, e->Iex.ITE.iffalse); + HReg r1 = iselFltExpr(env, e->Iex.ITE.iftrue); + HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond); + HReg r_dst = newVRegF(env); + addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, r_dst, r0)); + addInstr(env, MIPSInstr_MoveCond(MFpMoveCond_movnd, r_dst, r1, + r_cond)); + return r_dst; + } + } + vex_printf("iselFltExpr(mips): No such tag(0x%x)\n", e->tag); ppIRExpr(e); vpanic("iselFltExpr_wrk(mips)"); @@ -2519,39 +2845,11 @@ static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e) fpop = Mfp_ABSD; break; case Iop_F32toF64: { + vassert(!mode64); HReg src = iselFltExpr(env, e->Iex.Unop.arg); HReg dst = newVRegD(env); - HReg irrm = newVRegI(env); - - MIPSAMode *am_addr1 = MIPSAMode_IR(284, GuestStatePointer(mode64)); - - addInstr(env, MIPSInstr_Load(4, irrm, am_addr1, mode64)); - - // set new FCSR - HReg tmp = newVRegI(env); - HReg fcsr_old = newVRegI(env); - MIPSAMode *am_addr; - - addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tmp, irrm, - MIPSRH_Imm(False, 1))); - addInstr(env, MIPSInstr_Alu(Malu_XOR, tmp, irrm, MIPSRH_Reg(tmp))); - addInstr(env, MIPSInstr_Alu(Malu_AND, irrm, tmp, - MIPSRH_Imm(False, 3))); - /* save old value of FCSR */ - addInstr(env, MIPSInstr_MfFCSR(fcsr_old)); - sub_from_sp(env, 8); // Move SP down 4 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - //store old FCSR to stack - addInstr(env, MIPSInstr_Store(4, am_addr, fcsr_old, mode64)); - - //set new value of FCSR - addInstr(env, MIPSInstr_MtFCSR(irrm)); - - //set_MIPS_rounding_mode(env, e->Iex.Binop.arg1); - addInstr(env, MIPSInstr_FpUnary(Mfp_CVTD, dst, src)); - set_MIPS_rounding_default(env); + addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDS, dst, src)); return dst; } case Iop_ReinterpI64asF64: { @@ -2561,24 +2859,19 @@ static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e) iselInt64Expr(&Hi, &Lo, env, e->Iex.Unop.arg); - dst = mk_LoadRR32toFPR(env, Hi, Lo); // 2*I32 -> F64 + dst = mk_LoadRR32toFPR(env, Hi, Lo); /* 2*I32 -> F64 */ return dst; } case Iop_I32StoF64: { + vassert(!mode64); HReg dst = newVRegD(env); HReg tmp1 = newVRegF(env); HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); MIPSAMode *am_addr; - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - // store as I32 - addInstr(env, MIPSInstr_Store(4, am_addr, r_src, mode64)); - - // load as Ity_F32 - addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, tmp1, am_addr)); - - add_to_sp(env, 16); // Reset SP + /* Move Word to Floating Point + mtc1 tmp1, r_src */ + addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp1, r_src)); HReg irrm = newVRegI(env); @@ -2586,7 +2879,7 @@ static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e) addInstr(env, MIPSInstr_Load(4, irrm, am_addr1, mode64)); - //set rounding mode + /* set rounding mode */ HReg tmp = newVRegI(env); HReg fcsr_old = newVRegI(env); @@ -2597,17 +2890,20 @@ static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e) MIPSRH_Imm(False, 3))); /* save old value of FCSR */ addInstr(env, MIPSInstr_MfFCSR(fcsr_old)); - sub_from_sp(env, 8); // Move SP down 4 bytes + /* Move SP down 8 bytes */ + sub_from_sp(env, 8); am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - //store old FCSR to stack + /* store old FCSR to stack */ addInstr(env, MIPSInstr_Store(4, 
am_addr, fcsr_old, mode64)); - //set new value of FCSR + /* set new value of FCSR */ addInstr(env, MIPSInstr_MtFCSR(irrm)); - // and do convert + /* and do convert */ addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDW, dst, tmp1)); + + /* set MIPS roundinf mode to default and reset sp */ set_MIPS_rounding_default(env); return dst; @@ -2689,90 +2985,57 @@ static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e) } } - /* --------- MULTIPLEX --------- */ + if (e->tag == Iex_Qop) { + switch (e->Iex.Qop.details->op) { + case Iop_MAddF32: + case Iop_MAddF64: + case Iop_MSubF32: + case Iop_MSubF64: { + MIPSFpOp op = 0; + switch (e->Iex.Qop.details->op) { + case Iop_MAddF32: + op = Mfp_MADDS; + break; + case Iop_MAddF64: + op = Mfp_MADDD; + break; + case Iop_MSubF32: + op = Mfp_MSUBS; + break; + case Iop_MSubF64: + op = Mfp_MSUBD; + break; + default: + vassert(0); + } + HReg dst = newVRegD(env); + HReg src1 = iselDblExpr(env, e->Iex.Qop.details->arg2); + HReg src2 = iselDblExpr(env, e->Iex.Qop.details->arg3); + HReg src3 = iselDblExpr(env, e->Iex.Qop.details->arg4); + set_MIPS_rounding_mode(env, e->Iex.Qop.details->arg1); + addInstr(env, MIPSInstr_FpTernary(op, dst, + src1, src2, src3)); + set_MIPS_rounding_default(env); + return dst; + } + + default: + break; + } + } + + /* --------- ITE --------- */ if (e->tag == Iex_ITE) { if (ty == Ity_F64 && typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) { HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse); HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue); - HReg r_cond_1 = iselWordExpr_R(env, e->Iex.ITE.cond); - HReg r_cond = newVRegI(env); - HReg r_cond_neg = newVRegI(env); - HReg mask = newVRegI(env); + HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond); HReg r_dst = newVRegD(env); - HReg r_tmp_lo = newVRegI(env); - HReg r_tmp_hi = newVRegI(env); - HReg r_tmp1_lo = newVRegI(env); - HReg r_tmp1_hi = newVRegI(env); - HReg r_r0_lo = newVRegI(env); - HReg r_r0_hi = newVRegI(env); - HReg r_r1_lo = newVRegI(env); - HReg r_r1_hi = newVRegI(env); - HReg r_dst_lo = newVRegI(env); - HReg r_dst_hi = newVRegI(env); - - /* r_cond = 0 - r_cond_1 */ - addInstr(env, MIPSInstr_LI(mask, 0x0)); - addInstr(env, MIPSInstr_Alu(Malu_SUB, r_cond, - mask, MIPSRH_Reg(r_cond_1))); - - sub_from_sp(env, 16); // Move SP down 16 bytes - MIPSAMode *am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - // store as Ity_F64 - addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, r0, am_addr)); - - // load as 2xI32 - addInstr(env, MIPSInstr_Load(4, r_r0_lo, am_addr, mode64)); - addInstr(env, MIPSInstr_Load(4, r_r0_hi, nextMIPSAModeFloat(am_addr), - mode64)); - - add_to_sp(env, 16); // Reset SP - - addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp_lo, r_cond, - MIPSRH_Reg(r_r0_lo))); - addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp_hi, r_cond, - MIPSRH_Reg(r_r0_hi))); - - addInstr(env, MIPSInstr_Alu(Malu_NOR, r_cond_neg, r_cond, - MIPSRH_Reg(r_cond))); - - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - // store as Ity_F64 - addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, r1, am_addr)); - - // load as 2xI32 - addInstr(env, MIPSInstr_Load(4, r_r1_lo, am_addr, mode64)); - addInstr(env, MIPSInstr_Load(4, r_r1_hi, nextMIPSAModeFloat(am_addr), - mode64)); - - add_to_sp(env, 16); // Reset SP - - addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1_lo, r_cond_neg, - MIPSRH_Reg(r_r1_lo))); - addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1_hi, r_cond_neg, - MIPSRH_Reg(r_r1_hi))); - - addInstr(env, MIPSInstr_Alu(Malu_ADD, r_dst_lo, r_tmp_lo, - MIPSRH_Reg(r_tmp1_lo))); - 
addInstr(env, MIPSInstr_Alu(Malu_ADD, r_dst_hi, r_tmp_hi, - MIPSRH_Reg(r_tmp1_hi))); - - sub_from_sp(env, 16); // Move SP down 16 bytes - am_addr = MIPSAMode_IR(0, StackPointer(mode64)); - - // store as I32 - addInstr(env, MIPSInstr_Store(4, am_addr, r_dst_lo, mode64)); - addInstr(env, MIPSInstr_Store(4, nextMIPSAModeFloat(am_addr), - r_dst_hi, mode64)); - - // load as Ity_F32 - addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, r_dst, am_addr)); - - add_to_sp(env, 16); // Reset SP + addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, r_dst, r0)); + addInstr(env, MIPSInstr_MoveCond(MFpMoveCond_movnd, r_dst, r1, + r_cond)); return r_dst; } } @@ -2829,7 +3092,12 @@ static void iselStmt(ISelEnv * env, IRStmt * stmt) am_addr)); return; } - + if (tyd == Ity_F64 && mode64) { + HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data); + addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src, + am_addr)); + return; + } if (!mode64 && (tyd == Ity_F64)) { HReg fr_src = iselDblExpr(env, stmt->Ist.Store.data); addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src, @@ -2880,7 +3148,11 @@ static void iselStmt(ISelEnv * env, IRStmt * stmt) if (ty == Ity_F64) { HReg fr_src; - fr_src = iselDblExpr(env, stmt->Ist.Put.data); + if (mode64) { + fr_src = iselFltExpr(env, stmt->Ist.Put.data); + } else { + fr_src = iselDblExpr(env, stmt->Ist.Put.data); + } MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset, GuestStatePointer(mode64)); addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src, @@ -2903,12 +3175,28 @@ static void iselStmt(ISelEnv * env, IRStmt * stmt) } if (ty == Ity_I64) { - HReg rHi, rLo, dstHi, dstLo; - iselInt64Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data); - lookupIRTemp64(&dstHi, &dstLo, env, tmp); - addInstr(env, mk_iMOVds_RR(dstHi, rHi)); - addInstr(env, mk_iMOVds_RR(dstLo, rLo)); - return; + if (mode64) { + HReg r_dst = lookupIRTemp(env, tmp); + HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data); + addInstr(env, mk_iMOVds_RR(r_dst, r_src)); + return; + } else { + HReg rHi, rLo, dstHi, dstLo; + iselInt64Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data); + lookupIRTemp64(&dstHi, &dstLo, env, tmp); + addInstr(env, mk_iMOVds_RR(dstHi, rHi)); + addInstr(env, mk_iMOVds_RR(dstLo, rLo)); + return; + } + } + + if (mode64 && ty == Ity_I128) { + HReg rHi, rLo, dstHi, dstLo; + iselInt128Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data); + lookupIRTempPair(&dstHi, &dstLo, env, tmp); + addInstr(env, mk_iMOVds_RR(dstHi, rHi)); + addInstr(env, mk_iMOVds_RR(dstLo, rLo)); + return; } if (ty == Ity_F32) { @@ -2919,10 +3207,17 @@ static void iselStmt(ISelEnv * env, IRStmt * stmt) } if (ty == Ity_F64) { - HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data); - HReg dst = lookupIRTemp(env, tmp); - addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, dst, src)); - return; + if (mode64) { + HReg src = iselFltExpr(env, stmt->Ist.WrTmp.data); + HReg dst = lookupIRTemp(env, tmp); + addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, dst, src)); + return; + } else { + HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data); + HReg dst = lookupIRTemp(env, tmp); + addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, dst, src)); + return; + } } break; } @@ -2994,9 +3289,10 @@ static void iselStmt(ISelEnv * env, IRStmt * stmt) /* --------- Load Linked or Store Conditional --------- */ case Ist_LLSC: { - //Temporary solution; this need to be rewritten again for MIPS. - //On MIPS you can not read from address that is locked with LL before SC. - // If you read from address that is locked than SC will fall. 
+ /* Temporary solution; this need to be rewritten again for MIPS. + On MIPS you can not read from address that is locked with LL + before SC. If you read from address that is locked than SC will + fall. */ IRTemp res = stmt->Ist.LLSC.result; IRType tyRes = typeOfIRTemp(env->type_env, res); IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr); @@ -3007,7 +3303,7 @@ static void iselStmt(ISelEnv * env, IRStmt * stmt) if (stmt->Ist.LLSC.storedata == NULL) { /* LL */ MIPSAMode *r_addr; - /*constructs addressing mode from address provided */ + /* constructs addressing mode from address provided */ r_addr = iselWordExpr_AMode(env, stmt->Ist.LLSC.addr, tyAddr); HReg r_dst = lookupIRTemp(env, res); @@ -3040,21 +3336,21 @@ static void iselStmt(ISelEnv * env, IRStmt * stmt) /* fallthru */ } goto stmt_fail; - /*NOTREACHED*/} + /* NOTREACHED */} - /* --------- INSTR MARK --------- */ - /* Doesn't generate any executable code ... */ + /* --------- INSTR MARK --------- */ + /* Doesn't generate any executable code ... */ case Ist_IMark: return; - /* --------- ABI HINT --------- */ - /* These have no meaning (denotation in the IR) and so we ignore - them ... if any actually made it this far. */ + /* --------- ABI HINT --------- */ + /* These have no meaning (denotation in the IR) and so we ignore + them ... if any actually made it this far. */ case Ist_AbiHint: return; - /* --------- NO-OP --------- */ - /* Fairly self-explanatory, wouldn't you say? */ + /* --------- NO-OP --------- */ + /* Fairly self-explanatory, wouldn't you say? */ case Ist_NoOp: return; @@ -3068,7 +3364,7 @@ static void iselStmt(ISelEnv * env, IRStmt * stmt) MIPSCondCode cc = iselCondCode(env, stmt->Ist.Exit.guard); MIPSAMode* amPC = MIPSAMode_IR(stmt->Ist.Exit.offsIP, - hregMIPS_GPR10(mode64)); + GuestStatePointer(mode64)); /* Case: boring transfer to known address */ if (stmt->Ist.Exit.jk == Ijk_Boring @@ -3156,7 +3452,7 @@ static void iselNext ( ISelEnv* env, vassert(cdst->tag == (env->mode64 ? Ico_U64 :Ico_U32)); if (jk == Ijk_Boring || jk == Ijk_Call) { /* Boring transfer to known address */ - MIPSAMode* amPC = MIPSAMode_IR(offsIP, hregMIPS_GPR10(env->mode64)); + MIPSAMode* amPC = MIPSAMode_IR(offsIP, GuestStatePointer(env->mode64)); if (env->chainingAllowed) { /* .. almost always true .. */ /* Skip the event check at the dst if this is a forwards @@ -3187,7 +3483,7 @@ static void iselNext ( ISelEnv* env, case Ijk_Boring: case Ijk_Ret: case Ijk_Call: { HReg r = iselWordExpr_R(env, next); - MIPSAMode* amPC = MIPSAMode_IR(offsIP, hregMIPS_GPR10(env->mode64)); + MIPSAMode* amPC = MIPSAMode_IR(offsIP, GuestStatePointer(env->mode64)); if (env->chainingAllowed) { addInstr(env, MIPSInstr_XIndir(r, amPC, MIPScc_AL)); } else { @@ -3215,7 +3511,7 @@ static void iselNext ( ISelEnv* env, case Ijk_Sys_syscall: case Ijk_TInval: { HReg r = iselWordExpr_R(env, next); - MIPSAMode* amPC = MIPSAMode_IR(offsIP, hregMIPS_GPR10(env->mode64)); + MIPSAMode* amPC = MIPSAMode_IR(offsIP, GuestStatePointer(env->mode64)); addInstr(env, MIPSInstr_XAssisted(r, amPC, MIPScc_AL, jk)); return; } @@ -3223,12 +3519,12 @@ static void iselNext ( ISelEnv* env, break; } - vex_printf( "\n-- PUT(%d) = ", offsIP); - ppIRExpr( next ); - vex_printf( "; exit-"); + vex_printf("\n-- PUT(%d) = ", offsIP); + ppIRExpr(next ); + vex_printf("; exit-"); ppIRJumpKind(jk); - vex_printf( "\n"); - vassert(0); // are we expecting any other kind? + vex_printf("\n"); + vassert(0); /* are we expecting any other kind? 
*/ } /*---------------------------------------------------------*/ @@ -3253,9 +3549,10 @@ HInstrArray *iselSB_MIPS ( IRSB* bb, MIPSAMode *amCounter, *amFailAddr; /* sanity ... */ - vassert(arch_host == VexArchMIPS32); + vassert(arch_host == VexArchMIPS32 || arch_host == VexArchMIPS64); vassert(VEX_PRID_COMP_MIPS == hwcaps_host - || VEX_PRID_COMP_BROADCOM == hwcaps_host); + || VEX_PRID_COMP_BROADCOM == hwcaps_host + || VEX_PRID_COMP_NETLOGIC); mode64 = arch_host != VexArchMIPS32; @@ -3291,24 +3588,36 @@ HInstrArray *iselSB_MIPS ( IRSB* bb, case Ity_I1: case Ity_I8: case Ity_I16: - case Ity_I32: { - hreg = mkHReg(j++, HRcInt32, True); - break; - } - case Ity_I64: { - hreg = mkHReg(j++, HRcInt32, True); - hregHI = mkHReg(j++, HRcInt32, True); - break; - } + case Ity_I32: + if (mode64) { + hreg = mkHReg(j++, HRcInt64, True); + break; + } else { + hreg = mkHReg(j++, HRcInt32, True); + break; + } + case Ity_I64: + if (mode64) { + hreg = mkHReg(j++, HRcInt64, True); + break; + } else { + hreg = mkHReg(j++, HRcInt32, True); + hregHI = mkHReg(j++, HRcInt32, True); + break; + } case Ity_I128: vassert(mode64); hreg = mkHReg(j++, HRcInt64, True); hregHI = mkHReg(j++, HRcInt64, True); break; - case Ity_F32: { - hreg = mkHReg(j++, HRcFlt32, True); - break; - } + case Ity_F32: + if (mode64) { + hreg = mkHReg(j++, HRcFlt64, True); + break; + } else { + hreg = mkHReg(j++, HRcFlt32, True); + break; + } case Ity_F64: hreg = mkHReg(j++, HRcFlt64, True); break; @@ -3322,8 +3631,8 @@ HInstrArray *iselSB_MIPS ( IRSB* bb, env->vreg_ctr = j; /* The very first instruction must be an event check. */ - amCounter = MIPSAMode_IR(offs_Host_EvC_Counter, hregMIPS_GPR10(mode64)); - amFailAddr = MIPSAMode_IR(offs_Host_EvC_FailAddr, hregMIPS_GPR10(mode64)); + amCounter = MIPSAMode_IR(offs_Host_EvC_Counter, GuestStatePointer(mode64)); + amFailAddr = MIPSAMode_IR(offs_Host_EvC_FailAddr, GuestStatePointer(mode64)); addInstr(env, MIPSInstr_EvCheck(amCounter, amFailAddr)); /* Possibly a block counter increment (for profiling). 
At this diff --git a/VEX/priv/main_main.c b/VEX/priv/main_main.c index baa573a055..a4d4240ae1 100644 --- a/VEX/priv/main_main.c +++ b/VEX/priv/main_main.c @@ -42,6 +42,7 @@ #include "libvex_guest_ppc64.h" #include "libvex_guest_s390x.h" #include "libvex_guest_mips32.h" +#include "libvex_guest_mips64.h" #include "main_globals.h" #include "main_util.h" @@ -216,7 +217,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta ) Int i, j, k, out_used, guest_sizeB; Int offB_TISTART, offB_TILEN, offB_GUEST_IP, szB_GUEST_IP; Int offB_HOST_EvC_COUNTER, offB_HOST_EvC_FAILADDR; - UChar insn_bytes[64]; + UChar insn_bytes[128]; IRType guest_word_type; IRType host_word_type; Bool mode64, chainingAllowed; @@ -422,6 +423,30 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta ) vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_host.hwcaps)); break; + case VexArchMIPS64: + mode64 = True; + getAllocableRegs_MIPS ( &n_available_real_regs, + &available_real_regs, mode64 ); + isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_MIPSInstr; + getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_MIPSInstr; + mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_MIPSInstr; + genSpill = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_MIPS; + genReload = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_MIPS; + ppInstr = (void(*)(HInstr*, Bool)) ppMIPSInstr; + ppReg = (void(*)(HReg)) ppHRegMIPS; + iselSB = iselSB_MIPS; + emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool, + void*,void*,void*,void*)) + emit_MIPSInstr; +#if defined(VKI_LITTLE_ENDIAN) + host_is_bigendian = False; +#elif defined(VKI_BIG_ENDIAN) + host_is_bigendian = True; +#endif + host_word_type = Ity_I64; + vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_host.hwcaps)); + break; + default: vpanic("LibVEX_Translate: unsupported host insn set"); } @@ -570,6 +595,26 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta ) vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_NRADDR ) == 4); break; + case VexArchMIPS64: + preciseMemExnsFn = guest_mips64_state_requires_precise_mem_exns; + disInstrFn = disInstr_MIPS; + specHelper = guest_mips64_spechelper; + guest_sizeB = sizeof(VexGuestMIPS64State); + guest_word_type = Ity_I64; + guest_layout = &mips64Guest_layout; + offB_TISTART = offsetof(VexGuestMIPS64State,guest_TISTART); + offB_TILEN = offsetof(VexGuestMIPS64State,guest_TILEN); + offB_GUEST_IP = offsetof(VexGuestMIPS64State,guest_PC); + szB_GUEST_IP = sizeof( ((VexGuestMIPS64State*)0)->guest_PC ); + offB_HOST_EvC_COUNTER = offsetof(VexGuestMIPS64State,host_EvC_COUNTER); + offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR); + vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_guest.hwcaps)); + vassert(0 == sizeof(VexGuestMIPS64State) % 16); + vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_TISTART) == 8); + vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_TILEN ) == 8); + vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_NRADDR ) == 8); + break; + default: vpanic("LibVEX_Translate: unsupported guest insn set"); } @@ -909,6 +954,10 @@ VexInvalRange LibVEX_Chain ( VexArch arch_host, return chainXDirect_MIPS(place_to_chain, disp_cp_chain_me_EXPECTED, place_to_jump_to, False/*!mode64*/); + case VexArchMIPS64: + return chainXDirect_MIPS(place_to_chain, + disp_cp_chain_me_EXPECTED, + place_to_jump_to, True/*!mode64*/); default: vassert(0); } @@ -944,8 +993,12 @@ VexInvalRange LibVEX_UnChain ( VexArch arch_host, disp_cp_chain_me, True/*mode64*/); case VexArchMIPS32: return 
unchainXDirect_MIPS(place_to_unchain, - place_to_jump_to_EXPECTED, - disp_cp_chain_me, False/*!mode64*/); + place_to_jump_to_EXPECTED, + disp_cp_chain_me, False/*!mode64*/); + case VexArchMIPS64: + return unchainXDirect_MIPS(place_to_unchain, + place_to_jump_to_EXPECTED, + disp_cp_chain_me, True/*!mode64*/); default: vassert(0); } @@ -973,6 +1026,7 @@ Int LibVEX_evCheckSzB ( VexArch arch_host ) case VexArchPPC64: cached = evCheckSzB_PPC(); break; case VexArchMIPS32: + case VexArchMIPS64: cached = evCheckSzB_MIPS(); break; default: vassert(0); @@ -1004,6 +1058,9 @@ VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host, case VexArchMIPS32: return patchProfInc_MIPS(place_to_patch, location_of_counter, False/*!mode64*/); + case VexArchMIPS64: + return patchProfInc_MIPS(place_to_patch, + location_of_counter, True/*!mode64*/); default: vassert(0); } @@ -1075,6 +1132,7 @@ const HChar* LibVEX_ppVexArch ( VexArch arch ) case VexArchPPC64: return "PPC64"; case VexArchS390X: return "S390X"; case VexArchMIPS32: return "MIPS32"; + case VexArchMIPS64: return "MIPS64"; default: return "VexArch???"; } } @@ -1318,6 +1376,11 @@ static const HChar* show_hwcaps_mips32 ( UInt hwcaps ) return NULL; } +static const HChar* show_hwcaps_mips64 ( UInt hwcaps ) +{ + return "mips64-baseline"; +} + /* ---- */ static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps ) { @@ -1329,6 +1392,7 @@ static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps ) case VexArchARM: return show_hwcaps_arm(hwcaps); case VexArchS390X: return show_hwcaps_s390x(hwcaps); case VexArchMIPS32: return show_hwcaps_mips32(hwcaps); + case VexArchMIPS64: return show_hwcaps_mips64(hwcaps); default: return NULL; } } diff --git a/VEX/pub/libvex.h b/VEX/pub/libvex.h index 672a98a7d6..8e57dbbd5b 100644 --- a/VEX/pub/libvex.h +++ b/VEX/pub/libvex.h @@ -58,7 +58,8 @@ typedef VexArchPPC32, VexArchPPC64, VexArchS390X, - VexArchMIPS32 + VexArchMIPS32, + VexArchMIPS64 } VexArch; @@ -177,6 +178,7 @@ typedef #define VEX_PRID_COMP_MIPS 0x00010000 #define VEX_PRID_COMP_BROADCOM 0x00020000 +#define VEX_PRID_COMP_NETLOGIC 0x000c0000 /* These return statically allocated strings. */ diff --git a/VEX/pub/libvex_basictypes.h b/VEX/pub/libvex_basictypes.h index 110ca231e4..5335e2d053 100644 --- a/VEX/pub/libvex_basictypes.h +++ b/VEX/pub/libvex_basictypes.h @@ -199,7 +199,11 @@ typedef unsigned long HWord; # define VEX_REGPARM(_n) /* */ #elif defined(__mips__) +#if (__mips==64) +# define VEX_HOST_WORDSIZE 8 +#else # define VEX_HOST_WORDSIZE 4 +#endif # define VEX_REGPARM(_n) /* */ #else diff --git a/VEX/pub/libvex_guest_mips32.h b/VEX/pub/libvex_guest_mips32.h index 02c87ea241..cdcadfc8e0 100644 --- a/VEX/pub/libvex_guest_mips32.h +++ b/VEX/pub/libvex_guest_mips32.h @@ -7,7 +7,7 @@ This file is part of Valgrind, a dynamic binary instrumentation framework. - Copyright (C) 2010-2012 RT-RK + Copyright (C) 2010-2013 RT-RK mips-valgrind@rt-rk.com This program is free software; you can redistribute it and/or diff --git a/VEX/pub/libvex_guest_mips64.h b/VEX/pub/libvex_guest_mips64.h new file mode 100644 index 0000000000..9d03386b33 --- /dev/null +++ b/VEX/pub/libvex_guest_mips64.h @@ -0,0 +1,167 @@ + +/*---------------------------------------------------------------*/ +/*--- begin libvex_guest_mips64.h ---*/ +/*---------------------------------------------------------------*/ + +/* + This file is part of Valgrind, a dynamic binary instrumentation + framework. 
+ + Copyright (C) 2010-2013 RT-RK + mips-valgrind@rt-rk.com + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + The GNU General Public License is contained in the file COPYING. + + Neither the names of the U.S. Department of Energy nor the + University of California nor the names of its contributors may be + used to endorse or promote products derived from this software + without prior written permission. +*/ + +#ifndef __LIBVEX_PUB_GUEST_MIPS64_H +#define __LIBVEX_PUB_GUEST_MIPS64_H + +#include "libvex_basictypes.h" +#include "libvex_emnote.h" + + +/*---------------------------------------------------------------*/ +/*--- Vex's representation of the MIPS64 CPU state. ---*/ +/*---------------------------------------------------------------*/ + +typedef + struct { + /* CPU Registers */ + /* 0 */ ULong guest_r0; /* Hardwired to 0 */ + /* 8 */ ULong guest_r1; /* Assembler temporary */ + /* 16 */ ULong guest_r2; /* Values for function returns ...*/ + /* 24 */ ULong guest_r3; /* ...and expression evaluation */ + /* 32 */ ULong guest_r4; /* Function arguments */ + /* 40 */ ULong guest_r5; + /* 48 */ ULong guest_r6; + /* 56 */ ULong guest_r7; + /* 64 */ ULong guest_r8; + /* 72 */ ULong guest_r9; + /* 80 */ ULong guest_r10; + /* 88 */ ULong guest_r11; + /* 96 */ ULong guest_r12; /* Temporaries */ + /* 104 */ ULong guest_r13; + /* 112 */ ULong guest_r14; + /* 120 */ ULong guest_r15; + /* 128 */ ULong guest_r16; /* Saved temporaries */ + /* 136 */ ULong guest_r17; + /* 144 */ ULong guest_r18; + /* 152 */ ULong guest_r19; + /* 160 */ ULong guest_r20; + /* 168 */ ULong guest_r21; + /* 176 */ ULong guest_r22; + /* 184 */ ULong guest_r23; + /* 192 */ ULong guest_r24; /* Temporaries */ + /* 200 */ ULong guest_r25; + /* 208 */ ULong guest_r26; /* Reserved for OS kernel */ + /* 216 */ ULong guest_r27; + /* 224 */ ULong guest_r28; /* Global pointer */ + /* 232 */ ULong guest_r29; /* Stack pointer */ + /* 240 */ ULong guest_r30; /* Frame pointer */ + /* 248 */ ULong guest_r31; /* Return address */ + /* 256 */ ULong guest_PC; /* Program counter */ + /* 264 */ ULong guest_HI; /* Multiply and divide reg higher result */ + /* 272 */ ULong guest_LO; /* Multiply and divide reg lower result */ + + /* FPU Registers */ + /* 280 */ ULong guest_f0; /* Floting point gen purpose registers */ + /* 288 */ ULong guest_f1; + /* 296 */ ULong guest_f2; + /* 304 */ ULong guest_f3; + /* 312 */ ULong guest_f4; + /* 320 */ ULong guest_f5; + /* 328 */ ULong guest_f6; + /* 336 */ ULong guest_f7; + /* 344 */ ULong guest_f8; + /* 352 */ ULong guest_f9; + /* 360 */ ULong guest_f10; + /* 368 */ ULong guest_f11; + /* 376 */ ULong guest_f12; + /* 384 */ ULong guest_f13; + /* 392 */ ULong guest_f14; + /* 400 */ ULong guest_f15; + /* 408 */ ULong guest_f16; + /* 416 */ ULong guest_f17; + /* 424 */ ULong guest_f18; + /* 432 */ ULong guest_f19; + /* 440 */ ULong guest_f20; + /* 448 */ ULong 
guest_f21; + /* 456 */ ULong guest_f22; + /* 464 */ ULong guest_f23; + /* 472 */ ULong guest_f24; + /* 480 */ ULong guest_f25; + /* 488 */ ULong guest_f26; + /* 496 */ ULong guest_f27; + /* 504 */ ULong guest_f28; + /* 512 */ ULong guest_f29; + /* 520 */ ULong guest_f30; + /* 528 */ ULong guest_f31; + + /* 536 */ UInt guest_FIR; + /* 540 */ UInt guest_FCCR; + /* 544 */ UInt guest_FEXR; + /* 548 */ UInt guest_FENR; + /* 552 */ UInt guest_FCSR; + + /* TLS pointer for the thread. It's read-only in user space. On Linux it + is set in user space by various thread-related syscalls. + User Local Register. + This register provides read access to the coprocessor 0 + UserLocal register, if it is implemented. In some operating + environments, the UserLocal register is a pointer to a thread-specific + storage block. + */ + ULong guest_ULR; /* 560 */ + + /* Emulation notes */ + UInt guest_EMNOTE; /* 568 */ + + /* For clflush: record start and length of area to invalidate */ + ULong guest_TISTART; /* 576 */ + ULong guest_TILEN; /* 584 */ + + ULong guest_NRADDR; /* 592 */ + + ULong host_EvC_FAILADDR; /* 600 */ + UInt host_EvC_COUNTER; /* 608 */ + UInt guest_COND; /* 612 */ + UInt padding[6]; +} VexGuestMIPS64State; + +/*---------------------------------------------------------------*/ +/*--- Utility functions for MIPS64 guest stuff. ---*/ +/*---------------------------------------------------------------*/ + +/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */ + +/* Initialise all guest MIPS64 state. */ + +extern +void LibVEX_GuestMIPS64_initialise ( /*OUT*/VexGuestMIPS64State* vex_state ); + +#endif /* ndef __LIBVEX_PUB_GUEST_MIPS64_H */ + +/*---------------------------------------------------------------*/ +/*--- libvex_guest_mips64.h ---*/ +/*---------------------------------------------------------------*/ +
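
The byte offsets commented in VexGuestMIPS64State are relied on elsewhere in this patch: the instruction selector loads the rounding mode from guest_FCSR at offset 552, iselCondCode stores its result to guest_COND via COND_OFFSET, and main_main.c asserts that sizeof(VexGuestMIPS64State) is a multiple of 16. As a quick cross-check (a sketch only, not part of the patch; it assumes libvex_basictypes.h and libvex_guest_mips64.h are on the include path, and the file name offsets_check.c is purely illustrative), the commented offsets can be compared against the compiler's own layout with offsetof:

/* offsets_check.c -- sketch only, not part of the patch.
   Cross-checks a few of the byte offsets documented in the
   VexGuestMIPS64State comments against the actual struct layout. */
#include <stddef.h>
#include <stdio.h>
#include "libvex_basictypes.h"
#include "libvex_guest_mips64.h"

#define CHECK(field, expected) \
   do { \
      if (offsetof(VexGuestMIPS64State, field) != (expected)) \
         printf("mismatch: %s at %zu, expected %d\n", \
                #field, offsetof(VexGuestMIPS64State, field), (expected)); \
   } while (0)

int main(void)
{
   CHECK(guest_r31,  248);  /* return address                        */
   CHECK(guest_PC,   256);  /* program counter                       */
   CHECK(guest_FCSR, 552);  /* FP control/status, read by the isel   */
   CHECK(guest_COND, 612);  /* written by iselCondCode (COND_OFFSET) */
   printf("sizeof(VexGuestMIPS64State) = %zu (must be a multiple of 16)\n",
          sizeof(VexGuestMIPS64State));
   return 0;
}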
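
Earlier in the patch, the host_mips_isel.c hunks re-comment the bit shuffle that converts the MIPS FP compare result (one-hot: UN=0x1, EQ=0x2, GT=0x4, LT=0x8) into the IR condition value, per the r_ccIR_b0/b2/b6 comments beside the "FP cmp result | MIPS | IR" table. The following plain-C sketch models that mapping and can be used to sanity-check the generated Shft/Alu sequence; it is not part of the patch, the helper name mips_fpcc_to_ir is illustrative, and the expected EQ/GT values (0x40/0x00) are assumed from the usual IRCmpF64Result encoding, since only the UN and LT rows of the table are visible above.

/* cc_remap_sketch.c -- sketch only, not part of the patch.
   Models the bit shuffle from the isel comments:
     b0 = ccMIPS[0] | ccMIPS[3]   (set for UN or LT)
     b2 = ccMIPS[0]               (set for UN only)
     b6 = ccMIPS[0] | ccMIPS[1]   (set for UN or EQ)  */
#include <stdio.h>

static unsigned mips_fpcc_to_ir(unsigned ccMIPS)
{
   unsigned b0 = ((ccMIPS >> 3) | ccMIPS) & 0x1;         /* UN or LT */
   unsigned b2 = (ccMIPS & 0x1) << 2;                    /* UN only  */
   unsigned b6 = (((ccMIPS >> 1) | ccMIPS) & 0x1) << 6;  /* UN or EQ */
   return b0 | b2 | b6;
}

int main(void)
{
   /* UN and LT expectations come from the table quoted in the patch;
      EQ and GT are assumed from the IRCmpF64Result encoding. */
   printf("UN: 0x%02x\n", mips_fpcc_to_ir(0x1));  /* expect 0x45 */
   printf("EQ: 0x%02x\n", mips_fpcc_to_ir(0x2));  /* expect 0x40 */
   printf("GT: 0x%02x\n", mips_fpcc_to_ir(0x4));  /* expect 0x00 */
   printf("LT: 0x%02x\n", mips_fpcc_to_ir(0x8));  /* expect 0x01 */
   return 0;
}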