From 1efe44baeb6da095900a546e36d6e75709f9d1dc Mon Sep 17 00:00:00 2001 From: Cerion Armour-Brown Date: Fri, 23 Dec 2005 00:55:09 +0000 Subject: [PATCH] Changed naming convention from 'PPC32' to 'PPC' for all VEX code common to both PPC32 and PPC64. And tidied up a fair bit while i was at it. git-svn-id: svn://svn.valgrind.org/vex/trunk@1504 --- VEX/priv/guest-ppc32/gdefs.h | 84 +- VEX/priv/guest-ppc32/ghelpers.c | 35 +- VEX/priv/guest-ppc32/toIR.c | 2166 +++++++++++++++---------------- VEX/priv/host-ppc32/hdefs.c | 1267 +++++++++--------- VEX/priv/host-ppc32/hdefs.h | 562 ++++---- VEX/priv/host-ppc32/isel.c | 1212 +++++++++-------- VEX/priv/main/vex_main.c | 52 +- VEX/pub/libvex.h | 4 +- VEX/switchback/switchback.c | 2 +- VEX/test_main.c | 2 +- 10 files changed, 2712 insertions(+), 2674 deletions(-) diff --git a/VEX/priv/guest-ppc32/gdefs.h b/VEX/priv/guest-ppc32/gdefs.h index 0f4bc03902..aa5dde4fc5 100644 --- a/VEX/priv/guest-ppc32/gdefs.h +++ b/VEX/priv/guest-ppc32/gdefs.h @@ -47,25 +47,25 @@ /* Only to be used within the guest-ppc32 directory. */ -#ifndef __LIBVEX_GUEST_PPC32_DEFS_H -#define __LIBVEX_GUEST_PPC32_DEFS_H +#ifndef __LIBVEX_GUEST_PPC_DEFS_H +#define __LIBVEX_GUEST_PPC_DEFS_H /*---------------------------------------------------------*/ -/*--- ppc32 to IR conversion ---*/ +/*--- ppc to IR conversion ---*/ /*---------------------------------------------------------*/ -/* Convert one ppc32 insn to IR. See the type DisOneInstrFn in +/* Convert one ppc insn to IR. See the type DisOneInstrFn in bb_to_IR.h. */ extern -DisResult disInstr_PPC32 ( IRBB* irbb, - Bool put_IP, - Bool (*resteerOkFn) ( Addr64 ), - UChar* guest_code, - Long delta, - Addr64 guest_IP, - VexArchInfo* archinfo, - Bool host_bigendian ); +DisResult disInstr_PPC ( IRBB* irbb, + Bool put_IP, + Bool (*resteerOkFn) ( Addr64 ), + UChar* guest_code, + Long delta, + Addr64 guest_IP, + VexArchInfo* archinfo, + Bool host_bigendian ); /* Used by the optimiser to specialise calls to helpers. 
*/ extern @@ -95,46 +95,46 @@ VexGuestLayout ppc64Guest_layout; /* FP Rounding mode - different encoding to IR */ typedef enum { - PPC32rm_NEAREST = 0, - PPC32rm_NegINF = 1, - PPC32rm_PosINF = 2, - PPC32rm_ZERO = 3 - } PPC32RoundingMode; + PPCrm_NEAREST = 0, + PPCrm_NegINF = 1, + PPCrm_PosINF = 2, + PPCrm_ZERO = 3 + } PPCRoundingMode; /* Floating point comparison values - different encoding to IR */ typedef enum { - PPC32cr_LT = 0x8, - PPC32cr_GT = 0x4, - PPC32cr_EQ = 0x2, - PPC32cr_UN = 0x1 + PPCcr_LT = 0x8, + PPCcr_GT = 0x4, + PPCcr_EQ = 0x2, + PPCcr_UN = 0x1 } - PPC32CmpF64Result; + PPCCmpF64Result; /* Enumeration for xer_ca/ov calculation helper functions */ enum { - /* 0 */ PPC32G_FLAG_OP_ADD=0, // addc[o], addic - /* 1 */ PPC32G_FLAG_OP_ADDE, // adde[o], addme[o], addze[o] - /* 2 */ PPC32G_FLAG_OP_DIVW, // divwo - /* 3 */ PPC32G_FLAG_OP_DIVWU, // divwuo - /* 4 */ PPC32G_FLAG_OP_MULLW, // mullwo - /* 5 */ PPC32G_FLAG_OP_NEG, // nego - /* 6 */ PPC32G_FLAG_OP_SUBF, // subfo - /* 7 */ PPC32G_FLAG_OP_SUBFC, // subfc[o] - /* 8 */ PPC32G_FLAG_OP_SUBFE, // subfe[o], subfme[o], subfze[o] - /* 9 */ PPC32G_FLAG_OP_SUBFI, // subfic - /* 10 */ PPC32G_FLAG_OP_SRAW, // sraw - /* 11 */ PPC32G_FLAG_OP_SRAWI, // srawi - /* 12 */ PPC32G_FLAG_OP_SRAD, // srad - /* 13 */ PPC32G_FLAG_OP_SRADI, // sradi - PPC32G_FLAG_OP_NUMBER + /* 0 */ PPCG_FLAG_OP_ADD=0, // addc[o], addic + /* 1 */ PPCG_FLAG_OP_ADDE, // adde[o], addme[o], addze[o] + /* 2 */ PPCG_FLAG_OP_DIVW, // divwo + /* 3 */ PPCG_FLAG_OP_DIVWU, // divwuo + /* 4 */ PPCG_FLAG_OP_MULLW, // mullwo + /* 5 */ PPCG_FLAG_OP_NEG, // nego + /* 6 */ PPCG_FLAG_OP_SUBF, // subfo + /* 7 */ PPCG_FLAG_OP_SUBFC, // subfc[o] + /* 8 */ PPCG_FLAG_OP_SUBFE, // subfe[o], subfme[o], subfze[o] + /* 9 */ PPCG_FLAG_OP_SUBFI, // subfic + /* 10 */ PPCG_FLAG_OP_SRAW, // sraw + /* 11 */ PPCG_FLAG_OP_SRAWI, // srawi + /* 12 */ PPCG_FLAG_OP_SRAD, // srad + /* 13 */ PPCG_FLAG_OP_SRADI, // sradi + PPCG_FLAG_OP_NUMBER }; 
/*---------------------------------------------------------*/ -/*--- ppc32 guest helpers ---*/ +/*--- ppc guest helpers ---*/ /*---------------------------------------------------------*/ /* --- CLEAN HELPERS --- */ @@ -143,13 +143,17 @@ enum { /* --- DIRTY HELPERS --- */ -extern ULong ppc32g_dirtyhelper_MFTB ( void ); +extern ULong ppcg_dirtyhelper_MFTB ( void ); extern void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst, UInt vD_idx, UInt sh, UInt shift_right ); -#endif /* ndef __LIBVEX_GUEST_PPC32_DEFS_H */ +extern void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst, + UInt vD_idx, UInt sh, + UInt shift_right ); + +#endif /* ndef __LIBVEX_GUEST_PPC_DEFS_H */ /*---------------------------------------------------------------*/ /*--- end guest-ppc32/gdefs.h ---*/ diff --git a/VEX/priv/guest-ppc32/ghelpers.c b/VEX/priv/guest-ppc32/ghelpers.c index a32bdc15f7..3e3653b988 100644 --- a/VEX/priv/guest-ppc32/ghelpers.c +++ b/VEX/priv/guest-ppc32/ghelpers.c @@ -76,7 +76,7 @@ /* DIRTY HELPER (non-referentially-transparent) */ /* Horrible hack. On non-ppc32 platforms, return 1. */ /* Reads a complete, consistent 64-bit TB value. 
*/ -ULong ppc32g_dirtyhelper_MFTB ( void ) +ULong ppcg_dirtyhelper_MFTB ( void ) { # if defined(__powerpc__) ULong res; @@ -128,6 +128,35 @@ void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst, (*pU128_dst)[3] = (*pU128_src)[3]; } +/* CALLED FROM GENERATED CODE */ +/* DIRTY HELPER (reads guest state, writes guest mem) */ +void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst, + UInt vD_off, UInt sh, UInt shift_right ) +{ + static + UChar ref[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F }; + U128* pU128_src; + U128* pU128_dst; + + vassert( vD_off <= sizeof(VexGuestPPC64State)-8 ); + vassert( sh <= 15 ); + vassert( shift_right <= 1 ); + if (shift_right) + sh = 16-sh; + /* else shift left */ + + pU128_src = (U128*)&ref[sh]; + pU128_dst = (U128*)( ((UChar*)gst) + vD_off ); + + (*pU128_dst)[0] = (*pU128_src)[0]; + (*pU128_dst)[1] = (*pU128_src)[1]; + (*pU128_dst)[2] = (*pU128_src)[2]; + (*pU128_dst)[3] = (*pU128_src)[3]; +} + /* Helper-function specialiser. */ @@ -411,7 +440,7 @@ void LibVEX_GuestPPC32_initialise ( /*OUT*/VexGuestPPC32State* vex_state ) vex_state->guest_CR7_321 = 0; vex_state->guest_CR7_0 = 0; - vex_state->guest_FPROUND = (UInt)PPC32rm_NEAREST; + vex_state->guest_FPROUND = (UInt)PPCrm_NEAREST; vex_state->guest_VRSAVE = 0; @@ -559,7 +588,7 @@ void LibVEX_GuestPPC64_initialise ( /*OUT*/VexGuestPPC64State* vex_state ) vex_state->guest_CR7_321 = 0; vex_state->guest_CR7_0 = 0; - vex_state->guest_FPROUND = (UInt)PPC32rm_NEAREST; + vex_state->guest_FPROUND = (UInt)PPCrm_NEAREST; vex_state->guest_VRSAVE = 0; diff --git a/VEX/priv/guest-ppc32/toIR.c b/VEX/priv/guest-ppc32/toIR.c index cda5bf6959..d85b54912f 100644 --- a/VEX/priv/guest-ppc32/toIR.c +++ b/VEX/priv/guest-ppc32/toIR.c @@ -84,7 +84,7 @@ */ -/* Translates PPC32 & PPC64 code to IR. */ +/* Translates PPC32/64 code to IR. 
*/ /* References @@ -124,7 +124,7 @@ /*------------------------------------------------------------*/ /* These are set at the start of the translation of an insn, right - down in disInstr_PPC32, so that we don't have to pass them around + down in disInstr_PPC, so that we don't have to pass them around endlessly. They are all constant during the translation of any given insn. */ @@ -145,7 +145,7 @@ static Addr64 guest_CIA_curr_instr; static IRBB* irbb; /* Is our guest binary 32 or 64bit? Set at each call to - disInstr_PPC32 below. */ + disInstr_PPC below. */ static Bool mode64 = False; @@ -153,8 +153,6 @@ static Bool mode64 = False; /*--- Debugging output ---*/ /*------------------------------------------------------------*/ -#define PPC_TOIR_DEBUG 0 - #define DIP(format, args...) \ if (vex_traceflags & VEX_TRACE_FE) \ vex_printf(format, ## args) @@ -163,70 +161,30 @@ static Bool mode64 = False; if (vex_traceflags & VEX_TRACE_FE) \ vex_sprintf(buf, format, ## args) -#if PPC_TOIR_DEBUG -static void vex_printf_binary( ULong x, UInt len, Bool spaces ) -{ - UInt i; - vassert(len > 0 && len <= 64); - - for (i=len; i>0; i--) { - vex_printf("%d", ((x & (((ULong)1)<<(len-1))) != 0) ); - x = x << 1; - if (((i-1)%4)==0 && (i > 1) && spaces) { - vex_printf(" "); - } - } -} -#endif - /*------------------------------------------------------------*/ /*--- Offsets of various parts of the ppc32/64 guest state ---*/ /*------------------------------------------------------------*/ -// 64-bit offsets -#define OFFB64_CIA offsetof(VexGuestPPC64State,guest_CIA) -#define OFFB64_LR offsetof(VexGuestPPC64State,guest_LR) -#define OFFB64_CTR offsetof(VexGuestPPC64State,guest_CTR) - -#define OFFB64_XER_SO offsetof(VexGuestPPC64State,guest_XER_SO) -#define OFFB64_XER_OV offsetof(VexGuestPPC64State,guest_XER_OV) -#define OFFB64_XER_CA offsetof(VexGuestPPC64State,guest_XER_CA) -#define OFFB64_XER_BC offsetof(VexGuestPPC64State,guest_XER_BC) - -#define OFFB64_FPROUND 
offsetof(VexGuestPPC64State,guest_FPROUND) - -#define OFFB64_VRSAVE offsetof(VexGuestPPC64State,guest_VRSAVE) -#define OFFB64_VSCR offsetof(VexGuestPPC64State,guest_VSCR) - -#define OFFB64_EMWARN offsetof(VexGuestPPC64State,guest_EMWARN) - -#define OFFB64_TISTART offsetof(VexGuestPPC64State,guest_TISTART) -#define OFFB64_TILEN offsetof(VexGuestPPC64State,guest_TILEN) +#define offsetofPPCGuestState(_x) \ + (mode64 ? offsetof(VexGuestPPC64State, _x) : \ + offsetof(VexGuestPPC32State, _x)) -#define OFFB64_RESVN offsetof(VexGuestPPC64State,guest_RESVN) +#define OFFB_CIA offsetofPPCGuestState(guest_CIA) +#define OFFB_LR offsetofPPCGuestState(guest_LR) +#define OFFB_CTR offsetofPPCGuestState(guest_CTR) +#define OFFB_XER_SO offsetofPPCGuestState(guest_XER_SO) +#define OFFB_XER_OV offsetofPPCGuestState(guest_XER_OV) +#define OFFB_XER_CA offsetofPPCGuestState(guest_XER_CA) +#define OFFB_XER_BC offsetofPPCGuestState(guest_XER_BC) +#define OFFB_FPROUND offsetofPPCGuestState(guest_FPROUND) +#define OFFB_VRSAVE offsetofPPCGuestState(guest_VRSAVE) +#define OFFB_VSCR offsetofPPCGuestState(guest_VSCR) +#define OFFB_EMWARN offsetofPPCGuestState(guest_EMWARN) +#define OFFB_TISTART offsetofPPCGuestState(guest_TISTART) +#define OFFB_TILEN offsetofPPCGuestState(guest_TILEN) +#define OFFB_RESVN offsetofPPCGuestState(guest_RESVN) -// 32-bit offsets -#define OFFB32_CIA offsetof(VexGuestPPC32State,guest_CIA) -#define OFFB32_LR offsetof(VexGuestPPC32State,guest_LR) -#define OFFB32_CTR offsetof(VexGuestPPC32State,guest_CTR) - -#define OFFB32_XER_SO offsetof(VexGuestPPC32State,guest_XER_SO) -#define OFFB32_XER_OV offsetof(VexGuestPPC32State,guest_XER_OV) -#define OFFB32_XER_CA offsetof(VexGuestPPC32State,guest_XER_CA) -#define OFFB32_XER_BC offsetof(VexGuestPPC32State,guest_XER_BC) - -#define OFFB32_FPROUND offsetof(VexGuestPPC32State,guest_FPROUND) - -#define OFFB32_VRSAVE offsetof(VexGuestPPC32State,guest_VRSAVE) -#define OFFB32_VSCR offsetof(VexGuestPPC32State,guest_VSCR) - -#define 
OFFB32_EMWARN offsetof(VexGuestPPC32State,guest_EMWARN) - -#define OFFB32_TISTART offsetof(VexGuestPPC32State,guest_TISTART) -#define OFFB32_TILEN offsetof(VexGuestPPC32State,guest_TILEN) - -#define OFFB32_RESVN offsetof(VexGuestPPC32State,guest_RESVN) /*------------------------------------------------------------*/ @@ -362,6 +320,7 @@ static UInt MASK32( UInt begin, UInt end ) return mask; } +/* ditto for 64bit mask */ static ULong MASK64( UInt begin, UInt end ) { vassert(begin < 64); @@ -414,13 +373,6 @@ static UInt extend_s_16to32 ( UInt x ) return (UInt)((((Int)x) << 16) >> 16); } -#if 0 -static UInt extend_s_26to32 ( UInt x ) -{ - return (UInt)((((Int)x) << 6) >> 6); -} -#endif - static ULong extend_s_16to64 ( UInt x ) { return (ULong)((((Long)x) << 48) >> 48); @@ -515,22 +467,21 @@ static IRExpr* mkOR1 ( IRExpr* arg1, IRExpr* arg2 ) { vassert(typeOfIRExpr(irbb->tyenv, arg1) == Ity_I1); vassert(typeOfIRExpr(irbb->tyenv, arg2) == Ity_I1); - return - unop(Iop_32to1, binop(Iop_Or32, unop(Iop_1Uto32, arg1), - unop(Iop_1Uto32, arg2))); + return unop(Iop_32to1, binop(Iop_Or32, unop(Iop_1Uto32, arg1), + unop(Iop_1Uto32, arg2))); } static IRExpr* mkAND1 ( IRExpr* arg1, IRExpr* arg2 ) { vassert(typeOfIRExpr(irbb->tyenv, arg1) == Ity_I1); vassert(typeOfIRExpr(irbb->tyenv, arg2) == Ity_I1); - return - unop(Iop_32to1, binop(Iop_And32, unop(Iop_1Uto32, arg1), - unop(Iop_1Uto32, arg2))); + return unop(Iop_32to1, binop(Iop_And32, unop(Iop_1Uto32, arg1), + unop(Iop_1Uto32, arg2))); } /* expand V128_8Ux16 to 2x V128_16Ux8's */ -static void expand8Ux16( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) +static void expand8Ux16( IRExpr* vIn, + /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) { IRTemp ones8x16 = newTemp(Ity_V128); @@ -547,7 +498,8 @@ static void expand8Ux16( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) } /* expand V128_8Sx16 to 2x V128_16Sx8's */ -static void expand8Sx16( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) +static void expand8Sx16( IRExpr* vIn, + 
/*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) { IRTemp ones8x16 = newTemp(Ity_V128); @@ -564,7 +516,8 @@ static void expand8Sx16( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) } /* expand V128_16Uto8 to 2x V128_32Ux4's */ -static void expand16Ux8( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) +static void expand16Ux8( IRExpr* vIn, + /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) { IRTemp ones16x8 = newTemp(Ity_V128); @@ -581,7 +534,8 @@ static void expand16Ux8( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) } /* expand V128_16Sto8 to 2x V128_32Sx4's */ -static void expand16Sx8( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) +static void expand16Sx8( IRExpr* vIn, + /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd ) { IRTemp ones16x8 = newTemp(Ity_V128); @@ -775,9 +729,7 @@ static IROp mkSzOp ( IRType ty, IROp op8 ) return adj + op8; } -/* Make sure we get valid 32 and 64bit addresses - CAB: do we ever get -ve addresses/offsets? -*/ +/* Make sure we get valid 32 and 64bit addresses */ static Addr64 mkSzAddr ( IRType ty, Addr64 addr ) { vassert(ty == Ity_I32 || ty == Ity_I64); @@ -872,84 +824,46 @@ static Int integerGuestRegOffset ( UInt archreg ) vassert(archreg < 32); // jrs: probably not necessary; only matters if we reference sub-parts - // of the ppc32 registers, but that isn't the case + // of the ppc registers, but that isn't the case // later: this might affect Altivec though? 
vassert(host_is_bigendian); - if (mode64) { - switch (archreg) { - case 0: return offsetof(VexGuestPPC64State, guest_GPR0); - case 1: return offsetof(VexGuestPPC64State, guest_GPR1); - case 2: return offsetof(VexGuestPPC64State, guest_GPR2); - case 3: return offsetof(VexGuestPPC64State, guest_GPR3); - case 4: return offsetof(VexGuestPPC64State, guest_GPR4); - case 5: return offsetof(VexGuestPPC64State, guest_GPR5); - case 6: return offsetof(VexGuestPPC64State, guest_GPR6); - case 7: return offsetof(VexGuestPPC64State, guest_GPR7); - case 8: return offsetof(VexGuestPPC64State, guest_GPR8); - case 9: return offsetof(VexGuestPPC64State, guest_GPR9); - case 10: return offsetof(VexGuestPPC64State, guest_GPR10); - case 11: return offsetof(VexGuestPPC64State, guest_GPR11); - case 12: return offsetof(VexGuestPPC64State, guest_GPR12); - case 13: return offsetof(VexGuestPPC64State, guest_GPR13); - case 14: return offsetof(VexGuestPPC64State, guest_GPR14); - case 15: return offsetof(VexGuestPPC64State, guest_GPR15); - case 16: return offsetof(VexGuestPPC64State, guest_GPR16); - case 17: return offsetof(VexGuestPPC64State, guest_GPR17); - case 18: return offsetof(VexGuestPPC64State, guest_GPR18); - case 19: return offsetof(VexGuestPPC64State, guest_GPR19); - case 20: return offsetof(VexGuestPPC64State, guest_GPR20); - case 21: return offsetof(VexGuestPPC64State, guest_GPR21); - case 22: return offsetof(VexGuestPPC64State, guest_GPR22); - case 23: return offsetof(VexGuestPPC64State, guest_GPR23); - case 24: return offsetof(VexGuestPPC64State, guest_GPR24); - case 25: return offsetof(VexGuestPPC64State, guest_GPR25); - case 26: return offsetof(VexGuestPPC64State, guest_GPR26); - case 27: return offsetof(VexGuestPPC64State, guest_GPR27); - case 28: return offsetof(VexGuestPPC64State, guest_GPR28); - case 29: return offsetof(VexGuestPPC64State, guest_GPR29); - case 30: return offsetof(VexGuestPPC64State, guest_GPR30); - case 31: return offsetof(VexGuestPPC64State, guest_GPR31); - 
default: break; - } - } else { - switch (archreg) { - case 0: return offsetof(VexGuestPPC32State, guest_GPR0); - case 1: return offsetof(VexGuestPPC32State, guest_GPR1); - case 2: return offsetof(VexGuestPPC32State, guest_GPR2); - case 3: return offsetof(VexGuestPPC32State, guest_GPR3); - case 4: return offsetof(VexGuestPPC32State, guest_GPR4); - case 5: return offsetof(VexGuestPPC32State, guest_GPR5); - case 6: return offsetof(VexGuestPPC32State, guest_GPR6); - case 7: return offsetof(VexGuestPPC32State, guest_GPR7); - case 8: return offsetof(VexGuestPPC32State, guest_GPR8); - case 9: return offsetof(VexGuestPPC32State, guest_GPR9); - case 10: return offsetof(VexGuestPPC32State, guest_GPR10); - case 11: return offsetof(VexGuestPPC32State, guest_GPR11); - case 12: return offsetof(VexGuestPPC32State, guest_GPR12); - case 13: return offsetof(VexGuestPPC32State, guest_GPR13); - case 14: return offsetof(VexGuestPPC32State, guest_GPR14); - case 15: return offsetof(VexGuestPPC32State, guest_GPR15); - case 16: return offsetof(VexGuestPPC32State, guest_GPR16); - case 17: return offsetof(VexGuestPPC32State, guest_GPR17); - case 18: return offsetof(VexGuestPPC32State, guest_GPR18); - case 19: return offsetof(VexGuestPPC32State, guest_GPR19); - case 20: return offsetof(VexGuestPPC32State, guest_GPR20); - case 21: return offsetof(VexGuestPPC32State, guest_GPR21); - case 22: return offsetof(VexGuestPPC32State, guest_GPR22); - case 23: return offsetof(VexGuestPPC32State, guest_GPR23); - case 24: return offsetof(VexGuestPPC32State, guest_GPR24); - case 25: return offsetof(VexGuestPPC32State, guest_GPR25); - case 26: return offsetof(VexGuestPPC32State, guest_GPR26); - case 27: return offsetof(VexGuestPPC32State, guest_GPR27); - case 28: return offsetof(VexGuestPPC32State, guest_GPR28); - case 29: return offsetof(VexGuestPPC32State, guest_GPR29); - case 30: return offsetof(VexGuestPPC32State, guest_GPR30); - case 31: return offsetof(VexGuestPPC32State, guest_GPR31); - default: 
break; - } - } - vpanic("integerGuestRegOffset(ppc32,be)"); /*notreached*/ + switch (archreg) { + case 0: return offsetofPPCGuestState(guest_GPR0); + case 1: return offsetofPPCGuestState(guest_GPR1); + case 2: return offsetofPPCGuestState(guest_GPR2); + case 3: return offsetofPPCGuestState(guest_GPR3); + case 4: return offsetofPPCGuestState(guest_GPR4); + case 5: return offsetofPPCGuestState(guest_GPR5); + case 6: return offsetofPPCGuestState(guest_GPR6); + case 7: return offsetofPPCGuestState(guest_GPR7); + case 8: return offsetofPPCGuestState(guest_GPR8); + case 9: return offsetofPPCGuestState(guest_GPR9); + case 10: return offsetofPPCGuestState(guest_GPR10); + case 11: return offsetofPPCGuestState(guest_GPR11); + case 12: return offsetofPPCGuestState(guest_GPR12); + case 13: return offsetofPPCGuestState(guest_GPR13); + case 14: return offsetofPPCGuestState(guest_GPR14); + case 15: return offsetofPPCGuestState(guest_GPR15); + case 16: return offsetofPPCGuestState(guest_GPR16); + case 17: return offsetofPPCGuestState(guest_GPR17); + case 18: return offsetofPPCGuestState(guest_GPR18); + case 19: return offsetofPPCGuestState(guest_GPR19); + case 20: return offsetofPPCGuestState(guest_GPR20); + case 21: return offsetofPPCGuestState(guest_GPR21); + case 22: return offsetofPPCGuestState(guest_GPR22); + case 23: return offsetofPPCGuestState(guest_GPR23); + case 24: return offsetofPPCGuestState(guest_GPR24); + case 25: return offsetofPPCGuestState(guest_GPR25); + case 26: return offsetofPPCGuestState(guest_GPR26); + case 27: return offsetofPPCGuestState(guest_GPR27); + case 28: return offsetofPPCGuestState(guest_GPR28); + case 29: return offsetofPPCGuestState(guest_GPR29); + case 30: return offsetofPPCGuestState(guest_GPR30); + case 31: return offsetofPPCGuestState(guest_GPR31); + default: break; + } + vpanic("integerGuestRegOffset(ppc,be)"); /*notreached*/ } static IRExpr* getIReg ( UInt archreg ) @@ -973,80 +887,42 @@ static Int floatGuestRegOffset ( UInt archreg ) { 
vassert(archreg < 32); - if (mode64) { - switch (archreg) { - case 0: return offsetof(VexGuestPPC64State, guest_FPR0); - case 1: return offsetof(VexGuestPPC64State, guest_FPR1); - case 2: return offsetof(VexGuestPPC64State, guest_FPR2); - case 3: return offsetof(VexGuestPPC64State, guest_FPR3); - case 4: return offsetof(VexGuestPPC64State, guest_FPR4); - case 5: return offsetof(VexGuestPPC64State, guest_FPR5); - case 6: return offsetof(VexGuestPPC64State, guest_FPR6); - case 7: return offsetof(VexGuestPPC64State, guest_FPR7); - case 8: return offsetof(VexGuestPPC64State, guest_FPR8); - case 9: return offsetof(VexGuestPPC64State, guest_FPR9); - case 10: return offsetof(VexGuestPPC64State, guest_FPR10); - case 11: return offsetof(VexGuestPPC64State, guest_FPR11); - case 12: return offsetof(VexGuestPPC64State, guest_FPR12); - case 13: return offsetof(VexGuestPPC64State, guest_FPR13); - case 14: return offsetof(VexGuestPPC64State, guest_FPR14); - case 15: return offsetof(VexGuestPPC64State, guest_FPR15); - case 16: return offsetof(VexGuestPPC64State, guest_FPR16); - case 17: return offsetof(VexGuestPPC64State, guest_FPR17); - case 18: return offsetof(VexGuestPPC64State, guest_FPR18); - case 19: return offsetof(VexGuestPPC64State, guest_FPR19); - case 20: return offsetof(VexGuestPPC64State, guest_FPR20); - case 21: return offsetof(VexGuestPPC64State, guest_FPR21); - case 22: return offsetof(VexGuestPPC64State, guest_FPR22); - case 23: return offsetof(VexGuestPPC64State, guest_FPR23); - case 24: return offsetof(VexGuestPPC64State, guest_FPR24); - case 25: return offsetof(VexGuestPPC64State, guest_FPR25); - case 26: return offsetof(VexGuestPPC64State, guest_FPR26); - case 27: return offsetof(VexGuestPPC64State, guest_FPR27); - case 28: return offsetof(VexGuestPPC64State, guest_FPR28); - case 29: return offsetof(VexGuestPPC64State, guest_FPR29); - case 30: return offsetof(VexGuestPPC64State, guest_FPR30); - case 31: return offsetof(VexGuestPPC64State, guest_FPR31); - 
default: break; - } - } else { - switch (archreg) { - case 0: return offsetof(VexGuestPPC32State, guest_FPR0); - case 1: return offsetof(VexGuestPPC32State, guest_FPR1); - case 2: return offsetof(VexGuestPPC32State, guest_FPR2); - case 3: return offsetof(VexGuestPPC32State, guest_FPR3); - case 4: return offsetof(VexGuestPPC32State, guest_FPR4); - case 5: return offsetof(VexGuestPPC32State, guest_FPR5); - case 6: return offsetof(VexGuestPPC32State, guest_FPR6); - case 7: return offsetof(VexGuestPPC32State, guest_FPR7); - case 8: return offsetof(VexGuestPPC32State, guest_FPR8); - case 9: return offsetof(VexGuestPPC32State, guest_FPR9); - case 10: return offsetof(VexGuestPPC32State, guest_FPR10); - case 11: return offsetof(VexGuestPPC32State, guest_FPR11); - case 12: return offsetof(VexGuestPPC32State, guest_FPR12); - case 13: return offsetof(VexGuestPPC32State, guest_FPR13); - case 14: return offsetof(VexGuestPPC32State, guest_FPR14); - case 15: return offsetof(VexGuestPPC32State, guest_FPR15); - case 16: return offsetof(VexGuestPPC32State, guest_FPR16); - case 17: return offsetof(VexGuestPPC32State, guest_FPR17); - case 18: return offsetof(VexGuestPPC32State, guest_FPR18); - case 19: return offsetof(VexGuestPPC32State, guest_FPR19); - case 20: return offsetof(VexGuestPPC32State, guest_FPR20); - case 21: return offsetof(VexGuestPPC32State, guest_FPR21); - case 22: return offsetof(VexGuestPPC32State, guest_FPR22); - case 23: return offsetof(VexGuestPPC32State, guest_FPR23); - case 24: return offsetof(VexGuestPPC32State, guest_FPR24); - case 25: return offsetof(VexGuestPPC32State, guest_FPR25); - case 26: return offsetof(VexGuestPPC32State, guest_FPR26); - case 27: return offsetof(VexGuestPPC32State, guest_FPR27); - case 28: return offsetof(VexGuestPPC32State, guest_FPR28); - case 29: return offsetof(VexGuestPPC32State, guest_FPR29); - case 30: return offsetof(VexGuestPPC32State, guest_FPR30); - case 31: return offsetof(VexGuestPPC32State, guest_FPR31); - default: 
break; - } - } - vpanic("floatGuestRegOffset(ppc32)"); /*notreached*/ + switch (archreg) { + case 0: return offsetofPPCGuestState(guest_FPR0); + case 1: return offsetofPPCGuestState(guest_FPR1); + case 2: return offsetofPPCGuestState(guest_FPR2); + case 3: return offsetofPPCGuestState(guest_FPR3); + case 4: return offsetofPPCGuestState(guest_FPR4); + case 5: return offsetofPPCGuestState(guest_FPR5); + case 6: return offsetofPPCGuestState(guest_FPR6); + case 7: return offsetofPPCGuestState(guest_FPR7); + case 8: return offsetofPPCGuestState(guest_FPR8); + case 9: return offsetofPPCGuestState(guest_FPR9); + case 10: return offsetofPPCGuestState(guest_FPR10); + case 11: return offsetofPPCGuestState(guest_FPR11); + case 12: return offsetofPPCGuestState(guest_FPR12); + case 13: return offsetofPPCGuestState(guest_FPR13); + case 14: return offsetofPPCGuestState(guest_FPR14); + case 15: return offsetofPPCGuestState(guest_FPR15); + case 16: return offsetofPPCGuestState(guest_FPR16); + case 17: return offsetofPPCGuestState(guest_FPR17); + case 18: return offsetofPPCGuestState(guest_FPR18); + case 19: return offsetofPPCGuestState(guest_FPR19); + case 20: return offsetofPPCGuestState(guest_FPR20); + case 21: return offsetofPPCGuestState(guest_FPR21); + case 22: return offsetofPPCGuestState(guest_FPR22); + case 23: return offsetofPPCGuestState(guest_FPR23); + case 24: return offsetofPPCGuestState(guest_FPR24); + case 25: return offsetofPPCGuestState(guest_FPR25); + case 26: return offsetofPPCGuestState(guest_FPR26); + case 27: return offsetofPPCGuestState(guest_FPR27); + case 28: return offsetofPPCGuestState(guest_FPR28); + case 29: return offsetofPPCGuestState(guest_FPR29); + case 30: return offsetofPPCGuestState(guest_FPR30); + case 31: return offsetofPPCGuestState(guest_FPR31); + default: break; + } + vpanic("floatGuestRegOffset(ppc)"); /*notreached*/ } static IRExpr* getFReg ( UInt archreg ) @@ -1068,80 +944,42 @@ static Int vectorGuestRegOffset ( UInt archreg ) { 
vassert(archreg < 32); - if (mode64) { - switch (archreg) { - case 0: return offsetof(VexGuestPPC64State, guest_VR0); - case 1: return offsetof(VexGuestPPC64State, guest_VR1); - case 2: return offsetof(VexGuestPPC64State, guest_VR2); - case 3: return offsetof(VexGuestPPC64State, guest_VR3); - case 4: return offsetof(VexGuestPPC64State, guest_VR4); - case 5: return offsetof(VexGuestPPC64State, guest_VR5); - case 6: return offsetof(VexGuestPPC64State, guest_VR6); - case 7: return offsetof(VexGuestPPC64State, guest_VR7); - case 8: return offsetof(VexGuestPPC64State, guest_VR8); - case 9: return offsetof(VexGuestPPC64State, guest_VR9); - case 10: return offsetof(VexGuestPPC64State, guest_VR10); - case 11: return offsetof(VexGuestPPC64State, guest_VR11); - case 12: return offsetof(VexGuestPPC64State, guest_VR12); - case 13: return offsetof(VexGuestPPC64State, guest_VR13); - case 14: return offsetof(VexGuestPPC64State, guest_VR14); - case 15: return offsetof(VexGuestPPC64State, guest_VR15); - case 16: return offsetof(VexGuestPPC64State, guest_VR16); - case 17: return offsetof(VexGuestPPC64State, guest_VR17); - case 18: return offsetof(VexGuestPPC64State, guest_VR18); - case 19: return offsetof(VexGuestPPC64State, guest_VR19); - case 20: return offsetof(VexGuestPPC64State, guest_VR20); - case 21: return offsetof(VexGuestPPC64State, guest_VR21); - case 22: return offsetof(VexGuestPPC64State, guest_VR22); - case 23: return offsetof(VexGuestPPC64State, guest_VR23); - case 24: return offsetof(VexGuestPPC64State, guest_VR24); - case 25: return offsetof(VexGuestPPC64State, guest_VR25); - case 26: return offsetof(VexGuestPPC64State, guest_VR26); - case 27: return offsetof(VexGuestPPC64State, guest_VR27); - case 28: return offsetof(VexGuestPPC64State, guest_VR28); - case 29: return offsetof(VexGuestPPC64State, guest_VR29); - case 30: return offsetof(VexGuestPPC64State, guest_VR30); - case 31: return offsetof(VexGuestPPC64State, guest_VR31); - default: break; - } - } else { - 
switch (archreg) { - case 0: return offsetof(VexGuestPPC32State, guest_VR0); - case 1: return offsetof(VexGuestPPC32State, guest_VR1); - case 2: return offsetof(VexGuestPPC32State, guest_VR2); - case 3: return offsetof(VexGuestPPC32State, guest_VR3); - case 4: return offsetof(VexGuestPPC32State, guest_VR4); - case 5: return offsetof(VexGuestPPC32State, guest_VR5); - case 6: return offsetof(VexGuestPPC32State, guest_VR6); - case 7: return offsetof(VexGuestPPC32State, guest_VR7); - case 8: return offsetof(VexGuestPPC32State, guest_VR8); - case 9: return offsetof(VexGuestPPC32State, guest_VR9); - case 10: return offsetof(VexGuestPPC32State, guest_VR10); - case 11: return offsetof(VexGuestPPC32State, guest_VR11); - case 12: return offsetof(VexGuestPPC32State, guest_VR12); - case 13: return offsetof(VexGuestPPC32State, guest_VR13); - case 14: return offsetof(VexGuestPPC32State, guest_VR14); - case 15: return offsetof(VexGuestPPC32State, guest_VR15); - case 16: return offsetof(VexGuestPPC32State, guest_VR16); - case 17: return offsetof(VexGuestPPC32State, guest_VR17); - case 18: return offsetof(VexGuestPPC32State, guest_VR18); - case 19: return offsetof(VexGuestPPC32State, guest_VR19); - case 20: return offsetof(VexGuestPPC32State, guest_VR20); - case 21: return offsetof(VexGuestPPC32State, guest_VR21); - case 22: return offsetof(VexGuestPPC32State, guest_VR22); - case 23: return offsetof(VexGuestPPC32State, guest_VR23); - case 24: return offsetof(VexGuestPPC32State, guest_VR24); - case 25: return offsetof(VexGuestPPC32State, guest_VR25); - case 26: return offsetof(VexGuestPPC32State, guest_VR26); - case 27: return offsetof(VexGuestPPC32State, guest_VR27); - case 28: return offsetof(VexGuestPPC32State, guest_VR28); - case 29: return offsetof(VexGuestPPC32State, guest_VR29); - case 30: return offsetof(VexGuestPPC32State, guest_VR30); - case 31: return offsetof(VexGuestPPC32State, guest_VR31); - default: break; - } - } - vpanic("vextorGuestRegOffset(ppc32)"); 
/*notreached*/ + switch (archreg) { + case 0: return offsetofPPCGuestState(guest_VR0); + case 1: return offsetofPPCGuestState(guest_VR1); + case 2: return offsetofPPCGuestState(guest_VR2); + case 3: return offsetofPPCGuestState(guest_VR3); + case 4: return offsetofPPCGuestState(guest_VR4); + case 5: return offsetofPPCGuestState(guest_VR5); + case 6: return offsetofPPCGuestState(guest_VR6); + case 7: return offsetofPPCGuestState(guest_VR7); + case 8: return offsetofPPCGuestState(guest_VR8); + case 9: return offsetofPPCGuestState(guest_VR9); + case 10: return offsetofPPCGuestState(guest_VR10); + case 11: return offsetofPPCGuestState(guest_VR11); + case 12: return offsetofPPCGuestState(guest_VR12); + case 13: return offsetofPPCGuestState(guest_VR13); + case 14: return offsetofPPCGuestState(guest_VR14); + case 15: return offsetofPPCGuestState(guest_VR15); + case 16: return offsetofPPCGuestState(guest_VR16); + case 17: return offsetofPPCGuestState(guest_VR17); + case 18: return offsetofPPCGuestState(guest_VR18); + case 19: return offsetofPPCGuestState(guest_VR19); + case 20: return offsetofPPCGuestState(guest_VR20); + case 21: return offsetofPPCGuestState(guest_VR21); + case 22: return offsetofPPCGuestState(guest_VR22); + case 23: return offsetofPPCGuestState(guest_VR23); + case 24: return offsetofPPCGuestState(guest_VR24); + case 25: return offsetofPPCGuestState(guest_VR25); + case 26: return offsetofPPCGuestState(guest_VR26); + case 27: return offsetofPPCGuestState(guest_VR27); + case 28: return offsetofPPCGuestState(guest_VR28); + case 29: return offsetofPPCGuestState(guest_VR29); + case 30: return offsetofPPCGuestState(guest_VR30); + case 31: return offsetofPPCGuestState(guest_VR31); + default: break; + } + vpanic("vectorGuestRegOffset(ppc)"); /*notreached*/ } static IRExpr* getVReg ( UInt archreg ) @@ -1160,59 +998,31 @@ static void putVReg ( UInt archreg, IRExpr* e ) static Int guestCR321offset ( UInt cr ) { - if (mode64) { - switch (cr) { - case 0: return
offsetof(VexGuestPPC64State, guest_CR0_321 ); - case 1: return offsetof(VexGuestPPC64State, guest_CR1_321 ); - case 2: return offsetof(VexGuestPPC64State, guest_CR2_321 ); - case 3: return offsetof(VexGuestPPC64State, guest_CR3_321 ); - case 4: return offsetof(VexGuestPPC64State, guest_CR4_321 ); - case 5: return offsetof(VexGuestPPC64State, guest_CR5_321 ); - case 6: return offsetof(VexGuestPPC64State, guest_CR6_321 ); - case 7: return offsetof(VexGuestPPC64State, guest_CR7_321 ); - default: vpanic("guestCR321offset(ppc32)"); - } - } else { - switch (cr) { - case 0: return offsetof(VexGuestPPC32State, guest_CR0_321 ); - case 1: return offsetof(VexGuestPPC32State, guest_CR1_321 ); - case 2: return offsetof(VexGuestPPC32State, guest_CR2_321 ); - case 3: return offsetof(VexGuestPPC32State, guest_CR3_321 ); - case 4: return offsetof(VexGuestPPC32State, guest_CR4_321 ); - case 5: return offsetof(VexGuestPPC32State, guest_CR5_321 ); - case 6: return offsetof(VexGuestPPC32State, guest_CR6_321 ); - case 7: return offsetof(VexGuestPPC32State, guest_CR7_321 ); - default: vpanic("guestCR321offset(ppc32)"); - } + switch (cr) { + case 0: return offsetofPPCGuestState(guest_CR0_321 ); + case 1: return offsetofPPCGuestState(guest_CR1_321 ); + case 2: return offsetofPPCGuestState(guest_CR2_321 ); + case 3: return offsetofPPCGuestState(guest_CR3_321 ); + case 4: return offsetofPPCGuestState(guest_CR4_321 ); + case 5: return offsetofPPCGuestState(guest_CR5_321 ); + case 6: return offsetofPPCGuestState(guest_CR6_321 ); + case 7: return offsetofPPCGuestState(guest_CR7_321 ); + default: vpanic("guestCR321offset(ppc)"); } } static Int guestCR0offset ( UInt cr ) { - if (mode64) { - switch (cr) { - case 0: return offsetof(VexGuestPPC64State, guest_CR0_0 ); - case 1: return offsetof(VexGuestPPC64State, guest_CR1_0 ); - case 2: return offsetof(VexGuestPPC64State, guest_CR2_0 ); - case 3: return offsetof(VexGuestPPC64State, guest_CR3_0 ); - case 4: return offsetof(VexGuestPPC64State, 
guest_CR4_0 ); - case 5: return offsetof(VexGuestPPC64State, guest_CR5_0 ); - case 6: return offsetof(VexGuestPPC64State, guest_CR6_0 ); - case 7: return offsetof(VexGuestPPC64State, guest_CR7_0 ); - default: vpanic("guestCR3offset(ppc32)"); - } - } else { - switch (cr) { - case 0: return offsetof(VexGuestPPC32State, guest_CR0_0 ); - case 1: return offsetof(VexGuestPPC32State, guest_CR1_0 ); - case 2: return offsetof(VexGuestPPC32State, guest_CR2_0 ); - case 3: return offsetof(VexGuestPPC32State, guest_CR3_0 ); - case 4: return offsetof(VexGuestPPC32State, guest_CR4_0 ); - case 5: return offsetof(VexGuestPPC32State, guest_CR5_0 ); - case 6: return offsetof(VexGuestPPC32State, guest_CR6_0 ); - case 7: return offsetof(VexGuestPPC32State, guest_CR7_0 ); - default: vpanic("guestCR3offset(ppc32)"); - } + switch (cr) { + case 0: return offsetofPPCGuestState(guest_CR0_0 ); + case 1: return offsetofPPCGuestState(guest_CR1_0 ); + case 2: return offsetofPPCGuestState(guest_CR2_0 ); + case 3: return offsetofPPCGuestState(guest_CR3_0 ); + case 4: return offsetofPPCGuestState(guest_CR4_0 ); + case 5: return offsetofPPCGuestState(guest_CR5_0 ); + case 6: return offsetofPPCGuestState(guest_CR6_0 ); + case 7: return offsetofPPCGuestState(guest_CR7_0 ); + default: vpanic("guestCR3offset(ppc)"); } } @@ -1331,7 +1141,7 @@ static IRExpr* addr_align( IRExpr* addr, UChar align ) case 16: mask = ((Long)-1) << 4; break; // quad-word aligned default: vex_printf("addr_align: align = %u\n", align); - vpanic("addr_align(ppc32)"); + vpanic("addr_align(ppc)"); } vassert(typeOfIRExpr(irbb->tyenv,addr) == ty); @@ -1462,7 +1272,8 @@ static void putCRbit ( UInt bi, IRExpr* bit ) zero and nonzero if the bit is 1. Write into *where the index of where the bit will be. 
*/ -static IRExpr* /* :: Ity_I32 */ getCRbit_anywhere ( UInt bi, Int* where ) +static +IRExpr* /* :: Ity_I32 */ getCRbit_anywhere ( UInt bi, Int* where ) { UInt n = bi / 4; UInt off = bi % 4; @@ -1537,7 +1348,8 @@ static void set_AV_CR6 ( IRExpr* result, Bool test_all_ones ) unop(Iop_V128to32, binop(Iop_AndV128, binop(Iop_AndV128, mkexpr(v0), mkexpr(v1)), - binop(Iop_AndV128, mkexpr(v2), mkexpr(v3)))))) ); + binop(Iop_AndV128, mkexpr(v2), mkexpr(v3))) + ))) ); putCR321( 6, binop(Iop_Or8, binop(Iop_Shl8, mkexpr(rOnes), mkU8(3)), binop(Iop_Shl8, mkexpr(rZeros), mkU8(1))) ); @@ -1557,34 +1369,33 @@ static void putXER_SO ( IRExpr* e ) { vassert(typeOfIRExpr(irbb->tyenv, e) == Ity_I8); IRExpr* so = binop(Iop_And8, e, mkU8(1)); - stmt( IRStmt_Put( (mode64 ? OFFB64_XER_SO : OFFB32_XER_SO), so) ); + stmt( IRStmt_Put( OFFB_XER_SO, so ) ); } static void putXER_OV ( IRExpr* e ) { vassert(typeOfIRExpr(irbb->tyenv, e) == Ity_I8); IRExpr* ov = binop(Iop_And8, e, mkU8(1)); - stmt( IRStmt_Put( (mode64 ? OFFB64_XER_OV : OFFB32_XER_OV), ov) ); + stmt( IRStmt_Put( OFFB_XER_OV, ov ) ); } static void putXER_CA ( IRExpr* e ) { vassert(typeOfIRExpr(irbb->tyenv, e) == Ity_I8); IRExpr* ca = binop(Iop_And8, e, mkU8(1)); - stmt( IRStmt_Put( (mode64 ? OFFB64_XER_CA : OFFB32_XER_CA), ca) ); + stmt( IRStmt_Put( OFFB_XER_CA, ca ) ); } static void putXER_BC ( IRExpr* e ) { vassert(typeOfIRExpr(irbb->tyenv, e) == Ity_I8); IRExpr* bc = binop(Iop_And8, e, mkU8(0x7F)); - stmt( IRStmt_Put( (mode64 ? OFFB64_XER_BC : OFFB32_XER_BC), bc) ); + stmt( IRStmt_Put( OFFB_XER_BC, bc ) ); } static IRExpr* /* :: Ity_I8 */ getXER_SO ( void ) { - return mode64 ? IRExpr_Get( OFFB64_XER_SO, Ity_I8 ) : - IRExpr_Get( OFFB32_XER_SO, Ity_I8 ); + return IRExpr_Get( OFFB_XER_SO, Ity_I8 ); } static IRExpr* /* :: Ity_I32 */ getXER_SO32 ( void ) @@ -1594,8 +1405,7 @@ static IRExpr* /* :: Ity_I32 */ getXER_SO32 ( void ) static IRExpr* /* :: Ity_I8 */ getXER_OV ( void ) { - return mode64 ? 
IRExpr_Get( OFFB64_XER_OV, Ity_I8 ) : - IRExpr_Get( OFFB32_XER_OV, Ity_I8 ); + return IRExpr_Get( OFFB_XER_OV, Ity_I8 ); } static IRExpr* /* :: Ity_I32 */ getXER_OV32 ( void ) @@ -1605,21 +1415,18 @@ static IRExpr* /* :: Ity_I32 */ getXER_OV32 ( void ) static IRExpr* /* :: Ity_I32 */ getXER_CA32 ( void ) { - IRExpr* ca = mode64 ? IRExpr_Get( OFFB64_XER_CA, Ity_I8 ) : - IRExpr_Get( OFFB32_XER_CA, Ity_I8 ); + IRExpr* ca = IRExpr_Get( OFFB_XER_CA, Ity_I8 ); return binop( Iop_And32, unop(Iop_8Uto32, ca ), mkU32(1) ); } static IRExpr* /* :: Ity_I8 */ getXER_BC ( void ) { - return mode64 ? IRExpr_Get( OFFB64_XER_BC, Ity_I8 ) : - IRExpr_Get( OFFB32_XER_BC, Ity_I8 ); + return IRExpr_Get( OFFB_XER_BC, Ity_I8 ); } static IRExpr* /* :: Ity_I32 */ getXER_BC32 ( void ) { - IRExpr* bc = mode64 ? IRExpr_Get( OFFB64_XER_BC, Ity_I8 ) : - IRExpr_Get( OFFB32_XER_BC, Ity_I8 ); + IRExpr* bc = IRExpr_Get( OFFB_XER_BC, Ity_I8 ); return binop( Iop_And32, unop(Iop_8Uto32, bc), mkU32(0x7F) ); } @@ -1632,7 +1439,7 @@ static void set_XER_OV_32( UInt op, IRExpr* res, { IRTemp t64; IRExpr* xer_ov; - vassert(op < PPC32G_FLAG_OP_NUMBER); + vassert(op < PPCG_FLAG_OP_NUMBER); vassert(typeOfIRExpr(irbb->tyenv,res) == Ity_I32); vassert(typeOfIRExpr(irbb->tyenv,argL) == Ity_I32); vassert(typeOfIRExpr(irbb->tyenv,argR) == Ity_I32); @@ -1652,8 +1459,8 @@ static void set_XER_OV_32( UInt op, IRExpr* res, unop(Iop_Not32, (_jj)) switch (op) { - case /* 0 */ PPC32G_FLAG_OP_ADD: - case /* 1 */ PPC32G_FLAG_OP_ADDE: + case /* 0 */ PPCG_FLAG_OP_ADD: + case /* 1 */ PPCG_FLAG_OP_ADDE: /* (argL^argR^-1) & (argL^res) & (1<<31) ?1:0 */ // i.e. 
((both_same_sign) & (sign_changed) & (sign_mask)) xer_ov @@ -1665,7 +1472,7 @@ static void set_XER_OV_32( UInt op, IRExpr* res, = binop(Iop_Shr32, xer_ov, mkU8(31) ); break; - case /* 2 */ PPC32G_FLAG_OP_DIVW: + case /* 2 */ PPCG_FLAG_OP_DIVW: /* (argL == INT32_MIN && argR == -1) || argR == 0 */ xer_ov = mkOR1( @@ -1679,13 +1486,13 @@ static void set_XER_OV_32( UInt op, IRExpr* res, = unop(Iop_1Uto32, xer_ov); break; - case /* 3 */ PPC32G_FLAG_OP_DIVWU: + case /* 3 */ PPCG_FLAG_OP_DIVWU: /* argR == 0 */ xer_ov = unop(Iop_1Uto32, binop(Iop_CmpEQ32, argR, mkU32(0))); break; - case /* 4 */ PPC32G_FLAG_OP_MULLW: + case /* 4 */ PPCG_FLAG_OP_MULLW: /* OV true if result can't be represented in 32 bits i.e sHi != sign extension of sLo */ t64 = newTemp(Ity_I64); @@ -1701,16 +1508,16 @@ static void set_XER_OV_32( UInt op, IRExpr* res, = unop(Iop_1Uto32, xer_ov); break; - case /* 5 */ PPC32G_FLAG_OP_NEG: + case /* 5 */ PPCG_FLAG_OP_NEG: /* argL == INT32_MIN */ xer_ov = unop( Iop_1Uto32, binop(Iop_CmpEQ32, argL, mkU32(INT32_MIN)) ); break; - case /* 6 */ PPC32G_FLAG_OP_SUBF: - case /* 7 */ PPC32G_FLAG_OP_SUBFC: - case /* 8 */ PPC32G_FLAG_OP_SUBFE: + case /* 6 */ PPCG_FLAG_OP_SUBF: + case /* 7 */ PPCG_FLAG_OP_SUBFC: + case /* 8 */ PPCG_FLAG_OP_SUBFE: /* ((~argL)^argR^-1) & ((~argL)^res) & (1<<31) ?1:0; */ xer_ov = AND3( XOR3(NOT(argL),argR,mkU32(-1)), @@ -1723,7 +1530,7 @@ static void set_XER_OV_32( UInt op, IRExpr* res, default: vex_printf("set_XER_OV: op = %u\n", op); - vpanic("set_XER_OV(ppc32)"); + vpanic("set_XER_OV(ppc)"); } /* xer_ov MUST denote either 0 or 1, no other value allowed */ @@ -1743,7 +1550,7 @@ static void set_XER_OV_64( UInt op, IRExpr* res, IRExpr* argL, IRExpr* argR ) { IRExpr* xer_ov; - vassert(op < PPC32G_FLAG_OP_NUMBER); + vassert(op < PPCG_FLAG_OP_NUMBER); vassert(typeOfIRExpr(irbb->tyenv,res) == Ity_I64); vassert(typeOfIRExpr(irbb->tyenv,argL) == Ity_I64); vassert(typeOfIRExpr(irbb->tyenv,argR) == Ity_I64); @@ -1763,8 +1570,8 @@ static void 
set_XER_OV_64( UInt op, IRExpr* res, unop(Iop_Not64, (_jj)) switch (op) { - case /* 0 */ PPC32G_FLAG_OP_ADD: - case /* 1 */ PPC32G_FLAG_OP_ADDE: + case /* 0 */ PPCG_FLAG_OP_ADD: + case /* 1 */ PPCG_FLAG_OP_ADDE: /* (argL^argR^-1) & (argL^res) & (1<<63) ? 1:0 */ // i.e. ((both_same_sign) & (sign_changed) & (sign_mask)) xer_ov @@ -1776,7 +1583,7 @@ static void set_XER_OV_64( UInt op, IRExpr* res, = unop(Iop_64to1, binop(Iop_Shr64, xer_ov, mkU8(63))); break; - case /* 2 */ PPC32G_FLAG_OP_DIVW: + case /* 2 */ PPCG_FLAG_OP_DIVW: /* (argL == INT64_MIN && argR == -1) || argR == 0 */ xer_ov = mkOR1( @@ -1788,13 +1595,13 @@ static void set_XER_OV_64( UInt op, IRExpr* res, ); break; - case /* 3 */ PPC32G_FLAG_OP_DIVWU: + case /* 3 */ PPCG_FLAG_OP_DIVWU: /* argR == 0 */ xer_ov = binop(Iop_CmpEQ64, argR, mkU64(0)); break; - case /* 4 */ PPC32G_FLAG_OP_MULLW: { + case /* 4 */ PPCG_FLAG_OP_MULLW: { /* OV true if result can't be represented in 64 bits i.e sHi != sign extension of sLo */ xer_ov @@ -1807,15 +1614,15 @@ static void set_XER_OV_64( UInt op, IRExpr* res, break; } - case /* 5 */ PPC32G_FLAG_OP_NEG: + case /* 5 */ PPCG_FLAG_OP_NEG: /* argL == INT64_MIN */ xer_ov = binop(Iop_CmpEQ64, argL, mkU64(INT64_MIN)); break; - case /* 6 */ PPC32G_FLAG_OP_SUBF: - case /* 7 */ PPC32G_FLAG_OP_SUBFC: - case /* 8 */ PPC32G_FLAG_OP_SUBFE: + case /* 6 */ PPCG_FLAG_OP_SUBF: + case /* 7 */ PPCG_FLAG_OP_SUBFC: + case /* 8 */ PPCG_FLAG_OP_SUBFE: /* ((~argL)^argR^-1) & ((~argL)^res) & (1<<63) ?1:0; */ xer_ov = AND3( XOR3(NOT(argL),argR,mkU64(-1)), @@ -1828,7 +1635,7 @@ static void set_XER_OV_64( UInt op, IRExpr* res, default: vex_printf("set_XER_OV: op = %u\n", op); - vpanic("set_XER_OV(ppc32)"); + vpanic("set_XER_OV(ppc64)"); } /* xer_ov MUST denote either 0 or 1, no other value allowed */ @@ -1862,7 +1669,7 @@ static void set_XER_CA_32 ( UInt op, IRExpr* res, IRExpr* argL, IRExpr* argR, IRExpr* oldca ) { IRExpr* xer_ca; - vassert(op < PPC32G_FLAG_OP_NUMBER); + vassert(op < 
PPCG_FLAG_OP_NUMBER); vassert(typeOfIRExpr(irbb->tyenv,res) == Ity_I32); vassert(typeOfIRExpr(irbb->tyenv,argL) == Ity_I32); vassert(typeOfIRExpr(irbb->tyenv,argR) == Ity_I32); @@ -1875,13 +1682,13 @@ static void set_XER_CA_32 ( UInt op, IRExpr* res, if it has any other value, that invariant has been violated. */ switch (op) { - case /* 0 */ PPC32G_FLAG_OP_ADD: + case /* 0 */ PPCG_FLAG_OP_ADD: /* res tyenv,res) == Ity_I64); vassert(typeOfIRExpr(irbb->tyenv,argL) == Ity_I64); vassert(typeOfIRExpr(irbb->tyenv,argR) == Ity_I64); @@ -1991,13 +1800,13 @@ static void set_XER_CA_64 ( UInt op, IRExpr* res, if it has any other value, that invariant has been violated. */ switch (op) { - case /* 0 */ PPC32G_FLAG_OP_ADD: + case /* 0 */ PPCG_FLAG_OP_ADD: /* res tyenv,src ); vassert( reg < PPC_GST_MAX ); switch (reg) { case PPC_GST_CIA: - vassert( typeOfIRExpr(irbb->tyenv,src ) == ty ); - stmt( IRStmt_Put( (mode64 ? OFFB64_CIA : OFFB32_CIA), src ) ); + vassert( ty_src == ty ); + stmt( IRStmt_Put( OFFB_CIA, src ) ); break; case PPC_GST_LR: - vassert( typeOfIRExpr(irbb->tyenv,src ) == ty ); - stmt( IRStmt_Put( (mode64 ? OFFB64_LR : OFFB32_LR), src ) ); + vassert( ty_src == ty ); + stmt( IRStmt_Put( OFFB_LR, src ) ); break; case PPC_GST_CTR: - vassert( typeOfIRExpr(irbb->tyenv,src ) == ty ); - stmt( IRStmt_Put( (mode64 ? OFFB64_CTR : OFFB32_CTR), src ) ); + vassert( ty_src == ty ); + stmt( IRStmt_Put( OFFB_CTR, src ) ); break; case PPC_GST_VRSAVE: - vassert( typeOfIRExpr(irbb->tyenv,src ) == Ity_I32 ); - stmt( IRStmt_Put((mode64 ? OFFB64_VRSAVE :OFFB32_VRSAVE),src)); + vassert( ty_src == Ity_I32 ); + stmt( IRStmt_Put( OFFB_VRSAVE,src)); break; case PPC_GST_VSCR: - vassert( typeOfIRExpr(irbb->tyenv,src ) == Ity_I32 ); - stmt( IRStmt_Put( (mode64 ? 
OFFB64_VSCR : OFFB32_VSCR), + vassert( ty_src == Ity_I32 ); + stmt( IRStmt_Put( OFFB_VSCR, binop(Iop_And32, src, mkU32(MASK_VSCR_VALID)) ) ); break; case PPC_GST_XER: - vassert( typeOfIRExpr(irbb->tyenv,src ) == Ity_I32 ); + vassert( ty_src == Ity_I32 ); putXER_SO( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(31))) ); putXER_OV( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(30))) ); putXER_CA( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(29))) ); @@ -2332,28 +2143,28 @@ static void putGST ( PPC_GST reg, IRExpr* src ) break; case PPC_GST_EMWARN: - vassert(typeOfIRExpr(irbb->tyenv,src) == Ity_I32); - stmt( IRStmt_Put((mode64 ? OFFB64_EMWARN : OFFB32_EMWARN),src) ); + vassert( ty_src == Ity_I32 ); + stmt( IRStmt_Put( OFFB_EMWARN,src) ); break; case PPC_GST_TISTART: - vassert( typeOfIRExpr(irbb->tyenv,src ) == ty ); - stmt( IRStmt_Put((mode64 ? OFFB64_TISTART : - OFFB32_TISTART), src) ); + vassert( ty_src == ty ); + stmt( IRStmt_Put( OFFB_TISTART, src) ); break; case PPC_GST_TILEN: - vassert( typeOfIRExpr(irbb->tyenv,src ) == ty ); - stmt( IRStmt_Put((mode64 ? OFFB64_TILEN : OFFB32_TILEN), src) ); + vassert( ty_src == ty ); + stmt( IRStmt_Put( OFFB_TILEN, src) ); break; case PPC_GST_RESVN: - vassert( typeOfIRExpr(irbb->tyenv,src ) == ty ); - stmt( IRStmt_Put((mode64 ? OFFB64_RESVN : OFFB32_RESVN), src) ); + vassert( ty_src == ty ); + stmt( IRStmt_Put( OFFB_RESVN, src) ); break; default: - vpanic("putGST(ppc32)"); + vex_printf("putGST(ppc): reg = %u", reg); + vpanic("putGST(ppc)"); } } @@ -2372,7 +2183,7 @@ static void putGST_masked ( PPC_GST reg, IRExpr* src, UInt mask ) /* Allow writes to Rounding Mode */ if (mask & 0x3) { - stmt( IRStmt_Put( (mode64 ? 
OFFB64_FPROUND : OFFB32_FPROUND), + stmt( IRStmt_Put( OFFB_FPROUND, binop(Iop_And32, src, mkU32(0x3)) )); } @@ -2399,8 +2210,8 @@ static void putGST_masked ( PPC_GST reg, IRExpr* src, UInt mask ) } default: - vex_printf("putGST_masked(ppc32): %u", reg); - vpanic("putGST_masked(ppc32)"); + vex_printf("putGST_masked(ppc): reg = %u", reg); + vpanic("putGST_masked(ppc)"); } } @@ -2472,7 +2283,7 @@ static Bool dis_int_arith ( UInt theInstr ) DIP("addic r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16); assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA), mkSzExtendS16(ty, uimm16) ) ); - set_XER_CA( ty, PPC32G_FLAG_OP_ADD, + set_XER_CA( ty, PPCG_FLAG_OP_ADD, mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16), mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ ); break; @@ -2481,7 +2292,7 @@ static Bool dis_int_arith ( UInt theInstr ) DIP("addic. r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16); assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA), mkSzExtendS16(ty, uimm16) ) ); - set_XER_CA( ty, PPC32G_FLAG_OP_ADD, + set_XER_CA( ty, PPCG_FLAG_OP_ADD, mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16), mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ ); do_rc = True; // Always record to CR @@ -2531,7 +2342,7 @@ static Bool dis_int_arith ( UInt theInstr ) assign( rD, binop( mkSzOp(ty, Iop_Sub8), mkSzExtendS16(ty, uimm16), mkexpr(rA)) ); - set_XER_CA( ty, PPC32G_FLAG_OP_SUBFI, + set_XER_CA( ty, PPCG_FLAG_OP_SUBFI, mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16), mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ ); break; @@ -2543,27 +2354,27 @@ static Bool dis_int_arith ( UInt theInstr ) switch (opc2) { case 0x10A: // add (Add, PPC32 p347) DIP("add%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? 
".":"", rD_addr, rA_addr, rB_addr); assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA), mkexpr(rB) ) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_ADD, + set_XER_OV( ty, PPCG_FLAG_OP_ADD, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; case 0x00A: // addc (Add Carrying, PPC32 p348) DIP("addc%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA), mkexpr(rB)) ); - set_XER_CA( ty, PPC32G_FLAG_OP_ADD, + set_XER_CA( ty, PPCG_FLAG_OP_ADD, mkexpr(rD), mkexpr(rA), mkexpr(rB), mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_ADD, + set_XER_OV( ty, PPCG_FLAG_OP_ADD, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; @@ -2571,32 +2382,32 @@ static Bool dis_int_arith ( UInt theInstr ) case 0x08A: { // adde (Add Extended, PPC32 p349) IRTemp old_xer_ca = newTemp(ty); DIP("adde%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); // rD = rA + rB + XER[CA] assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) ); assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA), binop( mkSzOp(ty, Iop_Add8), mkexpr(rB), mkexpr(old_xer_ca))) ); - set_XER_CA( ty, PPC32G_FLAG_OP_ADDE, + set_XER_CA( ty, PPCG_FLAG_OP_ADDE, mkexpr(rD), mkexpr(rA), mkexpr(rB), mkexpr(old_xer_ca) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_ADDE, + set_XER_OV( ty, PPCG_FLAG_OP_ADDE, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; } - case 0x0EA: { // addme (Add to Minus One Extended, PPC32 p354) + case 0x0EA: { // addme (Add to Minus One Extended, PPC32 p354) IRTemp old_xer_ca = newTemp(ty); IRExpr *min_one; if (rB_addr != 0) { - vex_printf("dis_int_arith(PPC32)(addme,rB_addr)\n"); + vex_printf("dis_int_arith(ppc)(addme,rB_addr)\n"); return False; } DIP("addme%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? 
".":"", rD_addr, rA_addr, rB_addr); // rD = rA + (-1) + XER[CA] // => Just another form of adde @@ -2605,11 +2416,11 @@ static Bool dis_int_arith ( UInt theInstr ) assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA), binop( mkSzOp(ty, Iop_Add8), min_one, mkexpr(old_xer_ca)) )); - set_XER_CA( ty, PPC32G_FLAG_OP_ADDE, + set_XER_CA( ty, PPCG_FLAG_OP_ADDE, mkexpr(rD), mkexpr(rA), min_one, mkexpr(old_xer_ca) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_ADDE, + set_XER_OV( ty, PPCG_FLAG_OP_ADDE, mkexpr(rD), mkexpr(rA), min_one ); } break; @@ -2618,22 +2429,22 @@ static Bool dis_int_arith ( UInt theInstr ) case 0x0CA: { // addze (Add to Zero Extended, PPC32 p355) IRTemp old_xer_ca = newTemp(ty); if (rB_addr != 0) { - vex_printf("dis_int_arith(PPC32)(addze,rB_addr)\n"); + vex_printf("dis_int_arith(ppc)(addze,rB_addr)\n"); return False; } DIP("addze%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); // rD = rA + (0) + XER[CA] // => Just another form of adde assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) ); assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA), mkexpr(old_xer_ca)) ); - set_XER_CA( ty, PPC32G_FLAG_OP_ADDE, + set_XER_CA( ty, PPCG_FLAG_OP_ADDE, mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0), mkexpr(old_xer_ca) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_ADDE, + set_XER_OV( ty, PPCG_FLAG_OP_ADDE, mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0) ); } break; @@ -2641,7 +2452,7 @@ static Bool dis_int_arith ( UInt theInstr ) case 0x1EB: // divw (Divide Word, PPC32 p388) DIP("divw%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? 
".":"", rD_addr, rA_addr, rB_addr); if (mode64) { /* Note: @@ -2653,15 +2464,16 @@ static Bool dis_int_arith ( UInt theInstr ) - makes set_CR0 happy */ IRExpr* dividend = mk64lo32Sto64( mkexpr(rA) ); IRExpr* divisor = mk64lo32Sto64( mkexpr(rB) ); - assign( rD, mk64lo32Uto64( binop(Iop_DivS64, dividend, divisor) ) ); + assign( rD, mk64lo32Uto64( binop(Iop_DivS64, dividend, + divisor) ) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_DIVW, + set_XER_OV( ty, PPCG_FLAG_OP_DIVW, mkexpr(rD), dividend, divisor ); } } else { assign( rD, binop(Iop_DivS32, mkexpr(rA), mkexpr(rB)) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_DIVW, + set_XER_OV( ty, PPCG_FLAG_OP_DIVW, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } } @@ -2673,7 +2485,7 @@ static Bool dis_int_arith ( UInt theInstr ) case 0x1CB: // divwu (Divide Word Unsigned, PPC32 p389) DIP("divwu%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); if (mode64) { /* Note: @@ -2683,15 +2495,16 @@ static Bool dis_int_arith ( UInt theInstr ) */ IRExpr* dividend = mk64lo32Uto64( mkexpr(rA) ); IRExpr* divisor = mk64lo32Uto64( mkexpr(rB) ); - assign( rD, mk64lo32Uto64( binop(Iop_DivU64, dividend, divisor) ) ); + assign( rD, mk64lo32Uto64( binop(Iop_DivU64, dividend, + divisor) ) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_DIVWU, + set_XER_OV( ty, PPCG_FLAG_OP_DIVWU, mkexpr(rD), dividend, divisor ); } } else { assign( rD, binop(Iop_DivU32, mkexpr(rA), mkexpr(rB)) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_DIVWU, + set_XER_OV( ty, PPCG_FLAG_OP_DIVWU, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } } @@ -2700,10 +2513,10 @@ static Bool dis_int_arith ( UInt theInstr ) case 0x04B: // mulhw (Multiply High Word, PPC32 p488) if (flag_OE != 0) { - vex_printf("dis_int_arith(PPC32)(mulhw,flag_OE)\n"); + vex_printf("dis_int_arith(ppc)(mulhw,flag_OE)\n"); return False; } - DIP("mulhw%s r%u,r%u,r%u\n", flag_rC ? "." : "", + DIP("mulhw%s r%u,r%u,r%u\n", flag_rC ? 
".":"", rD_addr, rA_addr, rB_addr); if (mode64) { /* rD[hi32] are undefined: setting them to sign of lo32 @@ -2720,12 +2533,12 @@ static Bool dis_int_arith ( UInt theInstr ) } break; - case 0x00B: // mulhwu (Multiply High Word Unsigned, PPC32 p489) + case 0x00B: // mulhwu (Multiply High Word Unsigned, PPC32 p489) if (flag_OE != 0) { - vex_printf("dis_int_arith(PPC32)(mulhwu,flag_OE)\n"); + vex_printf("dis_int_arith(ppc)(mulhwu,flag_OE)\n"); return False; } - DIP("mulhwu%s r%u,r%u,r%u\n", flag_rC ? "." : "", + DIP("mulhwu%s r%u,r%u,r%u\n", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); if (mode64) { /* rD[hi32] are undefined: setting them to sign of lo32 @@ -2744,7 +2557,7 @@ static Bool dis_int_arith ( UInt theInstr ) case 0x0EB: // mullw (Multiply Low Word, PPC32 p491) DIP("mullw%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); if (mode64) { /* rD[hi32] are undefined: setting them to sign of lo32 @@ -2753,7 +2566,7 @@ static Bool dis_int_arith ( UInt theInstr ) IRExpr *b = unop(Iop_64to32, mkexpr(rB) ); assign( rD, binop(Iop_MullS32, a, b) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_MULLW, + set_XER_OV( ty, PPCG_FLAG_OP_MULLW, mkexpr(rD), unop(Iop_32Uto64, a), unop(Iop_32Uto64, b) ); } @@ -2762,7 +2575,7 @@ static Bool dis_int_arith ( UInt theInstr ) binop(Iop_MullU32, mkexpr(rA), mkexpr(rB))) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_MULLW, + set_XER_OV( ty, PPCG_FLAG_OP_MULLW, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } } @@ -2770,47 +2583,47 @@ static Bool dis_int_arith ( UInt theInstr ) case 0x068: // neg (Negate, PPC32 p493) if (rB_addr != 0) { - vex_printf("dis_int_arith(PPC32)(neg,rB_addr)\n"); + vex_printf("dis_int_arith(ppc)(neg,rB_addr)\n"); return False; } DIP("neg%s%s r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? 
".":"", rD_addr, rA_addr); // rD = (~rA) + 1 assign( rD, binop( mkSzOp(ty, Iop_Add8), unop( mkSzOp(ty, Iop_Not8), mkexpr(rA) ), mkSzImm(ty, 1)) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_NEG, + set_XER_OV( ty, PPCG_FLAG_OP_NEG, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; case 0x028: // subf (Subtract From, PPC32 p537) DIP("subf%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); // rD = rB - rA assign( rD, binop( mkSzOp(ty, Iop_Sub8), mkexpr(rB), mkexpr(rA)) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_SUBF, + set_XER_OV( ty, PPCG_FLAG_OP_SUBF, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; case 0x008: // subfc (Subtract from Carrying, PPC32 p538) DIP("subfc%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); // rD = rB - rA assign( rD, binop( mkSzOp(ty, Iop_Sub8), mkexpr(rB), mkexpr(rA)) ); - set_XER_CA( ty, PPC32G_FLAG_OP_SUBFC, + set_XER_CA( ty, PPCG_FLAG_OP_SUBFC, mkexpr(rD), mkexpr(rA), mkexpr(rB), mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_SUBFC, + set_XER_OV( ty, PPCG_FLAG_OP_SUBFC, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; @@ -2818,7 +2631,7 @@ static Bool dis_int_arith ( UInt theInstr ) case 0x088: {// subfe (Subtract from Extended, PPC32 p539) IRTemp old_xer_ca = newTemp(ty); DIP("subfe%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? 
".":"", rD_addr, rA_addr, rB_addr); // rD = (log not)rA + rB + XER[CA] assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) ); @@ -2826,25 +2639,25 @@ static Bool dis_int_arith ( UInt theInstr ) unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)), binop( mkSzOp(ty, Iop_Add8), mkexpr(rB), mkexpr(old_xer_ca))) ); - set_XER_CA( ty, PPC32G_FLAG_OP_SUBFE, + set_XER_CA( ty, PPCG_FLAG_OP_SUBFE, mkexpr(rD), mkexpr(rA), mkexpr(rB), mkexpr(old_xer_ca) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_SUBFE, + set_XER_OV( ty, PPCG_FLAG_OP_SUBFE, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; } - case 0x0E8: { // subfme (Subtract from Minus One Extended, PPC32 p541) + case 0x0E8: { // subfme (Subtract from -1 Extended, PPC32 p541) IRTemp old_xer_ca = newTemp(ty); IRExpr *min_one; if (rB_addr != 0) { - vex_printf("dis_int_arith(PPC32)(subfme,rB_addr)\n"); + vex_printf("dis_int_arith(ppc)(subfme,rB_addr)\n"); return False; } DIP("subfme%s%s r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr); // rD = (log not)rA + (-1) + XER[CA] // => Just another form of subfe @@ -2854,24 +2667,24 @@ static Bool dis_int_arith ( UInt theInstr ) unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)), binop( mkSzOp(ty, Iop_Add8), min_one, mkexpr(old_xer_ca))) ); - set_XER_CA( ty, PPC32G_FLAG_OP_SUBFE, + set_XER_CA( ty, PPCG_FLAG_OP_SUBFE, mkexpr(rD), mkexpr(rA), min_one, mkexpr(old_xer_ca) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_SUBFE, + set_XER_OV( ty, PPCG_FLAG_OP_SUBFE, mkexpr(rD), mkexpr(rA), min_one ); } break; } - case 0x0C8: { // subfze (Subtract from Zero Extended, PPC32 p542) + case 0x0C8: { // subfze (Subtract from Zero Extended, PPC32 p542) IRTemp old_xer_ca = newTemp(ty); if (rB_addr != 0) { - vex_printf("dis_int_arith(PPC32)(subfze,rB_addr)\n"); + vex_printf("dis_int_arith(ppc)(subfze,rB_addr)\n"); return False; } DIP("subfze%s%s r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? 
".":"", rD_addr, rA_addr); // rD = (log not)rA + (0) + XER[CA] // => Just another form of subfe @@ -2879,11 +2692,11 @@ static Bool dis_int_arith ( UInt theInstr ) assign( rD, binop( mkSzOp(ty, Iop_Add8), unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)), mkexpr(old_xer_ca)) ); - set_XER_CA( ty, PPC32G_FLAG_OP_SUBFE, + set_XER_CA( ty, PPCG_FLAG_OP_SUBFE, mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0), mkexpr(old_xer_ca) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_SUBFE, + set_XER_OV( ty, PPCG_FLAG_OP_SUBFE, mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0) ); } break; @@ -2891,12 +2704,12 @@ static Bool dis_int_arith ( UInt theInstr ) /* 64bit Arithmetic */ - case 0x49: // mulhd (Multiply High Double Word, PPC64 p539) + case 0x49: // mulhd (Multiply High DWord, PPC64 p539) if (flag_OE != 0) { - vex_printf("dis_int_arith(PPC32)(mulhd,flagOE)\n"); + vex_printf("dis_int_arith(ppc)(mulhd,flagOE)\n"); return False; } - DIP("mulhd%s r%u,r%u,r%u\n", flag_rC ? "." : "", + DIP("mulhd%s r%u,r%u,r%u\n", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); assign( rD, unop(Iop_128HIto64, binop(Iop_MullS64, @@ -2904,36 +2717,36 @@ static Bool dis_int_arith ( UInt theInstr ) break; - case 0x9: // mulhdu (Multiply High Double Word Unsigned, PPC64 p540) + case 0x9: // mulhdu (Multiply High DWord Unsigned, PPC64 p540) if (flag_OE != 0) { - vex_printf("dis_int_arith(PPC32)(mulhdu,flagOE)\n"); + vex_printf("dis_int_arith(ppc)(mulhdu,flagOE)\n"); return False; } - DIP("mulhdu%s r%u,r%u,r%u\n", flag_rC ? "." : "", + DIP("mulhdu%s r%u,r%u,r%u\n", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); assign( rD, unop(Iop_128HIto64, binop(Iop_MullU64, mkexpr(rA), mkexpr(rB))) ); break; - case 0xE9: // mulld (Multiply Low Double Word, PPC64 p543) + case 0xE9: // mulld (Multiply Low DWord, PPC64 p543) DIP("mulld%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? 
".":"", rD_addr, rA_addr, rB_addr); assign( rD, binop(Iop_Mul64, mkexpr(rA), mkexpr(rB)) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_MULLW, + set_XER_OV( ty, PPCG_FLAG_OP_MULLW, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; - case 0x1E9: // divd (Divide Double Word, PPC64 p419) + case 0x1E9: // divd (Divide DWord, PPC64 p419) DIP("divd%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); assign( rD, binop(Iop_DivS64, mkexpr(rA), mkexpr(rB)) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_DIVW, + set_XER_OV( ty, PPCG_FLAG_OP_DIVW, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; @@ -2942,26 +2755,26 @@ static Bool dis_int_arith ( UInt theInstr ) => rD=undef, if(flag_rC) CR7=undef, if(flag_OE) XER_OV=1 => But _no_ exception raised. */ - case 0x1C9: // divdu (Divide Double Word Unsigned, PPC64 p420) + case 0x1C9: // divdu (Divide DWord Unsigned, PPC64 p420) DIP("divdu%s%s r%u,r%u,r%u\n", - flag_OE ? "o" : "", flag_rC ? "." : "", + flag_OE ? "o" : "", flag_rC ? ".":"", rD_addr, rA_addr, rB_addr); assign( rD, binop(Iop_DivU64, mkexpr(rA), mkexpr(rB)) ); if (flag_OE) { - set_XER_OV( ty, PPC32G_FLAG_OP_DIVWU, + set_XER_OV( ty, PPCG_FLAG_OP_DIVWU, mkexpr(rD), mkexpr(rA), mkexpr(rB) ); } break; /* Note: ditto comment divd, for (x / 0) */ default: - vex_printf("dis_int_arith(PPC32)(opc2)\n"); + vex_printf("dis_int_arith(ppc)(opc2)\n"); return False; } break; default: - vex_printf("dis_int_arith(PPC32)(opc1)\n"); + vex_printf("dis_int_arith(ppc)(opc1)\n"); return False; } @@ -2996,12 +2809,12 @@ static Bool dis_int_cmp ( UInt theInstr ) IRExpr *b; if (!mode64 && flag_L==1) { // L==1 invalid for 32 bit. 
- vex_printf("dis_int_cmp(PPC32)(flag_L)\n"); + vex_printf("dis_int_cmp(ppc)(flag_L)\n"); return False; } if (b22 != 0) { - vex_printf("dis_int_cmp(PPC32)(b22)\n"); + vex_printf("dis_int_cmp(ppc)(b22)\n"); return False; } @@ -3036,7 +2849,7 @@ static Bool dis_int_cmp ( UInt theInstr ) /* X Form */ case 0x1F: if (b0 != 0) { - vex_printf("dis_int_cmp(PPC32)(0x1F,b0)\n"); + vex_printf("dis_int_cmp(ppc)(0x1F,b0)\n"); return False; } b = getIReg(rB_addr); @@ -3067,13 +2880,13 @@ static Bool dis_int_cmp ( UInt theInstr ) break; default: - vex_printf("dis_int_cmp(PPC32)(opc2)\n"); + vex_printf("dis_int_cmp(ppc)(opc2)\n"); return False; } break; default: - vex_printf("dis_int_cmp(PPC32)(opc1)\n"); + vex_printf("dis_int_cmp(ppc)(opc1)\n"); return False; } @@ -3153,14 +2966,14 @@ static Bool dis_int_logic ( UInt theInstr ) switch (opc2) { case 0x01C: // and (AND, PPC32 p356) DIP("and%s r%u,r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); assign(rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS), mkexpr(rB))); break; case 0x03C: // andc (AND with Complement, PPC32 p357) DIP("andc%s r%u,r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); assign(rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS), unop( mkSzOp(ty, Iop_Not8), mkexpr(rB)))); @@ -3169,27 +2982,30 @@ static Bool dis_int_logic ( UInt theInstr ) case 0x01A: { // cntlzw (Count Leading Zeros Word, PPC32 p371) IRExpr* lo32; if (rB_addr!=0) { - vex_printf("dis_int_logic(PPC32)(cntlzw,rB_addr)\n"); + vex_printf("dis_int_logic(ppc)(cntlzw,rB_addr)\n"); return False; } DIP("cntlzw%s r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr); + flag_rC ? ".":"", rA_addr, rS_addr); // mode64: count in low word only lo32 = mode64 ? 
unop(Iop_64to32, mkexpr(rS)) : mkexpr(rS); // Iop_Clz32 undefined for arg==0, so deal with that case: irx = binop(Iop_CmpNE32, lo32, mkU32(0)); - assign(rA, IRExpr_Mux0X( unop(Iop_1Uto8, irx), - mkSzImm(ty, 32), - mkSzWiden32(ty, unop(Iop_Clz32, lo32), False) )); + assign(rA, mkSzWiden32(ty, + IRExpr_Mux0X( unop(Iop_1Uto8, irx), + mkU32(32), + unop(Iop_Clz32, lo32)), + False)); + // TODO: alternatively: assign(rA, verbose_Clz32(rS)); break; } case 0x11C: // eqv (Equivalent, PPC32 p396) DIP("eqv%s r%u,r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); assign( rA, unop( mkSzOp(ty, Iop_Not8), binop( mkSzOp(ty, Iop_Xor8), mkexpr(rS), mkexpr(rB))) ); @@ -3197,11 +3013,11 @@ static Bool dis_int_logic ( UInt theInstr ) case 0x3BA: // extsb (Extend Sign Byte, PPC32 p397 if (rB_addr!=0) { - vex_printf("dis_int_logic(PPC32)(extsb,rB_addr)\n"); + vex_printf("dis_int_logic(ppc)(extsb,rB_addr)\n"); return False; } DIP("extsb%s r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr); + flag_rC ? ".":"", rA_addr, rS_addr); if (mode64) assign( rA, unop(Iop_8Sto64, unop(Iop_64to8, mkexpr(rS))) ); else @@ -3210,20 +3026,22 @@ static Bool dis_int_logic ( UInt theInstr ) case 0x39A: // extsh (Extend Sign Half Word, PPC32 p398) if (rB_addr!=0) { - vex_printf("dis_int_logic(PPC32)(extsh,rB_addr)\n"); + vex_printf("dis_int_logic(ppc)(extsh,rB_addr)\n"); return False; } DIP("extsh%s r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr); + flag_rC ? ".":"", rA_addr, rS_addr); if (mode64) - assign( rA, unop(Iop_16Sto64, unop(Iop_64to16, mkexpr(rS))) ); + assign( rA, unop(Iop_16Sto64, + unop(Iop_64to16, mkexpr(rS))) ); else - assign( rA, unop(Iop_16Sto32, unop(Iop_32to16, mkexpr(rS))) ); + assign( rA, unop(Iop_16Sto32, + unop(Iop_32to16, mkexpr(rS))) ); break; case 0x1DC: // nand (NAND, PPC32 p492) DIP("nand%s r%u,r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + flag_rC ? 
".":"", rA_addr, rS_addr, rB_addr); assign( rA, unop( mkSzOp(ty, Iop_Not8), binop( mkSzOp(ty, Iop_And8), mkexpr(rS), mkexpr(rB))) ); @@ -3231,7 +3049,7 @@ static Bool dis_int_logic ( UInt theInstr ) case 0x07C: // nor (NOR, PPC32 p494) DIP("nor%s r%u,r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); assign( rA, unop( mkSzOp(ty, Iop_Not8), binop( mkSzOp(ty, Iop_Or8), mkexpr(rS), mkexpr(rB))) ); @@ -3243,7 +3061,7 @@ static Bool dis_int_logic ( UInt theInstr ) assign( rA, mkexpr(rS) ); } else { DIP("or%s r%u,r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS), mkexpr(rB)) ); } @@ -3251,14 +3069,14 @@ static Bool dis_int_logic ( UInt theInstr ) case 0x19C: // orc (OR with Complement, PPC32 p496) DIP("orc%s r%u,r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS), unop(mkSzOp(ty, Iop_Not8), mkexpr(rB)))); break; case 0x13C: // xor (XOR, PPC32 p549) DIP("xor%s r%u,r%u,r%u\n", - flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); assign( rA, binop( mkSzOp(ty, Iop_Xor8), mkexpr(rS), mkexpr(rB)) ); break; @@ -3267,34 +3085,36 @@ static Bool dis_int_logic ( UInt theInstr ) /* 64bit Integer Logical Instructions */ case 0x3DA: // extsw (Extend Sign Word, PPC64 p430) if (rB_addr!=0) { - vex_printf("dis_int_logic(PPC32)(extsw,rB_addr)\n"); + vex_printf("dis_int_logic(ppc)(extsw,rB_addr)\n"); return False; } - DIP("extsw%s r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr); + DIP("extsw%s r%u,r%u\n", flag_rC ? 
".":"", rA_addr, rS_addr); assign(rA, unop(Iop_32Sto64, unop(Iop_64to32, mkexpr(rS)))); break; - case 0x03A: // cntlzd (Count Leading Zeros DW, PPC64 p401) + case 0x03A: // cntlzd (Count Leading Zeros DWord, PPC64 p401) if (rB_addr!=0) { - vex_printf("dis_int_logic(PPC32)(cntlzd,rB_addr)\n"); + vex_printf("dis_int_logic(ppc)(cntlzd,rB_addr)\n"); return False; } - DIP("cntlzd%s r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr); + DIP("cntlzd%s r%u,r%u\n", + flag_rC ? ".":"", rA_addr, rS_addr); // Iop_Clz64 undefined for arg==0, so deal with that case: irx = binop(Iop_CmpNE64, mkexpr(rS), mkU64(0)); assign(rA, IRExpr_Mux0X( unop(Iop_1Uto8, irx), mkU64(64), unop(Iop_Clz64, mkexpr(rS)) )); + // TODO: alternatively: assign(rA, verbose_Clz64(rS)); break; default: - vex_printf("dis_int_logic(PPC32)(opc2)\n"); + vex_printf("dis_int_logic(ppc)(opc2)\n"); return False; } break; default: - vex_printf("dis_int_logic(PPC32)(opc1)\n"); + vex_printf("dis_int_logic(ppc)(opc1)\n"); return False; } @@ -3340,8 +3160,8 @@ static Bool dis_int_rot ( UInt theInstr ) switch (opc1) { case 0x14: { - // rlwimi (Rotate Left Word Immediate then Mask Insert, PPC32 p500) - DIP("rlwimi%s r%u,r%u,%d,%d,%d\n", flag_rC ? "." : "", + // rlwimi (Rotate Left Word Imm then Mask Insert, PPC32 p500) + DIP("rlwimi%s r%u,r%u,%d,%d,%d\n", flag_rC ? 
".":"", rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd); if (mode64) { // tmp32 = (ROTL(rS_Lo32, Imm) @@ -3349,7 +3169,8 @@ static Bool dis_int_rot ( UInt theInstr ) mask64 = MASK64(31-MaskEnd, 31-MaskBeg); r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) ); r = unop(Iop_32Uto64, r); - assign( rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))) ); + assign( rot, binop(Iop_Or64, r, + binop(Iop_Shl64, r, mkU8(32))) ); assign( rA, binop(Iop_Or64, binop(Iop_And64, mkexpr(rot), mkU64(mask64)), @@ -3368,44 +3189,46 @@ static Bool dis_int_rot ( UInt theInstr ) } case 0x15: { - // rlwinm (Rotate Left Word Immediate then AND with Mask, PPC32 p501) + // rlwinm (Rotate Left Word Imm then AND with Mask, PPC32 p501) vassert(MaskBeg < 32); vassert(MaskEnd < 32); vassert(sh_imm < 32); if (mode64) { mask64 = MASK64(31-MaskEnd, 31-MaskBeg); - DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? "." : "", + DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"", rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd); // tmp32 = (ROTL(rS_Lo32, Imm) // rA = ((tmp32 || tmp32) & mask64) r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) ); r = unop(Iop_32Uto64, r); - assign( rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))) ); + assign( rot, binop(Iop_Or64, r, + binop(Iop_Shl64, r, mkU8(32))) ); assign( rA, binop(Iop_And64, mkexpr(rot), mkU64(mask64)) ); } else { if (MaskBeg == 0 && sh_imm+MaskEnd == 31) { /* Special-case the ,n,0,31-n form as that is just n-bit - shift left (PPC32 p501) */ - DIP("slwi%s r%u,r%u,%d\n", flag_rC ? "." : "", + shift left, PPC32 p501 */ + DIP("slwi%s r%u,r%u,%d\n", flag_rC ? ".":"", rA_addr, rS_addr, sh_imm); assign( rA, binop(Iop_Shl32, mkexpr(rS), mkU8(sh_imm)) ); } else if (MaskEnd == 31 && sh_imm+MaskBeg == 32) { /* Special-case the ,32-n,n,31 form as that is just n-bit - unsigned shift right (PPC32 p501) */ - DIP("srwi%s r%u,r%u,%d\n", flag_rC ? "." : "", + unsigned shift right, PPC32 p501 */ + DIP("srwi%s r%u,r%u,%d\n", flag_rC ? 
".":"", rA_addr, rS_addr, sh_imm); assign( rA, binop(Iop_Shr32, mkexpr(rS), mkU8(MaskBeg)) ); } else { /* General case. */ mask32 = MASK32(31-MaskEnd, 31-MaskBeg); - DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? "." : "", + DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"", rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd); // rA = ROTL(rS, Imm) & mask - assign( rA, binop(Iop_And32, ROTL(mkexpr(rS), mkU8(sh_imm)), + assign( rA, binop(Iop_And32, + ROTL(mkexpr(rS), mkU8(sh_imm)), mkU32(mask32)) ); } } @@ -3414,7 +3237,7 @@ static Bool dis_int_rot ( UInt theInstr ) case 0x17: { // rlwnm (Rotate Left Word then AND with Mask, PPC32 p503 - DIP("rlwnm%s r%u,r%u,r%u,%d,%d\n", flag_rC ? "." : "", + DIP("rlwnm%s r%u,r%u,r%u,%d,%d\n", flag_rC ? ".":"", rA_addr, rS_addr, rB_addr, MaskBeg, MaskEnd); if (mode64) { mask64 = MASK64(31-MaskEnd, 31-MaskBeg); @@ -3433,7 +3256,8 @@ static Bool dis_int_rot ( UInt theInstr ) // rA = ROTL(rS, rB[0-4]) & mask // note, ROTL does the masking, so we don't do it here assign( rA, binop(Iop_And32, - ROTL(mkexpr(rS), unop(Iop_32to8, mkexpr(rB))), + ROTL(mkexpr(rS), + unop(Iop_32to8, mkexpr(rB))), mkU32(mask32)) ); } break; @@ -3453,15 +3277,15 @@ static Bool dis_int_rot ( UInt theInstr ) /* r = ROTL64( rS, rB_lo6) */ r = ROTL( mkexpr(rS), unop(Iop_64to8, mkexpr(rB)) ); - if (b1 == 0) { // rldcl (Rotate Left DW then Clear Left, PPC64 p555) - DIP("rldcl%s r%u,r%u,r%u,%u\n", flag_rC ? "." : "", + if (b1 == 0) { // rldcl (Rotl DWord, Clear Left, PPC64 p555) + DIP("rldcl%s r%u,r%u,r%u,%u\n", flag_rC ? ".":"", rA_addr, rS_addr, rB_addr, msk_imm); // note, ROTL does the masking, so we don't do it here mask64 = MASK64(0, 63-msk_imm); assign( rA, binop(Iop_And64, r, mkU64(mask64)) ); break; - } else { // rldcr (Rotate Left DW then Clear Right, PPC64 p556) - DIP("rldcr%s r%u,r%u,r%u,%u\n", flag_rC ? "." : "", + } else { // rldcr (Rotl DWord, Clear Right, PPC64 p556) + DIP("rldcr%s r%u,r%u,r%u,%u\n", flag_rC ? 
".":"", rA_addr, rS_addr, rB_addr, msk_imm); mask64 = MASK64(63-msk_imm, 63); assign( rA, binop(Iop_And64, r, mkU64(mask64)) ); @@ -3469,8 +3293,8 @@ static Bool dis_int_rot ( UInt theInstr ) } break; } - case 0x2: // rldic (Rotate Left DW Imm then Clear, PPC64 p557) - DIP("rldic%s r%u,r%u,%u,%u\n", flag_rC ? "." : "", + case 0x2: // rldic (Rotl DWord Imm, Clear, PPC64 p557) + DIP("rldic%s r%u,r%u,%u,%u\n", flag_rC ? ".":"", rA_addr, rS_addr, sh_imm, msk_imm); r = ROTL(mkexpr(rS), mkU8(sh_imm)); mask64 = MASK64(sh_imm, 63-msk_imm); @@ -3484,45 +3308,48 @@ static Bool dis_int_rot ( UInt theInstr ) assign(rA, r & m); */ - case 0x0: // rldicl (Rotate Left DW Imm then Clear Left, PPC64 p558) - DIP("rldicl%s r%u,r%u,%u,%u\n", flag_rC ? "." : "", + case 0x0: // rldicl (Rotl DWord Imm, Clear Left, PPC64 p558) + DIP("rldicl%s r%u,r%u,%u,%u\n", flag_rC ? ".":"", rA_addr, rS_addr, sh_imm, msk_imm); r = ROTL(mkexpr(rS), mkU8(sh_imm)); mask64 = MASK64(0, 63-msk_imm); assign( rA, binop(Iop_And64, r, mkU64(mask64)) ); break; - // later: deal with special case: (msk_imm + sh_imm == 63) => SHR(63 - sh_imm) + /* later: deal with special case: + (msk_imm + sh_imm == 63) => SHR(63 - sh_imm) */ - case 0x1: // rldicr (Rotate Left DW Imm then Clear Right, PPC64 p559) - DIP("rldicr%s r%u,r%u,%u,%u\n", flag_rC ? "." : "", + case 0x1: // rldicr (Rotl DWord Imm, Clear Right, PPC64 p559) + DIP("rldicr%s r%u,r%u,%u,%u\n", flag_rC ? ".":"", rA_addr, rS_addr, sh_imm, msk_imm); r = ROTL(mkexpr(rS), mkU8(sh_imm)); mask64 = MASK64(63-msk_imm, 63); assign( rA, binop(Iop_And64, r, mkU64(mask64)) ); break; - // later: deal with special case: (msk_imm == sh_imm) => SHL(sh_imm) + /* later: deal with special case: + (msk_imm == sh_imm) => SHL(sh_imm) */ - case 0x3: { // rldimi (Rotate Left DW Imm then Mask Insert, PPC64 p560) + case 0x3: { // rldimi (Rotl DWord Imm, Mask Insert, PPC64 p560) IRTemp rA_orig = newTemp(ty); - DIP("rldimi%s r%u,r%u,%u,%u\n", flag_rC ? "." 
: "", + DIP("rldimi%s r%u,r%u,%u,%u\n", flag_rC ? ".":"", rA_addr, rS_addr, sh_imm, msk_imm); r = ROTL(mkexpr(rS), mkU8(sh_imm)); mask64 = MASK64(sh_imm, 63-msk_imm); assign( rA_orig, getIReg(rA_addr) ); assign( rA, binop(Iop_Or64, binop(Iop_And64, mkU64(mask64), r), - binop(Iop_And64, mkU64(~mask64), mkexpr(rA_orig))) ); + binop(Iop_And64, mkU64(~mask64), + mkexpr(rA_orig))) ); break; } default: - vex_printf("dis_int_rot(PPC32)(opc2)\n"); + vex_printf("dis_int_rot(ppc)(opc2)\n"); return False; } break; } default: - vex_printf("dis_int_rot(PPC32)(opc1)\n"); + vex_printf("dis_int_rot(ppc)(opc1)\n"); return False; } @@ -3573,9 +3400,9 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rD_addr, mkSzWiden8(ty, val, False) ); break; - case 0x23: // lbzu (Load B & Zero with Update, PPC32 p434) + case 0x23: // lbzu (Load B & Zero, Update, PPC32 p434) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(lbzu,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lbzu,rA_addr|rD_addr)\n"); return False; } DIP("lbzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr); @@ -3584,15 +3411,15 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rA_addr, mkexpr(EA) ); break; - case 0x2A: // lha (Load HW Algebraic, PPC32 p445) + case 0x2A: // lha (Load HW Alg, PPC32 p445) DIP("lha r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr); val = loadBE(Ity_I16, mkexpr(EA)); putIReg( rD_addr, mkSzWiden16(ty, val, True) ); break; - case 0x2B: // lhau (Load HW Algebraic with Update, PPC32 p446) + case 0x2B: // lhau (Load HW Alg, Update, PPC32 p446) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(lhau,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lhau,rA_addr|rD_addr)\n"); return False; } DIP("lhau r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr); @@ -3607,9 +3434,9 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rD_addr, mkSzWiden16(ty, val, False) ); break; - case 0x29: // lhzu (Load HW & and Zero with Update, PPC32 p451) + case 
0x29: // lhzu (Load HW & and Zero, Update, PPC32 p451) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(lhzu,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lhzu,rA_addr|rD_addr)\n"); return False; } DIP("lhzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr); @@ -3624,9 +3451,9 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rD_addr, mkSzWiden32(ty, val, False) ); break; - case 0x21: // lwzu (Load W & Zero with Update, PPC32 p461)) + case 0x21: // lwzu (Load W & Zero, Update, PPC32 p461)) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(lwzu,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lwzu,rA_addr|rD_addr)\n"); return False; } DIP("lwzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr); @@ -3638,15 +3465,15 @@ static Bool dis_int_load ( UInt theInstr ) /* X Form */ case 0x1F: if (b0 != 0) { - vex_printf("dis_int_load(PPC32)(Ox1F,b0)\n"); + vex_printf("dis_int_load(ppc)(Ox1F,b0)\n"); return False; } switch (opc2) { - case 0x077: // lbzux (Load B & Zero with Update Indexed, PPC32 p435) + case 0x077: // lbzux (Load B & Zero, Update Indexed, PPC32 p435) DIP("lbzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(lwzux,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lwzux,rA_addr|rD_addr)\n"); return False; } val = loadBE(Ity_I8, mkexpr(EA)); @@ -3654,15 +3481,15 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rA_addr, mkexpr(EA) ); break; - case 0x057: // lbzx (Load B & Zero Indexed, PPC32 p436) + case 0x057: // lbzx (Load B & Zero, Indexed, PPC32 p436) DIP("lbzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); val = loadBE(Ity_I8, mkexpr(EA)); putIReg( rD_addr, mkSzWiden8(ty, val, False) ); break; - case 0x177: // lhaux (Load HW Algebraic with Update Indexed, PPC32 p447) + case 0x177: // lhaux (Load HW Alg, Update Indexed, PPC32 p447) if (rA_addr == 0 || rA_addr == rD_addr) { - 
vex_printf("dis_int_load(PPC32)(lhaux,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lhaux,rA_addr|rD_addr)\n"); return False; } DIP("lhaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); @@ -3671,15 +3498,15 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rA_addr, mkexpr(EA) ); break; - case 0x157: // lhax (Load HW Algebraic Indexed, PPC32 p448) + case 0x157: // lhax (Load HW Alg, Indexed, PPC32 p448) DIP("lhax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); val = loadBE(Ity_I16, mkexpr(EA)); putIReg( rD_addr, mkSzWiden16(ty, val, True) ); break; - case 0x137: // lhzux (Load HW & Zero with Update Indexed, PPC32 p452) + case 0x137: // lhzux (Load HW & Zero, Update Indexed, PPC32 p452) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(lhzux,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lhzux,rA_addr|rD_addr)\n"); return False; } DIP("lhzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); @@ -3688,15 +3515,15 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rA_addr, mkexpr(EA) ); break; - case 0x117: // lhzx (Load HW & Zero Indexed, PPC32 p453) + case 0x117: // lhzx (Load HW & Zero, Indexed, PPC32 p453) DIP("lhzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); val = loadBE(Ity_I16, mkexpr(EA)); putIReg( rD_addr, mkSzWiden16(ty, val, False) ); break; - case 0x037: // lwzux (Load W & Zero with Update Indexed, PPC32 p462) + case 0x037: // lwzux (Load W & Zero, Update Indexed, PPC32 p462) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(lwzux,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lwzux,rA_addr|rD_addr)\n"); return False; } DIP("lwzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); @@ -3705,7 +3532,7 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rA_addr, mkexpr(EA) ); break; - case 0x017: // lwzx (Load W & Zero Indexed, PPC32 p463) + case 0x017: // lwzx (Load W & Zero, Indexed, PPC32 p463) DIP("lwzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); val = loadBE(Ity_I32, mkexpr(EA)); putIReg( 
rD_addr, mkSzWiden32(ty, val, False) ); @@ -3713,9 +3540,9 @@ static Bool dis_int_load ( UInt theInstr ) /* 64bit Loads */ - case 0x035: // ldux (Load DW with Update Indexed, PPC64 p475) + case 0x035: // ldux (Load DWord, Update Indexed, PPC64 p475) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(ldux,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(ldux,rA_addr|rD_addr)\n"); return False; } DIP("ldux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); @@ -3723,28 +3550,30 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rA_addr, mkexpr(EA) ); break; - case 0x015: // ldx (Load DW Indexed, PPC64 p476) + case 0x015: // ldx (Load DWord, Indexed, PPC64 p476) DIP("ldx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) ); break; - case 0x175: // lwaux (Load W Algebraic with Update Indexed, PPC64 p501) + case 0x175: // lwaux (Load W Alg, Update Indexed, PPC64 p501) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(lwaux,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(lwaux,rA_addr|rD_addr)\n"); return False; } DIP("lwaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); - putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) ); + putIReg( rD_addr, + unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) ); putIReg( rA_addr, mkexpr(EA) ); break; - case 0x155: // lwax (Load W Algebraic Indexed, PPC64 p502) + case 0x155: // lwax (Load W Alg, Indexed, PPC64 p502) DIP("lwax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); - putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) ); + putIReg( rD_addr, + unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) ); break; default: - vex_printf("dis_int_load(PPC32)(opc2)\n"); + vex_printf("dis_int_load(ppc)(opc2)\n"); return False; } break; @@ -3752,14 +3581,14 @@ static Bool dis_int_load ( UInt theInstr ) /* DS Form - 64bit Loads */ case 0x3A: switch (b1<<1 | b0) { - case 0x0: // ld (Load Double Word, PPC64 p472) + case 0x0: // ld (Load 
DWord, PPC64 p472) DIP("ld r%u,%d(r%u)\n", rD_addr, simm16, rA_addr); putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) ); break; - case 0x1: // ldu (Load Double Word with Update, PPC64 p474) + case 0x1: // ldu (Load DWord, Update, PPC64 p474) if (rA_addr == 0 || rA_addr == rD_addr) { - vex_printf("dis_int_load(PPC32)(ldu,rA_addr|rD_addr)\n"); + vex_printf("dis_int_load(ppc)(ldu,rA_addr|rD_addr)\n"); return False; } DIP("ldu r%u,%d(r%u)\n", rD_addr, simm16, rA_addr); @@ -3768,19 +3597,20 @@ static Bool dis_int_load ( UInt theInstr ) putIReg( rA_addr, mkexpr(EA) ); break; - case 0x2: // lwa (Load Word Algebraic, PPC64 p499) + case 0x2: // lwa (Load Word Alg, PPC64 p499) DIP("lwa r%u,%d(r%u)\n", rD_addr, simm16, rA_addr); - putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) ); + putIReg( rD_addr, + unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) ); break; default: - vex_printf("dis_int_load(PPC32)(0x3A, opc2)\n"); + vex_printf("dis_int_load(ppc)(0x3A, opc2)\n"); return False; } break; default: - vex_printf("dis_int_load(PPC32)(opc1)\n"); + vex_printf("dis_int_load(ppc)(opc1)\n"); return False; } return True; @@ -3804,7 +3634,6 @@ static Bool dis_int_store ( UInt theInstr ) UChar b0 = ifieldBIT0(theInstr); Int simm16 = extend_s_16to32(uimm16); - IRType ty = mode64 ? 
Ity_I64 : Ity_I32; IRTemp rS = newTemp(ty); IRTemp rB = newTemp(ty); @@ -3830,9 +3659,9 @@ static Bool dis_int_store ( UInt theInstr ) storeBE( mkexpr(EA), mkSzNarrow8(ty, mkexpr(rS)) ); break; - case 0x27: // stbu (Store B with Update, PPC32 p510) + case 0x27: // stbu (Store B, Update, PPC32 p510) if (rA_addr == 0 ) { - vex_printf("dis_int_store(PPC32)(stbu,rA_addr)\n"); + vex_printf("dis_int_store(ppc)(stbu,rA_addr)\n"); return False; } DIP("stbu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr); @@ -3845,9 +3674,9 @@ static Bool dis_int_store ( UInt theInstr ) storeBE( mkexpr(EA), mkSzNarrow16(ty, mkexpr(rS)) ); break; - case 0x2D: // sthu (Store HW with Update, PPC32 p524) + case 0x2D: // sthu (Store HW, Update, PPC32 p524) if (rA_addr == 0) { - vex_printf("dis_int_store(PPC32)(sthu,rA_addr)\n"); + vex_printf("dis_int_store(ppc)(sthu,rA_addr)\n"); return False; } DIP("sthu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr); @@ -3860,9 +3689,9 @@ static Bool dis_int_store ( UInt theInstr ) storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) ); break; - case 0x25: // stwu (Store W with Update, PPC32 p534) + case 0x25: // stwu (Store W, Update, PPC32 p534) if (rA_addr == 0) { - vex_printf("dis_int_store(PPC32)(stwu,rA_addr)\n"); + vex_printf("dis_int_store(ppc)(stwu,rA_addr)\n"); return False; } DIP("stwu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr); @@ -3873,14 +3702,14 @@ static Bool dis_int_store ( UInt theInstr ) /* X Form : all these use EA_indexed */ case 0x1F: if (b0 != 0) { - vex_printf("dis_int_store(PPC32)(0x1F,b0)\n"); + vex_printf("dis_int_store(ppc)(0x1F,b0)\n"); return False; } switch (opc2) { - case 0x0F7: // stbux (Store B with Update Indexed, PPC32 p511) + case 0x0F7: // stbux (Store B, Update Indexed, PPC32 p511) if (rA_addr == 0) { - vex_printf("dis_int_store(PPC32)(stbux,rA_addr)\n"); + vex_printf("dis_int_store(ppc)(stbux,rA_addr)\n"); return False; } DIP("stbux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr); @@ -3893,9 +3722,9 @@ static Bool dis_int_store ( UInt 
theInstr ) storeBE( mkexpr(EA), mkSzNarrow8(ty, mkexpr(rS)) ); break; - case 0x1B7: // sthux (Store HW with Update Indexed, PPC32 p525) + case 0x1B7: // sthux (Store HW, Update Indexed, PPC32 p525) if (rA_addr == 0) { - vex_printf("dis_int_store(PPC32)(sthux,rA_addr)\n"); + vex_printf("dis_int_store(ppc)(sthux,rA_addr)\n"); return False; } DIP("sthux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr); @@ -3908,9 +3737,9 @@ static Bool dis_int_store ( UInt theInstr ) storeBE( mkexpr(EA), mkSzNarrow16(ty, mkexpr(rS)) ); break; - case 0x0B7: // stwux (Store W with Update Indexed, PPC32 p535) + case 0x0B7: // stwux (Store W, Update Indexed, PPC32 p535) if (rA_addr == 0) { - vex_printf("dis_int_store(PPC32)(stwux,rA_addr)\n"); + vex_printf("dis_int_store(ppc)(stwux,rA_addr)\n"); return False; } DIP("stwux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr); @@ -3925,9 +3754,9 @@ static Bool dis_int_store ( UInt theInstr ) /* 64bit Stores */ - case 0x0B5: // stdux (Store DW with Update Indexed, PPC64 p584) + case 0x0B5: // stdux (Store DWord, Update Indexed, PPC64 p584) if (rA_addr == 0) { - vex_printf("dis_int_store(PPC32)(stdux,rA_addr)\n"); + vex_printf("dis_int_store(ppc)(stdux,rA_addr)\n"); return False; } DIP("stdux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr); @@ -3935,13 +3764,13 @@ static Bool dis_int_store ( UInt theInstr ) storeBE( mkexpr(EA), mkexpr(rS) ); break; - case 0x095: // stdx (Store DW Indexed, PPC64 p585) + case 0x095: // stdx (Store DWord Indexed, PPC64 p585) DIP("stdx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr); storeBE( mkexpr(EA), mkexpr(rS) ); break; default: - vex_printf("dis_int_store(PPC32)(opc2)\n"); + vex_printf("dis_int_store(ppc)(opc2)\n"); return False; } break; @@ -3949,25 +3778,25 @@ static Bool dis_int_store ( UInt theInstr ) /* DS Form - 64bit Stores */ case 0x3E: switch (b1<<1 | b0) { - case 0x0: // std (Store Double Word, PPC64 p580) + case 0x0: // std (Store DWord, PPC64 p580) DIP("std r%u,%d(r%u)\n", rS_addr, simm16, rA_addr); storeBE( mkexpr(EA), 
mkexpr(rS) ); break; - case 0x1: // stdu (Store Double Word with Update, PPC64 p583) + case 0x1: // stdu (Store DWord, Update, PPC64 p583) DIP("stdu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr); putIReg( rA_addr, mkexpr(EA) ); storeBE( mkexpr(EA), mkexpr(rS) ); break; default: - vex_printf("dis_int_load(PPC32)(0x3A, opc2)\n"); + vex_printf("dis_int_load(ppc)(0x3A, opc2)\n"); return False; } break; default: - vex_printf("dis_int_store(PPC32)(opc1)\n"); + vex_printf("dis_int_store(ppc)(opc1)\n"); return False; } return True; @@ -3999,13 +3828,14 @@ static Bool dis_int_ldst_mult ( UInt theInstr ) switch (opc1) { case 0x2E: // lmw (Load Multiple Word, PPC32 p454) if (rA_addr >= rD_addr) { - vex_printf("dis_int_ldst_mult(PPC32)(lmw,rA_addr)\n"); + vex_printf("dis_int_ldst_mult(ppc)(lmw,rA_addr)\n"); return False; } DIP("lmw r%u,%d(r%u)\n", rD_addr, simm16, rA_addr); for (r = rD_addr; r <= 31; r++) { irx_addr = binop(Iop_Add32, mkexpr(EA), mkU32(ea_off)); - putIReg( r, mkSzWiden32(ty, loadBE(Ity_I32, irx_addr ), False) ); + putIReg( r, mkSzWiden32(ty, loadBE(Ity_I32, irx_addr ), + False) ); ea_off += 4; } break; @@ -4020,7 +3850,7 @@ static Bool dis_int_ldst_mult ( UInt theInstr ) break; default: - vex_printf("dis_int_ldst_mult(PPC32)(opc1)\n"); + vex_printf("dis_int_ldst_mult(ppc)(opc1)\n"); return False; } return True; @@ -4128,7 +3958,7 @@ static Bool dis_int_ldst_str ( UInt theInstr, /*OUT*/Bool* stopHere ) *stopHere = False; if (opc1 != 0x1F || b0 != 0) { - vex_printf("dis_int_ldst_str(PPC32)(opc1)\n"); + vex_printf("dis_int_ldst_str(ppc)(opc1)\n"); return False; } @@ -4199,7 +4029,7 @@ static Bool dis_int_ldst_str ( UInt theInstr, /*OUT*/Bool* stopHere ) return True; default: - vex_printf("dis_int_ldst_str(PPC32)(opc2)\n"); + vex_printf("dis_int_ldst_str(ppc)(opc2)\n"); return False; } return True; @@ -4264,7 +4094,8 @@ static IRExpr* /* :: Ity_I32 */ branch_cond_ok( UInt BO, UInt BI ) /* We have to invert the sense of the information held in cr_bi. 
For that we need to know which bit getCRbit_anywhere regards as significant. */ - assign( res, binop(Iop_Xor32, mkexpr(cr_bi), mkU32(1< 31 ? 31 : amt ) XER.CA = amt > 31 ? sign-of-rS : (computation as per srawi) */ - assign( sh_amt, binop(Iop_And32, mkU32(0x3F), mkexpr(rB_lo32)) ); + assign( sh_amt, binop(Iop_And32, mkU32(0x3F), + mkexpr(rB_lo32)) ); assign( outofrange, unop( Iop_1Uto8, - binop(Iop_CmpLT32U, mkU32(31), mkexpr(sh_amt)) )); + binop(Iop_CmpLT32U, mkU32(31), + mkexpr(sh_amt)) )); e_tmp = binop( Iop_Sar32, mkexpr(rS_lo32), unop( Iop_32to8, @@ -4827,7 +4661,7 @@ static Bool dis_int_shift ( UInt theInstr ) mkU32(31)) ) ); assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */True) ); - set_XER_CA( ty, PPC32G_FLAG_OP_SRAW, + set_XER_CA( ty, PPCG_FLAG_OP_SRAW, mkexpr(rA), mkSzWiden32(ty, mkexpr(rS_lo32), True), mkSzWiden32(ty, mkexpr(sh_amt), True ), @@ -4835,27 +4669,29 @@ static Bool dis_int_shift ( UInt theInstr ) break; } - case 0x338: // srawi (Shift Right Algebraic Word Immediate, PPC32 p507) - DIP("srawi%s r%u,r%u,%d\n", flag_rC ? "." : "", + case 0x338: // srawi (Shift Right Alg Word Immediate, PPC32 p507) + DIP("srawi%s r%u,r%u,%d\n", flag_rC ? ".":"", rA_addr, rS_addr, sh_imm); vassert(sh_imm < 32); if (mode64) { assign( rA, binop(Iop_Sar64, - binop(Iop_Shl64, getIReg(rS_addr), mkU8(32)), + binop(Iop_Shl64, getIReg(rS_addr), + mkU8(32)), mkU8(32 + sh_imm)) ); } else { - assign( rA, binop(Iop_Sar32, mkexpr(rS_lo32), mkU8(sh_imm)) ); + assign( rA, binop(Iop_Sar32, mkexpr(rS_lo32), + mkU8(sh_imm)) ); } - set_XER_CA( ty, PPC32G_FLAG_OP_SRAWI, + set_XER_CA( ty, PPCG_FLAG_OP_SRAWI, mkexpr(rA), - mkSzWiden32(ty, mkexpr(rS_lo32), /* Signed */True), + mkSzWiden32(ty, mkexpr(rS_lo32), /* Syned */True), mkSzImm(ty, sh_imm), - mkSzWiden32(ty, getXER_CA32(), /* Signed */False) ); + mkSzWiden32(ty, getXER_CA32(), /* Syned */False) ); break; case 0x218: // srw (Shift Right Word, PPC32 p508) - DIP("srw%s r%u,r%u,r%u\n", flag_rC ? "." 
: "", + DIP("srw%s r%u,r%u,r%u\n", flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); /* rA = rS >>u rB */ /* ppc32 semantics are: @@ -4869,18 +4705,21 @@ static Bool dis_int_shift ( UInt theInstr ) binop( Iop_Shr32, mkexpr(rS_lo32), unop( Iop_32to8, - binop(Iop_And32, mkexpr(rB_lo32), mkU32(31)))), + binop(Iop_And32, mkexpr(rB_lo32), + mkU32(31)))), unop( Iop_Not32, binop( Iop_Sar32, - binop(Iop_Shl32, mkexpr(rB_lo32), mkU8(26)), + binop(Iop_Shl32, mkexpr(rB_lo32), + mkU8(26)), mkU8(31)))); assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */False) ); break; /* 64bit Shifts */ - case 0x01B: // sld (Shift Left DW, PPC64 p568) - DIP("sld%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + case 0x01B: // sld (Shift Left DWord, PPC64 p568) + DIP("sld%s r%u,r%u,r%u\n", + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); /* rA = rS << rB */ /* ppc64 semantics are: slw(x,y) = (x << (y & 63)) -- primary result @@ -4900,9 +4739,10 @@ static Bool dis_int_shift ( UInt theInstr ) mkU8(63)))) ); break; - case 0x31A: { // srad (Shift Right Algebraic DW, PPC64 p570) + case 0x31A: { // srad (Shift Right Alg DWord, PPC64 p570) IRTemp sh_amt = newTemp(Ity_I64); - DIP("srad%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + DIP("srad%s r%u,r%u,r%u\n", + flag_rC ? ".":"", rA_addr, rS_addr, rB_addr); /* amt = rB & 127 rA = Sar64( rS, amt > 63 ? 63 : amt ) XER.CA = amt > 63 ? 
sign-of-rS : (computation as per srawi) @@ -4910,7 +4750,8 @@ static Bool dis_int_shift ( UInt theInstr ) assign( sh_amt, binop(Iop_And64, mkU64(0x7F), mkexpr(rB)) ); assign( outofrange, unop( Iop_1Uto8, - binop(Iop_CmpLT64U, mkU64(63), mkexpr(sh_amt)) )); + binop(Iop_CmpLT64U, mkU64(63), + mkexpr(sh_amt)) )); assign( rA, binop( Iop_Sar64, mkexpr(rS), @@ -4919,27 +4760,29 @@ static Bool dis_int_shift ( UInt theInstr ) mkexpr(sh_amt), mkU64(63)) )) ); - set_XER_CA( ty, PPC32G_FLAG_OP_SRAD, + set_XER_CA( ty, PPCG_FLAG_OP_SRAD, mkexpr(rA), mkexpr(rS), mkexpr(sh_amt), - mkSzWiden32(ty, getXER_CA32(), /* Signed */False) ); + mkSzWiden32(ty, getXER_CA32(), /* Syned */False) ); break; } - case 0x33A: case 0x33B: // sradi (Shift Right Algebraic DW Imm, PPC64 p571) + case 0x33A: case 0x33B: // sradi (Shr Alg DWord Imm, PPC64 p571) sh_imm |= b1<<5; vassert(sh_imm < 64); - DIP("sradi%s r%u,r%u,%u\n", flag_rC ? "." : "", rA_addr, rS_addr, sh_imm); + DIP("sradi%s r%u,r%u,%u\n", + flag_rC ? ".":"", rA_addr, rS_addr, sh_imm); assign( rA, binop(Iop_Sar64, getIReg(rS_addr), mkU8(sh_imm)) ); - set_XER_CA( ty, PPC32G_FLAG_OP_SRADI, + set_XER_CA( ty, PPCG_FLAG_OP_SRADI, mkexpr(rA), getIReg(rS_addr), mkU64(sh_imm), - mkSzWiden32(ty, getXER_CA32(), /* Signed */False) ); + mkSzWiden32(ty, getXER_CA32(), /* Syned */False) ); break; - case 0x21B: // srd (Shift Right DW, PPC64 p574) - DIP("srd%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr); + case 0x21B: // srd (Shift Right DWord, PPC64 p574) + DIP("srd%s r%u,r%u,r%u\n", + flag_rC ? 
".":"", rA_addr, rS_addr, rB_addr); /* rA = rS >>u rB */ /* ppc semantics are: srw(x,y) = (x >>u (y & 63)) -- primary result @@ -4960,11 +4803,11 @@ static Bool dis_int_shift ( UInt theInstr ) break; default: - vex_printf("dis_int_shift(PPC32)(opc2)\n"); + vex_printf("dis_int_shift(ppc)(opc2)\n"); return False; } } else { - vex_printf("dis_int_shift(PPC32)(opc1)\n"); + vex_printf("dis_int_shift(ppc)(opc1)\n"); return False; } @@ -5015,7 +4858,7 @@ static Bool dis_int_ldst_rev ( UInt theInstr ) IRTemp w2 = newTemp(Ity_I32); if (opc1 != 0x1F || b0 != 0) { - vex_printf("dis_int_ldst_rev(PPC32)(opc1|b0)\n"); + vex_printf("dis_int_ldst_rev(ppc)(opc1|b0)\n"); return False; } @@ -5038,7 +4881,8 @@ static Bool dis_int_ldst_rev ( UInt theInstr ) DIP("lwbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr); assign( w1, loadBE(Ity_I32, mkexpr(EA)) ); assign( w2, gen_byterev32(w1) ); - putIReg( rD_addr, mkSzWiden32(ty, mkexpr(w2), /* Signed */False) ); + putIReg( rD_addr, mkSzWiden32(ty, mkexpr(w2), + /* Signed */False) ); break; //zz case 0x396: // sthbrx (Store Half Word Byte-Reverse Indexed, PPC32 p523) @@ -5057,14 +4901,14 @@ static Bool dis_int_ldst_rev ( UInt theInstr ) //zz storeBE( mkexpr(EA), getIReg(tmp16) ); //zz break; - case 0x296: // stwbrx (Store Word Byte-Reverse Indexed, PPC32 p531) + case 0x296: // stwbrx (Store Word Byte-Reverse Indxd, PPC32 p531) DIP("stwbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr); assign( w1, mkSzNarrow32(ty, getIReg(rS_addr)) ); storeBE( mkexpr(EA), gen_byterev32(w1) ); break; default: - vex_printf("dis_int_ldst_rev(PPC32)(opc2)\n"); + vex_printf("dis_int_ldst_rev(ppc)(opc2)\n"); return False; } return True; @@ -5106,15 +4950,15 @@ static Bool dis_proc_ctl ( UInt theInstr ) TBR = ((TBR & 31) << 5) | ((TBR >> 5) & 31); if (opc1 != 0x1F || b0 != 0) { - vex_printf("dis_proc_ctl(PPC32)(opc1|b0)\n"); + vex_printf("dis_proc_ctl(ppc)(opc1|b0)\n"); return False; } switch (opc2) { /* X-Form */ - case 0x200: { // mcrxr (Move to Condition Register from 
XER, PPC32 p466) + case 0x200: { // mcrxr (Move to Cond Register from XER, PPC32 p466) if (b21to22 != 0 || b11to20 != 0) { - vex_printf("dis_proc_ctl(PPC32)(mcrxr,b21to22|b11to20)\n"); + vex_printf("dis_proc_ctl(ppc)(mcrxr,b21to22|b11to20)\n"); return False; } DIP("mcrxr crf%d\n", crfD); @@ -5130,9 +4974,9 @@ static Bool dis_proc_ctl ( UInt theInstr ) break; } - case 0x013: // mfcr (Move from Condition Register, PPC32 p467) + case 0x013: // mfcr (Move from Cond Register, PPC32 p467) if (b11to20 != 0) { - vex_printf("dis_proc_ctl(PPC32)(mfcr,b11to20)\n"); + vex_printf("dis_proc_ctl(ppc)(mfcr,b11to20)\n"); return False; } DIP("mfcr r%u\n", rD_addr); @@ -5164,7 +5008,7 @@ static Bool dis_proc_ctl ( UInt theInstr ) break; default: - vex_printf("dis_proc_ctl(PPC32)(mfspr,SPR)(0x%x)\n", SPR); + vex_printf("dis_proc_ctl(ppc)(mfspr,SPR)(0x%x)\n", SPR); return False; } break; @@ -5172,13 +5016,11 @@ static Bool dis_proc_ctl ( UInt theInstr ) case 0x173: { // mftb (Move from Time Base, PPC32 p475) IRTemp val = newTemp(Ity_I64); IRExpr** args = mkIRExprVec_0(); - IRDirty* d = unsafeIRDirty_1_N ( - val, - 0/*regparms*/, - "ppc32g_dirtyhelper_MFTB", - &ppc32g_dirtyhelper_MFTB, - args - ); + IRDirty* d = unsafeIRDirty_1_N( val, + 0/*regparms*/, + "ppcg_dirtyhelper_MFTB", + &ppcg_dirtyhelper_MFTB, + args ); /* execute the dirty call, dumping the result in val. 
*/ stmt( IRStmt_Dirty(d) ); @@ -5200,11 +5042,11 @@ static Bool dis_proc_ctl ( UInt theInstr ) break; } - case 0x090: { // mtcrf (Move to Condition Register Fields, PPC32 p477) + case 0x090: { // mtcrf (Move to Cond Register Fields, PPC32 p477) Int cr; UChar shft; if (b11 != 0 || b20 != 0) { - vex_printf("dis_proc_ctl(PPC32)(mtcrf,b11|b20)\n"); + vex_printf("dis_proc_ctl(ppc)(mtcrf,b11|b20)\n"); return False; } DIP("mtcrf 0x%x,r%u\n", CRM, rS_addr); @@ -5242,13 +5084,13 @@ static Bool dis_proc_ctl ( UInt theInstr ) break; default: - vex_printf("dis_proc_ctl(PPC32)(mtspr,SPR)(%u)\n", SPR); + vex_printf("dis_proc_ctl(ppc)(mtspr,SPR)(%u)\n", SPR); return False; } break; default: - vex_printf("dis_proc_ctl(PPC32)(opc2)\n"); + vex_printf("dis_proc_ctl(ppc)(opc2)\n"); return False; } return True; @@ -5269,12 +5111,12 @@ static Bool dis_cache_manage ( UInt theInstr, UChar rB_addr = ifieldRegB(theInstr); UInt opc2 = ifieldOPClo10(theInstr); UChar b0 = ifieldBIT0(theInstr); - UInt lineszB = guest_archinfo->ppc32_cache_line_szB; + UInt lineszB = guest_archinfo->ppc_cache_line_szB; IRType ty = mode64 ? 
Ity_I64 : Ity_I32; if (opc1 != 0x1F || b21to25 != 0 || b0 != 0) { - vex_printf("dis_cache_manage(PPC32)(opc1|b21to25|b0)\n"); + vex_printf("dis_cache_manage(ppc)(opc1|b21to25|b0)\n"); return False; } @@ -5285,13 +5127,13 @@ static Bool dis_cache_manage ( UInt theInstr, //zz case 0x2F6: // dcba (Data Cache Block Allocate, PPC32 p380) //zz vassert(0); /* AWAITING TEST CASE */ //zz DIP("dcba r%u,r%u\n", rA_addr, rB_addr); -//zz if (0) vex_printf("vex ppc32->IR: kludged dcba\n"); +//zz if (0) vex_printf("vex ppc->IR: kludged dcba\n"); //zz break; case 0x056: // dcbf (Data Cache Block Flush, PPC32 p382) DIP("dcbf r%u,r%u\n", rA_addr, rB_addr); /* nop as far as vex is concerned */ - if (0) vex_printf("vex ppc32->IR: kludged dcbf\n"); + if (0) vex_printf("vex ppc->IR: kludged dcbf\n"); break; case 0x036: // dcbst (Data Cache Block Store, PPC32 p384) @@ -5372,7 +5214,7 @@ static Bool dis_cache_manage ( UInt theInstr, } default: - vex_printf("dis_cache_manage(PPC32)(opc2)\n"); + vex_printf("dis_cache_manage(ppc)(opc2)\n"); return False; } return True; @@ -5385,7 +5227,7 @@ static Bool dis_cache_manage ( UInt theInstr, /* --------- Synthesise a 2-bit FPU rounding mode. --------- */ /* Produces a value in 0 .. 3, which is encoded as per the type - IRRoundingMode. PPC32RoundingMode encoding is different to + IRRoundingMode. PPCRoundingMode encoding is different to IRRoundingMode, so need to map it. 
*/ static IRExpr* /* :: Ity_I32 */ get_roundingmode ( void ) @@ -5411,7 +5253,8 @@ static IRExpr* /* :: Ity_I32 */ get_roundingmode ( void ) - returns type Ity_F64 */ static IRExpr* roundToSgl ( IRExpr* src ) { - return unop(Iop_F32toF64, binop(Iop_F64toF32, get_roundingmode(), src)); + return unop(Iop_F32toF64, + binop(Iop_F64toF32, get_roundingmode(), src)); } @@ -5447,17 +5290,19 @@ static Bool dis_fp_load ( UInt theInstr ) case 0x30: // lfs (Load Float Single, PPC32 p441) DIP("lfs fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr); assign( EA, ea_rAor0_simm(rA_addr, simm16) ); - putFReg( frD_addr, unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) ); + putFReg( frD_addr, + unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) ); break; - case 0x31: // lfsu (Load Float Single with Update, PPC32 p442) + case 0x31: // lfsu (Load Float Single, Update, PPC32 p442) if (rA_addr == 0) { - vex_printf("dis_fp_load(PPC32)(instr,lfsu)\n"); + vex_printf("dis_fp_load(ppc)(instr,lfsu)\n"); return False; } DIP("lfsu fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr); assign( EA, ea_rA_simm(rA_addr, simm16) ); - putFReg( frD_addr, unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) ); + putFReg( frD_addr, + unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) ); putIReg( rA_addr, mkexpr(EA) ); break; @@ -5467,9 +5312,9 @@ static Bool dis_fp_load ( UInt theInstr ) putFReg( frD_addr, loadBE(Ity_F64, mkexpr(EA)) ); break; - case 0x33: // lfdu (Load Float Double with Update, PPC32 p438) + case 0x33: // lfdu (Load Float Double, Update, PPC32 p438) if (rA_addr == 0) { - vex_printf("dis_fp_load(PPC32)(instr,lfdu)\n"); + vex_printf("dis_fp_load(ppc)(instr,lfdu)\n"); return False; } DIP("lfdu fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr); @@ -5480,7 +5325,7 @@ static Bool dis_fp_load ( UInt theInstr ) case 0x1F: if (b0 != 0) { - vex_printf("dis_fp_load(PPC32)(instr,b0)\n"); + vex_printf("dis_fp_load(ppc)(instr,b0)\n"); return False; } @@ -5492,14 +5337,15 @@ static Bool dis_fp_load ( UInt theInstr ) loadBE(Ity_F32, 
mkexpr(EA))) ); break; - case 0x237: // lfsux (Load Float Single with Update Indexed, PPC32 p443) + case 0x237: // lfsux (Load Float Single, Update Indxd, PPC32 p443) if (rA_addr == 0) { - vex_printf("dis_fp_load(PPC32)(instr,lfsux)\n"); + vex_printf("dis_fp_load(ppc)(instr,lfsux)\n"); return False; } DIP("lfsux fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr); assign( EA, ea_rA_idxd(rA_addr, rB_addr) ); - putFReg( frD_addr, unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) ); + putFReg( frD_addr, + unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) ); putIReg( rA_addr, mkexpr(EA) ); break; @@ -5509,9 +5355,9 @@ static Bool dis_fp_load ( UInt theInstr ) putFReg( frD_addr, loadBE(Ity_F64, mkexpr(EA)) ); break; - case 0x277: // lfdux (Load Float Double with Update Indexed, PPC32 p439) + case 0x277: // lfdux (Load Float Double, Update Indxd, PPC32 p439) if (rA_addr == 0) { - vex_printf("dis_fp_load(PPC32)(instr,lfdux)\n"); + vex_printf("dis_fp_load(ppc)(instr,lfdux)\n"); return False; } DIP("lfdux fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr); @@ -5521,13 +5367,13 @@ static Bool dis_fp_load ( UInt theInstr ) break; default: - vex_printf("dis_fp_load(PPC32)(opc2)\n"); + vex_printf("dis_fp_load(ppc)(opc2)\n"); return False; } break; default: - vex_printf("dis_fp_load(PPC32)(opc1)\n"); + vex_printf("dis_fp_load(ppc)(opc1)\n"); return False; } return True; @@ -5573,9 +5419,9 @@ static Bool dis_fp_store ( UInt theInstr ) binop(Iop_F64toF32, get_roundingmode(), mkexpr(frS)) ); break; - case 0x35: // stfsu (Store Float Single with Update, PPC32 p519) + case 0x35: // stfsu (Store Float Single, Update, PPC32 p519) if (rA_addr == 0) { - vex_printf("dis_fp_store(PPC32)(instr,stfsu)\n"); + vex_printf("dis_fp_store(ppc)(instr,stfsu)\n"); return False; } DIP("stfsu fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr); @@ -5592,9 +5438,9 @@ static Bool dis_fp_store ( UInt theInstr ) storeBE( mkexpr(EA), mkexpr(frS) ); break; - case 0x37: // stfdu (Store Float Double with Update, PPC32 p514) + case 
0x37: // stfdu (Store Float Double, Update, PPC32 p514) if (rA_addr == 0) { - vex_printf("dis_fp_store(PPC32)(instr,stfdu)\n"); + vex_printf("dis_fp_store(ppc)(instr,stfdu)\n"); return False; } DIP("stfdu fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr); @@ -5605,7 +5451,7 @@ static Bool dis_fp_store ( UInt theInstr ) case 0x1F: if (b0 != 0) { - vex_printf("dis_fp_store(PPC32)(instr,b0)\n"); + vex_printf("dis_fp_store(ppc)(instr,b0)\n"); return False; } @@ -5614,20 +5460,20 @@ static Bool dis_fp_store ( UInt theInstr ) DIP("stfsx fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr); assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) ); /* This implementation loses accuracy - see note for stfs */ - storeBE( mkexpr(EA), - binop(Iop_F64toF32, get_roundingmode(), mkexpr(frS)) ); + storeBE( mkexpr(EA), binop(Iop_F64toF32, + get_roundingmode(), mkexpr(frS)) ); break; - case 0x2B7: // stfsux (Store Float Single with Update Indexed, PPC32 p520) + case 0x2B7: // stfsux (Store Float Sgl, Update Indxd, PPC32 p520) if (rA_addr == 0) { - vex_printf("dis_fp_store(PPC32)(instr,stfsux)\n"); + vex_printf("dis_fp_store(ppc)(instr,stfsux)\n"); return False; } DIP("stfsux fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr); assign( EA, ea_rA_idxd(rA_addr, rB_addr) ); /* This implementation loses accuracy - see note for stfs */ - storeBE( mkexpr(EA), - binop(Iop_F64toF32, get_roundingmode(), mkexpr(frS)) ); + storeBE( mkexpr(EA), binop(Iop_F64toF32, + get_roundingmode(), mkexpr(frS)) ); putIReg( rA_addr, mkexpr(EA) ); break; @@ -5637,9 +5483,9 @@ static Bool dis_fp_store ( UInt theInstr ) storeBE( mkexpr(EA), mkexpr(frS) ); break; - case 0x2F7: // stfdux (Store Float Double with Update Indexed, PPC32 p515) + case 0x2F7: // stfdux (Store Float Dbl, Update Indxd, PPC32 p515) if (rA_addr == 0) { - vex_printf("dis_fp_store(PPC32)(instr,stfdux)\n"); + vex_printf("dis_fp_store(ppc)(instr,stfdux)\n"); return False; } DIP("stfdux fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr); @@ -5656,13 +5502,13 @@ static Bool 
dis_fp_store ( UInt theInstr ) //zz break; default: - vex_printf("dis_fp_store(PPC32)(opc2)\n"); + vex_printf("dis_fp_store(ppc)(opc2)\n"); return False; } break; default: - vex_printf("dis_fp_store(PPC32)(opc1)\n"); + vex_printf("dis_fp_store(ppc)(opc1)\n"); return False; } return True; @@ -5699,20 +5545,21 @@ static Bool dis_fp_arith ( UInt theInstr ) switch (opc2) { case 0x12: // fdivs (Floating Divide Single, PPC32 p407) if (frC_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fdivs)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fdivs)\n"); return False; } - DIP("fdivs%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fdivs%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frB_addr); - assign( frD, roundToSgl( binop(Iop_DivF64, mkexpr(frA), mkexpr(frB)) )); + assign( frD, roundToSgl( binop(Iop_DivF64, + mkexpr(frA), mkexpr(frB)) )); break; case 0x14: // fsubs (Floating Subtract Single, PPC32 p430) if (frC_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fsubs)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fsubs)\n"); return False; } - DIP("fsubs%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fsubs%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frB_addr); assign( frD, roundToSgl( binop(Iop_SubF64, mkexpr(frA), mkexpr(frB)) )); @@ -5720,10 +5567,10 @@ static Bool dis_fp_arith ( UInt theInstr ) case 0x15: // fadds (Floating Add Single, PPC32 p401) if (frC_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fadds)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fadds)\n"); return False; } - DIP("fadds%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fadds%s fr%u,fr%u,fr%u\n", flag_rC ? 
".":"", frD_addr, frA_addr, frB_addr); assign( frD, roundToSgl( binop(Iop_AddF64, mkexpr(frA), mkexpr(frB)) )); @@ -5731,20 +5578,20 @@ static Bool dis_fp_arith ( UInt theInstr ) //zz case 0x16: // fsqrts (Floating SqRt (Single-Precision), PPC32 p428) //zz if (frA_addr != 0 || frC_addr != 0) { -//zz vex_printf("dis_fp_arith(PPC32)(instr,fsqrts)\n"); +//zz vex_printf("dis_fp_arith(ppc)(instr,fsqrts)\n"); //zz return False; //zz } -//zz DIP("fsqrts%s fr%u,fr%u\n", flag_rC ? "." : "", +//zz DIP("fsqrts%s fr%u,fr%u\n", flag_rC ? ".":"", //zz frD_addr, frB_addr); //zz assign( frD, roundToSgl( unop(Iop_SqrtF64, mkexpr(frB)) )); //zz break; //zz case 0x18: // fres (Floating Reciprocal Estimate Single, PPC32 p421) //zz if (frA_addr != 0 || frC_addr != 0) { -//zz vex_printf("dis_fp_arith(PPC32)(instr,fres)\n"); +//zz vex_printf("dis_fp_arith(ppc)(instr,fres)\n"); //zz return False; //zz } -//zz DIP("fres%s fr%u,fr%u\n", flag_rC ? "." : "", +//zz DIP("fres%s fr%u,fr%u\n", flag_rC ? ".":"", //zz frD_addr, frB_addr); //zz DIP(" => not implemented\n"); //zz // CAB: Can we use one of the 128 bit SIMD Iop_Recip32F ops? @@ -5752,58 +5599,59 @@ static Bool dis_fp_arith ( UInt theInstr ) case 0x19: // fmuls (Floating Multiply Single, PPC32 p414) if (frB_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fmuls)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fmuls)\n"); return False; } - DIP("fmuls%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fmuls%s fr%u,fr%u,fr%u\n", flag_rC ? 
".":"", frD_addr, frA_addr, frC_addr); - assign( frD, roundToSgl( binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)) )); + assign( frD, roundToSgl( binop(Iop_MulF64, + mkexpr(frA), mkexpr(frC)) )); break; default: - vex_printf("dis_fp_arith(PPC32)(3B: opc2)\n"); + vex_printf("dis_fp_arith(ppc)(3B: opc2)\n"); return False; } break; case 0x3F: switch (opc2) { - case 0x12: // fdiv (Floating Divide (Double-Precision), PPC32 p406) + case 0x12: // fdiv (Floating Div (Double-Precision), PPC32 p406) if (frC_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fdiv)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fdiv)\n"); return False; } - DIP("fdiv%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fdiv%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frB_addr); assign( frD, binop( Iop_DivF64, mkexpr(frA), mkexpr(frB) ) ); break; - case 0x14: // fsub (Floating Subtract (Double-Precision), PPC32 p429) + case 0x14: // fsub (Floating Sub (Double-Precision), PPC32 p429) if (frC_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fsub)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fsub)\n"); return False; } - DIP("fsub%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fsub%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frB_addr); assign( frD, binop( Iop_SubF64, mkexpr(frA), mkexpr(frB) ) ); break; case 0x15: // fadd (Floating Add (Double-Precision), PPC32 p400) if (frC_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fadd)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fadd)\n"); return False; } - DIP("fadd%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fadd%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frB_addr); assign( frD, binop( Iop_AddF64, mkexpr(frA), mkexpr(frB) ) ); break; case 0x16: // fsqrt (Floating SqRt (Double-Precision), PPC32 p427) if (frA_addr != 0 || frC_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fsqrt)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fsqrt)\n"); return False; } - DIP("fsqrt%s fr%u,fr%u\n", flag_rC ? "." 
: "", + DIP("fsqrt%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr); assign( frD, unop( Iop_SqrtF64, mkexpr(frB) ) ); break; @@ -5812,12 +5660,13 @@ static Bool dis_fp_arith ( UInt theInstr ) IRTemp cc = newTemp(Ity_I32); IRTemp cc_b0 = newTemp(Ity_I32); - DIP("fsel%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fsel%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr, frB_addr); // cc: UN == 0x41, LT == 0x01, GT == 0x00, EQ == 0x40 // => GT|EQ == (cc & 0x1 == 0) - assign( cc, binop(Iop_CmpF64, mkexpr(frA), IRExpr_Const(IRConst_F64(0))) ); + assign( cc, binop(Iop_CmpF64, mkexpr(frA), + IRExpr_Const(IRConst_F64(0))) ); assign( cc_b0, binop(Iop_And32, mkexpr(cc), mkU32(1)) ); // frD = (frA >= 0.0) ? frC : frB @@ -5831,35 +5680,35 @@ static Bool dis_fp_arith ( UInt theInstr ) break; } - case 0x19: // fmul (Floating Multiply (Double Precision), PPC32 p413) + case 0x19: // fmul (Floating Mult (Double Precision), PPC32 p413) if (frB_addr != 0) { - vex_printf("dis_fp_arith(PPC32)(instr,fmul)\n"); + vex_printf("dis_fp_arith(ppc)(instr,fmul)\n"); return False; } - DIP("fmul%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fmul%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr); assign( frD, binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ) ); break; -//zz case 0x1A: // frsqrte (Floating Reciprocal SqRt Estimate, PPC32 p424) +//zz case 0x1A: // frsqrte (Floating Recip SqRt Est., PPC32 p424) //zz if (frA_addr != 0 || frC_addr != 0) { -//zz vex_printf("dis_fp_arith(PPC32)(instr,frsqrte)\n"); +//zz vex_printf("dis_fp_arith(ppc)(instr,frsqrte)\n"); //zz return False; //zz } -//zz DIP("frsqrte%s fr%u,fr%u\n", flag_rC ? "." : "", +//zz DIP("frsqrte%s fr%u,fr%u\n", flag_rC ? ".":"", //zz frD_addr, frB_addr); //zz DIP(" => not implemented\n"); //zz // CAB: Iop_SqrtF64, then one of the 128 bit SIMD Iop_Recip32F ops? 
//zz return False; default: - vex_printf("dis_fp_arith(PPC32)(3F: opc2)\n"); + vex_printf("dis_fp_arith(ppc)(3F: opc2)\n"); return False; } break; default: - vex_printf("dis_fp_arith(PPC32)(opc1)\n"); + vex_printf("dis_fp_arith(ppc)(opc1)\n"); return False; } @@ -5896,93 +5745,99 @@ static Bool dis_fp_multadd ( UInt theInstr ) case 0x3B: switch (opc2) { case 0x1C: // fmsubs (Floating Mult-Subtr Single, PPC32 p412) - DIP("fmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr, frB_addr); - assign( frD, roundToSgl( - binop( Iop_SubF64, - binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)), - mkexpr(frB)) )); + assign( frD, roundToSgl( binop( Iop_SubF64, + binop(Iop_MulF64, mkexpr(frA), + mkexpr(frC)), + mkexpr(frB)) )); break; case 0x1D: // fmadds (Floating Mult-Add Single, PPC32 p409) - DIP("fmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr, frB_addr); - assign( frD, roundToSgl( - binop( Iop_AddF64, - binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)), - mkexpr(frB)) )); + assign( frD, roundToSgl( binop( Iop_AddF64, + binop(Iop_MulF64, mkexpr(frA), + mkexpr(frC)), + mkexpr(frB)) )); break; case 0x1E: // fnmsubs (Float Neg Mult-Subtr Single, PPC32 p420) - DIP("fnmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fnmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr, frB_addr); assign( frD, roundToSgl( - unop(Iop_NegF64, - binop(Iop_SubF64, - binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)), - mkexpr(frB))) )); + unop(Iop_NegF64, + binop(Iop_SubF64, + binop(Iop_MulF64, mkexpr(frA), + mkexpr(frC)), + mkexpr(frB))) )); break; case 0x1F: // fnmadds (Floating Negative Multiply-Add Single, PPC32 p418) - DIP("fnmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + DIP("fnmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? 
".":"", frD_addr, frA_addr, frC_addr, frB_addr); assign( frD, roundToSgl( - unop(Iop_NegF64, - binop(Iop_AddF64, - binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)), - mkexpr(frB))) )); + unop(Iop_NegF64, + binop(Iop_AddF64, + binop(Iop_MulF64, mkexpr(frA), + mkexpr(frC)), + mkexpr(frB))) )); break; default: - vex_printf("dis_fp_multadd(PPC32)(3B: opc2)\n"); + vex_printf("dis_fp_multadd(ppc)(3B: opc2)\n"); return False; } break; case 0x3F: switch (opc2) { - case 0x1C: // fmsub (Float Mult-Subtr (Double Precision), PPC32 p411) - DIP("fmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + case 0x1C: // fmsub (Float Mult-Sub (Dbl Precision), PPC32 p411) + DIP("fmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr, frB_addr); assign( frD, binop( Iop_SubF64, - binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ), + binop( Iop_MulF64, mkexpr(frA), + mkexpr(frC) ), mkexpr(frB) )); break; - case 0x1D: // fmadd (Float Mult-Add (Double Precision), PPC32 p408) - DIP("fmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + case 0x1D: // fmadd (Float Mult-Add (Dbl Precision), PPC32 p408) + DIP("fmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr, frB_addr); assign( frD, binop( Iop_AddF64, - binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ), + binop( Iop_MulF64, mkexpr(frA), + mkexpr(frC) ), mkexpr(frB) )); break; - case 0x1E: // fnmsub (Float Neg Mult-Subtr (Double Precision), PPC32 p419) - DIP("fnmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "", + case 0x1E: // fnmsub (Float Neg Mult-Subtr (Dbl Precision), PPC32 p419) + DIP("fnmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr, frB_addr); assign( frD, unop( Iop_NegF64, binop( Iop_SubF64, - binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ), + binop( Iop_MulF64, mkexpr(frA), + mkexpr(frC) ), mkexpr(frB) ))); break; - case 0x1F: // fnmadd (Float Neg Mult-Add (Double Precision), PPC32 p417) - DIP("fnmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." 
: "", + case 0x1F: // fnmadd (Float Neg Mult-Add (Dbl Precision), PPC32 p417) + DIP("fnmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr, frC_addr, frB_addr); assign( frD, unop( Iop_NegF64, binop( Iop_AddF64, - binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ), + binop( Iop_MulF64, mkexpr(frA), + mkexpr(frC) ), mkexpr(frB) ))); break; default: - vex_printf("dis_fp_multadd(PPC32)(3F: opc2)\n"); + vex_printf("dis_fp_multadd(ppc)(3F: opc2)\n"); return False; } break; default: - vex_printf("dis_fp_multadd(PPC32)(opc1)\n"); + vex_printf("dis_fp_multadd(ppc)(opc1)\n"); return False; } @@ -6013,7 +5868,7 @@ static Bool dis_fp_cmp ( UInt theInstr ) IRTemp frB = newTemp(Ity_F64); if (opc1 != 0x3F || b21to22 != 0 || b0 != 0) { - vex_printf("dis_fp_cmp(PPC32)(instr)\n"); + vex_printf("dis_fp_cmp(ppc)(instr)\n"); return False; } @@ -6062,7 +5917,7 @@ static Bool dis_fp_cmp ( UInt theInstr ) DIP("fcmpo crf%d,fr%u,fr%u\n", crfD, frA_addr, frB_addr); break; default: - vex_printf("dis_fp_cmp(PPC32)(opc2)\n"); + vex_printf("dis_fp_cmp(ppc)(opc2)\n"); return False; } return True; @@ -6089,7 +5944,7 @@ static Bool dis_fp_round ( UInt theInstr ) IRTemp r_tmp64 = newTemp(Ity_I64); if (opc1 != 0x3F || b16to20 != 0) { - vex_printf("dis_fp_round(PPC32)(instr)\n"); + vex_printf("dis_fp_round(ppc)(instr)\n"); return False; } @@ -6097,19 +5952,20 @@ static Bool dis_fp_round ( UInt theInstr ) switch (opc2) { case 0x00C: // frsp (Float Round to Single, PPC32 p423) - DIP("frsp%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); + DIP("frsp%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr); assign( frD, roundToSgl( mkexpr(frB) )); break; case 0x00E: // fctiw (Float Conv to Int, PPC32 p404) - DIP("fctiw%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); - assign( r_tmp32, binop(Iop_F64toI32, get_roundingmode(), mkexpr(frB)) ); + DIP("fctiw%s fr%u,fr%u\n", flag_rC ? 
".":"", frD_addr, frB_addr); + assign( r_tmp32, + binop(Iop_F64toI32, get_roundingmode(), mkexpr(frB)) ); assign( frD, unop( Iop_ReinterpI64asF64, unop( Iop_32Uto64, mkexpr(r_tmp32)))); break; case 0x00F: // fctiwz (Float Conv to Int, Round to Zero, PPC32 p405) - DIP("fctiwz%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); + DIP("fctiwz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr); assign( r_tmp32, binop(Iop_F64toI32, mkU32(0x3), mkexpr(frB)) ); assign( frD, unop( Iop_ReinterpI64asF64, unop( Iop_32Uto64, mkexpr(r_tmp32)))); @@ -6117,26 +5973,28 @@ static Bool dis_fp_round ( UInt theInstr ) /* 64bit FP conversions */ - case 0x32E: // fctid (Float Conv to Int DW, PPC64 p437) - DIP("fctid%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); - assign( r_tmp64, binop(Iop_F64toI64, get_roundingmode(), mkexpr(frB)) ); + case 0x32E: // fctid (Float Conv to Int DWord, PPC64 p437) + DIP("fctid%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr); + assign( r_tmp64, + binop(Iop_F64toI64, get_roundingmode(), mkexpr(frB)) ); assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) ); break; - case 0x32F: // fctidz (Float Conv to Int DW, Round to Zero, PPC64 p437) - DIP("fctidz%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); + case 0x32F: // fctidz (Float Conv to Int DWord, Round to Zero, PPC64 p437) + DIP("fctidz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr); assign( r_tmp64, binop(Iop_F64toI64, mkU32(0x3), mkexpr(frB)) ); assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) ); break; - case 0x34E: // fcfid (Float Conv from Int DW, PPC64 p434) - DIP("fcfid%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); + case 0x34E: // fcfid (Float Conv from Int DWord, PPC64 p434) + DIP("fcfid%s fr%u,fr%u\n", flag_rC ? 
".":"", frD_addr, frB_addr); assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) ); - assign( frD, binop(Iop_I64toF64, get_roundingmode(), mkexpr(r_tmp64)) ); + assign( frD, binop(Iop_I64toF64, get_roundingmode(), + mkexpr(r_tmp64)) ); break; default: - vex_printf("dis_fp_round(PPC32)(opc2)\n"); + vex_printf("dis_fp_round(ppc)(opc2)\n"); return False; } @@ -6163,7 +6021,7 @@ static Bool dis_fp_move ( UInt theInstr ) IRTemp frB = newTemp(Ity_F64); if (opc1 != 0x3F || b16to20 != 0) { - vex_printf("dis_fp_move(PPC32)(instr)\n"); + vex_printf("dis_fp_move(ppc)(instr)\n"); return False; } @@ -6171,27 +6029,27 @@ static Bool dis_fp_move ( UInt theInstr ) switch (opc2) { case 0x028: // fneg (Floating Negate, PPC32 p416) - DIP("fneg%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); + DIP("fneg%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr); assign( frD, unop( Iop_NegF64, mkexpr(frB) )); break; case 0x048: // fmr (Floating Move Register, PPC32 p410) - DIP("fmr%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); + DIP("fmr%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr); assign( frD, mkexpr(frB) ); break; case 0x088: // fnabs (Floating Negative Absolute Value, PPC32 p415) - DIP("fnabs%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); + DIP("fnabs%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr); assign( frD, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr(frB) ))); break; case 0x108: // fabs (Floating Absolute Value, PPC32 p399) - DIP("fabs%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr); + DIP("fabs%s fr%u,fr%u\n", flag_rC ? 
".":"", frD_addr, frB_addr); assign( frD, unop( Iop_AbsF64, mkexpr(frB) )); break; default: - vex_printf("dis_fp_move(PPC32)(opc2)\n"); + vex_printf("dis_fp_move(ppc)(opc2)\n"); return False; } @@ -6212,7 +6070,7 @@ static Bool dis_fp_scr ( UInt theInstr ) UChar flag_rC = ifieldBIT0(theInstr); if (opc1 != 0x3F) { - vex_printf("dis_fp_scr(PPC32)(instr)\n"); + vex_printf("dis_fp_scr(ppc)(instr)\n"); return False; } @@ -6223,10 +6081,10 @@ static Bool dis_fp_scr ( UInt theInstr ) //zz UInt b11to20 = IFIELD(theInstr, 11, 10); //zz //zz if (b11to20 != 0) { -//zz vex_printf("dis_fp_scr(PPC32)(instr,mtfsb1)\n"); +//zz vex_printf("dis_fp_scr(ppc)(instr,mtfsb1)\n"); //zz return False; //zz } -//zz DIP("mtfsb1%s crb%d \n", flag_rC ? "." : "", crbD); +//zz DIP("mtfsb1%s crb%d \n", flag_rC ? ".":"", crbD); //zz putGST_masked( PPC_GST_FPSCR, mkU32(1<<(31-crbD)), 1<<(31-crbD) ); //zz break; //zz } @@ -6240,7 +6098,7 @@ static Bool dis_fp_scr ( UInt theInstr ) //zz IRTemp tmp = newTemp(Ity_I32); //zz //zz if (b21to22 != 0 || b11to17 != 0 || flag_rC != 0) { -//zz vex_printf("dis_fp_scr(PPC32)(instr,mcrfs)\n"); +//zz vex_printf("dis_fp_scr(ppc)(instr,mcrfs)\n"); //zz return False; //zz } //zz DIP("mcrfs crf%d,crf%d\n", crfD, crfS); @@ -6255,10 +6113,10 @@ static Bool dis_fp_scr ( UInt theInstr ) UInt b11to20 = IFIELD(theInstr, 11, 10); if (b11to20 != 0) { - vex_printf("dis_fp_scr(PPC32)(instr,mtfsb0)\n"); + vex_printf("dis_fp_scr(ppc)(instr,mtfsb0)\n"); return False; } - DIP("mtfsb0%s crb%d\n", flag_rC ? "." : "", crbD); + DIP("mtfsb0%s crb%d\n", flag_rC ? ".":"", crbD); putGST_masked( PPC_GST_FPSCR, mkU32(0), 1<<(31-crbD) ); break; } @@ -6270,10 +6128,10 @@ static Bool dis_fp_scr ( UInt theInstr ) UChar b11 = toUChar( IFIELD( theInstr, 11, 1 ) ); if (b16to22 != 0 || b11 != 0) { - vex_printf("dis_fp_scr(PPC32)(instr,mtfsfi)\n"); + vex_printf("dis_fp_scr(ppc)(instr,mtfsfi)\n"); return False; } - DIP("mtfsfi%s crf%d,%d\n", flag_rC ? "." 
: "", crfD, IMM); + DIP("mtfsfi%s crf%d,%d\n", flag_rC ? ".":"", crfD, IMM); putGST_field( PPC_GST_FPSCR, mkU32(IMM), crfD ); break; } @@ -6283,13 +6141,14 @@ static Bool dis_fp_scr ( UInt theInstr ) UInt b11to20 = IFIELD(theInstr, 11, 10); if (b11to20 != 0) { - vex_printf("dis_fp_scr(PPC32)(instr,mffs)\n"); + vex_printf("dis_fp_scr(ppc)(instr,mffs)\n"); return False; } - DIP("mffs%s fr%u\n", flag_rC ? "." : "", frD_addr); - putFReg( frD_addr, unop( Iop_ReinterpI64asF64, - unop( Iop_32Uto64, - getGST_masked( PPC_GST_FPSCR, 0x3 ) ))); + DIP("mffs%s fr%u\n", flag_rC ? ".":"", frD_addr); + putFReg( frD_addr, + unop( Iop_ReinterpI64asF64, + unop( Iop_32Uto64, + getGST_masked( PPC_GST_FPSCR, MASK_FPSCR_RN ) ))); break; } @@ -6303,10 +6162,10 @@ static Bool dis_fp_scr ( UInt theInstr ) Int i, mask; if (b25 != 0 || b16 != 0) { - vex_printf("dis_fp_scr(PPC32)(instr,mtfsf)\n"); + vex_printf("dis_fp_scr(ppc)(instr,mtfsf)\n"); return False; } - DIP("mtfsf%s %d,fr%u\n", flag_rC ? "." : "", FM, frB_addr); + DIP("mtfsf%s %d,fr%u\n", flag_rC ? ".":"", FM, frB_addr); assign( frB, getFReg(frB_addr)); assign( rB_32, unop( Iop_64to32, unop( Iop_ReinterpF64asI64, mkexpr(frB) ))); @@ -6322,7 +6181,7 @@ static Bool dis_fp_scr ( UInt theInstr ) } default: - vex_printf("dis_fp_scr(PPC32)(opc2)\n"); + vex_printf("dis_fp_scr(ppc)(opc2)\n"); return False; } return True; @@ -6351,24 +6210,26 @@ static Bool dis_av_datastream ( UInt theInstr ) UChar b0 = ifieldBIT0(theInstr); if (opc1 != 0x1F || b23to24 != 0 || b0 != 0) { - vex_printf("dis_av_datastream(PPC32)(instr)\n"); + vex_printf("dis_av_datastream(ppc)(instr)\n"); return False; } switch (opc2) { case 0x156: // dst (Data Stream Touch, AV p115) - DIP("dst%s r%u,r%u,%d\n", flag_T ? "t" : "", rA_addr, rB_addr, STRM); + DIP("dst%s r%u,r%u,%d\n", flag_T ? "t" : "", + rA_addr, rB_addr, STRM); DIP(" => not implemented\n"); return False; case 0x176: // dstst (Data Stream Touch for Store, AV p117) - DIP("dstst%s r%u,r%u,%d\n", flag_T ? 
"t" : "", rA_addr, rB_addr, STRM); + DIP("dstst%s r%u,r%u,%d\n", flag_T ? "t" : "", + rA_addr, rB_addr, STRM); DIP(" => not implemented\n"); return False; case 0x336: // dss (Data Stream Stop, AV p114) if (rA_addr != 0 || rB_addr != 0) { - vex_printf("dis_av_datastream(PPC32)(opc2,dst)\n"); + vex_printf("dis_av_datastream(ppc)(opc2,dst)\n"); return False; } if (flag_A == 0) { @@ -6381,7 +6242,7 @@ static Bool dis_av_datastream ( UInt theInstr ) return False; default: - vex_printf("dis_av_datastream(PPC32)(opc2)\n"); + vex_printf("dis_av_datastream(ppc)(opc2)\n"); return False; } return True; @@ -6400,14 +6261,14 @@ static Bool dis_av_procctl ( UInt theInstr ) UInt opc2 = IFIELD( theInstr, 0, 11 ); if (opc1 != 0x4) { - vex_printf("dis_av_procctl(PPC32)(instr)\n"); + vex_printf("dis_av_procctl(ppc)(instr)\n"); return False; } switch (opc2) { case 0x604: // mfvscr (Move from VSCR, AV p129) if (vA_addr != 0 || vB_addr != 0) { - vex_printf("dis_av_procctl(PPC32)(opc2,dst)\n"); + vex_printf("dis_av_procctl(ppc)(opc2,dst)\n"); return False; } DIP("mfvscr v%d\n", vD_addr); @@ -6417,7 +6278,7 @@ static Bool dis_av_procctl ( UInt theInstr ) case 0x644: { // mtvscr (Move to VSCR, AV p130) IRTemp vB = newTemp(Ity_V128); if (vD_addr != 0 || vA_addr != 0) { - vex_printf("dis_av_procctl(PPC32)(opc2,dst)\n"); + vex_printf("dis_av_procctl(ppc)(opc2,dst)\n"); return False; } DIP("mtvscr v%d\n", vB_addr); @@ -6426,7 +6287,7 @@ static Bool dis_av_procctl ( UInt theInstr ) break; } default: - vex_printf("dis_av_procctl(PPC32)(opc2)\n"); + vex_printf("dis_av_procctl(ppc)(opc2)\n"); return False; } return True; @@ -6450,7 +6311,7 @@ static Bool dis_av_load ( UInt theInstr ) IRTemp addr_align16 = newTemp(ty); if (opc1 != 0x1F || b0 != 0) { - vex_printf("dis_av_load(PPC32)(instr)\n"); + vex_printf("dis_av_load(ppc)(instr)\n"); return False; } @@ -6465,11 +6326,18 @@ static Bool dis_av_load ( UInt theInstr ) mkU32(vD_off), binop(Iop_And32, mkexpr(EA_lo32), mkU32(0xF)), mkU32(0)/*left*/ ); - 
IRDirty* d = unsafeIRDirty_0_N ( - 0/*regparms*/, - "ppc32g_dirtyhelper_LVS", - &ppc32g_dirtyhelper_LVS, - args ); + IRDirty* d; + if (!mode64) { + d = unsafeIRDirty_0_N ( 0/*regparms*/, + "ppc32g_dirtyhelper_LVS", + &ppc32g_dirtyhelper_LVS, + args ); + } else { + d = unsafeIRDirty_0_N ( 0/*regparms*/, + "ppc64g_dirtyhelper_LVS", + &ppc64g_dirtyhelper_LVS, + args ); + } DIP("lvsl v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr); /* declare guest state effects */ d->needsBBP = True; @@ -6488,11 +6356,18 @@ static Bool dis_av_load ( UInt theInstr ) mkU32(vD_off), binop(Iop_And32, mkexpr(EA_lo32), mkU32(0xF)), mkU32(1)/*right*/ ); - IRDirty* d = unsafeIRDirty_0_N ( - 0/*regparms*/, - "ppc32g_dirtyhelper_LVS", - &ppc32g_dirtyhelper_LVS, - args ); + IRDirty* d; + if (!mode64) { + d = unsafeIRDirty_0_N ( 0/*regparms*/, + "ppc32g_dirtyhelper_LVS", + &ppc32g_dirtyhelper_LVS, + args ); + } else { + d = unsafeIRDirty_0_N ( 0/*regparms*/, + "ppc64g_dirtyhelper_LVS", + &ppc64g_dirtyhelper_LVS, + args ); + } DIP("lvsr v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr); /* declare guest state effects */ d->needsBBP = True; @@ -6537,7 +6412,7 @@ static Bool dis_av_load ( UInt theInstr ) return False; default: - vex_printf("dis_av_load(PPC32)(opc2)\n"); + vex_printf("dis_av_load(ppc)(opc2)\n"); return False; } return True; @@ -6565,7 +6440,7 @@ static Bool dis_av_store ( UInt theInstr ) IRTemp idx = newTemp(Ity_I8); if (opc1 != 0x1F || b0 != 0) { - vex_printf("dis_av_store(PPC32)(instr)\n"); + vex_printf("dis_av_store(ppc)(instr)\n"); return False; } @@ -6578,7 +6453,8 @@ static Bool dis_av_store ( UInt theInstr ) assign( eb, binop(Iop_And8, mkU8(0xF), unop(Iop_32to8, mkSzNarrow32(ty, mkexpr(EA)) )) ); - assign( idx, binop(Iop_Shl8, binop(Iop_Sub8, mkU8(15), mkexpr(eb)), + assign( idx, binop(Iop_Shl8, + binop(Iop_Sub8, mkU8(15), mkexpr(eb)), mkU8(3)) ); storeBE( mkexpr(EA), unop(Iop_32to8, unop(Iop_V128to32, @@ -6591,7 +6467,8 @@ static Bool dis_av_store ( UInt theInstr ) mkSzNarrow32(ty, 
addr_align(mkexpr(EA), 2)) ); assign( eb, binop(Iop_And8, mkU8(0xF), unop(Iop_32to8, mkexpr(addr_aligned) )) ); - assign( idx, binop(Iop_Shl8, binop(Iop_Sub8, mkU8(14), mkexpr(eb)), + assign( idx, binop(Iop_Shl8, + binop(Iop_Sub8, mkU8(14), mkexpr(eb)), mkU8(3)) ); storeBE( mkexpr(addr_aligned), unop(Iop_32to16, unop(Iop_V128to32, @@ -6604,7 +6481,8 @@ static Bool dis_av_store ( UInt theInstr ) mkSzNarrow32(ty, addr_align(mkexpr(EA), 4)) ); assign( eb, binop(Iop_And8, mkU8(0xF), unop(Iop_32to8, mkexpr(addr_aligned) )) ); - assign( idx, binop(Iop_Shl8, binop(Iop_Sub8, mkU8(12), mkexpr(eb)), + assign( idx, binop(Iop_Shl8, + binop(Iop_Sub8, mkU8(12), mkexpr(eb)), mkU8(3)) ); storeBE( mkexpr(addr_aligned), unop(Iop_V128to32, @@ -6622,11 +6500,11 @@ static Bool dis_av_store ( UInt theInstr ) DIP("stvxl v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr); DIP(" => not implemented\n"); return False; - // STORE(vS, 16, addr_align( mkexpr(EA), 16 )); +// break; default: - vex_printf("dis_av_store(PPC32)(opc2)\n"); + vex_printf("dis_av_store(ppc)(opc2)\n"); return False; } return True; @@ -6664,7 +6542,7 @@ static Bool dis_av_arith ( UInt theInstr ) assign( vB, getVReg(vB_addr)); if (opc1 != 0x4) { - vex_printf("dis_av_arith(PPC32)(opc1 != 0x4)\n"); + vex_printf("dis_av_arith(ppc)(opc1 != 0x4)\n"); return False; } @@ -6763,7 +6641,7 @@ static Bool dis_av_arith ( UInt theInstr ) // TODO: set VSCR[SAT] break; - case 0x640: // vsubuhs (Subtract Unsigned Half Word Saturate, AV p268) + case 0x640: // vsubuhs (Subtract Unsigned HWord Saturate, AV p268) DIP("vsubuhs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); putVReg( vD_addr, binop(Iop_QSub16Ux8, mkexpr(vA), mkexpr(vB)) ); // TODO: set VSCR[SAT] @@ -6893,22 +6771,26 @@ static Bool dis_av_arith ( UInt theInstr ) /* Multiply */ case 0x008: // vmuloub (Multiply Odd Unsigned Byte, AV p213) DIP("vmuloub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); - putVReg( vD_addr, binop(Iop_MullEven8Ux16, mkexpr(vA), mkexpr(vB))); + putVReg( vD_addr, + 
binop(Iop_MullEven8Ux16, mkexpr(vA), mkexpr(vB))); break; case 0x048: // vmulouh (Multiply Odd Unsigned Half Word, AV p214) DIP("vmulouh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); - putVReg( vD_addr, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB))); + putVReg( vD_addr, + binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB))); break; case 0x108: // vmulosb (Multiply Odd Signed Byte, AV p211) DIP("vmulosb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); - putVReg( vD_addr, binop(Iop_MullEven8Sx16, mkexpr(vA), mkexpr(vB))); + putVReg( vD_addr, + binop(Iop_MullEven8Sx16, mkexpr(vA), mkexpr(vB))); break; case 0x148: // vmulosh (Multiply Odd Signed Half Word, AV p212) DIP("vmulosh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); - putVReg( vD_addr, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB))); + putVReg( vD_addr, + binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB))); break; case 0x208: // vmuleub (Multiply Even Unsigned Byte, AV p209) @@ -6952,21 +6834,21 @@ static Bool dis_av_arith ( UInt theInstr ) /* add lanes */ assign( z3, binop(Iop_Add64, mkexpr(b3), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a15), mkexpr(a14)), - binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) ); + binop(Iop_Add64, + binop(Iop_Add64, mkexpr(a15), mkexpr(a14)), + binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) ); assign( z2, binop(Iop_Add64, mkexpr(b2), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a11), mkexpr(a10)), - binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) ); + binop(Iop_Add64, + binop(Iop_Add64, mkexpr(a11), mkexpr(a10)), + binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) ); assign( z1, binop(Iop_Add64, mkexpr(b1), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a7), mkexpr(a6)), - binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) ); + binop(Iop_Add64, + binop(Iop_Add64, mkexpr(a7), mkexpr(a6)), + binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) ); assign( z0, binop(Iop_Add64, mkexpr(b0), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a3), mkexpr(a2)), - binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) ); + binop(Iop_Add64, + 
binop(Iop_Add64, mkexpr(a3), mkexpr(a2)), + binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) ); /* saturate-narrow to 32bit, and combine to V128 */ putVReg( vD_addr, mkV128from4x64U( mkexpr(z3), mkexpr(z2), @@ -6992,21 +6874,21 @@ static Bool dis_av_arith ( UInt theInstr ) /* add lanes */ assign( z3, binop(Iop_Add64, mkexpr(b3), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a15), mkexpr(a14)), - binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) ); + binop(Iop_Add64, + binop(Iop_Add64, mkexpr(a15), mkexpr(a14)), + binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) ); assign( z2, binop(Iop_Add64, mkexpr(b2), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a11), mkexpr(a10)), - binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) ); + binop(Iop_Add64, + binop(Iop_Add64, mkexpr(a11), mkexpr(a10)), + binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) ); assign( z1, binop(Iop_Add64, mkexpr(b1), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a7), mkexpr(a6)), - binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) ); + binop(Iop_Add64, + binop(Iop_Add64, mkexpr(a7), mkexpr(a6)), + binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) ); assign( z0, binop(Iop_Add64, mkexpr(b0), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a3), mkexpr(a2)), - binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) ); + binop(Iop_Add64, + binop(Iop_Add64, mkexpr(a3), mkexpr(a2)), + binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) ); /* saturate-narrow to 32bit, and combine to V128 */ putVReg( vD_addr, mkV128from4x64S( mkexpr(z3), mkexpr(z2), @@ -7066,9 +6948,9 @@ static Bool dis_av_arith ( UInt theInstr ) /* add lanes */ assign( z0, binop(Iop_Add64, mkexpr(b0), - binop(Iop_Add64, - binop(Iop_Add64, mkexpr(a3), mkexpr(a2)), - binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) ); + binop(Iop_Add64, + binop(Iop_Add64, mkexpr(a3), mkexpr(a2)), + binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) ); /* saturate-narrow to 32bit, and combine to V128 */ putVReg( vD_addr, mkV128from4x64S( mkU64(0), mkU64(0), @@ -7076,7 +6958,7 @@ static Bool dis_av_arith ( UInt theInstr ) break; } default: - 
vex_printf("dis_av_arith(PPC32)(opc2=0x%x)\n", opc2); + vex_printf("dis_av_arith(ppc)(opc2=0x%x)\n", opc2); return False; } return True; @@ -7100,7 +6982,7 @@ static Bool dis_av_logic ( UInt theInstr ) assign( vB, getVReg(vB_addr)); if (opc1 != 0x4) { - vex_printf("dis_av_logic(PPC32)(opc1 != 0x4)\n"); + vex_printf("dis_av_logic(ppc)(opc1 != 0x4)\n"); return False; } @@ -7133,7 +7015,7 @@ static Bool dis_av_logic ( UInt theInstr ) break; default: - vex_printf("dis_av_logic(PPC32)(opc2=0x%x)\n", opc2); + vex_printf("dis_av_logic(ppc)(opc2=0x%x)\n", opc2); return False; } return True; @@ -7159,58 +7041,67 @@ static Bool dis_av_cmp ( UInt theInstr ) assign( vB, getVReg(vB_addr)); if (opc1 != 0x4) { - vex_printf("dis_av_cmp(PPC32)(instr)\n"); + vex_printf("dis_av_cmp(ppc)(instr)\n"); return False; } switch (opc2) { case 0x006: // vcmpequb (Compare Equal-to Unsigned B, AV p160) - DIP("vcmpequb%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpequb%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpEQ8x16, mkexpr(vA), mkexpr(vB)) ); break; case 0x046: // vcmpequh (Compare Equal-to Unsigned HW, AV p161) - DIP("vcmpequh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpequh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpEQ16x8, mkexpr(vA), mkexpr(vB)) ); break; case 0x086: // vcmpequw (Compare Equal-to Unsigned W, AV p162) - DIP("vcmpequw%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpequw%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpEQ32x4, mkexpr(vA), mkexpr(vB)) ); break; case 0x206: // vcmpgtub (Compare Greater-than Unsigned B, AV p168) - DIP("vcmpgtub%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpgtub%s v%d,v%d,v%d\n", (flag_rC ? 
".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpGT8Ux16, mkexpr(vA), mkexpr(vB)) ); break; case 0x246: // vcmpgtuh (Compare Greater-than Unsigned HW, AV p169) - DIP("vcmpgtuh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpgtuh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpGT16Ux8, mkexpr(vA), mkexpr(vB)) ); break; case 0x286: // vcmpgtuw (Compare Greater-than Unsigned W, AV p170) - DIP("vcmpgtuw%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpgtuw%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpGT32Ux4, mkexpr(vA), mkexpr(vB)) ); break; case 0x306: // vcmpgtsb (Compare Greater-than Signed B, AV p165) - DIP("vcmpgtsb%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpgtsb%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpGT8Sx16, mkexpr(vA), mkexpr(vB)) ); break; case 0x346: // vcmpgtsh (Compare Greater-than Signed HW, AV p166) - DIP("vcmpgtsh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpgtsh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpGT16Sx8, mkexpr(vA), mkexpr(vB)) ); break; case 0x386: // vcmpgtsw (Compare Greater-than Signed W, AV p167) - DIP("vcmpgtsw%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpgtsw%s v%d,v%d,v%d\n", (flag_rC ? 
".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpGT32Sx4, mkexpr(vA), mkexpr(vB)) ); break; default: - vex_printf("dis_av_cmp(PPC32)(opc2)\n"); + vex_printf("dis_av_cmp(ppc)(opc2)\n"); return False; } @@ -7265,22 +7156,23 @@ static Bool dis_av_multarith ( UInt theInstr ) assign( zeros, unop(Iop_Dup32x4, mkU32(0)) ); if (opc1 != 0x4) { - vex_printf("dis_av_multarith(PPC32)(instr)\n"); + vex_printf("dis_av_multarith(ppc)(instr)\n"); return False; } switch (opc2) { /* Multiply-Add */ - case 0x20: { // vmhaddshs (Multiply High, Add Signed HW Saturate, AV p185) + case 0x20: { // vmhaddshs (Mult Hi, Add Signed HW Saturate, AV p185) IRTemp cSigns = newTemp(Ity_V128); - DIP("vmhaddshs v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); - assign( cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)) ); - assign( aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)) ); - assign( bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)) ); - assign( cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns), mkexpr(vC)) ); - assign( aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)) ); - assign( bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)) ); - assign( cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns), mkexpr(vC)) ); + DIP("vmhaddshs v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); + assign(cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC))); + assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA))); + assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB))); + assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns),mkexpr(vC))); + assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA))); + assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB))); + assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns),mkexpr(vC))); assign( zLo, binop(Iop_Add32x4, mkexpr(cLo), binop(Iop_SarN32x4, @@ -7294,20 +7186,22 @@ static Bool dis_av_multarith ( UInt theInstr ) mkexpr(aHi), 
mkexpr(bHi)), mkU8(15))) ); - putVReg( vD_addr, binop(Iop_QNarrow32Sx4, mkexpr(zHi), mkexpr(zLo)) ); + putVReg( vD_addr, + binop(Iop_QNarrow32Sx4, mkexpr(zHi), mkexpr(zLo)) ); break; } - case 0x21: { // vmhraddshs (Multiply High Round, Add Signed HW Saturate, AV p186) + case 0x21: { // vmhraddshs (Mult High Round, Add Signed HW Saturate, AV p186) IRTemp zKonst = newTemp(Ity_V128); IRTemp cSigns = newTemp(Ity_V128); - DIP("vmhraddshs v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); - assign( cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)) ); - assign( aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)) ); - assign( bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)) ); - assign( cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns), mkexpr(vC)) ); - assign( aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)) ); - assign( bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)) ); - assign( cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns), mkexpr(vC)) ); + DIP("vmhraddshs v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); + assign(cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)) ); + assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA))); + assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB))); + assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns),mkexpr(vC))); + assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA))); + assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB))); + assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns),mkexpr(vC))); /* shifting our const avoids store/load version of Dup */ assign( zKonst, binop(Iop_ShlN32x4, unop(Iop_Dup32x4, mkU32(0x1)), @@ -7330,21 +7224,22 @@ static Bool dis_av_multarith ( UInt theInstr ) putVReg( vD_addr, binop(Iop_QNarrow32Sx4, mkexpr(zHi), mkexpr(zLo)) ); break; } - case 0x22: { // vmladduhm (Multiply Low, Add Unsigned HW Modulo, AV p194) - DIP("vmladduhm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, 
vC_addr); - assign( aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)) ); - assign( bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)) ); - assign( cLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vC)) ); - assign( aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)) ); - assign( bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)) ); - assign( cHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vC)) ); - assign( zLo, binop(Iop_Add32x4, - binop(Iop_MullEven16Ux8, mkexpr(aLo), mkexpr(bLo) ), - mkexpr(cLo)) ); - assign( zHi, binop(Iop_Add32x4, - binop(Iop_MullEven16Ux8, mkexpr(aHi), mkexpr(bHi) ), - mkexpr(cHi)) ); - putVReg( vD_addr, binop(Iop_Narrow32x4, mkexpr(zHi), mkexpr(zLo)) ); + case 0x22: { // vmladduhm (Mult Low, Add Unsigned HW Modulo, AV p194) + DIP("vmladduhm v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); + assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA))); + assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB))); + assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vC))); + assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA))); + assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB))); + assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vC))); + assign(zLo, binop(Iop_Add32x4, + binop(Iop_MullEven16Ux8, mkexpr(aLo), mkexpr(bLo)), + mkexpr(cLo)) ); + assign(zHi, binop(Iop_Add32x4, + binop(Iop_MullEven16Ux8, mkexpr(aHi), mkexpr(bHi)), + mkexpr(cHi))); + putVReg(vD_addr, binop(Iop_Narrow32x4, mkexpr(zHi), mkexpr(zLo))); break; } @@ -7353,7 +7248,8 @@ static Bool dis_av_multarith ( UInt theInstr ) case 0x24: { // vmsumubm (Multiply Sum Unsigned B Modulo, AV p204) IRTemp abEE, abEO, abOE, abOO; abEE = abEO = abOE = abOO = IRTemp_INVALID; - DIP("vmsumubm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); + DIP("vmsumubm v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); /* multiply vA,vB (unsigned, widening) */ 
assign( abEvn, MK_Iop_MullOdd8Ux16( mkexpr(vA), mkexpr(vB) )); @@ -7364,10 +7260,10 @@ static Bool dis_av_multarith ( UInt theInstr ) expand16Ux8( mkexpr(abOdd), &abOE, &abOO ); putVReg( vD_addr, - binop(Iop_Add32x4, mkexpr(vC), - binop(Iop_Add32x4, - binop(Iop_Add32x4, mkexpr(abEE), mkexpr(abEO)), - binop(Iop_Add32x4, mkexpr(abOE), mkexpr(abOO)))) ); + binop(Iop_Add32x4, mkexpr(vC), + binop(Iop_Add32x4, + binop(Iop_Add32x4, mkexpr(abEE), mkexpr(abEO)), + binop(Iop_Add32x4, mkexpr(abOE), mkexpr(abOO)))) ); break; } case 0x25: { // vmsummbm (Multiply Sum Mixed-Sign B Modulo, AV p201) @@ -7377,7 +7273,8 @@ static Bool dis_av_multarith ( UInt theInstr ) IRTemp abOE = newTemp(Ity_V128); IRTemp abOO = newTemp(Ity_V128); aEvn = aOdd = bEvn = bOdd = IRTemp_INVALID; - DIP("vmsummbm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); + DIP("vmsummbm v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); /* sign-extend vA, zero-extend vB, for mixed-sign multiply (separating out adjacent lanes to different vectors) */ @@ -7392,23 +7289,25 @@ static Bool dis_av_multarith ( UInt theInstr ) /* add results together, + vC */ putVReg( vD_addr, - binop(Iop_QAdd32Sx4, mkexpr(vC), - binop(Iop_QAdd32Sx4, - binop(Iop_QAdd32Sx4, mkexpr(abEE), mkexpr(abEO)), - binop(Iop_QAdd32Sx4, mkexpr(abOE), mkexpr(abOO)))) ); + binop(Iop_QAdd32Sx4, mkexpr(vC), + binop(Iop_QAdd32Sx4, + binop(Iop_QAdd32Sx4, mkexpr(abEE), mkexpr(abEO)), + binop(Iop_QAdd32Sx4, mkexpr(abOE), mkexpr(abOO)))) ); break; } case 0x26: { // vmsumuhm (Multiply Sum Unsigned HW Modulo, AV p205) - DIP("vmsumuhm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); + DIP("vmsumuhm v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); assign( abEvn, MK_Iop_MullOdd16Ux8( mkexpr(vA), mkexpr(vB) )); assign( abOdd, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)) ); putVReg( vD_addr, - binop(Iop_Add32x4, mkexpr(vC), - binop(Iop_Add32x4, mkexpr(abEvn), mkexpr(abOdd))) ); + binop(Iop_Add32x4, mkexpr(vC), + 
binop(Iop_Add32x4, mkexpr(abEvn), mkexpr(abOdd))) ); break; } case 0x27: { // vmsumuhs (Multiply Sum Unsigned HW Saturate, AV p206) - DIP("vmsumuhs v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); + DIP("vmsumuhs v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); /* widening multiply, separating lanes */ assign( abEvn, MK_Iop_MullOdd16Ux8(mkexpr(vA), mkexpr(vB) )); assign( abOdd, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)) ); @@ -7435,16 +7334,18 @@ static Bool dis_av_multarith ( UInt theInstr ) break; } case 0x28: { // vmsumshm (Multiply Sum Signed HW Modulo, AV p202) - DIP("vmsumshm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); + DIP("vmsumshm v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); assign( abEvn, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) )); assign( abOdd, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)) ); putVReg( vD_addr, - binop(Iop_Add32x4, mkexpr(vC), - binop(Iop_Add32x4, mkexpr(abOdd), mkexpr(abEvn))) ); + binop(Iop_Add32x4, mkexpr(vC), + binop(Iop_Add32x4, mkexpr(abOdd), mkexpr(abEvn))) ); break; } case 0x29: { // vmsumshs (Multiply Sum Signed HW Saturate, AV p203) - DIP("vmsumshs v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); + DIP("vmsumshs v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); /* widening multiply, separating lanes */ assign( abEvn, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) )); assign( abOdd, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)) ); @@ -7470,7 +7371,7 @@ static Bool dis_av_multarith ( UInt theInstr ) break; } default: - vex_printf("dis_av_multarith(PPC32)(opc2)\n"); + vex_printf("dis_av_multarith(ppc)(opc2)\n"); return False; } return True; @@ -7494,7 +7395,7 @@ static Bool dis_av_shift ( UInt theInstr ) assign( vB, getVReg(vB_addr)); if (opc1 != 0x4){ - vex_printf("dis_av_shift(PPC32)(instr)\n"); + vex_printf("dis_av_shift(ppc)(instr)\n"); return False; } @@ -7580,17 +7481,17 @@ static Bool dis_av_shift ( UInt theInstr ) binop(Iop_ShrV128, mkexpr(vA), 
mkexpr(sh)) ); break; } - case 0x304: // vsrab (Shift Right Algebraic B, AV p253) + case 0x304: // vsrab (Shift Right Alg B, AV p253) DIP("vsrab v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); putVReg( vD_addr, binop(Iop_Sar8x16, mkexpr(vA), mkexpr(vB)) ); break; - case 0x344: // vsrah (Shift Right Algebraic HW, AV p254) + case 0x344: // vsrah (Shift Right Alg HW, AV p254) DIP("vsrah v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); putVReg( vD_addr, binop(Iop_Sar16x8, mkexpr(vA), mkexpr(vB)) ); break; - case 0x384: // vsraw (Shift Right Algebraic W, AV p255) + case 0x384: // vsraw (Shift Right Alg W, AV p255) DIP("vsraw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); putVReg( vD_addr, binop(Iop_Sar32x4, mkexpr(vA), mkexpr(vB)) ); break; @@ -7607,7 +7508,7 @@ static Bool dis_av_shift ( UInt theInstr ) } default: - vex_printf("dis_av_shift(PPC32)(opc2)\n"); + vex_printf("dis_av_shift(ppc)(opc2)\n"); return False; } return True; @@ -7639,7 +7540,7 @@ static Bool dis_av_permute ( UInt theInstr ) assign( vC, getVReg(vC_addr)); if (opc1 != 0x4) { - vex_printf("dis_av_permute(PPC32)(instr)\n"); + vex_printf("dis_av_permute(ppc)(instr)\n"); return False; } @@ -7658,14 +7559,18 @@ static Bool dis_av_permute ( UInt theInstr ) IRTemp b_perm = newTemp(Ity_V128); IRTemp mask = newTemp(Ity_V128); IRTemp vC_andF = newTemp(Ity_V128); - DIP("vperm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr); + DIP("vperm v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vB_addr, vC_addr); /* Limit the Perm8x16 steering values to 0 .. 
15 as that is what IR specifies, and also to hide irrelevant bits from memcheck */ - assign( vC_andF, binop(Iop_AndV128, mkexpr(vC), - unop(Iop_Dup8x16, mkU8(0xF))) ); - assign( a_perm, binop(Iop_Perm8x16, mkexpr(vA), mkexpr(vC_andF)) ); - assign( b_perm, binop(Iop_Perm8x16, mkexpr(vB), mkexpr(vC_andF)) ); + assign( vC_andF, + binop(Iop_AndV128, mkexpr(vC), + unop(Iop_Dup8x16, mkU8(0xF))) ); + assign( a_perm, + binop(Iop_Perm8x16, mkexpr(vA), mkexpr(vC_andF)) ); + assign( b_perm, + binop(Iop_Perm8x16, mkexpr(vB), mkexpr(vC_andF)) ); // mask[i8] = (vC[i8]_4 == 1) ? 0xFF : 0x0 assign( mask, binop(Iop_SarN8x16, binop(Iop_ShlN8x16, mkexpr(vC), mkU8(3)), @@ -7680,17 +7585,18 @@ static Bool dis_av_permute ( UInt theInstr ) } case 0x2C: // vsldoi (Shift Left Double by Octet Imm, AV p241) if (b10 != 0) { - vex_printf("dis_av_permute(PPC32)(vsldoi)\n"); + vex_printf("dis_av_permute(ppc)(vsldoi)\n"); return False; } - DIP("vsldoi v%d,v%d,v%d,%d\n", vD_addr, vA_addr, vB_addr, SHB_uimm4); + DIP("vsldoi v%d,v%d,v%d,%d\n", + vD_addr, vA_addr, vB_addr, SHB_uimm4); if (SHB_uimm4 == 0) putVReg( vD_addr, mkexpr(vA) ); else putVReg( vD_addr, - binop(Iop_OrV128, - binop(Iop_ShlV128, mkexpr(vA), mkU8(SHB_uimm4*8)), - binop(Iop_ShrV128, mkexpr(vB), mkU8((16-SHB_uimm4)*8))) ); + binop(Iop_OrV128, + binop(Iop_ShlV128, mkexpr(vA), mkU8(SHB_uimm4*8)), + binop(Iop_ShrV128, mkexpr(vB), mkU8((16-SHB_uimm4)*8))) ); return True; default: @@ -7772,16 +7678,18 @@ static Bool dis_av_permute ( UInt theInstr ) case 0x34C: // vspltish (Splat Immediate Signed HW, AV p248) DIP("vspltish v%d,%d\n", vD_addr, (Char)SIMM_8); - putVReg( vD_addr, unop(Iop_Dup16x8, mkU16(extend_s_8to32(SIMM_8))) ); + putVReg( vD_addr, + unop(Iop_Dup16x8, mkU16(extend_s_8to32(SIMM_8))) ); break; case 0x38C: // vspltisw (Splat Immediate Signed W, AV p249) DIP("vspltisw v%d,%d\n", vD_addr, (Char)SIMM_8); - putVReg( vD_addr, unop(Iop_Dup32x4, mkU32(extend_s_8to32(SIMM_8))) ); + putVReg( vD_addr, + unop(Iop_Dup32x4, 
mkU32(extend_s_8to32(SIMM_8))) ); break; default: - vex_printf("dis_av_permute(PPC32)(opc2)\n"); + vex_printf("dis_av_permute(ppc)(opc2)\n"); return False; } return True; @@ -7807,7 +7715,7 @@ static Bool dis_av_pack ( UInt theInstr ) assign( vB, getVReg(vB_addr)); if (opc1 != 0x4) { - vex_printf("dis_av_pack(PPC32)(instr)\n"); + vex_printf("dis_av_pack(ppc)(instr)\n"); return False; } @@ -7825,13 +7733,15 @@ static Bool dis_av_pack ( UInt theInstr ) case 0x08E: // vpkuhus (Pack Unsigned HW Unsigned Saturate, AV p225) DIP("vpkuhus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); - putVReg( vD_addr, binop(Iop_QNarrow16Ux8, mkexpr(vA), mkexpr(vB)) ); + putVReg( vD_addr, + binop(Iop_QNarrow16Ux8, mkexpr(vA), mkexpr(vB)) ); // TODO: set VSCR[SAT] return True; case 0x0CE: // vpkuwus (Pack Unsigned W Unsigned Saturate, AV p227) DIP("vpkuwus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); - putVReg( vD_addr, binop(Iop_QNarrow32Ux4, mkexpr(vA), mkexpr(vB)) ); + putVReg( vD_addr, + binop(Iop_QNarrow32Ux4, mkexpr(vA), mkexpr(vB)) ); // TODO: set VSCR[SAT] return True; @@ -7877,19 +7787,21 @@ static Bool dis_av_pack ( UInt theInstr ) } case 0x18E: // vpkshss (Pack Signed HW Signed Saturate, AV p220) DIP("vpkshss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); - putVReg( vD_addr, binop(Iop_QNarrow16Sx8, mkexpr(vA), mkexpr(vB)) ); + putVReg( vD_addr, + binop(Iop_QNarrow16Sx8, mkexpr(vA), mkexpr(vB)) ); // TODO: set VSCR[SAT] return True; case 0x1CE: // vpkswss (Pack Signed W Signed Saturate, AV p222) DIP("vpkswss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr); - putVReg( vD_addr, binop(Iop_QNarrow32Sx4, mkexpr(vA), mkexpr(vB)) ); + putVReg( vD_addr, + binop(Iop_QNarrow32Sx4, mkexpr(vA), mkexpr(vB)) ); // TODO: set VSCR[SAT] return True; case 0x30E: { // vpkpx (Pack Pixel, AV p219) /* CAB: Worth a new primop? 
*/ - /* Using shifts to compact pixel elements, then packing them them */ + /* Using shifts to compact pixel elements, then packing them */ IRTemp a1 = newTemp(Ity_V128); IRTemp a2 = newTemp(Ity_V128); IRTemp a3 = newTemp(Ity_V128); @@ -7934,7 +7846,7 @@ static Bool dis_av_pack ( UInt theInstr ) if (vA_addr != 0) { - vex_printf("dis_av_pack(PPC32)(vA_addr)\n"); + vex_printf("dis_av_pack(ppc)(vA_addr)\n"); return False; } @@ -7947,25 +7859,29 @@ static Bool dis_av_pack ( UInt theInstr ) case 0x20E: { // vupkhsb (Unpack High Signed B, AV p277) DIP("vupkhsb v%d,v%d\n", vD_addr, vB_addr); assign( signs, binop(Iop_CmpGT8Sx16, mkexpr(zeros), mkexpr(vB)) ); - putVReg( vD_addr, binop(Iop_InterleaveHI8x16, mkexpr(signs), mkexpr(vB)) ); + putVReg( vD_addr, + binop(Iop_InterleaveHI8x16, mkexpr(signs), mkexpr(vB)) ); break; } case 0x24E: { // vupkhsh (Unpack High Signed HW, AV p278) DIP("vupkhsh v%d,v%d\n", vD_addr, vB_addr); assign( signs, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vB)) ); - putVReg( vD_addr, binop(Iop_InterleaveHI16x8, mkexpr(signs), mkexpr(vB)) ); + putVReg( vD_addr, + binop(Iop_InterleaveHI16x8, mkexpr(signs), mkexpr(vB)) ); break; } case 0x28E: { // vupklsb (Unpack Low Signed B, AV p280) DIP("vupklsb v%d,v%d\n", vD_addr, vB_addr); assign( signs, binop(Iop_CmpGT8Sx16, mkexpr(zeros), mkexpr(vB)) ); - putVReg( vD_addr, binop(Iop_InterleaveLO8x16, mkexpr(signs), mkexpr(vB)) ); + putVReg( vD_addr, + binop(Iop_InterleaveLO8x16, mkexpr(signs), mkexpr(vB)) ); break; } case 0x2CE: { // vupklsh (Unpack Low Signed HW, AV p281) DIP("vupklsh v%d,v%d\n", vD_addr, vB_addr); assign( signs, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vB)) ); - putVReg( vD_addr, binop(Iop_InterleaveLO16x8, mkexpr(signs), mkexpr(vB)) ); + putVReg( vD_addr, + binop(Iop_InterleaveLO16x8, mkexpr(signs), mkexpr(vB)) ); break; } case 0x34E: { // vupkhpx (Unpack High Pixel16, AV p276) @@ -7996,9 +7912,10 @@ static Bool dis_av_pack ( UInt theInstr ) mkU8(11)) ); assign( z23, 
binop(Iop_InterleaveHI16x8, mkexpr(zeros), binop(Iop_OrV128, mkexpr(z2), mkexpr(z3))) ); - putVReg( vD_addr, binop(Iop_OrV128, - binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)), - mkexpr(z23)) ); + putVReg( vD_addr, + binop(Iop_OrV128, + binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)), + mkexpr(z23)) ); break; } case 0x3CE: { // vupklpx (Unpack Low Pixel16, AV p279) @@ -8028,13 +7945,14 @@ static Bool dis_av_pack ( UInt theInstr ) mkU8(11)) ); assign( z23, binop(Iop_InterleaveLO16x8, mkexpr(zeros), binop(Iop_OrV128, mkexpr(z2), mkexpr(z3))) ); - putVReg( vD_addr, binop(Iop_OrV128, - binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)), - mkexpr(z23)) ); + putVReg( vD_addr, + binop(Iop_OrV128, + binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)), + mkexpr(z23)) ); break; } default: - vex_printf("dis_av_pack(PPC32)(opc2)\n"); + vex_printf("dis_av_pack(ppc)(opc2)\n"); return False; } return True; @@ -8062,23 +7980,27 @@ static Bool dis_av_fp_arith ( UInt theInstr ) assign( vC, getVReg(vC_addr)); if (opc1 != 0x4) { - vex_printf("dis_av_fp_arith(PPC32)(instr)\n"); + vex_printf("dis_av_fp_arith(ppc)(instr)\n"); return False; } opc2 = IFIELD( theInstr, 0, 6 ); switch (opc2) { case 0x2E: // vmaddfp (Multiply Add FP, AV p177) - DIP("vmaddfp v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vC_addr, vB_addr); - putVReg( vD_addr, binop(Iop_Add32Fx4, mkexpr(vB), - binop(Iop_Mul32Fx4, mkexpr(vA), mkexpr(vC))) ); + DIP("vmaddfp v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vC_addr, vB_addr); + putVReg( vD_addr, + binop(Iop_Add32Fx4, mkexpr(vB), + binop(Iop_Mul32Fx4, mkexpr(vA), mkexpr(vC))) ); return True; case 0x2F: { // vnmsubfp (Negative Multiply-Subtract FP, AV p215) - DIP("vnmsubfp v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vC_addr, vB_addr); - putVReg( vD_addr, binop(Iop_Sub32Fx4, - mkexpr(vB), - binop(Iop_Mul32Fx4, mkexpr(vA), mkexpr(vC))) ); + DIP("vnmsubfp v%d,v%d,v%d,v%d\n", + vD_addr, vA_addr, vC_addr, vB_addr); + putVReg( vD_addr, + binop(Iop_Sub32Fx4, + mkexpr(vB), + binop(Iop_Mul32Fx4, mkexpr(vA), mkexpr(vC))) ); 
return True; } @@ -8114,7 +8036,7 @@ static Bool dis_av_fp_arith ( UInt theInstr ) if (vA_addr != 0) { - vex_printf("dis_av_fp_arith(PPC32)(vA_addr)\n"); + vex_printf("dis_av_fp_arith(ppc)(vA_addr)\n"); return False; } @@ -8124,7 +8046,7 @@ static Bool dis_av_fp_arith ( UInt theInstr ) putVReg( vD_addr, unop(Iop_Recip32Fx4, mkexpr(vB)) ); return True; - case 0x14A: // vrsqrtefp (Reciprocal Square Root Estimate FP, AV p237) + case 0x14A: // vrsqrtefp (Reciprocal Sqrt Estimate FP, AV p237) DIP("vrsqrtefp v%d,v%d\n", vD_addr, vB_addr); putVReg( vD_addr, unop(Iop_RSqrt32Fx4, mkexpr(vB)) ); return True; @@ -8140,7 +8062,7 @@ static Bool dis_av_fp_arith ( UInt theInstr ) return False; default: - vex_printf("dis_av_fp_arith(PPC32)(opc2=0x%x)\n",opc2); + vex_printf("dis_av_fp_arith(ppc)(opc2=0x%x)\n",opc2); return False; } return True; @@ -8168,23 +8090,26 @@ static Bool dis_av_fp_cmp ( UInt theInstr ) assign( vB, getVReg(vB_addr)); if (opc1 != 0x4) { - vex_printf("dis_av_fp_cmp(PPC32)(instr)\n"); + vex_printf("dis_av_fp_cmp(ppc)(instr)\n"); return False; } switch (opc2) { case 0x0C6: // vcmpeqfp (Compare Equal-to FP, AV p159) - DIP("vcmpeqfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpeqfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpEQ32Fx4, mkexpr(vA), mkexpr(vB)) ); break; - case 0x1C6: // vcmpgefp (Compare Greater-than-or-Equal-to FP, AV p163) - DIP("vcmpgefp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + case 0x1C6: // vcmpgefp (Compare Greater-than-or-Equal-to, AV p163) + DIP("vcmpgefp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpGE32Fx4, mkexpr(vA), mkexpr(vB)) ); break; case 0x2C6: // vcmpgtfp (Compare Greater-than FP, AV p164) - DIP("vcmpgtfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpgtfp%s v%d,v%d,v%d\n", (flag_rC ? 
".":""), + vD_addr, vA_addr, vB_addr); assign( vD, binop(Iop_CmpGT32Fx4, mkexpr(vA), mkexpr(vB)) ); break; @@ -8192,12 +8117,14 @@ static Bool dis_av_fp_cmp ( UInt theInstr ) IRTemp gt = newTemp(Ity_V128); IRTemp lt = newTemp(Ity_V128); IRTemp zeros = newTemp(Ity_V128); - DIP("vcmpbfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr); + DIP("vcmpbfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), + vD_addr, vA_addr, vB_addr); cmp_bounds = True; assign( zeros, unop(Iop_Dup32x4, mkU32(0)) ); /* Note: making use of fact that the ppc backend for compare insns - return zero'd lanes if either of the corresponding arg lanes is a nan. + return zero'd lanes if either of the corresponding arg lanes is + a nan. Perhaps better to have an irop Iop_isNan32Fx4, but then we'd need this for the other compares too (vcmpeqfp etc)... @@ -8207,7 +8134,8 @@ static Bool dis_av_fp_cmp ( UInt theInstr ) binop(Iop_CmpLE32Fx4, mkexpr(vA), mkexpr(vB))) ); assign( lt, unop(Iop_NotV128, binop(Iop_CmpGE32Fx4, mkexpr(vA), - binop(Iop_Sub32Fx4, mkexpr(zeros), mkexpr(vB)))) ); + binop(Iop_Sub32Fx4, mkexpr(zeros), + mkexpr(vB)))) ); // finally, just shift gt,lt to correct position assign( vD, binop(Iop_ShlN32x4, @@ -8221,7 +8149,7 @@ static Bool dis_av_fp_cmp ( UInt theInstr ) } default: - vex_printf("dis_av_fp_cmp(PPC32)(opc2)\n"); + vex_printf("dis_av_fp_cmp(ppc)(opc2)\n"); return False; } @@ -8257,10 +8185,11 @@ static Bool dis_av_fp_convert ( UInt theInstr ) scale = (float)( (unsigned int) 1<true, true->false */ -PPC32CondTest invertCondTest ( PPC32CondTest ct ) +PPCCondTest invertCondTest ( PPCCondTest ct ) { vassert(ct != Pct_ALWAYS); return (ct == Pct_TRUE) ? Pct_FALSE : Pct_TRUE; @@ -308,54 +316,54 @@ PPC32CondTest invertCondTest ( PPC32CondTest ct ) /* --------- PPCAMode: memory address expressions. 
--------- */ -PPC32AMode* PPC32AMode_IR ( Int idx, HReg base ) { - PPC32AMode* am = LibVEX_Alloc(sizeof(PPC32AMode)); +PPCAMode* PPCAMode_IR ( Int idx, HReg base ) { + PPCAMode* am = LibVEX_Alloc(sizeof(PPCAMode)); vassert(idx >= -0x8000 && idx < 0x8000); am->tag = Pam_IR; am->Pam.IR.base = base; am->Pam.IR.index = idx; return am; } -PPC32AMode* PPC32AMode_RR ( HReg idx, HReg base ) { - PPC32AMode* am = LibVEX_Alloc(sizeof(PPC32AMode)); +PPCAMode* PPCAMode_RR ( HReg idx, HReg base ) { + PPCAMode* am = LibVEX_Alloc(sizeof(PPCAMode)); am->tag = Pam_RR; am->Pam.RR.base = base; am->Pam.RR.index = idx; return am; } -PPC32AMode* dopyPPC32AMode ( PPC32AMode* am ) { +PPCAMode* dopyPPCAMode ( PPCAMode* am ) { switch (am->tag) { case Pam_IR: - return PPC32AMode_IR( am->Pam.IR.index, am->Pam.IR.base ); + return PPCAMode_IR( am->Pam.IR.index, am->Pam.IR.base ); case Pam_RR: - return PPC32AMode_RR( am->Pam.RR.index, am->Pam.RR.base ); + return PPCAMode_RR( am->Pam.RR.index, am->Pam.RR.base ); default: - vpanic("dopyPPC32AMode"); + vpanic("dopyPPCAMode"); } } -void ppPPC32AMode ( PPC32AMode* am ) { +void ppPPCAMode ( PPCAMode* am ) { switch (am->tag) { case Pam_IR: if (am->Pam.IR.index == 0) vex_printf("0("); else vex_printf("%d(", (Int)am->Pam.IR.index); - ppHRegPPC32(am->Pam.IR.base); + ppHRegPPC(am->Pam.IR.base); vex_printf(")"); return; case Pam_RR: - ppHRegPPC32(am->Pam.RR.base); + ppHRegPPC(am->Pam.RR.base); vex_printf(","); - ppHRegPPC32(am->Pam.RR.index); + ppHRegPPC(am->Pam.RR.index); return; default: - vpanic("ppPPC32AMode"); + vpanic("ppPPCAMode"); } } -static void addRegUsage_PPC32AMode ( HRegUsage* u, PPC32AMode* am ) { +static void addRegUsage_PPCAMode ( HRegUsage* u, PPCAMode* am ) { switch (am->tag) { case Pam_IR: addHRegUse(u, HRmRead, am->Pam.IR.base); @@ -365,11 +373,11 @@ static void addRegUsage_PPC32AMode ( HRegUsage* u, PPC32AMode* am ) { addHRegUse(u, HRmRead, am->Pam.RR.index); return; default: - vpanic("addRegUsage_PPC32AMode"); + 
vpanic("addRegUsage_PPCAMode"); } } -static void mapRegs_PPC32AMode ( HRegRemap* m, PPC32AMode* am ) { +static void mapRegs_PPCAMode ( HRegRemap* m, PPCAMode* am ) { switch (am->tag) { case Pam_IR: am->Pam.IR.base = lookupHRegRemap(m, am->Pam.IR.base); @@ -379,14 +387,14 @@ static void mapRegs_PPC32AMode ( HRegRemap* m, PPC32AMode* am ) { am->Pam.RR.index = lookupHRegRemap(m, am->Pam.RR.index); return; default: - vpanic("mapRegs_PPC32AMode"); + vpanic("mapRegs_PPCAMode"); } } /* --------- Operand, which can be a reg or a u16/s16. --------- */ -PPC32RH* PPC32RH_Imm ( Bool syned, UShort imm16 ) { - PPC32RH* op = LibVEX_Alloc(sizeof(PPC32RH)); +PPCRH* PPCRH_Imm ( Bool syned, UShort imm16 ) { + PPCRH* op = LibVEX_Alloc(sizeof(PPCRH)); op->tag = Prh_Imm; op->Prh.Imm.syned = syned; op->Prh.Imm.imm16 = imm16; @@ -397,14 +405,14 @@ PPC32RH* PPC32RH_Imm ( Bool syned, UShort imm16 ) { vassert(syned == True || syned == False); return op; } -PPC32RH* PPC32RH_Reg ( HReg reg ) { - PPC32RH* op = LibVEX_Alloc(sizeof(PPC32RH)); +PPCRH* PPCRH_Reg ( HReg reg ) { + PPCRH* op = LibVEX_Alloc(sizeof(PPCRH)); op->tag = Prh_Reg; op->Prh.Reg.reg = reg; return op; } -void ppPPC32RH ( PPC32RH* op ) { +void ppPPCRH ( PPCRH* op ) { switch (op->tag) { case Prh_Imm: if (op->Prh.Imm.syned) @@ -413,17 +421,17 @@ void ppPPC32RH ( PPC32RH* op ) { vex_printf("%u", (UInt)(UShort)op->Prh.Imm.imm16); return; case Prh_Reg: - ppHRegPPC32(op->Prh.Reg.reg); + ppHRegPPC(op->Prh.Reg.reg); return; default: - vpanic("ppPPC32RH"); + vpanic("ppPPCRH"); } } -/* An PPC32RH can only be used in a "read" context (what would it mean +/* An PPCRH can only be used in a "read" context (what would it mean to write or modify a literal?) and so we enumerate its registers accordingly. 
*/ -static void addRegUsage_PPC32RH ( HRegUsage* u, PPC32RH* op ) { +static void addRegUsage_PPCRH ( HRegUsage* u, PPCRH* op ) { switch (op->tag) { case Prh_Imm: return; @@ -431,11 +439,11 @@ static void addRegUsage_PPC32RH ( HRegUsage* u, PPC32RH* op ) { addHRegUse(u, HRmRead, op->Prh.Reg.reg); return; default: - vpanic("addRegUsage_PPC32RH"); + vpanic("addRegUsage_PPCRH"); } } -static void mapRegs_PPC32RH ( HRegRemap* m, PPC32RH* op ) { +static void mapRegs_PPCRH ( HRegRemap* m, PPCRH* op ) { switch (op->tag) { case Prh_Imm: return; @@ -443,43 +451,43 @@ static void mapRegs_PPC32RH ( HRegRemap* m, PPC32RH* op ) { op->Prh.Reg.reg = lookupHRegRemap(m, op->Prh.Reg.reg); return; default: - vpanic("mapRegs_PPC32RH"); + vpanic("mapRegs_PPCRH"); } } /* --------- Operand, which can be a reg or a u32/64. --------- */ -PPC32RI* PPC32RI_Imm ( ULong imm64 ) { - PPC32RI* op = LibVEX_Alloc(sizeof(PPC32RI)); +PPCRI* PPCRI_Imm ( ULong imm64 ) { + PPCRI* op = LibVEX_Alloc(sizeof(PPCRI)); op->tag = Pri_Imm; op->Pri.Imm = imm64; return op; } -PPC32RI* PPC32RI_Reg ( HReg reg ) { - PPC32RI* op = LibVEX_Alloc(sizeof(PPC32RI)); +PPCRI* PPCRI_Reg ( HReg reg ) { + PPCRI* op = LibVEX_Alloc(sizeof(PPCRI)); op->tag = Pri_Reg; op->Pri.Reg = reg; return op; } -void ppPPC32RI ( PPC32RI* dst ) { +void ppPPCRI ( PPCRI* dst ) { switch (dst->tag) { case Pri_Imm: vex_printf("0x%llx", dst->Pri.Imm); break; case Pri_Reg: - ppHRegPPC32(dst->Pri.Reg); + ppHRegPPC(dst->Pri.Reg); break; default: - vpanic("ppPPC32RI"); + vpanic("ppPPCRI"); } } -/* An PPC32RI can only be used in a "read" context (what would it +/* An PPCRI can only be used in a "read" context (what would it mean to write or modify a literal?) and so we enumerate its registers accordingly. 
*/ -static void addRegUsage_PPC32RI ( HRegUsage* u, PPC32RI* dst ) { +static void addRegUsage_PPCRI ( HRegUsage* u, PPCRI* dst ) { switch (dst->tag) { case Pri_Imm: return; @@ -487,11 +495,11 @@ static void addRegUsage_PPC32RI ( HRegUsage* u, PPC32RI* dst ) { addHRegUse(u, HRmRead, dst->Pri.Reg); return; default: - vpanic("addRegUsage_PPC32RI"); + vpanic("addRegUsage_PPCRI"); } } -static void mapRegs_PPC32RI ( HRegRemap* m, PPC32RI* dst ) { +static void mapRegs_PPCRI ( HRegRemap* m, PPCRI* dst ) { switch (dst->tag) { case Pri_Imm: return; @@ -499,45 +507,45 @@ static void mapRegs_PPC32RI ( HRegRemap* m, PPC32RI* dst ) { dst->Pri.Reg = lookupHRegRemap(m, dst->Pri.Reg); return; default: - vpanic("mapRegs_PPC32RI"); + vpanic("mapRegs_PPCRI"); } } /* --------- Operand, which can be a vector reg or a simm5. --------- */ -PPC32VI5s* PPC32VI5s_Imm ( Char simm5 ) { - PPC32VI5s* op = LibVEX_Alloc(sizeof(PPC32VI5s)); +PPCVI5s* PPCVI5s_Imm ( Char simm5 ) { + PPCVI5s* op = LibVEX_Alloc(sizeof(PPCVI5s)); op->tag = Pvi_Imm; op->Pvi.Imm5s = simm5; vassert(simm5 >= -16 && simm5 <= 15); return op; } -PPC32VI5s* PPC32VI5s_Reg ( HReg reg ) { - PPC32VI5s* op = LibVEX_Alloc(sizeof(PPC32VI5s)); - op->tag = Pvi_Reg; - op->Pvi.Reg = reg; +PPCVI5s* PPCVI5s_Reg ( HReg reg ) { + PPCVI5s* op = LibVEX_Alloc(sizeof(PPCVI5s)); + op->tag = Pvi_Reg; + op->Pvi.Reg = reg; vassert(hregClass(reg) == HRcVec128); return op; } -void ppPPC32VI5s ( PPC32VI5s* src ) { +void ppPPCVI5s ( PPCVI5s* src ) { switch (src->tag) { case Pvi_Imm: vex_printf("%d", (Int)src->Pvi.Imm5s); break; case Pvi_Reg: - ppHRegPPC32(src->Pvi.Reg); + ppHRegPPC(src->Pvi.Reg); break; default: - vpanic("ppPPC32VI5s"); + vpanic("ppPPCVI5s"); } } -/* An PPC32VI5s can only be used in a "read" context (what would it +/* An PPCVI5s can only be used in a "read" context (what would it mean to write or modify a literal?) and so we enumerate its registers accordingly. 
*/ -static void addRegUsage_PPC32VI5s ( HRegUsage* u, PPC32VI5s* dst ) { +static void addRegUsage_PPCVI5s ( HRegUsage* u, PPCVI5s* dst ) { switch (dst->tag) { case Pvi_Imm: return; @@ -545,11 +553,11 @@ static void addRegUsage_PPC32VI5s ( HRegUsage* u, PPC32VI5s* dst ) { addHRegUse(u, HRmRead, dst->Pvi.Reg); return; default: - vpanic("addRegUsage_PPC32VI5s"); + vpanic("addRegUsage_PPCVI5s"); } } -static void mapRegs_PPC32VI5s ( HRegRemap* m, PPC32VI5s* dst ) { +static void mapRegs_PPCVI5s ( HRegRemap* m, PPCVI5s* dst ) { switch (dst->tag) { case Pvi_Imm: return; @@ -557,35 +565,35 @@ static void mapRegs_PPC32VI5s ( HRegRemap* m, PPC32VI5s* dst ) { dst->Pvi.Reg = lookupHRegRemap(m, dst->Pvi.Reg); return; default: - vpanic("mapRegs_PPC32VI5s"); + vpanic("mapRegs_PPCVI5s"); } } /* --------- Instructions. --------- */ -HChar* showPPC32UnaryOp ( PPC32UnaryOp op ) { +HChar* showPPCUnaryOp ( PPCUnaryOp op ) { switch (op) { - case Pun_NOT: return "not"; - case Pun_NEG: return "neg"; + case Pun_NOT: return "not"; + case Pun_NEG: return "neg"; case Pun_CLZ32: return "cntlzw"; case Pun_CLZ64: return "cntlzd"; - default: vpanic("showPPC32UnaryOp"); + default: vpanic("showPPCUnaryOp"); } } -HChar* showPPC32AluOp ( PPC32AluOp op, Bool immR ) { +HChar* showPPCAluOp ( PPCAluOp op, Bool immR ) { switch (op) { case Palu_ADD: return immR ? "addi" : "add"; case Palu_SUB: return immR ? "subi" : "sub"; case Palu_AND: return immR ? "andi." : "and"; case Palu_OR: return immR ? "ori" : "or"; case Palu_XOR: return immR ? "xori" : "xor"; - default: vpanic("showPPC32AluOp"); + default: vpanic("showPPCAluOp"); } } -HChar* showPPC32ShftOp ( PPC32ShftOp op, Bool immR, Bool sz32 ) { +HChar* showPPCShftOp ( PPCShftOp op, Bool immR, Bool sz32 ) { switch (op) { case Pshft_SHL: return sz32 ? (immR ? "slwi" : "slw") : (immR ? "sldi" : "sld"); @@ -593,11 +601,11 @@ HChar* showPPC32ShftOp ( PPC32ShftOp op, Bool immR, Bool sz32 ) { (immR ? "srdi" : "srd"); case Pshft_SAR: return sz32 ? (immR ? 
"srawi" : "sraw") : (immR ? "sradi" : "srad"); - default: vpanic("showPPC32ShftOp"); + default: vpanic("showPPCShftOp"); } } -HChar* showPPC32FpOp ( PPC32FpOp op ) { +HChar* showPPCFpOp ( PPCFpOp op ) { switch (op) { case Pfp_ADD: return "fadd"; case Pfp_SUB: return "fsub"; @@ -607,11 +615,11 @@ HChar* showPPC32FpOp ( PPC32FpOp op ) { case Pfp_ABS: return "fabs"; case Pfp_NEG: return "fneg"; case Pfp_MOV: return "fmr"; - default: vpanic("showPPC32FpOp"); + default: vpanic("showPPCFpOp"); } } -HChar* showPPC32AvOp ( PPC32AvOp op ) { +HChar* showPPCAvOp ( PPCAvOp op ) { switch (op) { /* Unary */ @@ -674,11 +682,11 @@ HChar* showPPC32AvOp ( PPC32AvOp op ) { case Pav_MRGHI: return "vmrgh"; // b,h,w case Pav_MRGLO: return "vmrgl"; // b,h,w - default: vpanic("showPPC32AvOp"); + default: vpanic("showPPCAvOp"); } } -HChar* showPPC32AvFpOp ( PPC32AvFpOp op ) { +HChar* showPPCAvFpOp ( PPCAvFpOp op ) { switch (op) { /* Floating Point Binary */ case Pavfp_ADDF: return "vaddfp"; @@ -702,13 +710,13 @@ HChar* showPPC32AvFpOp ( PPC32AvFpOp op ) { case Pavfp_ROUNDN: return "vrfin"; case Pavfp_ROUNDZ: return "vrfiz"; - default: vpanic("showPPC32AvFpOp"); + default: vpanic("showPPCAvFpOp"); } } -PPC32Instr* PPC32Instr_LI ( HReg dst, ULong imm64, Bool mode64 ) +PPCInstr* PPCInstr_LI ( HReg dst, ULong imm64, Bool mode64 ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_LI; i->Pin.LI.dst = dst; i->Pin.LI.imm64 = imm64; @@ -716,9 +724,9 @@ PPC32Instr* PPC32Instr_LI ( HReg dst, ULong imm64, Bool mode64 ) vassert( (Long)imm64 == (Long)(Int)(UInt)imm64 ); return i; } -PPC32Instr* PPC32Instr_Alu ( PPC32AluOp op, HReg dst, - HReg srcL, PPC32RH* srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_Alu ( PPCAluOp op, HReg dst, + HReg srcL, PPCRH* srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_Alu; i->Pin.Alu.op = op; i->Pin.Alu.dst = dst; @@ -726,9 +734,9 @@ PPC32Instr* 
PPC32Instr_Alu ( PPC32AluOp op, HReg dst, i->Pin.Alu.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_Shft ( PPC32ShftOp op, Bool sz32, - HReg dst, HReg srcL, PPC32RH* srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_Shft ( PPCShftOp op, Bool sz32, + HReg dst, HReg srcL, PPCRH* srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_Shft; i->Pin.Shft.op = op; i->Pin.Shft.sz32 = sz32; @@ -737,20 +745,20 @@ PPC32Instr* PPC32Instr_Shft ( PPC32ShftOp op, Bool sz32, i->Pin.Shft.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_AddSubC32 ( Bool isAdd, Bool setC, - HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_AddSubC32; - i->Pin.AddSubC32.isAdd = isAdd; - i->Pin.AddSubC32.setC = setC; - i->Pin.AddSubC32.dst = dst; - i->Pin.AddSubC32.srcL = srcL; - i->Pin.AddSubC32.srcR = srcR; +PPCInstr* PPCInstr_AddSubC ( Bool isAdd, Bool setC, + HReg dst, HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_AddSubC; + i->Pin.AddSubC.isAdd = isAdd; + i->Pin.AddSubC.setC = setC; + i->Pin.AddSubC.dst = dst; + i->Pin.AddSubC.srcL = srcL; + i->Pin.AddSubC.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_Cmp ( Bool syned, Bool sz32, - UInt crfD, HReg srcL, PPC32RH* srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_Cmp ( Bool syned, Bool sz32, + UInt crfD, HReg srcL, PPCRH* srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_Cmp; i->Pin.Cmp.syned = syned; i->Pin.Cmp.sz32 = sz32; @@ -759,17 +767,17 @@ PPC32Instr* PPC32Instr_Cmp ( Bool syned, Bool sz32, i->Pin.Cmp.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_Unary ( PPC32UnaryOp op, HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_Unary; - i->Pin.Unary32.op = op; - i->Pin.Unary32.dst = dst; - i->Pin.Unary32.src = src; +PPCInstr* PPCInstr_Unary ( PPCUnaryOp op, HReg dst, HReg src ) { + PPCInstr* i = 
LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_Unary; + i->Pin.Unary.op = op; + i->Pin.Unary.dst = dst; + i->Pin.Unary.src = src; return i; } -PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi, Bool sz32, - HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_MulL ( Bool syned, Bool hi, Bool sz32, + HReg dst, HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_MulL; i->Pin.MulL.syned = syned; i->Pin.MulL.hi = hi; @@ -782,21 +790,21 @@ PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi, Bool sz32, if (!hi) vassert(!syned); return i; } -PPC32Instr* PPC32Instr_Div ( Bool syned, Bool sz32, - HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_Div; - i->Pin.Div.syned = syned; - i->Pin.Div.sz32 = sz32; - i->Pin.Div.dst = dst; - i->Pin.Div.srcL = srcL; - i->Pin.Div.srcR = srcR; +PPCInstr* PPCInstr_Div ( Bool syned, Bool sz32, + HReg dst, HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_Div; + i->Pin.Div.syned = syned; + i->Pin.Div.sz32 = sz32; + i->Pin.Div.dst = dst; + i->Pin.Div.srcL = srcL; + i->Pin.Div.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_Call ( PPC32CondCode cond, - Addr64 target, UInt argiregs ) { +PPCInstr* PPCInstr_Call ( PPCCondCode cond, + Addr64 target, UInt argiregs ) { UInt mask; - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_Call; i->Pin.Call.cond = cond; i->Pin.Call.target = target; @@ -806,18 +814,18 @@ PPC32Instr* PPC32Instr_Call ( PPC32CondCode cond, vassert(0 == (argiregs & ~mask)); return i; } -PPC32Instr* PPC32Instr_Goto ( IRJumpKind jk, - PPC32CondCode cond, PPC32RI* dst ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_Goto ( IRJumpKind jk, + PPCCondCode cond, PPCRI* dst ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_Goto; i->Pin.Goto.cond = cond; 
i->Pin.Goto.dst = dst; i->Pin.Goto.jk = jk; return i; } -PPC32Instr* PPC32Instr_CMov ( PPC32CondCode cond, - HReg dst, PPC32RI* src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_CMov ( PPCCondCode cond, + HReg dst, PPCRI* src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_CMov; i->Pin.CMov.cond = cond; i->Pin.CMov.src = src; @@ -825,9 +833,9 @@ PPC32Instr* PPC32Instr_CMov ( PPC32CondCode cond, vassert(cond.test != Pct_ALWAYS); return i; } -PPC32Instr* PPC32Instr_Load ( UChar sz, Bool syned, - HReg dst, PPC32AMode* src, Bool mode64 ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_Load ( UChar sz, Bool syned, + HReg dst, PPCAMode* src, Bool mode64 ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_Load; i->Pin.Load.sz = sz; i->Pin.Load.syned = syned; @@ -837,9 +845,9 @@ PPC32Instr* PPC32Instr_Load ( UChar sz, Bool syned, if (sz == 8) vassert(mode64); return i; } -PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst, HReg src, - Bool mode64 ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst, HReg src, + Bool mode64 ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_Store; i->Pin.Store.sz = sz; i->Pin.Store.src = src; @@ -848,37 +856,38 @@ PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst, HReg src, if (sz == 8) vassert(mode64); return i; } -PPC32Instr* PPC32Instr_Set32 ( PPC32CondCode cond, HReg dst ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_Set32; - i->Pin.Set32.cond = cond; - i->Pin.Set32.dst = dst; +PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_Set; + i->Pin.Set.cond = cond; + i->Pin.Set.dst = dst; return i; } -PPC32Instr* PPC32Instr_MfCR ( HReg dst ) +PPCInstr* PPCInstr_MfCR ( HReg dst ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_MfCR; - i->Pin.MfCR.dst = dst; + PPCInstr* 
i = LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_MfCR; + i->Pin.MfCR.dst = dst; return i; } -PPC32Instr* PPC32Instr_MFence ( void ) +PPCInstr* PPCInstr_MFence ( void ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_MFence; + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_MFence; return i; } -PPC32Instr* PPC32Instr_FpUnary ( PPC32FpOp op, HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpUnary ( PPCFpOp op, HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpUnary; i->Pin.FpUnary.op = op; i->Pin.FpUnary.dst = dst; i->Pin.FpUnary.src = src; return i; } -PPC32Instr* PPC32Instr_FpBinary ( PPC32FpOp op, HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpBinary ( PPCFpOp op, HReg dst, + HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpBinary; i->Pin.FpBinary.op = op; i->Pin.FpBinary.dst = dst; @@ -886,8 +895,9 @@ PPC32Instr* PPC32Instr_FpBinary ( PPC32FpOp op, HReg dst, HReg srcL, HReg srcR ) i->Pin.FpBinary.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, PPC32AMode* addr ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpLdSt ( Bool isLoad, UChar sz, + HReg reg, PPCAMode* addr ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpLdSt; i->Pin.FpLdSt.isLoad = isLoad; i->Pin.FpLdSt.sz = sz; @@ -896,36 +906,36 @@ PPC32Instr* PPC32Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, PPC32AMode* add vassert(sz == 4 || sz == 8); return i; } -PPC32Instr* PPC32Instr_FpF64toF32 ( HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpF64toF32 ( HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpF64toF32; i->Pin.FpF64toF32.dst = dst; i->Pin.FpF64toF32.src = src; return i; } -PPC32Instr* PPC32Instr_FpF64toI32 ( HReg 
dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpF64toI32 ( HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpF64toI32; i->Pin.FpF64toI32.dst = dst; i->Pin.FpF64toI32.src = src; return i; } -PPC32Instr* PPC32Instr_FpF64toI64 ( HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpF64toI64 ( HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpF64toI64; i->Pin.FpF64toI64.dst = dst; i->Pin.FpF64toI64.src = src; return i; } -PPC32Instr* PPC32Instr_FpI64toF64 ( HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpI64toF64 ( HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpI64toF64; i->Pin.FpI64toF64.dst = dst; i->Pin.FpI64toF64.src = src; return i; } -PPC32Instr* PPC32Instr_FpCMov ( PPC32CondCode cond, HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpCMov ( PPCCondCode cond, HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpCMov; i->Pin.FpCMov.cond = cond; i->Pin.FpCMov.dst = dst; @@ -933,14 +943,14 @@ PPC32Instr* PPC32Instr_FpCMov ( PPC32CondCode cond, HReg dst, HReg src ) { vassert(cond.test != Pct_ALWAYS); return i; } -PPC32Instr* PPC32Instr_FpLdFPSCR ( HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpLdFPSCR ( HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpLdFPSCR; i->Pin.FpLdFPSCR.src = src; return i; } -PPC32Instr* PPC32Instr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_FpCmp; i->Pin.FpCmp.dst = dst; i->Pin.FpCmp.srcL = srcL; @@ -949,8 +959,8 @@ PPC32Instr* PPC32Instr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) { } 
/* Read/Write Link Register */ -PPC32Instr* PPC32Instr_RdWrLR ( Bool wrLR, HReg gpr ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_RdWrLR ( Bool wrLR, HReg gpr ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_RdWrLR; i->Pin.RdWrLR.wrLR = wrLR; i->Pin.RdWrLR.gpr = gpr; @@ -958,8 +968,9 @@ PPC32Instr* PPC32Instr_RdWrLR ( Bool wrLR, HReg gpr ) { } /* AltiVec */ -PPC32Instr* PPC32Instr_AvLdSt ( Bool isLoad, UChar sz, HReg reg, PPC32AMode* addr ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvLdSt ( Bool isLoad, UChar sz, + HReg reg, PPCAMode* addr ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvLdSt; i->Pin.AvLdSt.isLoad = isLoad; i->Pin.AvLdSt.sz = sz; @@ -967,16 +978,17 @@ PPC32Instr* PPC32Instr_AvLdSt ( Bool isLoad, UChar sz, HReg reg, PPC32AMode* add i->Pin.AvLdSt.addr = addr; return i; } -PPC32Instr* PPC32Instr_AvUnary ( PPC32AvOp op, HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvUnary; i->Pin.AvUnary.op = op; i->Pin.AvUnary.dst = dst; i->Pin.AvUnary.src = src; return i; } -PPC32Instr* PPC32Instr_AvBinary ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst, + HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvBinary; i->Pin.AvBinary.op = op; i->Pin.AvBinary.dst = dst; @@ -984,35 +996,39 @@ PPC32Instr* PPC32Instr_AvBinary ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) i->Pin.AvBinary.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_AvBin8x16 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_AvBin8x16; +PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst, + HReg srcL, HReg srcR ) { + PPCInstr* i = 
LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_AvBin8x16; i->Pin.AvBin8x16.op = op; i->Pin.AvBin8x16.dst = dst; i->Pin.AvBin8x16.srcL = srcL; i->Pin.AvBin8x16.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_AvBin16x8 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_AvBin16x8; +PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst, + HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_AvBin16x8; i->Pin.AvBin16x8.op = op; i->Pin.AvBin16x8.dst = dst; i->Pin.AvBin16x8.srcL = srcL; i->Pin.AvBin16x8.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_AvBin32x4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); - i->tag = Pin_AvBin32x4; +PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst, + HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + i->tag = Pin_AvBin32x4; i->Pin.AvBin32x4.op = op; i->Pin.AvBin32x4.dst = dst; i->Pin.AvBin32x4.srcL = srcL; i->Pin.AvBin32x4.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_AvBin32Fx4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvOp op, HReg dst, + HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvBin32Fx4; i->Pin.AvBin32Fx4.op = op; i->Pin.AvBin32Fx4.dst = dst; @@ -1020,16 +1036,16 @@ PPC32Instr* PPC32Instr_AvBin32Fx4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR i->Pin.AvBin32Fx4.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_AvUn32Fx4 ( PPC32AvOp op, HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvUn32Fx4 ( PPCAvOp op, HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvUn32Fx4; i->Pin.AvUn32Fx4.op = op; i->Pin.AvUn32Fx4.dst = dst; i->Pin.AvUn32Fx4.src = src; return i; } -PPC32Instr* PPC32Instr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg 
ctl ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvPerm; i->Pin.AvPerm.dst = dst; i->Pin.AvPerm.srcL = srcL; @@ -1037,8 +1053,8 @@ PPC32Instr* PPC32Instr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ) { i->Pin.AvPerm.ctl = ctl; return i; } -PPC32Instr* PPC32Instr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvSel; i->Pin.AvSel.ctl = ctl; i->Pin.AvSel.dst = dst; @@ -1046,8 +1062,9 @@ PPC32Instr* PPC32Instr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) { i->Pin.AvSel.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_AvShlDbl ( UChar shift, HReg dst, HReg srcL, HReg srcR ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvShlDbl ( UChar shift, HReg dst, + HReg srcL, HReg srcR ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvShlDbl; i->Pin.AvShlDbl.shift = shift; i->Pin.AvShlDbl.dst = dst; @@ -1055,16 +1072,16 @@ PPC32Instr* PPC32Instr_AvShlDbl ( UChar shift, HReg dst, HReg srcL, HReg srcR ) i->Pin.AvShlDbl.srcR = srcR; return i; } -PPC32Instr* PPC32Instr_AvSplat ( UChar sz, HReg dst, PPC32VI5s* src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvSplat ( UChar sz, HReg dst, PPCVI5s* src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvSplat; i->Pin.AvSplat.sz = sz; i->Pin.AvSplat.dst = dst; i->Pin.AvSplat.src = src; return i; } -PPC32Instr* PPC32Instr_AvCMov ( PPC32CondCode cond, HReg dst, HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvCMov ( PPCCondCode cond, HReg dst, HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvCMov; i->Pin.AvCMov.cond = cond; i->Pin.AvCMov.dst = 
dst; @@ -1072,8 +1089,8 @@ PPC32Instr* PPC32Instr_AvCMov ( PPC32CondCode cond, HReg dst, HReg src ) { vassert(cond.test != Pct_ALWAYS); return i; } -PPC32Instr* PPC32Instr_AvLdVSCR ( HReg src ) { - PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr)); +PPCInstr* PPCInstr_AvLdVSCR ( HReg src ) { + PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); i->tag = Pin_AvLdVSCR; i->Pin.AvLdVSCR.src = src; return i; @@ -1084,7 +1101,7 @@ PPC32Instr* PPC32Instr_AvLdVSCR ( HReg src ) { static void ppLoadImm ( HReg dst, ULong imm, Bool mode64 ) { #if 1 vex_printf("li_word "); - ppHRegPPC32(dst); + ppHRegPPC(dst); if (!mode64) { vassert(imm == (ULong)(Long)(Int)(UInt)imm); vex_printf(",0x%08x", (UInt)imm); @@ -1092,23 +1109,21 @@ static void ppLoadImm ( HReg dst, ULong imm, Bool mode64 ) { vex_printf(",0x%016llx", imm); } #else -// if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) { - if (imm == (ULong)(Long)(Int)(Short)(UShort)imm) { + if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) { // sign-extendable from 16 bits vex_printf("li "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(",0x%x", (UInt)imm); } else { -// if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) { - if (imm == (ULong)(Long)(Int)(UInt)imm) { + if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) { // sign-extendable from 32 bits vex_printf("lis "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(",0x%x ; ", (UInt)(imm >> 16)); vex_printf("ori "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(","); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(",0x%x", (UInt)(imm & 0xFFFF)); } else { // full 64bit immediate load: 5 (five!) insns. 
@@ -1116,31 +1131,31 @@ static void ppLoadImm ( HReg dst, ULong imm, Bool mode64 ) { // load high word vex_printf("lis "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(",0x%x ; ", (UInt)(imm >> 48) & 0xFFFF); vex_printf("ori "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(","); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(",0x%x ; ", (UInt)(imm >> 32) & 0xFFFF); // shift r_dst low word to high word => rldicr vex_printf("rldicr "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(","); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(",32,31 ; "); // load low word vex_printf("oris "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(","); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(",0x%x ; ", (UInt)(imm >> 16) & 0xFFFF); vex_printf("ori "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(","); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(",0x%x", (UInt)(imm >> 0) & 0xFFFF); } } @@ -1150,29 +1165,29 @@ static void ppLoadImm ( HReg dst, ULong imm, Bool mode64 ) { static void ppMovReg ( HReg dst, HReg src ) { if (hregNumber(dst) != hregNumber(src)) { vex_printf("mr "); - ppHRegPPC32(dst); + ppHRegPPC(dst); vex_printf(","); - ppHRegPPC32(src); + ppHRegPPC(src); } } -void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) +void ppPPCInstr ( PPCInstr* i, Bool mode64 ) { switch (i->tag) { case Pin_LI: ppLoadImm(i->Pin.LI.dst, i->Pin.LI.imm64, mode64); break; case Pin_Alu: { - HReg r_srcL = i->Pin.Alu.srcL; - PPC32RH* rh_srcR = i->Pin.Alu.srcR; + HReg r_srcL = i->Pin.Alu.srcL; + PPCRH* rh_srcR = i->Pin.Alu.srcR; /* special-case "mr" */ if (i->Pin.Alu.op == Palu_OR && // or Rd,Rs,Rs == mr Rd,Rs rh_srcR->tag == Prh_Reg && rh_srcR->Prh.Reg.reg == r_srcL) { vex_printf("mr "); - ppHRegPPC32(i->Pin.Alu.dst); + ppHRegPPC(i->Pin.Alu.dst); vex_printf(","); - ppHRegPPC32(r_srcL); + ppHRegPPC(r_srcL); return; } /* special-case "li" */ @@ -1180,43 +1195,43 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) rh_srcR->tag == Prh_Imm && hregNumber(r_srcL) == 0) { 
vex_printf("li "); - ppHRegPPC32(i->Pin.Alu.dst); + ppHRegPPC(i->Pin.Alu.dst); vex_printf(","); - ppPPC32RH(rh_srcR); + ppPPCRH(rh_srcR); return; } /* generic */ - vex_printf("%s ", showPPC32AluOp(i->Pin.Alu.op, - toBool(rh_srcR->tag == Prh_Imm))); - ppHRegPPC32(i->Pin.Alu.dst); + vex_printf("%s ", showPPCAluOp(i->Pin.Alu.op, + toBool(rh_srcR->tag == Prh_Imm))); + ppHRegPPC(i->Pin.Alu.dst); vex_printf(","); - ppHRegPPC32(r_srcL); + ppHRegPPC(r_srcL); vex_printf(","); - ppPPC32RH(rh_srcR); + ppPPCRH(rh_srcR); return; } case Pin_Shft: { - HReg r_srcL = i->Pin.Shft.srcL; - PPC32RH* rh_srcR = i->Pin.Shft.srcR; - vex_printf("%s ", showPPC32ShftOp(i->Pin.Shft.op, - toBool(rh_srcR->tag == Prh_Imm), - i->Pin.Shft.sz32)); - ppHRegPPC32(i->Pin.Shft.dst); + HReg r_srcL = i->Pin.Shft.srcL; + PPCRH* rh_srcR = i->Pin.Shft.srcR; + vex_printf("%s ", showPPCShftOp(i->Pin.Shft.op, + toBool(rh_srcR->tag == Prh_Imm), + i->Pin.Shft.sz32)); + ppHRegPPC(i->Pin.Shft.dst); vex_printf(","); - ppHRegPPC32(r_srcL); + ppHRegPPC(r_srcL); vex_printf(","); - ppPPC32RH(rh_srcR); + ppPPCRH(rh_srcR); return; } - case Pin_AddSubC32: + case Pin_AddSubC: vex_printf("%s%s ", - i->Pin.AddSubC32.isAdd ? "add" : "sub", - i->Pin.AddSubC32.setC ? "c" : "e"); - ppHRegPPC32(i->Pin.AddSubC32.dst); + i->Pin.AddSubC.isAdd ? "add" : "sub", + i->Pin.AddSubC.setC ? "c" : "e"); + ppHRegPPC(i->Pin.AddSubC.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AddSubC32.srcL); + ppHRegPPC(i->Pin.AddSubC.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.AddSubC32.srcR); + ppHRegPPC(i->Pin.AddSubC.srcR); return; case Pin_Cmp: vex_printf("%s%c%s %%cr%u,", @@ -1224,42 +1239,42 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) i->Pin.Cmp.sz32 ? 'w' : 'd', i->Pin.Cmp.srcR->tag == Prh_Imm ? 
"i" : "", i->Pin.Cmp.crfD); - ppHRegPPC32(i->Pin.Cmp.srcL); + ppHRegPPC(i->Pin.Cmp.srcL); vex_printf(","); - ppPPC32RH(i->Pin.Cmp.srcR); + ppPPCRH(i->Pin.Cmp.srcR); return; case Pin_Unary: - vex_printf("%s ", showPPC32UnaryOp(i->Pin.Unary32.op)); - ppHRegPPC32(i->Pin.Unary32.dst); + vex_printf("%s ", showPPCUnaryOp(i->Pin.Unary.op)); + ppHRegPPC(i->Pin.Unary.dst); vex_printf(","); - ppHRegPPC32(i->Pin.Unary32.src); + ppHRegPPC(i->Pin.Unary.src); return; case Pin_MulL: vex_printf("mul%c%c%s ", i->Pin.MulL.hi ? 'h' : 'l', i->Pin.MulL.sz32 ? 'w' : 'd', i->Pin.MulL.hi ? (i->Pin.MulL.syned ? "s" : "u") : ""); - ppHRegPPC32(i->Pin.MulL.dst); + ppHRegPPC(i->Pin.MulL.dst); vex_printf(","); - ppHRegPPC32(i->Pin.MulL.srcL); + ppHRegPPC(i->Pin.MulL.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.MulL.srcR); + ppHRegPPC(i->Pin.MulL.srcR); return; case Pin_Div: vex_printf("div%c%s ", i->Pin.Div.sz32 ? 'w' : 'd', i->Pin.Div.syned ? "" : "u"); - ppHRegPPC32(i->Pin.Div.dst); + ppHRegPPC(i->Pin.Div.dst); vex_printf(","); - ppHRegPPC32(i->Pin.Div.srcL); + ppHRegPPC(i->Pin.Div.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.Div.srcR); + ppHRegPPC(i->Pin.Div.srcR); return; case Pin_Call: { Int n; vex_printf("call: "); if (i->Pin.Call.cond.test != Pct_ALWAYS) { - vex_printf("if (%s) ", showPPC32CondCode(i->Pin.Call.cond)); + vex_printf("if (%s) ", showPPCCondCode(i->Pin.Call.cond)); } vex_printf("{ "); ppLoadImm(hregPPC_GPR10(mode64), i->Pin.Call.target, mode64); @@ -1277,7 +1292,7 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) case Pin_Goto: vex_printf("goto: "); if (i->Pin.Goto.cond.test != Pct_ALWAYS) { - vex_printf("if (%s) ", showPPC32CondCode(i->Pin.Goto.cond)); + vex_printf("if (%s) ", showPPCCondCode(i->Pin.Goto.cond)); } vex_printf("{ "); if (i->Pin.Goto.jk != Ijk_Boring @@ -1288,20 +1303,21 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) vex_printf(" ; "); } if (i->Pin.Goto.dst->tag == Pri_Imm) { - ppLoadImm(hregPPC_GPR3(mode64), i->Pin.Goto.dst->Pri.Imm, mode64); + 
ppLoadImm(hregPPC_GPR3(mode64), i->Pin.Goto.dst->Pri.Imm, + mode64); } else { ppMovReg(hregPPC_GPR3(mode64), i->Pin.Goto.dst->Pri.Reg); } vex_printf(" ; blr }"); return; case Pin_CMov: - vex_printf("cmov (%s) ", showPPC32CondCode(i->Pin.CMov.cond)); - ppHRegPPC32(i->Pin.CMov.dst); + vex_printf("cmov (%s) ", showPPCCondCode(i->Pin.CMov.cond)); + ppHRegPPC(i->Pin.CMov.dst); vex_printf(","); - ppPPC32RI(i->Pin.CMov.src); + ppPPCRI(i->Pin.CMov.src); vex_printf(": "); if (i->Pin.CMov.cond.test != Pct_ALWAYS) { - vex_printf("if (%s) ", showPPC32CondCode(i->Pin.CMov.cond)); + vex_printf("if (%s) ", showPPCCondCode(i->Pin.CMov.cond)); } vex_printf("{ "); if (i->Pin.CMov.src->tag == Pri_Imm) { @@ -1317,9 +1333,9 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) UChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : 'd'; HChar* s_syned = i->Pin.Load.syned ? "a" : sz==8 ? "" : "z"; vex_printf("l%c%s%s ", c_sz, s_syned, idxd ? "x" : "" ); - ppHRegPPC32(i->Pin.Load.dst); + ppHRegPPC(i->Pin.Load.dst); vex_printf(","); - ppPPC32AMode(i->Pin.Load.src); + ppPPCAMode(i->Pin.Load.src); return; } case Pin_Store: { @@ -1327,28 +1343,28 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) Bool idxd = toBool(i->Pin.Store.dst->tag == Pam_RR); UChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : /*8*/ 'd'; vex_printf("st%c%s ", c_sz, idxd ? 
"x" : "" ); - ppHRegPPC32(i->Pin.Store.src); + ppHRegPPC(i->Pin.Store.src); vex_printf(","); - ppPPC32AMode(i->Pin.Store.dst); + ppPPCAMode(i->Pin.Store.dst); return; } - case Pin_Set32: { - PPC32CondCode cc = i->Pin.Set32.cond; - vex_printf("set32 (%s),", showPPC32CondCode(cc)); - ppHRegPPC32(i->Pin.Set32.dst); + case Pin_Set: { + PPCCondCode cc = i->Pin.Set.cond; + vex_printf("set (%s),", showPPCCondCode(cc)); + ppHRegPPC(i->Pin.Set.dst); if (cc.test == Pct_ALWAYS) { vex_printf(": { li "); - ppHRegPPC32(i->Pin.Set32.dst); + ppHRegPPC(i->Pin.Set.dst); vex_printf(",1 }"); } else { vex_printf(": { mfcr r0 ; rlwinm "); - ppHRegPPC32(i->Pin.Set32.dst); + ppHRegPPC(i->Pin.Set.dst); vex_printf(",r0,%u,31,31", cc.flag+1); if (cc.test == Pct_FALSE) { vex_printf("; xori "); - ppHRegPPC32(i->Pin.Set32.dst); + ppHRegPPC(i->Pin.Set.dst); vex_printf(","); - ppHRegPPC32(i->Pin.Set32.dst); + ppHRegPPC(i->Pin.Set.dst); vex_printf(",1"); } vex_printf(" }"); @@ -1357,25 +1373,25 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) } case Pin_MfCR: vex_printf("mfcr "); - ppHRegPPC32(i->Pin.MfCR.dst); + ppHRegPPC(i->Pin.MfCR.dst); break; case Pin_MFence: vex_printf("mfence (=sync)"); return; case Pin_FpUnary: - vex_printf("%s ", showPPC32FpOp(i->Pin.FpUnary.op)); - ppHRegPPC32(i->Pin.FpUnary.dst); + vex_printf("%s ", showPPCFpOp(i->Pin.FpUnary.op)); + ppHRegPPC(i->Pin.FpUnary.dst); vex_printf(","); - ppHRegPPC32(i->Pin.FpUnary.src); + ppHRegPPC(i->Pin.FpUnary.src); return; case Pin_FpBinary: - vex_printf("%s ", showPPC32FpOp(i->Pin.FpBinary.op)); - ppHRegPPC32(i->Pin.FpBinary.dst); + vex_printf("%s ", showPPCFpOp(i->Pin.FpBinary.op)); + ppHRegPPC(i->Pin.FpBinary.dst); vex_printf(","); - ppHRegPPC32(i->Pin.FpBinary.srcL); + ppHRegPPC(i->Pin.FpBinary.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.FpBinary.srcR); + ppHRegPPC(i->Pin.FpBinary.srcR); return; case Pin_FpLdSt: { UChar sz = i->Pin.FpLdSt.sz; @@ -1384,89 +1400,89 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) 
vex_printf("lf%c%s ", (sz==4 ? 's' : 'd'), idxd ? "x" : "" ); - ppHRegPPC32(i->Pin.FpLdSt.reg); + ppHRegPPC(i->Pin.FpLdSt.reg); vex_printf(","); - ppPPC32AMode(i->Pin.FpLdSt.addr); + ppPPCAMode(i->Pin.FpLdSt.addr); } else { vex_printf("stf%c%s ", (sz==4 ? 's' : 'd'), idxd ? "x" : "" ); - ppHRegPPC32(i->Pin.FpLdSt.reg); + ppHRegPPC(i->Pin.FpLdSt.reg); vex_printf(","); - ppPPC32AMode(i->Pin.FpLdSt.addr); + ppPPCAMode(i->Pin.FpLdSt.addr); } return; } case Pin_FpF64toF32: vex_printf("frsp "); - ppHRegPPC32(i->Pin.FpF64toF32.dst); + ppHRegPPC(i->Pin.FpF64toF32.dst); vex_printf(","); - ppHRegPPC32(i->Pin.FpF64toF32.src); + ppHRegPPC(i->Pin.FpF64toF32.src); return; case Pin_FpF64toI32: vex_printf("fctiw %%fr7,"); - ppHRegPPC32(i->Pin.FpF64toI32.src); + ppHRegPPC(i->Pin.FpF64toI32.src); vex_printf("; stfiwx %%fr7,%%r0,%%r1"); vex_printf("; lwzx "); - ppHRegPPC32(i->Pin.FpF64toI32.dst); + ppHRegPPC(i->Pin.FpF64toI32.dst); vex_printf(",%%r0,%%r1"); return; case Pin_FpF64toI64: vex_printf("fctid %%fr7,"); - ppHRegPPC32(i->Pin.FpF64toI64.src); + ppHRegPPC(i->Pin.FpF64toI64.src); vex_printf("; stfdx %%fr7,%%r0,%%r1"); vex_printf("; ldx "); - ppHRegPPC32(i->Pin.FpF64toI64.dst); + ppHRegPPC(i->Pin.FpF64toI64.dst); vex_printf(",%%r0,%%r1"); return; case Pin_FpI64toF64: vex_printf("stdx "); - ppHRegPPC32(i->Pin.FpI64toF64.src); + ppHRegPPC(i->Pin.FpI64toF64.src); vex_printf(",%%r0,%%r1"); vex_printf("; lfdx %%fr7,%%r0,%%r1"); vex_printf("; fcfid "); - ppHRegPPC32(i->Pin.FpI64toF64.dst); + ppHRegPPC(i->Pin.FpI64toF64.dst); vex_printf(",%%r7"); return; case Pin_FpCMov: - vex_printf("fpcmov (%s) ", showPPC32CondCode(i->Pin.FpCMov.cond)); - ppHRegPPC32(i->Pin.FpCMov.dst); + vex_printf("fpcmov (%s) ", showPPCCondCode(i->Pin.FpCMov.cond)); + ppHRegPPC(i->Pin.FpCMov.dst); vex_printf(","); - ppHRegPPC32(i->Pin.FpCMov.src); + ppHRegPPC(i->Pin.FpCMov.src); vex_printf(": "); vex_printf("if (fr_dst != fr_src) { "); if (i->Pin.FpCMov.cond.test != Pct_ALWAYS) { - vex_printf("if (%s) { ", 
showPPC32CondCode(i->Pin.FpCMov.cond)); + vex_printf("if (%s) { ", showPPCCondCode(i->Pin.FpCMov.cond)); } vex_printf("fmr "); - ppHRegPPC32(i->Pin.FpCMov.dst); + ppHRegPPC(i->Pin.FpCMov.dst); vex_printf(","); - ppHRegPPC32(i->Pin.FpCMov.src); + ppHRegPPC(i->Pin.FpCMov.src); if (i->Pin.FpCMov.cond.test != Pct_ALWAYS) vex_printf(" }"); vex_printf(" }"); return; case Pin_FpLdFPSCR: vex_printf("mtfsf 0xFF,"); - ppHRegPPC32(i->Pin.FpLdFPSCR.src); + ppHRegPPC(i->Pin.FpLdFPSCR.src); return; case Pin_FpCmp: vex_printf("fcmpo %%cr1,"); - ppHRegPPC32(i->Pin.FpCmp.srcL); + ppHRegPPC(i->Pin.FpCmp.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.FpCmp.srcR); + ppHRegPPC(i->Pin.FpCmp.srcR); vex_printf("; mfcr "); - ppHRegPPC32(i->Pin.FpCmp.dst); + ppHRegPPC(i->Pin.FpCmp.dst); vex_printf("; rlwinm "); - ppHRegPPC32(i->Pin.FpCmp.dst); + ppHRegPPC(i->Pin.FpCmp.dst); vex_printf(","); - ppHRegPPC32(i->Pin.FpCmp.dst); + ppHRegPPC(i->Pin.FpCmp.dst); vex_printf(",8,28,31"); return; case Pin_RdWrLR: vex_printf("%s ", i->Pin.RdWrLR.wrLR ? 
"mtlr" : "mflr"); - ppHRegPPC32(i->Pin.RdWrLR.gpr); + ppHRegPPC(i->Pin.RdWrLR.gpr); return; case Pin_AvLdSt: { @@ -1482,97 +1498,97 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) vex_printf("lv%sx ", str_size); else vex_printf("stv%sx ", str_size); - ppHRegPPC32(i->Pin.AvLdSt.reg); + ppHRegPPC(i->Pin.AvLdSt.reg); vex_printf(","); if (i->Pin.AvLdSt.addr->tag == Pam_IR) vex_printf("%%r30"); else - ppHRegPPC32(i->Pin.AvLdSt.addr->Pam.RR.index); + ppHRegPPC(i->Pin.AvLdSt.addr->Pam.RR.index); vex_printf(","); - ppHRegPPC32(i->Pin.AvLdSt.addr->Pam.RR.base); + ppHRegPPC(i->Pin.AvLdSt.addr->Pam.RR.base); return; } case Pin_AvUnary: - vex_printf("%s ", showPPC32AvOp(i->Pin.AvUnary.op)); - ppHRegPPC32(i->Pin.AvUnary.dst); + vex_printf("%s ", showPPCAvOp(i->Pin.AvUnary.op)); + ppHRegPPC(i->Pin.AvUnary.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvUnary.src); + ppHRegPPC(i->Pin.AvUnary.src); return; case Pin_AvBinary: - vex_printf("%s ", showPPC32AvOp(i->Pin.AvBinary.op)); - ppHRegPPC32(i->Pin.AvBinary.dst); + vex_printf("%s ", showPPCAvOp(i->Pin.AvBinary.op)); + ppHRegPPC(i->Pin.AvBinary.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvBinary.srcL); + ppHRegPPC(i->Pin.AvBinary.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.AvBinary.srcR); + ppHRegPPC(i->Pin.AvBinary.srcR); return; case Pin_AvBin8x16: - vex_printf("%s(b) ", showPPC32AvOp(i->Pin.AvBin8x16.op)); - ppHRegPPC32(i->Pin.AvBin8x16.dst); + vex_printf("%s(b) ", showPPCAvOp(i->Pin.AvBin8x16.op)); + ppHRegPPC(i->Pin.AvBin8x16.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvBin8x16.srcL); + ppHRegPPC(i->Pin.AvBin8x16.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.AvBin8x16.srcR); + ppHRegPPC(i->Pin.AvBin8x16.srcR); return; case Pin_AvBin16x8: - vex_printf("%s(h) ", showPPC32AvOp(i->Pin.AvBin16x8.op)); - ppHRegPPC32(i->Pin.AvBin16x8.dst); + vex_printf("%s(h) ", showPPCAvOp(i->Pin.AvBin16x8.op)); + ppHRegPPC(i->Pin.AvBin16x8.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvBin16x8.srcL); + ppHRegPPC(i->Pin.AvBin16x8.srcL); 
vex_printf(","); - ppHRegPPC32(i->Pin.AvBin16x8.srcR); + ppHRegPPC(i->Pin.AvBin16x8.srcR); return; case Pin_AvBin32x4: - vex_printf("%s(w) ", showPPC32AvOp(i->Pin.AvBin32x4.op)); - ppHRegPPC32(i->Pin.AvBin32x4.dst); + vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvBin32x4.op)); + ppHRegPPC(i->Pin.AvBin32x4.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvBin32x4.srcL); + ppHRegPPC(i->Pin.AvBin32x4.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.AvBin32x4.srcR); + ppHRegPPC(i->Pin.AvBin32x4.srcR); return; case Pin_AvBin32Fx4: - vex_printf("%s ", showPPC32AvFpOp(i->Pin.AvBin32Fx4.op)); - ppHRegPPC32(i->Pin.AvBin32Fx4.dst); + vex_printf("%s ", showPPCAvFpOp(i->Pin.AvBin32Fx4.op)); + ppHRegPPC(i->Pin.AvBin32Fx4.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvBin32Fx4.srcL); + ppHRegPPC(i->Pin.AvBin32Fx4.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.AvBin32Fx4.srcR); + ppHRegPPC(i->Pin.AvBin32Fx4.srcR); return; case Pin_AvUn32Fx4: - vex_printf("%s ", showPPC32AvFpOp(i->Pin.AvUn32Fx4.op)); - ppHRegPPC32(i->Pin.AvUn32Fx4.dst); + vex_printf("%s ", showPPCAvFpOp(i->Pin.AvUn32Fx4.op)); + ppHRegPPC(i->Pin.AvUn32Fx4.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvUn32Fx4.src); + ppHRegPPC(i->Pin.AvUn32Fx4.src); return; case Pin_AvPerm: vex_printf("vperm "); - ppHRegPPC32(i->Pin.AvPerm.dst); + ppHRegPPC(i->Pin.AvPerm.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvPerm.srcL); + ppHRegPPC(i->Pin.AvPerm.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.AvPerm.srcR); + ppHRegPPC(i->Pin.AvPerm.srcR); vex_printf(","); - ppHRegPPC32(i->Pin.AvPerm.ctl); + ppHRegPPC(i->Pin.AvPerm.ctl); return; case Pin_AvSel: vex_printf("vsel "); - ppHRegPPC32(i->Pin.AvSel.dst); + ppHRegPPC(i->Pin.AvSel.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvSel.srcL); + ppHRegPPC(i->Pin.AvSel.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.AvSel.srcR); + ppHRegPPC(i->Pin.AvSel.srcR); vex_printf(","); - ppHRegPPC32(i->Pin.AvSel.ctl); + ppHRegPPC(i->Pin.AvSel.ctl); return; case Pin_AvShlDbl: vex_printf("vsldoi "); - 
ppHRegPPC32(i->Pin.AvShlDbl.dst); + ppHRegPPC(i->Pin.AvShlDbl.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvShlDbl.srcL); + ppHRegPPC(i->Pin.AvShlDbl.srcL); vex_printf(","); - ppHRegPPC32(i->Pin.AvShlDbl.srcR); + ppHRegPPC(i->Pin.AvShlDbl.srcR); vex_printf(",%d", i->Pin.AvShlDbl.shift); return; @@ -1581,28 +1597,28 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) UChar ch_sz = toUChar( (sz == 8) ? 'b' : (sz == 16) ? 'h' : 'w' ); vex_printf("vsplt%s%c ", i->Pin.AvSplat.src->tag == Pvi_Imm ? "is" : "", ch_sz); - ppHRegPPC32(i->Pin.AvSplat.dst); + ppHRegPPC(i->Pin.AvSplat.dst); vex_printf(","); - ppPPC32VI5s(i->Pin.AvSplat.src); + ppPPCVI5s(i->Pin.AvSplat.src); if (i->Pin.AvSplat.src->tag == Pvi_Reg) vex_printf(", %d", (128/sz)-1); /* louis lane */ return; } case Pin_AvCMov: - vex_printf("avcmov (%s) ", showPPC32CondCode(i->Pin.AvCMov.cond)); - ppHRegPPC32(i->Pin.AvCMov.dst); + vex_printf("avcmov (%s) ", showPPCCondCode(i->Pin.AvCMov.cond)); + ppHRegPPC(i->Pin.AvCMov.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvCMov.src); + ppHRegPPC(i->Pin.AvCMov.src); vex_printf(": "); vex_printf("if (v_dst != v_src) { "); if (i->Pin.AvCMov.cond.test != Pct_ALWAYS) { - vex_printf("if (%s) { ", showPPC32CondCode(i->Pin.AvCMov.cond)); + vex_printf("if (%s) { ", showPPCCondCode(i->Pin.AvCMov.cond)); } vex_printf("vmr "); - ppHRegPPC32(i->Pin.AvCMov.dst); + ppHRegPPC(i->Pin.AvCMov.dst); vex_printf(","); - ppHRegPPC32(i->Pin.AvCMov.src); + ppHRegPPC(i->Pin.AvCMov.src); if (i->Pin.FpCMov.cond.test != Pct_ALWAYS) vex_printf(" }"); vex_printf(" }"); @@ -1610,18 +1626,18 @@ void ppPPC32Instr ( PPC32Instr* i, Bool mode64 ) case Pin_AvLdVSCR: vex_printf("mtvscr "); - ppHRegPPC32(i->Pin.AvLdVSCR.src); + ppHRegPPC(i->Pin.AvLdVSCR.src); return; default: - vex_printf("\nppPPC32Instr(ppc32): No such tag(%d)\n", (Int)i->tag); - vpanic("ppPPC32Instr(ppc32)"); + vex_printf("\nppPPCInstr: No such tag(%d)\n", (Int)i->tag); + vpanic("ppPPCInstr"); } } /* --------- Helpers for register allocation. 
--------- */ -void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 ) +void getRegUsage_PPCInstr ( HRegUsage* u, PPCInstr* i, Bool mode64 ) { initHRegUsage(u); switch (i->tag) { @@ -1629,37 +1645,37 @@ void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 ) addHRegUse(u, HRmWrite, i->Pin.LI.dst); break; case Pin_Alu: - addHRegUse(u, HRmRead, i->Pin.Alu.srcL); - addRegUsage_PPC32RH(u, i->Pin.Alu.srcR); + addHRegUse(u, HRmRead, i->Pin.Alu.srcL); + addRegUsage_PPCRH(u, i->Pin.Alu.srcR); addHRegUse(u, HRmWrite, i->Pin.Alu.dst); return; case Pin_Shft: addHRegUse(u, HRmRead, i->Pin.Shft.srcL); - addRegUsage_PPC32RH(u, i->Pin.Shft.srcR); + addRegUsage_PPCRH(u, i->Pin.Shft.srcR); addHRegUse(u, HRmWrite, i->Pin.Shft.dst); return; - case Pin_AddSubC32: - addHRegUse(u, HRmWrite, i->Pin.AddSubC32.dst); - addHRegUse(u, HRmRead, i->Pin.AddSubC32.srcL); - addHRegUse(u, HRmRead, i->Pin.AddSubC32.srcR); + case Pin_AddSubC: + addHRegUse(u, HRmWrite, i->Pin.AddSubC.dst); + addHRegUse(u, HRmRead, i->Pin.AddSubC.srcL); + addHRegUse(u, HRmRead, i->Pin.AddSubC.srcR); return; case Pin_Cmp: addHRegUse(u, HRmRead, i->Pin.Cmp.srcL); - addRegUsage_PPC32RH(u, i->Pin.Cmp.srcR); + addRegUsage_PPCRH(u, i->Pin.Cmp.srcR); return; case Pin_Unary: - addHRegUse(u, HRmWrite, i->Pin.Unary32.dst); - addHRegUse(u, HRmRead, i->Pin.Unary32.src); + addHRegUse(u, HRmWrite, i->Pin.Unary.dst); + addHRegUse(u, HRmRead, i->Pin.Unary.src); return; case Pin_MulL: addHRegUse(u, HRmWrite, i->Pin.MulL.dst); - addHRegUse(u, HRmRead, i->Pin.MulL.srcL); - addHRegUse(u, HRmRead, i->Pin.MulL.srcR); + addHRegUse(u, HRmRead, i->Pin.MulL.srcL); + addHRegUse(u, HRmRead, i->Pin.MulL.srcR); return; case Pin_Div: addHRegUse(u, HRmWrite, i->Pin.Div.dst); - addHRegUse(u, HRmRead, i->Pin.Div.srcL); - addHRegUse(u, HRmRead, i->Pin.Div.srcR); + addHRegUse(u, HRmRead, i->Pin.Div.srcL); + addHRegUse(u, HRmRead, i->Pin.Div.srcR); return; case Pin_Call: { UInt argir; @@ -1701,14 +1717,14 @@ void 
getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 ) /* Finally, there is the issue that the insn trashes a register because the literal target address has to be loaded into a register. %r10 seems a suitable victim. - (Can't use %r0, as use ops that interpret it as value zero). */ + (Can't use %r0, as use ops that interpret it as value zero). */ addHRegUse(u, HRmWrite, hregPPC_GPR10(mode64)); /* Upshot of this is that the assembler really must use %r10, and no other, as a destination temporary. */ return; } case Pin_Goto: - addRegUsage_PPC32RI(u, i->Pin.Goto.dst); + addRegUsage_PPCRI(u, i->Pin.Goto.dst); /* GPR3 holds destination address from Pin_Goto */ addHRegUse(u, HRmWrite, hregPPC_GPR3(mode64)); if (i->Pin.Goto.jk != Ijk_Boring @@ -1720,19 +1736,19 @@ void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 ) addHRegUse(u, HRmWrite, GuestStatePtr(mode64)); return; case Pin_CMov: - addRegUsage_PPC32RI(u, i->Pin.CMov.src); + addRegUsage_PPCRI(u, i->Pin.CMov.src); addHRegUse(u, HRmWrite, i->Pin.CMov.dst); return; case Pin_Load: - addRegUsage_PPC32AMode(u, i->Pin.Load.src); + addRegUsage_PPCAMode(u, i->Pin.Load.src); addHRegUse(u, HRmWrite, i->Pin.Load.dst); return; case Pin_Store: - addHRegUse(u, HRmRead, i->Pin.Store.src); - addRegUsage_PPC32AMode(u, i->Pin.Store.dst); + addHRegUse(u, HRmRead, i->Pin.Store.src); + addRegUsage_PPCAMode(u, i->Pin.Store.dst); return; - case Pin_Set32: - addHRegUse(u, HRmWrite, i->Pin.Set32.dst); + case Pin_Set: + addHRegUse(u, HRmWrite, i->Pin.Set.dst); return; case Pin_MfCR: addHRegUse(u, HRmWrite, i->Pin.MfCR.dst); @@ -1752,38 +1768,38 @@ void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 ) case Pin_FpLdSt: addHRegUse(u, (i->Pin.FpLdSt.isLoad ? 
HRmWrite : HRmRead), i->Pin.FpLdSt.reg); - addRegUsage_PPC32AMode(u, i->Pin.FpLdSt.addr); + addRegUsage_PPCAMode(u, i->Pin.FpLdSt.addr); return; case Pin_FpF64toF32: addHRegUse(u, HRmWrite, i->Pin.FpF64toF32.dst); addHRegUse(u, HRmRead, i->Pin.FpF64toF32.src); return; case Pin_FpF64toI32: - addHRegUse(u, HRmWrite, i->Pin.FpF64toI32.dst); - addHRegUse(u, HRmWrite, hregPPC32_FPR7()); - addHRegUse(u, HRmRead, i->Pin.FpF64toI32.src); + addHRegUse(u, HRmWrite, i->Pin.FpF64toI32.dst); + addHRegUse(u, HRmWrite, hregPPC_FPR7()); + addHRegUse(u, HRmRead, i->Pin.FpF64toI32.src); return; case Pin_FpF64toI64: addHRegUse(u, HRmWrite, i->Pin.FpF64toI64.dst); - addHRegUse(u, HRmWrite, hregPPC32_FPR7()); + addHRegUse(u, HRmWrite, hregPPC_FPR7()); addHRegUse(u, HRmRead, i->Pin.FpF64toI64.src); return; case Pin_FpI64toF64: addHRegUse(u, HRmWrite, i->Pin.FpI64toF64.dst); - addHRegUse(u, HRmWrite, hregPPC32_FPR7()); + addHRegUse(u, HRmWrite, hregPPC_FPR7()); addHRegUse(u, HRmRead, i->Pin.FpI64toF64.src); return; case Pin_FpCMov: addHRegUse(u, HRmModify, i->Pin.FpCMov.dst); - addHRegUse(u, HRmRead, i->Pin.FpCMov.src); + addHRegUse(u, HRmRead, i->Pin.FpCMov.src); return; case Pin_FpLdFPSCR: addHRegUse(u, HRmRead, i->Pin.FpLdFPSCR.src); return; case Pin_FpCmp: addHRegUse(u, HRmWrite, i->Pin.FpCmp.dst); - addHRegUse(u, HRmRead, i->Pin.FpCmp.srcL); - addHRegUse(u, HRmRead, i->Pin.FpCmp.srcR); + addHRegUse(u, HRmRead, i->Pin.FpCmp.srcL); + addHRegUse(u, HRmRead, i->Pin.FpCmp.srcR); return; case Pin_RdWrLR: @@ -1796,7 +1812,7 @@ void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 ) i->Pin.AvLdSt.reg); if (i->Pin.AvLdSt.addr->tag == Pam_IR) addHRegUse(u, HRmWrite, hregPPC_GPR30(mode64)); - addRegUsage_PPC32AMode(u, i->Pin.AvLdSt.addr); + addRegUsage_PPCAMode(u, i->Pin.AvLdSt.addr); return; case Pin_AvUnary: addHRegUse(u, HRmWrite, i->Pin.AvUnary.dst); @@ -1835,7 +1851,7 @@ void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 ) addHRegUse(u, HRmRead, 
i->Pin.AvBin32Fx4.srcL); addHRegUse(u, HRmRead, i->Pin.AvBin32Fx4.srcR); if (i->Pin.AvBin32Fx4.op == Pavfp_MULF) - addHRegUse(u, HRmWrite, hregPPC32_VR29()); + addHRegUse(u, HRmWrite, hregPPC_VR29()); return; case Pin_AvUn32Fx4: addHRegUse(u, HRmWrite, i->Pin.AvUn32Fx4.dst); @@ -1859,20 +1875,20 @@ void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 ) addHRegUse(u, HRmRead, i->Pin.AvShlDbl.srcR); return; case Pin_AvSplat: - addHRegUse(u, HRmWrite, i->Pin.AvSplat.dst); - addRegUsage_PPC32VI5s(u, i->Pin.AvSplat.src); + addHRegUse(u, HRmWrite, i->Pin.AvSplat.dst); + addRegUsage_PPCVI5s(u, i->Pin.AvSplat.src); return; case Pin_AvCMov: addHRegUse(u, HRmModify, i->Pin.AvCMov.dst); - addHRegUse(u, HRmRead, i->Pin.AvCMov.src); + addHRegUse(u, HRmRead, i->Pin.AvCMov.src); return; case Pin_AvLdVSCR: addHRegUse(u, HRmRead, i->Pin.AvLdVSCR.src); return; default: - ppPPC32Instr(i, mode64); - vpanic("getRegUsage_PPC32Instr"); + ppPPCInstr(i, mode64); + vpanic("getRegUsage_PPCInstr"); } } @@ -1882,7 +1898,7 @@ static void mapReg( HRegRemap* m, HReg* r ) *r = lookupHRegRemap(m, *r); } -void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 ) +void mapRegs_PPCInstr ( HRegRemap* m, PPCInstr* i, Bool mode64 ) { switch (i->tag) { case Pin_LI: @@ -1891,25 +1907,25 @@ void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 ) case Pin_Alu: mapReg(m, &i->Pin.Alu.dst); mapReg(m, &i->Pin.Alu.srcL); - mapRegs_PPC32RH(m, i->Pin.Alu.srcR); + mapRegs_PPCRH(m, i->Pin.Alu.srcR); return; case Pin_Shft: mapReg(m, &i->Pin.Shft.dst); mapReg(m, &i->Pin.Shft.srcL); - mapRegs_PPC32RH(m, i->Pin.Shft.srcR); + mapRegs_PPCRH(m, i->Pin.Shft.srcR); return; - case Pin_AddSubC32: - mapReg(m, &i->Pin.AddSubC32.dst); - mapReg(m, &i->Pin.AddSubC32.srcL); - mapReg(m, &i->Pin.AddSubC32.srcR); + case Pin_AddSubC: + mapReg(m, &i->Pin.AddSubC.dst); + mapReg(m, &i->Pin.AddSubC.srcL); + mapReg(m, &i->Pin.AddSubC.srcR); return; case Pin_Cmp: mapReg(m, &i->Pin.Cmp.srcL); - 
mapRegs_PPC32RH(m, i->Pin.Cmp.srcR); + mapRegs_PPCRH(m, i->Pin.Cmp.srcR); return; case Pin_Unary: - mapReg(m, &i->Pin.Unary32.dst); - mapReg(m, &i->Pin.Unary32.src); + mapReg(m, &i->Pin.Unary.dst); + mapReg(m, &i->Pin.Unary.src); return; case Pin_MulL: mapReg(m, &i->Pin.MulL.dst); @@ -1924,22 +1940,22 @@ void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 ) case Pin_Call: return; case Pin_Goto: - mapRegs_PPC32RI(m, i->Pin.Goto.dst); + mapRegs_PPCRI(m, i->Pin.Goto.dst); return; case Pin_CMov: - mapRegs_PPC32RI(m, i->Pin.CMov.src); + mapRegs_PPCRI(m, i->Pin.CMov.src); mapReg(m, &i->Pin.CMov.dst); return; case Pin_Load: - mapRegs_PPC32AMode(m, i->Pin.Load.src); + mapRegs_PPCAMode(m, i->Pin.Load.src); mapReg(m, &i->Pin.Load.dst); return; case Pin_Store: mapReg(m, &i->Pin.Store.src); - mapRegs_PPC32AMode(m, i->Pin.Store.dst); + mapRegs_PPCAMode(m, i->Pin.Store.dst); return; - case Pin_Set32: - mapReg(m, &i->Pin.Set32.dst); + case Pin_Set: + mapReg(m, &i->Pin.Set.dst); return; case Pin_MfCR: mapReg(m, &i->Pin.MfCR.dst); @@ -1957,7 +1973,7 @@ void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 ) return; case Pin_FpLdSt: mapReg(m, &i->Pin.FpLdSt.reg); - mapRegs_PPC32AMode(m, i->Pin.FpLdSt.addr); + mapRegs_PPCAMode(m, i->Pin.FpLdSt.addr); return; case Pin_FpF64toF32: mapReg(m, &i->Pin.FpF64toF32.dst); @@ -1992,7 +2008,7 @@ void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 ) return; case Pin_AvLdSt: mapReg(m, &i->Pin.AvLdSt.reg); - mapRegs_PPC32AMode(m, i->Pin.AvLdSt.addr); + mapRegs_PPCAMode(m, i->Pin.AvLdSt.addr); return; case Pin_AvUnary: mapReg(m, &i->Pin.AvUnary.dst); @@ -2046,7 +2062,7 @@ void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 ) return; case Pin_AvSplat: mapReg(m, &i->Pin.AvSplat.dst); - mapRegs_PPC32VI5s(m, i->Pin.AvSplat.src); + mapRegs_PPCVI5s(m, i->Pin.AvSplat.src); return; case Pin_AvCMov: mapReg(m, &i->Pin.AvCMov.dst); @@ -2057,8 +2073,8 @@ void mapRegs_PPC32Instr ( HRegRemap* m, 
PPC32Instr* i, Bool mode64 ) return; default: - ppPPC32Instr(i, mode64); - vpanic("mapRegs_PPC32Instr"); + ppPPCInstr(i, mode64); + vpanic("mapRegs_PPCInstr"); } } @@ -2066,7 +2082,7 @@ void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 ) source and destination to *src and *dst. If in doubt say No. Used by the register allocator to do move coalescing. */ -Bool isMove_PPC32Instr ( PPC32Instr* i, HReg* src, HReg* dst ) +Bool isMove_PPCInstr ( PPCInstr* i, HReg* src, HReg* dst ) { /* Moves between integer regs */ if (i->tag == Pin_Alu) { @@ -2096,51 +2112,51 @@ Bool isMove_PPC32Instr ( PPC32Instr* i, HReg* src, HReg* dst ) /* Generate ppc32 spill/reload instructions under the direction of the register allocator. Note it's critical these don't write the condition codes. */ -PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 ) +PPCInstr* genSpill_PPC ( HReg rreg, UShort offsetB, Bool mode64 ) { - PPC32AMode* am; + PPCAMode* am; vassert(!hregIsVirtual(rreg)); - am = PPC32AMode_IR(offsetB, GuestStatePtr(mode64)); + am = PPCAMode_IR( offsetB, GuestStatePtr(mode64) ); switch (hregClass(rreg)) { case HRcInt64: vassert(mode64); - return PPC32Instr_Store( 8, am, rreg, mode64 ); + return PPCInstr_Store( 8, am, rreg, mode64 ); case HRcInt32: vassert(!mode64); - return PPC32Instr_Store( 4, am, rreg, mode64 ); + return PPCInstr_Store( 4, am, rreg, mode64 ); case HRcFlt64: - return PPC32Instr_FpLdSt ( False/*store*/, 8, rreg, am ); + return PPCInstr_FpLdSt ( False/*store*/, 8, rreg, am ); case HRcVec128: // XXX: GPR30 used as spill register to kludge AltiVec AMode_IR - return PPC32Instr_AvLdSt ( False/*store*/, 16, rreg, am ); + return PPCInstr_AvLdSt ( False/*store*/, 16, rreg, am ); default: ppHRegClass(hregClass(rreg)); - vpanic("genSpill_PPC32: unimplemented regclass"); + vpanic("genSpill_PPC: unimplemented regclass"); } } -PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 ) +PPCInstr* genReload_PPC ( HReg rreg, UShort 
offsetB, Bool mode64 ) { - PPC32AMode* am; + PPCAMode* am; vassert(!hregIsVirtual(rreg)); - am = PPC32AMode_IR(offsetB, GuestStatePtr(mode64)); + am = PPCAMode_IR( offsetB, GuestStatePtr(mode64) ); switch (hregClass(rreg)) { case HRcInt64: vassert(mode64); - return PPC32Instr_Load( 8, False, rreg, am, mode64 ); + return PPCInstr_Load( 8, False, rreg, am, mode64 ); case HRcInt32: vassert(!mode64); - return PPC32Instr_Load( 4, False, rreg, am, mode64 ); + return PPCInstr_Load( 4, False, rreg, am, mode64 ); case HRcFlt64: - return PPC32Instr_FpLdSt ( True/*load*/, 8, rreg, am ); + return PPCInstr_FpLdSt ( True/*load*/, 8, rreg, am ); case HRcVec128: // XXX: GPR30 used as spill register to kludge AltiVec AMode_IR - return PPC32Instr_AvLdSt ( True/*load*/, 16, rreg, am ); + return PPCInstr_AvLdSt ( True/*load*/, 16, rreg, am ); default: ppHRegClass(hregClass(rreg)); - vpanic("genReload_PPC32: unimplemented regclass"); + vpanic("genReload_PPC: unimplemented regclass"); } } @@ -2191,7 +2207,8 @@ static UChar* emit32 ( UChar* p, UInt w32 ) as per PPC32 p576 */ -static UChar* mkFormD ( UChar* p, UInt opc1, UInt r1, UInt r2, UInt imm ) +static UChar* mkFormD ( UChar* p, UInt opc1, + UInt r1, UInt r2, UInt imm ) { UInt theInstr; vassert(opc1 < 0x40); @@ -2214,7 +2231,8 @@ static UChar* mkFormMD ( UChar* p, UInt opc1, UInt r1, UInt r2, vassert(opc2 < 0x08); imm2 = ((imm2 & 0x1F) << 1) | (imm2 >> 5); theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | - ((imm1 & 0x1F)<<11) | (imm2<<5) | (opc2<<2) | ((imm1 >> 5)<<1)); + ((imm1 & 0x1F)<<11) | (imm2<<5) | + (opc2<<2) | ((imm1 >> 5)<<1)); return emit32(p, theInstr); } @@ -2228,7 +2246,8 @@ static UChar* mkFormX ( UChar* p, UInt opc1, UInt r1, UInt r2, vassert(r3 < 0x20); vassert(opc2 < 0x400); vassert(b0 < 0x2); - theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (opc2<<1) | (b0)); + theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | + (r3<<11) | (opc2<<1) | (b0)); return emit32(p, theInstr); } @@ -2258,7 +2277,8 @@ static UChar* 
mkFormXL ( UChar* p, UInt opc1, UInt f1, UInt f2, vassert(f3 < 0x20); vassert(opc2 < 0x400); vassert(b0 < 0x2); - theInstr = ((opc1<<26) | (f1<<21) | (f2<<16) | (f3<<11) | (opc2<<1) | (b0)); + theInstr = ((opc1<<26) | (f1<<21) | (f2<<16) | + (f3<<11) | (opc2<<1) | (b0)); return emit32(p, theInstr); } @@ -2278,9 +2298,10 @@ static UChar* mkFormXFX ( UChar* p, UInt r1, UInt f2, UInt opc2 ) case 371: // mftb case 467: // mtspr vassert(f2 < 0x400); - f2 = ((f2>>5) & 0x1F) | ((f2 & 0x1F)<<5); // re-arrange split field + // re-arrange split field + f2 = ((f2>>5) & 0x1F) | ((f2 & 0x1F)<<5); break; - default: vpanic("mkFormXFX(PPC32)"); + default: vpanic("mkFormXFX(ppch)"); } theInstr = ((31<<26) | (r1<<21) | (f2<<11) | (opc2<<1)); return emit32(p, theInstr); @@ -2335,7 +2356,8 @@ static UChar* mkFormB ( UChar* p, UInt BO, UInt BI, vassert(BD < 0x4000); vassert(AA < 0x2); vassert(LK < 0x2); - theInstr = ((16<<26) | (BO<<21) | (BI<<16) | (BD<<2) | (AA<<1) | (LK)); + theInstr = ((16<<26) | (BO<<21) | (BI<<16) | + (BD<<2) | (AA<<1) | (LK)); return emit32(p, theInstr); } @@ -2373,7 +2395,7 @@ static UChar* mkFormA ( UChar* p, UInt opc1, UInt r1, UInt r2, } static UChar* doAMode_IR ( UChar* p, UInt opc1, UInt rSD, - PPC32AMode* am, Bool mode64 ) + PPCAMode* am, Bool mode64 ) { UInt rA, idx; vassert(am->tag == Pam_IR); @@ -2392,7 +2414,7 @@ static UChar* doAMode_IR ( UChar* p, UInt opc1, UInt rSD, } static UChar* doAMode_RR ( UChar* p, UInt opc1, UInt opc2, - UInt rSD, PPC32AMode* am, Bool mode64 ) + UInt rSD, PPCAMode* am, Bool mode64 ) { UInt rA, rB; vassert(am->tag == Pam_RR); @@ -2410,15 +2432,13 @@ static UChar* mkLoadImm ( UChar* p, UInt r_dst, ULong imm, Bool mode64 ) { vassert(r_dst < 0x20); -// if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) { - if (imm == (ULong)(Long)(Int)(Short)(UShort)imm) { + if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) { // sign-extendable from 16 bits // addi r_dst,0,imm => li r_dst,imm p = mkFormD(p, 14, r_dst, 0, imm & 0xFFFF); } else { 
-// if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) { - if (imm == (ULong)(Long)(Int)(UInt)imm) { + if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) { // sign-extendable from 32 bits // addis r_dst,r0,(imm>>16) => lis r_dst, (imm>>16) @@ -2484,7 +2504,8 @@ static UChar* mkFormVXR ( UChar* p, UInt opc1, UInt r1, UInt r2, vassert(r3 < 0x20); vassert(Rc < 0x2); vassert(opc2 < 0x400); - theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (Rc<<10) | opc2); + theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | + (r3<<11) | (Rc<<10) | opc2); return emit32(p, theInstr); } @@ -2498,7 +2519,8 @@ static UChar* mkFormVA ( UChar* p, UInt opc1, UInt r1, UInt r2, vassert(r3 < 0x20); vassert(r4 < 0x20); vassert(opc2 < 0x40); - theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (r4<<6) | opc2); + theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | + (r3<<11) | (r4<<6) | opc2); return emit32(p, theInstr); } @@ -2512,14 +2534,14 @@ static UChar* mkFormVA ( UChar* p, UInt opc1, UInt r1, UInt r2, use a call-return scheme to get from the dispatcher to generated code and back. */ -Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, - Bool mode64, void* dispatch ) +Int emit_PPCInstr ( UChar* buf, Int nbuf, PPCInstr* i, + Bool mode64, void* dispatch ) { UChar* p = &buf[0]; UChar* ptmp = p; vassert(nbuf >= 32); -// vex_printf("asm ");ppPPC32Instr(i, mode64); vex_printf("\n"); +// vex_printf("asm ");ppPPCInstr(i, mode64); vex_printf("\n"); switch (i->tag) { @@ -2529,12 +2551,12 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, goto done; case Pin_Alu: { - PPC32RH* srcR = i->Pin.Alu.srcR; - Bool immR = toBool(srcR->tag == Prh_Imm); - UInt r_dst = iregNo(i->Pin.Alu.dst, mode64); - UInt r_srcL = iregNo(i->Pin.Alu.srcL, mode64); - UInt r_srcR = immR ? 
(-1)/*bogus*/ : - iregNo(srcR->Prh.Reg.reg, mode64); + PPCRH* srcR = i->Pin.Alu.srcR; + Bool immR = toBool(srcR->tag == Prh_Imm); + UInt r_dst = iregNo(i->Pin.Alu.dst, mode64); + UInt r_srcL = iregNo(i->Pin.Alu.srcL, mode64); + UInt r_srcR = immR ? (-1)/*bogus*/ : + iregNo(srcR->Prh.Reg.reg, mode64); switch (i->Pin.Alu.op) { case Palu_ADD: @@ -2601,13 +2623,13 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, } case Pin_Shft: { - PPC32RH* srcR = i->Pin.Shft.srcR; - Bool sz32 = i->Pin.Shft.sz32; - Bool immR = toBool(srcR->tag == Prh_Imm); - UInt r_dst = iregNo(i->Pin.Shft.dst, mode64); - UInt r_srcL = iregNo(i->Pin.Shft.srcL, mode64); - UInt r_srcR = immR ? (-1)/*bogus*/ : - iregNo(srcR->Prh.Reg.reg, mode64); + PPCRH* srcR = i->Pin.Shft.srcR; + Bool sz32 = i->Pin.Shft.sz32; + Bool immR = toBool(srcR->tag == Prh_Imm); + UInt r_dst = iregNo(i->Pin.Shft.dst, mode64); + UInt r_srcL = iregNo(i->Pin.Shft.srcL, mode64); + UInt r_srcR = immR ? (-1)/*bogus*/ : + iregNo(srcR->Prh.Reg.reg, mode64); if (!mode64) vassert(sz32); @@ -2708,12 +2730,12 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, goto done; } - case Pin_AddSubC32: { - Bool isAdd = i->Pin.AddSubC32.isAdd; - Bool setC = i->Pin.AddSubC32.setC; - UInt r_srcL = iregNo(i->Pin.AddSubC32.srcL, mode64); - UInt r_srcR = iregNo(i->Pin.AddSubC32.srcR, mode64); - UInt r_dst = iregNo(i->Pin.AddSubC32.dst, mode64); + case Pin_AddSubC: { + Bool isAdd = i->Pin.AddSubC.isAdd; + Bool setC = i->Pin.AddSubC.setC; + UInt r_srcL = iregNo(i->Pin.AddSubC.srcL, mode64); + UInt r_srcR = iregNo(i->Pin.AddSubC.srcR, mode64); + UInt r_dst = iregNo(i->Pin.AddSubC.dst, mode64); if (isAdd) { if (setC) /* addc (PPC32 p348) */ @@ -2736,7 +2758,7 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, UInt fld1 = i->Pin.Cmp.crfD << 2; UInt r_srcL = iregNo(i->Pin.Cmp.srcL, mode64); UInt r_srcR, imm_srcR; - PPC32RH* srcR = i->Pin.Cmp.srcR; + PPCRH* srcR = i->Pin.Cmp.srcR; if (!mode64) // cmp double word invalid for 
mode32 vassert(sz32); @@ -2768,10 +2790,10 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, } case Pin_Unary: { - UInt r_dst = iregNo(i->Pin.Unary32.dst, mode64); - UInt r_src = iregNo(i->Pin.Unary32.src, mode64); + UInt r_dst = iregNo(i->Pin.Unary.dst, mode64); + UInt r_src = iregNo(i->Pin.Unary.src, mode64); - switch (i->Pin.Unary32.op) { + switch (i->Pin.Unary.op) { case Pun_NOT: // nor r_dst,r_src,r_src p = mkFormX(p, 31, r_src, r_dst, r_src, 124, 0); break; @@ -2848,10 +2870,10 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, } case Pin_Call: { - PPC32CondCode cond = i->Pin.Call.cond; - UInt r_dst = 10; + PPCCondCode cond = i->Pin.Call.cond; + UInt r_dst = 10; /* As per detailed comment for Pin_Call in - getRegUsage_PPC32Instr above, %r10 is used as an address temp */ + getRegUsage_PPCInstr above, %r10 is used as an address temp */ /* jump over the following insns if condition does not hold */ if (cond.test != Pct_ALWAYS) { @@ -2859,32 +2881,33 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, /* don't know how many bytes to jump over yet... make space for a jump instruction and fill in later. */ ptmp = p; /* fill in this bit later */ - p += 4; // p += 4 + p += 4; // p += 4 } - /* load target to r_dst */ - p = mkLoadImm(p, r_dst, i->Pin.Call.target, mode64); // p += 4|8|20 + /* load target to r_dst */ // p += 4|8|20 + p = mkLoadImm(p, r_dst, i->Pin.Call.target, mode64); /* mtspr 9,r_dst => move r_dst to count register */ - p = mkFormXFX(p, r_dst, 9, 467); // p += 4 + p = mkFormXFX(p, r_dst, 9, 467); // p += 4 /* bctrl => branch to count register (and save to lr) */ - p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 1); // p += 4 + p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 1); // p += 4 /* Fix up the conditional jump, if there was one. 
*/ if (cond.test != Pct_ALWAYS) { Int delta = p - ptmp; vassert(delta >= 16 && delta <= 32); /* bc !ct,cf,delta */ - mkFormB(ptmp, invertCondTest(cond.test), cond.flag, (delta>>2), 0, 0); + mkFormB(ptmp, invertCondTest(cond.test), + cond.flag, (delta>>2), 0, 0); } goto done; } case Pin_Goto: { - UInt trc = 0; - UChar r_return = 3; /* Put target addr into %r3 */ - PPC32CondCode cond = i->Pin.Goto.cond; + UInt trc = 0; + UChar r_ret = 3; /* Put target addr into %r3 */ + PPCCondCode cond = i->Pin.Goto.cond; UInt r_dst; ULong imm_dst; @@ -2917,7 +2940,7 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, break; default: ppIRJumpKind(i->Pin.Goto.jk); - vpanic("emit_PPC32Instr.Pin_Goto: unknown jump kind"); + vpanic("emit_PPCInstr.Pin_Goto: unknown jump kind"); } if (trc !=0) { vassert(trc < 0x10000); @@ -2925,14 +2948,14 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, p = mkFormD(p, 14, 31, 0, trc); // p += 4 } - /* Get the destination address into %r_return */ + /* Get the destination address into %r_ret */ if (i->Pin.Goto.dst->tag == Pri_Imm) { imm_dst = i->Pin.Goto.dst->Pri.Imm; - p = mkLoadImm(p, r_return, imm_dst, mode64); // p += 4|8|20 + p = mkLoadImm(p, r_ret, imm_dst, mode64); // p += 4|8|20 } else { vassert(i->Pin.Goto.dst->tag == Pri_Reg); r_dst = iregNo(i->Pin.Goto.dst->Pri.Reg, mode64); - p = mkMoveReg(p, r_return, r_dst); // p += 4 + p = mkMoveReg(p, r_ret, r_dst); // p += 4 } /* blr */ @@ -2943,15 +2966,16 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, Int delta = p - ptmp; vassert(delta >= 12 && delta <= 32); /* bc !ct,cf,delta */ - mkFormB(ptmp, invertCondTest(cond.test), cond.flag, delta>>2, 0, 0); + mkFormB(ptmp, invertCondTest(cond.test), + cond.flag, delta>>2, 0, 0); } goto done; } case Pin_CMov: { - UInt r_dst, r_src; + UInt r_dst, r_src; ULong imm_src; - PPC32CondCode cond; + PPCCondCode cond; vassert(i->Pin.CMov.cond.test != Pct_ALWAYS); r_dst = iregNo(i->Pin.CMov.dst, mode64); @@ -2983,13 +3007,14 @@ Int 
emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, Int delta = p - ptmp; vassert(delta >= 8 && delta <= 24); /* bc !ct,cf,delta */ - mkFormB(ptmp, invertCondTest(cond.test), cond.flag, (delta>>2), 0, 0); + mkFormB(ptmp, invertCondTest(cond.test), + cond.flag, (delta>>2), 0, 0); } goto done; } case Pin_Load: { - PPC32AMode* am_addr = i->Pin.Load.src; + PPCAMode* am_addr = i->Pin.Load.src; UInt r_dst = iregNo(i->Pin.Load.dst, mode64); Bool syned = i->Pin.Load.syned; UInt opc1, opc2, sz = i->Pin.Load.sz; @@ -3021,20 +3046,19 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, } } - case Pin_Set32: { + case Pin_Set: { /* Make the destination register be 1 or 0, depending on whether the relevant condition holds. */ - UInt r_dst = iregNo(i->Pin.Set32.dst, mode64); - PPC32CondCode cond = i->Pin.Set32.cond; - UInt rot_imm; - UInt r_tmp; + UInt r_dst = iregNo(i->Pin.Set.dst, mode64); + PPCCondCode cond = i->Pin.Set.cond; + UInt rot_imm, r_tmp; if (cond.test == Pct_ALWAYS) { // Just load 1 to dst => li dst,1 p = mkFormD(p, 14, r_dst, 0, 1); } else { rot_imm = 1 + cond.flag; - r_tmp = 0; // Not within scope of regalloc, so no need to declare. + r_tmp = 0; // Not set in getAllocable, so no need to declare. 
// r_tmp = CR => mfcr r_tmp p = mkFormX(p, 31, r_tmp, 0, 0, 19, 0); @@ -3064,7 +3088,7 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, } case Pin_Store: { - PPC32AMode* am_addr = i->Pin.Store.dst; + PPCAMode* am_addr = i->Pin.Store.dst; UInt r_src = iregNo(i->Pin.Store.src, mode64); UInt opc1, opc2, sz = i->Pin.Store.sz; switch (i->Pin.Store.dst->tag) { @@ -3144,7 +3168,7 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, } case Pin_FpLdSt: { - PPC32AMode* am_addr = i->Pin.FpLdSt.addr; + PPCAMode* am_addr = i->Pin.FpLdSt.addr; UInt f_reg = fregNo(i->Pin.FpLdSt.reg); Bool idxd = toBool(i->Pin.FpLdSt.addr->tag == Pam_RR); UChar sz = i->Pin.FpLdSt.sz; @@ -3183,14 +3207,14 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, UInt r_dst = iregNo(i->Pin.FpF64toI32.dst, mode64); UInt fr_src = fregNo(i->Pin.FpF64toI32.src); UChar fr_tmp = 7; // Temp freg - PPC32AMode* am_addr; + PPCAMode* am_addr; // fctiw (conv f64 to i32), PPC32 p404 p = mkFormX(p, 63, fr_tmp, 0, fr_src, 14, 0); - // No RI form of stfiwx, so need PPC32AMode_RR: - am_addr = PPC32AMode_RR( StackFramePtr(mode64), - hregPPC_GPR0(mode64) ); + // No RI form of stfiwx, so need PPCAMode_RR: + am_addr = PPCAMode_RR( StackFramePtr(mode64), + hregPPC_GPR0(mode64) ); // stfiwx (store fp64[lo32] as int32), PPC32 p517 p = doAMode_RR(p, 31, 983, fr_tmp, am_addr, mode64); @@ -3204,13 +3228,13 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, UInt r_dst = iregNo(i->Pin.FpF64toI64.dst, mode64); UInt fr_src = fregNo(i->Pin.FpF64toI64.src); UChar fr_tmp = 7; // Temp freg - PPC32AMode* am_addr; + PPCAMode* am_addr; // fctid (conv f64 to i64), PPC64 p437 p = mkFormX(p, 63, fr_tmp, 0, fr_src, 814, 0); - am_addr = PPC32AMode_RR( StackFramePtr(mode64), - hregPPC_GPR0(mode64) ); + am_addr = PPCAMode_RR( StackFramePtr(mode64), + hregPPC_GPR0(mode64) ); // stfdx (store fp64), PPC64 p589 p = doAMode_RR(p, 31, 727, fr_tmp, am_addr, mode64); @@ -3224,8 +3248,8 @@ Int emit_PPC32Instr ( 
UChar* buf, Int nbuf, PPC32Instr* i, UInt r_src = iregNo(i->Pin.FpI64toF64.src, mode64); UInt fr_dst = fregNo(i->Pin.FpI64toF64.dst); UChar fr_tmp = 7; // Temp freg - PPC32AMode* am_addr = PPC32AMode_RR( StackFramePtr(mode64), - hregPPC_GPR0(mode64) ); + PPCAMode* am_addr = PPCAMode_RR( StackFramePtr(mode64), + hregPPC_GPR0(mode64) ); // stdx r_src,r0,r1 p = doAMode_RR(p, 31, 149, r_src, am_addr, mode64); @@ -3239,9 +3263,9 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, } case Pin_FpCMov: { - UInt fr_dst = fregNo(i->Pin.FpCMov.dst); - UInt fr_src = fregNo(i->Pin.FpCMov.src); - PPC32CondCode cc = i->Pin.FpCMov.cond; + UInt fr_dst = fregNo(i->Pin.FpCMov.dst); + UInt fr_src = fregNo(i->Pin.FpCMov.src); + PPCCondCode cc = i->Pin.FpCMov.cond; if (fr_dst == fr_src) goto done; @@ -3303,16 +3327,17 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, // Only have AltiVec AMode_RR: kludge AMode_IR if (!idxd) { r_idx = 30; // XXX: Using r30 as temp - p = mkLoadImm(p, r_idx, i->Pin.AvLdSt.addr->Pam.IR.index, mode64); + p = mkLoadImm(p, r_idx, + i->Pin.AvLdSt.addr->Pam.IR.index, mode64); } else { r_idx = iregNo(i->Pin.AvLdSt.addr->Pam.RR.index, mode64); } if (i->Pin.FpLdSt.isLoad) { // Load from memory (1,2,4,16) - opc2 = (sz == 1) ? 7 : (sz == 2) ? 39 : (sz == 4) ? 71 : 103; + opc2 = (sz==1) ? 7 : (sz==2) ? 39 : (sz==4) ? 71 : 103; p = mkFormX(p, 31, v_reg, r_idx, r_base, opc2, 0); } else { // Store to memory (1,2,4,16) - opc2 = (sz == 1) ? 135 : (sz == 2) ? 167 : (sz == 4) ? 199 : 231; + opc2 = (sz==1) ? 135 : (sz==2) ? 167 : (sz==4) ? 199 : 231; p = mkFormX(p, 31, v_reg, r_idx, r_base, opc2, 0); } goto done; @@ -3545,28 +3570,28 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, */ UInt vB = 29; // XXX: Using v29 for temp do not change // without also changing - // getRegUsage_PPC32Instr + // getRegUsage_PPCInstr UInt konst = 0x1F; // Better way to load -0.0 (0x80000000) ? 
// vspltisw vB,0x1F (0x1F => each word of vB) p = mkFormVX( p, 4, vB, konst, 0, 908 ); - // vslw vB,vB,vB (each word of vB = (0x1F << 0x1F) = 0x80000000 + // vslw vB,vB,vB (each word of vB = (0x1F << 0x1F) = 0x80000000 p = mkFormVX( p, 4, vB, vB, vB, 388 ); // Finally, do the multiply: p = mkFormVA( p, 4, v_dst, v_srcL, vB, v_srcR, 46 ); break; } - case Pavfp_CMPEQF: - p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 198 ); // vcmpeqfp + case Pavfp_CMPEQF: // vcmpeqfp + p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 198 ); break; - case Pavfp_CMPGTF: - p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 710 ); // vcmpgtfp + case Pavfp_CMPGTF: // vcmpgtfp + p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 710 ); break; - case Pavfp_CMPGEF: - p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 454 ); // vcmpgefp + case Pavfp_CMPGEF: // vcmpgefp + p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 454 ); break; default: @@ -3652,9 +3677,9 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, } case Pin_AvCMov: { - UInt v_dst = vregNo(i->Pin.AvCMov.dst); - UInt v_src = vregNo(i->Pin.AvCMov.src); - PPC32CondCode cc = i->Pin.AvCMov.cond; + UInt v_dst = vregNo(i->Pin.AvCMov.dst); + UInt v_src = vregNo(i->Pin.AvCMov.src); + PPCCondCode cc = i->Pin.AvCMov.cond; if (v_dst == v_src) goto done; @@ -3682,8 +3707,8 @@ Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, bad: vex_printf("\n=> "); - ppPPC32Instr(i, mode64); - vpanic("emit_PPC32Instr"); + ppPPCInstr(i, mode64); + vpanic("emit_PPCInstr"); /*NOTREACHED*/ done: diff --git a/VEX/priv/host-ppc32/hdefs.h b/VEX/priv/host-ppc32/hdefs.h index 3131e1524d..169e5c20d9 100644 --- a/VEX/priv/host-ppc32/hdefs.h +++ b/VEX/priv/host-ppc32/hdefs.h @@ -44,8 +44,8 @@ without prior written permission. 
*/ -#ifndef __LIBVEX_HOST_PPC32_HDEFS_H -#define __LIBVEX_HOST_PPC32_HDEFS_H +#ifndef __LIBVEX_HOST_PPC_HDEFS_H +#define __LIBVEX_HOST_PPC_HDEFS_H /* Num registers used for function calls */ #define PPC_N_REGPARMS 8 @@ -57,7 +57,7 @@ 32 real float regs, and 32 real vector regs. */ -extern void ppHRegPPC32 ( HReg ); +extern void ppHRegPPC ( HReg ); extern HReg hregPPC_GPR0 ( Bool mode64 ); // scratch reg / zero reg extern HReg hregPPC_GPR1 ( Bool mode64 ); // Stack Frame Pointer @@ -70,9 +70,9 @@ extern HReg hregPPC_GPR7 ( Bool mode64 ); extern HReg hregPPC_GPR8 ( Bool mode64 ); extern HReg hregPPC_GPR9 ( Bool mode64 ); extern HReg hregPPC_GPR10 ( Bool mode64 ); -extern HReg hregPPC_GPR11 ( Bool mode64 ); // not used: calls by ptr / env ptr for some langs -extern HReg hregPPC_GPR12 ( Bool mode64 ); // not used: exception handling and global linkage code -extern HReg hregPPC_GPR13 ( Bool mode64 ); // not used: thread specific pointer +extern HReg hregPPC_GPR11 ( Bool mode64 ); +extern HReg hregPPC_GPR12 ( Bool mode64 ); +extern HReg hregPPC_GPR13 ( Bool mode64 ); extern HReg hregPPC_GPR14 ( Bool mode64 ); extern HReg hregPPC_GPR15 ( Bool mode64 ); extern HReg hregPPC_GPR16 ( Bool mode64 ); @@ -89,74 +89,74 @@ extern HReg hregPPC_GPR26 ( Bool mode64 ); extern HReg hregPPC_GPR27 ( Bool mode64 ); extern HReg hregPPC_GPR28 ( Bool mode64 ); extern HReg hregPPC_GPR29 ( Bool mode64 ); // reserved for dispatcher -extern HReg hregPPC_GPR30 ( Bool mode64 ); // we use as VMX spill temporary +extern HReg hregPPC_GPR30 ( Bool mode64 ); // used as VMX spill temp extern HReg hregPPC_GPR31 ( Bool mode64 ); // GuestStatePtr (callee-saved) -extern HReg hregPPC32_FPR0 ( void ); -extern HReg hregPPC32_FPR1 ( void ); -extern HReg hregPPC32_FPR2 ( void ); -extern HReg hregPPC32_FPR3 ( void ); -extern HReg hregPPC32_FPR4 ( void ); -extern HReg hregPPC32_FPR5 ( void ); -extern HReg hregPPC32_FPR6 ( void ); -extern HReg hregPPC32_FPR7 ( void ); -extern HReg hregPPC32_FPR8 ( void ); -extern 
HReg hregPPC32_FPR9 ( void ); -extern HReg hregPPC32_FPR10 ( void ); -extern HReg hregPPC32_FPR11 ( void ); -extern HReg hregPPC32_FPR12 ( void ); -extern HReg hregPPC32_FPR13 ( void ); -extern HReg hregPPC32_FPR14 ( void ); -extern HReg hregPPC32_FPR15 ( void ); -extern HReg hregPPC32_FPR16 ( void ); -extern HReg hregPPC32_FPR17 ( void ); -extern HReg hregPPC32_FPR18 ( void ); -extern HReg hregPPC32_FPR19 ( void ); -extern HReg hregPPC32_FPR20 ( void ); -extern HReg hregPPC32_FPR21 ( void ); -extern HReg hregPPC32_FPR22 ( void ); -extern HReg hregPPC32_FPR23 ( void ); -extern HReg hregPPC32_FPR24 ( void ); -extern HReg hregPPC32_FPR25 ( void ); -extern HReg hregPPC32_FPR26 ( void ); -extern HReg hregPPC32_FPR27 ( void ); -extern HReg hregPPC32_FPR28 ( void ); -extern HReg hregPPC32_FPR29 ( void ); -extern HReg hregPPC32_FPR30 ( void ); -extern HReg hregPPC32_FPR31 ( void ); - -extern HReg hregPPC32_VR0 ( void ); -extern HReg hregPPC32_VR1 ( void ); -extern HReg hregPPC32_VR2 ( void ); -extern HReg hregPPC32_VR3 ( void ); -extern HReg hregPPC32_VR4 ( void ); -extern HReg hregPPC32_VR5 ( void ); -extern HReg hregPPC32_VR6 ( void ); -extern HReg hregPPC32_VR7 ( void ); -extern HReg hregPPC32_VR8 ( void ); -extern HReg hregPPC32_VR9 ( void ); -extern HReg hregPPC32_VR10 ( void ); -extern HReg hregPPC32_VR11 ( void ); -extern HReg hregPPC32_VR12 ( void ); -extern HReg hregPPC32_VR13 ( void ); -extern HReg hregPPC32_VR14 ( void ); -extern HReg hregPPC32_VR15 ( void ); -extern HReg hregPPC32_VR16 ( void ); -extern HReg hregPPC32_VR17 ( void ); -extern HReg hregPPC32_VR18 ( void ); -extern HReg hregPPC32_VR19 ( void ); -extern HReg hregPPC32_VR20 ( void ); -extern HReg hregPPC32_VR21 ( void ); -extern HReg hregPPC32_VR22 ( void ); -extern HReg hregPPC32_VR23 ( void ); -extern HReg hregPPC32_VR24 ( void ); -extern HReg hregPPC32_VR25 ( void ); -extern HReg hregPPC32_VR26 ( void ); -extern HReg hregPPC32_VR27 ( void ); -extern HReg hregPPC32_VR28 ( void ); -extern HReg 
hregPPC32_VR29 ( void ); -extern HReg hregPPC32_VR30 ( void ); -extern HReg hregPPC32_VR31 ( void ); +extern HReg hregPPC_FPR0 ( void ); +extern HReg hregPPC_FPR1 ( void ); +extern HReg hregPPC_FPR2 ( void ); +extern HReg hregPPC_FPR3 ( void ); +extern HReg hregPPC_FPR4 ( void ); +extern HReg hregPPC_FPR5 ( void ); +extern HReg hregPPC_FPR6 ( void ); +extern HReg hregPPC_FPR7 ( void ); +extern HReg hregPPC_FPR8 ( void ); +extern HReg hregPPC_FPR9 ( void ); +extern HReg hregPPC_FPR10 ( void ); +extern HReg hregPPC_FPR11 ( void ); +extern HReg hregPPC_FPR12 ( void ); +extern HReg hregPPC_FPR13 ( void ); +extern HReg hregPPC_FPR14 ( void ); +extern HReg hregPPC_FPR15 ( void ); +extern HReg hregPPC_FPR16 ( void ); +extern HReg hregPPC_FPR17 ( void ); +extern HReg hregPPC_FPR18 ( void ); +extern HReg hregPPC_FPR19 ( void ); +extern HReg hregPPC_FPR20 ( void ); +extern HReg hregPPC_FPR21 ( void ); +extern HReg hregPPC_FPR22 ( void ); +extern HReg hregPPC_FPR23 ( void ); +extern HReg hregPPC_FPR24 ( void ); +extern HReg hregPPC_FPR25 ( void ); +extern HReg hregPPC_FPR26 ( void ); +extern HReg hregPPC_FPR27 ( void ); +extern HReg hregPPC_FPR28 ( void ); +extern HReg hregPPC_FPR29 ( void ); +extern HReg hregPPC_FPR30 ( void ); +extern HReg hregPPC_FPR31 ( void ); + +extern HReg hregPPC_VR0 ( void ); +extern HReg hregPPC_VR1 ( void ); +extern HReg hregPPC_VR2 ( void ); +extern HReg hregPPC_VR3 ( void ); +extern HReg hregPPC_VR4 ( void ); +extern HReg hregPPC_VR5 ( void ); +extern HReg hregPPC_VR6 ( void ); +extern HReg hregPPC_VR7 ( void ); +extern HReg hregPPC_VR8 ( void ); +extern HReg hregPPC_VR9 ( void ); +extern HReg hregPPC_VR10 ( void ); +extern HReg hregPPC_VR11 ( void ); +extern HReg hregPPC_VR12 ( void ); +extern HReg hregPPC_VR13 ( void ); +extern HReg hregPPC_VR14 ( void ); +extern HReg hregPPC_VR15 ( void ); +extern HReg hregPPC_VR16 ( void ); +extern HReg hregPPC_VR17 ( void ); +extern HReg hregPPC_VR18 ( void ); +extern HReg hregPPC_VR19 ( void ); +extern HReg 
hregPPC_VR20 ( void ); +extern HReg hregPPC_VR21 ( void ); +extern HReg hregPPC_VR22 ( void ); +extern HReg hregPPC_VR23 ( void ); +extern HReg hregPPC_VR24 ( void ); +extern HReg hregPPC_VR25 ( void ); +extern HReg hregPPC_VR26 ( void ); +extern HReg hregPPC_VR27 ( void ); +extern HReg hregPPC_VR28 ( void ); +extern HReg hregPPC_VR29 ( void ); +extern HReg hregPPC_VR30 ( void ); +extern HReg hregPPC_VR31 ( void ); #define StackFramePtr(_mode64) hregPPC_GPR1(_mode64) #define GuestStatePtr(_mode64) hregPPC_GPR31(_mode64) @@ -175,7 +175,7 @@ typedef Pcf_7EQ = 30, /* zero | equal */ Pcf_7SO = 31 /* summary overflow */ } - PPC32CondFlag; + PPCCondFlag; typedef enum { /* Maps bc bitfield BO */ @@ -183,22 +183,22 @@ typedef Pct_TRUE = 0xC, Pct_ALWAYS = 0x14 } - PPC32CondTest; + PPCCondTest; typedef struct { - PPC32CondFlag flag; - PPC32CondTest test; + PPCCondFlag flag; + PPCCondTest test; } - PPC32CondCode; + PPCCondCode; -extern HChar* showPPC32CondCode ( PPC32CondCode ); +extern HChar* showPPCCondCode ( PPCCondCode ); /* constructor */ -extern PPC32CondCode mk_PPCCondCode ( PPC32CondTest, PPC32CondFlag ); +extern PPCCondCode mk_PPCCondCode ( PPCCondTest, PPCCondFlag ); /* false->true, true->false */ -extern PPC32CondTest invertCondTest ( PPC32CondTest ); +extern PPCCondTest invertCondTest ( PPCCondTest ); @@ -210,11 +210,11 @@ typedef Pam_IR, /* Immediate (signed 16-bit) + Reg */ Pam_RR /* Reg1 + Reg2 */ } - PPC32AModeTag; + PPCAModeTag; typedef struct { - PPC32AModeTag tag; + PPCAModeTag tag; union { struct { HReg base; @@ -226,14 +226,14 @@ typedef } RR; } Pam; } - PPC32AMode; + PPCAMode; -extern PPC32AMode* PPC32AMode_IR ( Int, HReg ); -extern PPC32AMode* PPC32AMode_RR ( HReg, HReg ); +extern PPCAMode* PPCAMode_IR ( Int, HReg ); +extern PPCAMode* PPCAMode_RR ( HReg, HReg ); -extern PPC32AMode* dopyPPC32AMode ( PPC32AMode* ); +extern PPCAMode* dopyPPCAMode ( PPCAMode* ); -extern void ppPPC32AMode ( PPC32AMode* ); +extern void ppPPCAMode ( PPCAMode* ); /* --------- 
Operand, which can be a reg or a u16/s16. --------- */ @@ -243,11 +243,11 @@ typedef Prh_Imm=1, Prh_Reg=2 } - PPC32RHTag; + PPCRHTag; typedef struct { - PPC32RHTag tag; + PPCRHTag tag; union { struct { Bool syned; @@ -259,12 +259,12 @@ typedef } Prh; } - PPC32RH; + PPCRH; -extern PPC32RH* PPC32RH_Imm ( Bool, UShort ); -extern PPC32RH* PPC32RH_Reg ( HReg ); +extern PPCRH* PPCRH_Imm ( Bool, UShort ); +extern PPCRH* PPCRH_Reg ( HReg ); -extern void ppPPC32RH ( PPC32RH* ); +extern void ppPPCRH ( PPCRH* ); /* --------- Operand, which can be a reg or a u32/64. --------- */ @@ -274,23 +274,23 @@ typedef Pri_Imm=3, Pri_Reg=4 } - PPC32RITag; + PPCRITag; typedef struct { - PPC32RITag tag; + PPCRITag tag; union { ULong Imm; HReg Reg; } Pri; } - PPC32RI; + PPCRI; -extern PPC32RI* PPC32RI_Imm ( ULong ); -extern PPC32RI* PPC32RI_Reg ( HReg ); +extern PPCRI* PPCRI_Imm ( ULong ); +extern PPCRI* PPCRI_Reg ( HReg ); -extern void ppPPC32RI ( PPC32RI* ); +extern void ppPPCRI ( PPCRI* ); /* --------- Operand, which can be a vector reg or a s6. --------- */ @@ -300,23 +300,23 @@ typedef Pvi_Imm=5, Pvi_Reg=6 } - PPC32VI5sTag; + PPCVI5sTag; typedef struct { - PPC32VI5sTag tag; + PPCVI5sTag tag; union { Char Imm5s; HReg Reg; } Pvi; } - PPC32VI5s; + PPCVI5s; -extern PPC32VI5s* PPC32VI5s_Imm ( Char ); -extern PPC32VI5s* PPC32VI5s_Reg ( HReg ); +extern PPCVI5s* PPCVI5s_Imm ( Char ); +extern PPCVI5s* PPCVI5s_Reg ( HReg ); -extern void ppPPC32VI5s ( PPC32VI5s* ); +extern void ppPPCVI5s ( PPCVI5s* ); /* --------- Instructions. --------- */ @@ -329,9 +329,9 @@ typedef Pun_CLZ32, Pun_CLZ64 } - PPC32UnaryOp; + PPCUnaryOp; -extern HChar* showPPC32UnaryOp ( PPC32UnaryOp ); +extern HChar* showPPCUnaryOp ( PPCUnaryOp ); /* --------- */ @@ -341,11 +341,11 @@ typedef Palu_ADD, Palu_SUB, Palu_AND, Palu_OR, Palu_XOR, } - PPC32AluOp; + PPCAluOp; extern -HChar* showPPC32AluOp ( PPC32AluOp, - Bool /* is the 2nd operand an immediate? 
*/); +HChar* showPPCAluOp ( PPCAluOp, + Bool /* is the 2nd operand an immediate? */); /* --------- */ @@ -354,12 +354,12 @@ typedef Pshft_INVALID, Pshft_SHL, Pshft_SHR, Pshft_SAR, } - PPC32ShftOp; + PPCShftOp; extern -HChar* showPPC32ShftOp ( PPC32ShftOp, - Bool /* is the 2nd operand an immediate? */, - Bool /* is this a 32bit or 64bit op? */ ); +HChar* showPPCShftOp ( PPCShftOp, + Bool /* is the 2nd operand an immediate? */, + Bool /* is this a 32bit or 64bit op? */ ); /* --------- */ @@ -372,9 +372,9 @@ typedef /* Unary */ Pfp_SQRT, Pfp_ABS, Pfp_NEG, Pfp_MOV } - PPC32FpOp; + PPCFpOp; -extern HChar* showPPC32FpOp ( PPC32FpOp ); +extern HChar* showPPCFpOp ( PPCFpOp ); /* --------- */ @@ -411,9 +411,9 @@ typedef /* Merge */ Pav_MRGHI, Pav_MRGLO, } - PPC32AvOp; + PPCAvOp; -extern HChar* showPPC32AvOp ( PPC32AvOp ); +extern HChar* showPPCAvOp ( PPCAvOp ); /* --------- */ @@ -431,9 +431,9 @@ typedef Pavfp_CVTU2F, Pavfp_CVTS2F, Pavfp_QCVTF2U, Pavfp_QCVTF2S, Pavfp_ROUNDM, Pavfp_ROUNDP, Pavfp_ROUNDN, Pavfp_ROUNDZ, } - PPC32AvFpOp; + PPCAvFpOp; -extern HChar* showPPC32AvFpOp ( PPC32AvFpOp ); +extern HChar* showPPCAvFpOp ( PPCAvFpOp ); /* --------- */ @@ -442,7 +442,7 @@ typedef Pin_LI, /* load word (32/64-bit) immediate (fake insn) */ Pin_Alu, /* word add/sub/and/or/xor */ Pin_Shft, /* word shl/shr/sar */ - Pin_AddSubC32, /* 32-bit add/sub with read/write carry */ + Pin_AddSubC, /* add/sub with read/write carry */ Pin_Cmp, /* word compare */ Pin_Unary, /* not, neg, clz */ Pin_MulL, /* widening multiply */ @@ -450,9 +450,9 @@ typedef Pin_Call, /* call to address in register */ Pin_Goto, /* conditional/unconditional jmp to dst */ Pin_CMov, /* conditional move */ - Pin_Load, /* load a 8|16|32 bit value from mem */ - Pin_Store, /* store a 8|16|32 bit value to mem */ - Pin_Set32, /* convert condition code to 32-bit value */ + Pin_Load, /* load a 8|16|32|64 bit value from mem */ + Pin_Store, /* store a 8|16|32|64 bit value to mem */ + Pin_Set, /* convert condition code to value 
0 or 1 */ Pin_MfCR, /* move from condition register to GPR */ Pin_MFence, /* mem fence */ @@ -461,7 +461,7 @@ typedef Pin_FpLdSt, /* FP load/store */ Pin_FpF64toF32, /* FP round IEEE754 double to IEEE754 single */ Pin_FpF64toI32, /* FP round IEEE754 double to 32-bit integer */ - Pin_FpF64toI64, /* FP round IEEE754 double to 32-bit integer */ + Pin_FpF64toI64, /* FP round IEEE754 double to 64-bit integer */ Pin_FpI64toF64, /* FP round IEEE754 64-bit integer to double */ Pin_FpCMov, /* FP floating point conditional move */ Pin_FpLdFPSCR, /* mtfsf */ @@ -487,16 +487,16 @@ typedef Pin_AvLdVSCR, /* mtvscr */ Pin_AvCMov /* AV conditional move */ } - PPC32InstrTag; + PPCInstrTag; /* Destinations are on the LEFT (first operand) */ typedef struct { - PPC32InstrTag tag; + PPCInstrTag tag; union { - /* Get a 32-bit literal into a register. May turn into one or - two real insns. */ + /* Get a 32/64-bit literal into a register. + May turn into a number of real insns. */ struct { HReg dst; ULong imm64; @@ -511,21 +511,21 @@ typedef is an unsigned 16. */ struct { - PPC32AluOp op; - HReg dst; - HReg srcL; - PPC32RH* srcR; + PPCAluOp op; + HReg dst; + HReg srcL; + PPCRH* srcR; } Alu; /* Integer shl/shr/sar. Limitations: the immediate, if it exists, is a signed 5-bit value between 1 and 31 inclusive. */ struct { - PPC32ShftOp op; - Bool sz32; /* mode64 has both 32 and 64bit shft */ - HReg dst; - HReg srcL; - PPC32RH* srcR; + PPCShftOp op; + Bool sz32; /* mode64 has both 32 and 64bit shft */ + HReg dst; + HReg srcL; + PPCRH* srcR; } Shft; /* */ struct { @@ -534,22 +534,22 @@ typedef HReg dst; HReg srcL; HReg srcR; - } AddSubC32; + } AddSubC; /* If signed, the immediate, if it exists, is a signed 16, else it is an unsigned 16. 
*/ struct { - Bool syned; - Bool sz32; /* mode64 has both 32 and 64bit cmp */ - UInt crfD; - HReg srcL; - PPC32RH* srcR; + Bool syned; + Bool sz32; /* mode64 has both 32 and 64bit cmp */ + UInt crfD; + HReg srcL; + PPCRH* srcR; } Cmp; /* Not and Neg */ struct { - PPC32UnaryOp op; - HReg dst; - HReg src; - } Unary32; + PPCUnaryOp op; + HReg dst; + HReg src; + } Unary; struct { Bool syned; /* meaningless if hi32==False */ Bool hi; /* False=>low, True=>high */ @@ -572,77 +572,77 @@ typedef using a bit mask (1<dst, re-interpreting dst to an IEEE754 32-bit (float) type. */ struct { HReg src; HReg dst; } FpF64toF32; - /* By observing the current FPU rounding mode, round src into dst, + /* By observing the current FPU rounding mode, round src->dst, re-interpreting dst to an 32-bit integer type. */ struct { HReg src; @@ -653,8 +653,8 @@ typedef HReg src; HReg dst; } FpF64toI64; - /* By observing the current FPU rounding mode, reinterpret src from - a 64-bit integer to double type, and round into dst. */ + /* By observing the current FPU rounding mode, reinterpret src + from a 64bit integer to double type, and round into dst. */ struct { HReg src; HReg dst; @@ -662,9 +662,9 @@ typedef /* Mov src to dst on the given condition, which may not be the bogus Xcc_ALWAYS. 
*/ struct { - PPC32CondCode cond; - HReg dst; - HReg src; + PPCCondCode cond; + HReg dst; + HReg src; } FpCMov; /* Load FP Status & Control Register */ struct { @@ -686,48 +686,48 @@ typedef /* Simplistic AltiVec */ struct { - Bool isLoad; - UChar sz; /* 8|16|32|128 */ - HReg reg; - PPC32AMode* addr; + Bool isLoad; + UChar sz; /* 8|16|32|128 */ + HReg reg; + PPCAMode* addr; } AvLdSt; struct { - PPC32AvOp op; - HReg dst; - HReg src; + PPCAvOp op; + HReg dst; + HReg src; } AvUnary; struct { - PPC32AvOp op; - HReg dst; - HReg srcL; - HReg srcR; + PPCAvOp op; + HReg dst; + HReg srcL; + HReg srcR; } AvBinary; struct { - PPC32AvOp op; - HReg dst; - HReg srcL; - HReg srcR; + PPCAvOp op; + HReg dst; + HReg srcL; + HReg srcR; } AvBin8x16; struct { - PPC32AvOp op; - HReg dst; - HReg srcL; - HReg srcR; + PPCAvOp op; + HReg dst; + HReg srcL; + HReg srcR; } AvBin16x8; struct { - PPC32AvOp op; - HReg dst; - HReg srcL; - HReg srcR; + PPCAvOp op; + HReg dst; + HReg srcL; + HReg srcR; } AvBin32x4; struct { - PPC32AvFpOp op; + PPCAvFpOp op; HReg dst; HReg srcL; HReg srcR; } AvBin32Fx4; struct { - PPC32AvFpOp op; + PPCAvFpOp op; HReg dst; HReg src; } AvUn32Fx4; @@ -753,14 +753,14 @@ typedef struct { UChar sz; /* 8,16,32 */ HReg dst; - PPC32VI5s* src; + PPCVI5s* src; } AvSplat; /* Mov src to dst on the given condition, which may not be the bogus Xcc_ALWAYS. 
*/ struct { - PPC32CondCode cond; - HReg dst; - HReg src; + PPCCondCode cond; + HReg dst; + HReg src; } AvCMov; /* Load AltiVec Status & Control Register */ struct { @@ -768,71 +768,71 @@ typedef } AvLdVSCR; } Pin; } - PPC32Instr; - - -extern PPC32Instr* PPC32Instr_LI ( HReg, ULong, Bool ); -extern PPC32Instr* PPC32Instr_Alu ( PPC32AluOp, HReg, HReg, PPC32RH* ); -extern PPC32Instr* PPC32Instr_Shft ( PPC32ShftOp, Bool sz32, HReg, HReg, PPC32RH* ); -extern PPC32Instr* PPC32Instr_AddSubC32 ( Bool, Bool, HReg, HReg, HReg ); -extern PPC32Instr* PPC32Instr_Cmp ( Bool, Bool, UInt, HReg, PPC32RH* ); -extern PPC32Instr* PPC32Instr_Unary ( PPC32UnaryOp op, HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi32, Bool sz32, HReg, HReg, HReg ); -extern PPC32Instr* PPC32Instr_Div ( Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_Call ( PPC32CondCode, Addr64, UInt ); -extern PPC32Instr* PPC32Instr_Goto ( IRJumpKind, PPC32CondCode cond, PPC32RI* dst ); -extern PPC32Instr* PPC32Instr_CMov ( PPC32CondCode, HReg dst, PPC32RI* src ); -extern PPC32Instr* PPC32Instr_Load ( UChar sz, Bool syned, - HReg dst, PPC32AMode* src, Bool mode64 ); -extern PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst, - HReg src, Bool mode64 ); -extern PPC32Instr* PPC32Instr_Set32 ( PPC32CondCode cond, HReg dst ); -extern PPC32Instr* PPC32Instr_MfCR ( HReg dst ); -extern PPC32Instr* PPC32Instr_MFence ( void ); - -extern PPC32Instr* PPC32Instr_FpUnary ( PPC32FpOp op, HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_FpBinary ( PPC32FpOp op, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_FpLdSt ( Bool isLoad, UChar sz, HReg, PPC32AMode* ); -extern PPC32Instr* PPC32Instr_FpF64toF32 ( HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_FpF64toI32 ( HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_FpF64toI64 ( HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_FpI64toF64 ( HReg dst, HReg src ); -extern PPC32Instr* 
PPC32Instr_FpCMov ( PPC32CondCode, HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_FpLdFPSCR ( HReg src ); -extern PPC32Instr* PPC32Instr_FpCmp ( HReg dst, HReg srcL, HReg srcR ); - -extern PPC32Instr* PPC32Instr_RdWrLR ( Bool wrLR, HReg gpr ); - -extern PPC32Instr* PPC32Instr_AvLdSt ( Bool isLoad, UChar sz, HReg, PPC32AMode* ); -extern PPC32Instr* PPC32Instr_AvUnary ( PPC32AvOp op, HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_AvBinary ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_AvBin8x16 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_AvBin16x8 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_AvBin32x4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_AvBin32Fx4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_AvUn32Fx4 ( PPC32AvOp op, HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ); -extern PPC32Instr* PPC32Instr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_AvShlDbl ( UChar shift, HReg dst, HReg srcL, HReg srcR ); -extern PPC32Instr* PPC32Instr_AvSplat ( UChar sz, HReg dst, PPC32VI5s* src ); -extern PPC32Instr* PPC32Instr_AvCMov ( PPC32CondCode, HReg dst, HReg src ); -extern PPC32Instr* PPC32Instr_AvLdVSCR ( HReg src ); - -extern void ppPPC32Instr ( PPC32Instr*, Bool mode64 ); + PPCInstr; + + +extern PPCInstr* PPCInstr_LI ( HReg, ULong, Bool ); +extern PPCInstr* PPCInstr_Alu ( PPCAluOp, HReg, HReg, PPCRH* ); +extern PPCInstr* PPCInstr_Shft ( PPCShftOp, Bool sz32, HReg, HReg, PPCRH* ); +extern PPCInstr* PPCInstr_AddSubC ( Bool, Bool, HReg, HReg, HReg ); +extern PPCInstr* PPCInstr_Cmp ( Bool, Bool, UInt, HReg, PPCRH* ); +extern PPCInstr* PPCInstr_Unary ( PPCUnaryOp op, HReg dst, HReg src ); +extern PPCInstr* PPCInstr_MulL ( Bool syned, Bool hi32, Bool sz32, HReg, HReg, HReg ); +extern PPCInstr* 
PPCInstr_Div ( Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_Call ( PPCCondCode, Addr64, UInt ); +extern PPCInstr* PPCInstr_Goto ( IRJumpKind, PPCCondCode cond, PPCRI* dst ); +extern PPCInstr* PPCInstr_CMov ( PPCCondCode, HReg dst, PPCRI* src ); +extern PPCInstr* PPCInstr_Load ( UChar sz, Bool syned, + HReg dst, PPCAMode* src, Bool mode64 ); +extern PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst, + HReg src, Bool mode64 ); +extern PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst ); +extern PPCInstr* PPCInstr_MfCR ( HReg dst ); +extern PPCInstr* PPCInstr_MFence ( void ); + +extern PPCInstr* PPCInstr_FpUnary ( PPCFpOp op, HReg dst, HReg src ); +extern PPCInstr* PPCInstr_FpBinary ( PPCFpOp op, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_FpLdSt ( Bool isLoad, UChar sz, HReg, PPCAMode* ); +extern PPCInstr* PPCInstr_FpF64toF32 ( HReg dst, HReg src ); +extern PPCInstr* PPCInstr_FpF64toI32 ( HReg dst, HReg src ); +extern PPCInstr* PPCInstr_FpF64toI64 ( HReg dst, HReg src ); +extern PPCInstr* PPCInstr_FpI64toF64 ( HReg dst, HReg src ); +extern PPCInstr* PPCInstr_FpCMov ( PPCCondCode, HReg dst, HReg src ); +extern PPCInstr* PPCInstr_FpLdFPSCR ( HReg src ); +extern PPCInstr* PPCInstr_FpCmp ( HReg dst, HReg srcL, HReg srcR ); + +extern PPCInstr* PPCInstr_RdWrLR ( Bool wrLR, HReg gpr ); + +extern PPCInstr* PPCInstr_AvLdSt ( Bool isLoad, UChar sz, HReg, PPCAMode* ); +extern PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src ); +extern PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_AvUn32Fx4 ( PPCAvOp op, HReg dst, HReg 
src ); +extern PPCInstr* PPCInstr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ); +extern PPCInstr* PPCInstr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_AvShlDbl ( UChar shift, HReg dst, HReg srcL, HReg srcR ); +extern PPCInstr* PPCInstr_AvSplat ( UChar sz, HReg dst, PPCVI5s* src ); +extern PPCInstr* PPCInstr_AvCMov ( PPCCondCode, HReg dst, HReg src ); +extern PPCInstr* PPCInstr_AvLdVSCR ( HReg src ); + +extern void ppPPCInstr ( PPCInstr*, Bool mode64 ); /* Some functions that insulate the register allocator from details of the underlying instruction set. */ -extern void getRegUsage_PPC32Instr ( HRegUsage*, PPC32Instr*, Bool mode64 ); -extern void mapRegs_PPC32Instr ( HRegRemap*, PPC32Instr* , Bool mode64); -extern Bool isMove_PPC32Instr ( PPC32Instr*, HReg*, HReg* ); -extern Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr*, - Bool mode64, void* dispatch ); -extern PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 ); -extern PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 ); -extern void getAllocableRegs_PPC32 ( Int*, HReg**, Bool mode64 ); -extern HInstrArray* iselBB_PPC32 ( IRBB*, VexArchInfo* ); - -#endif /* ndef __LIBVEX_HOST_PPC32_HDEFS_H */ +extern void getRegUsage_PPCInstr ( HRegUsage*, PPCInstr*, Bool mode64 ); +extern void mapRegs_PPCInstr ( HRegRemap*, PPCInstr* , Bool mode64); +extern Bool isMove_PPCInstr ( PPCInstr*, HReg*, HReg* ); +extern Int emit_PPCInstr ( UChar* buf, Int nbuf, PPCInstr*, + Bool mode64, void* dispatch ); +extern PPCInstr* genSpill_PPC ( HReg rreg, UShort offsetB, Bool mode64 ); +extern PPCInstr* genReload_PPC ( HReg rreg, UShort offsetB, Bool mode64 ); +extern void getAllocableRegs_PPC ( Int*, HReg**, Bool mode64 ); +extern HInstrArray* iselBB_PPC ( IRBB*, VexArchInfo* ); + +#endif /* ndef __LIBVEX_HOST_PPC_HDEFS_H */ /*---------------------------------------------------------------*/ /*--- end host-ppc32/hdefs.h ---*/ diff --git 
a/VEX/priv/host-ppc32/isel.c b/VEX/priv/host-ppc32/isel.c index 61b05d7a12..0ea8577596 100644 --- a/VEX/priv/host-ppc32/isel.c +++ b/VEX/priv/host-ppc32/isel.c @@ -55,7 +55,7 @@ #include "host-ppc32/hdefs.h" /* Is our guest binary 32 or 64bit? Set at each call to - iselBB_PPC32 below. */ + iselBB_PPC below. */ static Bool mode64 = False; #define HRcIntWRDSZ (mode64 ? HRcInt64 : HRcInt32) @@ -113,7 +113,7 @@ static Bool mode64 = False; /*---------------------------------------------------------*/ -/*--- PPC32 FP Status & Control Register Conventions ---*/ +/*--- PPC FP Status & Control Register Conventions ---*/ /*---------------------------------------------------------*/ /* Vex-generated code expects to run with the FPU set as follows: all @@ -178,10 +178,9 @@ static IRExpr* bind ( Int binder ) same set of IRTemps as the type mapping does. - vregmap holds the primary register for the IRTemp. - - vregmapHI is only used for 64-bit integer-typed - IRTemps. It holds the identity of a second - 32-bit virtual HReg, which holds the high half - of the value. + - vregmapHI is only used in 32bit mode, for 64-bit integer- + typed IRTemps. It holds the identity of a second 32-bit + virtual HReg, which holds the high half of the value. - A copy of the link reg, so helper functions don't kill it. 
@@ -222,7 +221,8 @@ static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp ) return env->vregmap[tmp]; } -static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp ) +static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, + ISelEnv* env, IRTemp tmp ) { vassert(!mode64); vassert(tmp >= 0); @@ -232,7 +232,8 @@ static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp ) *vrHI = env->vregmapHI[tmp]; } -static void lookupIRTemp128 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp ) +static void lookupIRTemp128 ( HReg* vrHI, HReg* vrLO, + ISelEnv* env, IRTemp tmp ) { vassert(mode64); vassert(tmp >= 0); @@ -242,11 +243,11 @@ static void lookupIRTemp128 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp ) *vrHI = env->vregmapHI[tmp]; } -static void addInstr ( ISelEnv* env, PPC32Instr* instr ) +static void addInstr ( ISelEnv* env, PPCInstr* instr ) { addHInstr(env->code, instr); if (vex_traceflags & VEX_TRACE_VCODE) { - ppPPC32Instr(instr, mode64); + ppPPCInstr(instr, mode64); vex_printf("\n"); } } @@ -292,30 +293,30 @@ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e ); signed or not. If yes, this will never return -32768 as an immediate; this guaranteed that all signed immediates that are return can have their sign inverted if need be. */ -static PPC32RH* iselIntExpr_RH_wrk ( ISelEnv* env, +static PPCRH* iselIntExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e ); -static PPC32RH* iselIntExpr_RH ( ISelEnv* env, +static PPCRH* iselIntExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e ); /* Compute an I32 into a RI (reg or 32-bit immediate). */ -static PPC32RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e ); -static PPC32RI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e ); +static PPCRI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e ); +static PPCRI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e ); /* Compute an I8 into a reg-or-5-bit-unsigned-immediate, the latter being an immediate in the range 1 .. 31 inclusive. Used for doing shift amounts. 
*/ -static PPC32RH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e ); -static PPC32RH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e ); +static PPCRH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e ); +static PPCRH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e ); /* Compute an I8 into a reg-or-6-bit-unsigned-immediate, the latter being an immediate in the range 1 .. 63 inclusive. Used for doing shift amounts. */ -static PPC32RH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e ); -static PPC32RH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e ); +static PPCRH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e ); +static PPCRH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e ); /* Compute an I32 into an AMode. */ -static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e ); -static PPC32AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e ); +static PPCAMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e ); +static PPCAMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e ); /* Compute an I64 into a GPR pair. */ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, @@ -329,8 +330,8 @@ static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, static void iselInt128Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ); -static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ); -static PPC32CondCode iselCondCode ( ISelEnv* env, IRExpr* e ); +static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ); +static PPCCondCode iselCondCode ( ISelEnv* env, IRExpr* e ); static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ); static HReg iselDblExpr ( ISelEnv* env, IRExpr* e ); @@ -357,11 +358,11 @@ static HReg iselVecExpr ( ISelEnv* env, IRExpr* e ); /* Make an int reg-reg move. 
*/ -static PPC32Instr* mk_iMOVds_RR ( HReg r_dst, HReg r_src ) +static PPCInstr* mk_iMOVds_RR ( HReg r_dst, HReg r_src ) { vassert(hregClass(r_dst) == HRcIntWRDSZ); vassert(hregClass(r_src) == HRcIntWRDSZ); - return PPC32Instr_Alu(Palu_OR, r_dst, r_src, PPC32RH_Reg(r_src)); + return PPCInstr_Alu(Palu_OR, r_dst, r_src, PPCRH_Reg(r_src)); } //.. /* Make a vector reg-reg move. */ @@ -379,16 +380,16 @@ static void add_to_sp ( ISelEnv* env, UInt n ) { HReg sp = StackFramePtr(mode64); vassert(n < 256 && (n%16) == 0); - addInstr(env, PPC32Instr_Alu( - Palu_ADD, sp, sp, PPC32RH_Imm(True,toUShort(n)))); + addInstr(env, PPCInstr_Alu( Palu_ADD, sp, sp, + PPCRH_Imm(True,toUShort(n)) )); } static void sub_from_sp ( ISelEnv* env, UInt n ) { HReg sp = StackFramePtr(mode64); vassert(n < 256 && (n%16) == 0); - addInstr(env, PPC32Instr_Alu( - Palu_SUB, sp, sp, PPC32RH_Imm(True,toUShort(n)))); + addInstr(env, PPCInstr_Alu( Palu_SUB, sp, sp, + PPCRH_Imm(True,toUShort(n)) )); } /* @@ -403,36 +404,37 @@ static HReg get_sp_aligned16 ( ISelEnv* env ) HReg align16 = newVRegI(env); addInstr(env, mk_iMOVds_RR(r, StackFramePtr(mode64))); // add 16 - addInstr(env, PPC32Instr_Alu( - Palu_ADD, r, r, PPC32RH_Imm(True,toUShort(16)))); + addInstr(env, PPCInstr_Alu( Palu_ADD, r, r, + PPCRH_Imm(True,toUShort(16)) )); // mask to quadword - addInstr(env, PPC32Instr_LI(align16, 0xFFFFFFFFFFFFFFF0ULL, mode64)); - addInstr(env, PPC32Instr_Alu(Palu_AND, r,r, PPC32RH_Reg(align16))); + addInstr(env, PPCInstr_LI(align16, 0xFFFFFFFFFFFFFFF0ULL, mode64)); + addInstr(env, PPCInstr_Alu(Palu_AND, r,r, PPCRH_Reg(align16))); return r; } /* Load 2*I32 regs to fp reg */ -static HReg mk_LoadRR32toFPR ( ISelEnv* env, HReg r_srcHi, HReg r_srcLo ) +static HReg mk_LoadRR32toFPR ( ISelEnv* env, + HReg r_srcHi, HReg r_srcLo ) { HReg fr_dst = newVRegF(env); - PPC32AMode *am_addr0, *am_addr1; + PPCAMode *am_addr0, *am_addr1; vassert(!mode64); vassert(hregClass(r_srcHi) == HRcInt32); vassert(hregClass(r_srcLo) == HRcInt32); 
sub_from_sp( env, 16 ); // Move SP down 16 bytes - am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64)); - am_addr1 = PPC32AMode_IR(4, StackFramePtr(mode64)); + am_addr0 = PPCAMode_IR( 0, StackFramePtr(mode64) ); + am_addr1 = PPCAMode_IR( 4, StackFramePtr(mode64) ); // store hi,lo as Ity_I32's - addInstr(env, PPC32Instr_Store( 4, am_addr0, r_srcHi, mode64 )); - addInstr(env, PPC32Instr_Store( 4, am_addr1, r_srcLo, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_addr0, r_srcHi, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_addr1, r_srcLo, mode64 )); // load as float - addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0)); + addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0)); add_to_sp( env, 16 ); // Reset SP return fr_dst; @@ -442,19 +444,19 @@ static HReg mk_LoadRR32toFPR ( ISelEnv* env, HReg r_srcHi, HReg r_srcLo ) static HReg mk_LoadR64toFPR ( ISelEnv* env, HReg r_src ) { HReg fr_dst = newVRegF(env); - PPC32AMode *am_addr0; + PPCAMode *am_addr0; vassert(mode64); vassert(hregClass(r_src) == HRcInt64); sub_from_sp( env, 16 ); // Move SP down 16 bytes - am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64)); + am_addr0 = PPCAMode_IR( 0, StackFramePtr(mode64) ); // store as Ity_I64 - addInstr(env, PPC32Instr_Store( 8, am_addr0, r_src, mode64 )); + addInstr(env, PPCInstr_Store( 8, am_addr0, r_src, mode64 )); // load as float - addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0)); + addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0)); add_to_sp( env, 16 ); // Reset SP return fr_dst; @@ -464,14 +466,14 @@ static HReg mk_LoadR64toFPR ( ISelEnv* env, HReg r_src ) /* Given an amode, return one which references 4 bytes further along. 
*/ -static PPC32AMode* advance4 ( ISelEnv* env, PPC32AMode* am ) +static PPCAMode* advance4 ( ISelEnv* env, PPCAMode* am ) { - PPC32AMode* am4 = dopyPPC32AMode(am); + PPCAMode* am4 = dopyPPCAMode( am ); if (am4->tag == Pam_IR && am4->Pam.IR.index + 4 <= 32767) { am4->Pam.IR.index += 4; } else { - vpanic("advance4(ppc32,host)"); + vpanic("advance4(ppc,host)"); } return am4; } @@ -502,13 +504,13 @@ void doHelperCall ( ISelEnv* env, Bool passBBP, IRExpr* guard, IRCallee* cee, IRExpr** args ) { - PPC32CondCode cc; - HReg argregs[PPC_N_REGPARMS]; - HReg tmpregs[PPC_N_REGPARMS]; - Bool go_fast; - Int n_args, i, argreg; - UInt argiregs; - ULong target; + PPCCondCode cc; + HReg argregs[PPC_N_REGPARMS]; + HReg tmpregs[PPC_N_REGPARMS]; + Bool go_fast; + Int n_args, i, argreg; + UInt argiregs; + ULong target; /* Marshal args for a call and do the call. @@ -627,15 +629,15 @@ void doHelperCall ( ISelEnv* env, vassert(argreg < PPC_N_REGPARMS); vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 || typeOfIRExpr(env->type_env, args[i]) == Ity_I64); - if (!mode64) { if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) { argiregs |= (1 << (argreg+3)); - addInstr(env, mk_iMOVds_RR( argregs[argreg], - iselIntExpr_R(env, args[i]) )); + addInstr(env, + mk_iMOVds_RR( argregs[argreg], + iselIntExpr_R(env, args[i]) )); } else { // Ity_I64 HReg rHi, rLo; - if (argreg%2 == 1) // ppc32 abi spec for passing a LONG_LONG + if (argreg%2 == 1) // ppc32 abi spec for passing LONG_LONG argreg++; // XXX: odd argreg => even rN vassert(argreg < PPC_N_REGPARMS-1); iselInt64Expr(&rHi,&rLo, env, args[i]); @@ -664,7 +666,8 @@ void doHelperCall ( ISelEnv* env, /* This is pretty stupid; better to move directly to r3 after the rest of the args are done. 
*/ tmpregs[argreg] = newVRegI(env); - addInstr(env, mk_iMOVds_RR( tmpregs[argreg], GuestStatePtr(mode64) )); + addInstr(env, mk_iMOVds_RR( tmpregs[argreg], + GuestStatePtr(mode64) )); argreg++; } @@ -677,7 +680,7 @@ void doHelperCall ( ISelEnv* env, tmpregs[argreg] = iselIntExpr_R(env, args[i]); } else { // Ity_I64 HReg rHi, rLo; - if (argreg%2 == 1) // ppc32 abi spec for passing a LONG_LONG + if (argreg%2 == 1) // ppc32 abi spec for passing LONG_LONG argreg++; // XXX: odd argreg => even rN vassert(argreg < PPC_N_REGPARMS-1); iselInt64Expr(&rHi,&rLo, env, args[i]); @@ -721,7 +724,7 @@ void doHelperCall ( ISelEnv* env, toUInt(Ptr_to_ULong(cee->addr)); /* Finally, the call itself. */ - addInstr(env, PPC32Instr_Call( cc, (Addr64)target, argiregs )); + addInstr(env, PPCInstr_Call( cc, (Addr64)target, argiregs )); } @@ -737,17 +740,17 @@ void set_FPU_rounding_default ( ISelEnv* env ) - so we can set the whole register at once (faster) note: upper 32 bits ignored by FpLdFPSCR */ - addInstr(env, PPC32Instr_LI(r_src, 0x0, mode64)); + addInstr(env, PPCInstr_LI(r_src, 0x0, mode64)); if (mode64) { fr_src = mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64 } else { fr_src = mk_LoadRR32toFPR( env, r_src, r_src ); // 2*I32 -> F64 } - addInstr(env, PPC32Instr_FpLdFPSCR( fr_src )); + addInstr(env, PPCInstr_FpLdFPSCR( fr_src )); } -/* Convert IR rounding mode to PPC32 encoding */ -static HReg roundModeIRtoPPC32 ( ISelEnv* env, HReg r_rmIR ) +/* Convert IR rounding mode to PPC encoding */ +static HReg roundModeIRtoPPC ( ISelEnv* env, HReg r_rmIR ) { /* rounding mode | PPC | IR @@ -757,29 +760,29 @@ static HReg roundModeIRtoPPC32 ( ISelEnv* env, HReg r_rmIR ) to +infinity | 10 | 10 to -infinity | 11 | 01 */ - HReg r_rmPPC32 = newVRegI(env); - HReg r_tmp = newVRegI(env); + HReg r_rmPPC = newVRegI(env); + HReg r_tmp = newVRegI(env); vassert(hregClass(r_rmIR) == HRcIntWRDSZ); // AND r_rmIR,3 -- shouldn't be needed; paranoia - addInstr(env, - PPC32Instr_Alu(Palu_AND, r_rmIR, r_rmIR, 
PPC32RH_Imm(False,3))); - - // r_rmPPC32 = XOR( r_rmIR, (r_rmIR << 1) & 2) - addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/, - r_tmp, r_rmIR, PPC32RH_Imm(False,1))); - addInstr(env, - PPC32Instr_Alu(Palu_AND, r_tmp, r_tmp, PPC32RH_Imm(False,2))); - addInstr(env, - PPC32Instr_Alu(Palu_XOR, r_rmPPC32, r_rmIR, PPC32RH_Reg(r_tmp))); - return r_rmPPC32; + addInstr(env, PPCInstr_Alu( Palu_AND, r_rmIR, r_rmIR, + PPCRH_Imm(False,3) )); + + // r_rmPPC = XOR( r_rmIR, (r_rmIR << 1) & 2) + addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, + r_tmp, r_rmIR, PPCRH_Imm(False,1))); + addInstr(env, PPCInstr_Alu( Palu_AND, r_tmp, r_tmp, + PPCRH_Imm(False,2) )); + addInstr(env, PPCInstr_Alu( Palu_XOR, r_rmPPC, r_rmIR, + PPCRH_Reg(r_tmp) )); + return r_rmPPC; } /* Mess with the FPU's rounding mode: 'mode' is an I32-typed expression denoting a value in the range 0 .. 3, indicating a round - mode encoded as per type IRRoundingMode. Set the PPC32 FPSCR to have + mode encoded as per type IRRoundingMode. Set the PPC FPSCR to have the same rounding. For speed & simplicity, we're setting the *entire* FPSCR here. 
*/ @@ -795,8 +798,8 @@ void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode ) - so we can set the whole register at once (faster) */ - // Resolve rounding mode and convert to PPC32 representation - r_src = roundModeIRtoPPC32( env, iselIntExpr_R(env, mode) ); + // Resolve rounding mode and convert to PPC representation + r_src = roundModeIRtoPPC( env, iselIntExpr_R(env, mode) ); // gpr -> fpr if (mode64) { fr_src = mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64 @@ -805,7 +808,7 @@ void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode ) } // Move to FPSCR - addInstr(env, PPC32Instr_FpLdFPSCR( fr_src )); + addInstr(env, PPCInstr_FpLdFPSCR( fr_src )); } @@ -853,11 +856,11 @@ void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode ) */ static HReg mk_AvDuplicateRI( ISelEnv* env, IRExpr* e ) { - HReg r_src; - HReg dst = newVRegV(env); - PPC32RI* ri = iselIntExpr_RI(env, e); - IRType ty = typeOfIRExpr(env->type_env,e); - UInt sz = (ty == Ity_I8) ? 8 : (ty == Ity_I16) ? 16 : 32; + HReg r_src; + HReg dst = newVRegV(env); + PPCRI* ri = iselIntExpr_RI(env, e); + IRType ty = typeOfIRExpr(env->type_env,e); + UInt sz = (ty == Ity_I8) ? 8 : (ty == Ity_I16) ? 16 : 32; vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32); /* special case: immediate */ @@ -870,33 +873,33 @@ static HReg mk_AvDuplicateRI( ISelEnv* env, IRExpr* e ) if (simm6 > 15) { /* 16:31 inclusive */ HReg v1 = newVRegV(env); HReg v2 = newVRegV(env); - addInstr(env, PPC32Instr_AvSplat(sz, v1, PPC32VI5s_Imm(-16))); - addInstr(env, PPC32Instr_AvSplat(sz, v2, PPC32VI5s_Imm(simm6-16))); + addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16))); + addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6-16))); addInstr(env, - (sz== 8) ? PPC32Instr_AvBin8x16(Pav_SUBU, dst, v2, v1) : - (sz==16) ? PPC32Instr_AvBin16x8(Pav_SUBU, dst, v2, v1) - : PPC32Instr_AvBin32x4(Pav_SUBU, dst, v2, v1) ); + (sz== 8) ? PPCInstr_AvBin8x16(Pav_SUBU, dst, v2, v1) : + (sz==16) ? 
PPCInstr_AvBin16x8(Pav_SUBU, dst, v2, v1) + : PPCInstr_AvBin32x4(Pav_SUBU, dst, v2, v1) ); return dst; } if (simm6 < -16) { /* -32:-17 inclusive */ HReg v1 = newVRegV(env); HReg v2 = newVRegV(env); - addInstr(env, PPC32Instr_AvSplat(sz, v1, PPC32VI5s_Imm(-16))); - addInstr(env, PPC32Instr_AvSplat(sz, v2, PPC32VI5s_Imm(simm6+16))); + addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16))); + addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6+16))); addInstr(env, - (sz== 8) ? PPC32Instr_AvBin8x16(Pav_ADDU, dst, v2, v1) : - (sz==16) ? PPC32Instr_AvBin16x8(Pav_ADDU, dst, v2, v1) - : PPC32Instr_AvBin32x4(Pav_ADDU, dst, v2, v1) ); + (sz== 8) ? PPCInstr_AvBin8x16(Pav_ADDU, dst, v2, v1) : + (sz==16) ? PPCInstr_AvBin16x8(Pav_ADDU, dst, v2, v1) + : PPCInstr_AvBin32x4(Pav_ADDU, dst, v2, v1) ); return dst; } /* simplest form: -16:15 inclusive */ - addInstr(env, PPC32Instr_AvSplat(sz, dst, PPC32VI5s_Imm(simm6))); + addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Imm(simm6))); return dst; } /* no luck; use the Slow way. */ r_src = newVRegI(env); - addInstr(env, PPC32Instr_LI(r_src, (Long)simm32, mode64)); + addInstr(env, PPCInstr_LI(r_src, (Long)simm32, mode64)); } else { r_src = ri->Pri.Reg; @@ -905,26 +908,26 @@ static HReg mk_AvDuplicateRI( ISelEnv* env, IRExpr* e ) /* default case: store r_src in lowest lane of 16-aligned mem, load vector, splat lowest lane to dst */ { - /* CAB: Perhaps faster to store r_src multiple times (sz dependent), + /* CAB: Maybe faster to store r_src multiple times (sz dependent), and simply load the vector? 
*/ HReg r_aligned16; HReg v_src = newVRegV(env); - PPC32AMode *am_off12; + PPCAMode *am_off12; sub_from_sp( env, 32 ); // Move SP down /* Get a 16-aligned address within our stack space */ r_aligned16 = get_sp_aligned16( env ); - am_off12 = PPC32AMode_IR( 12, r_aligned16); + am_off12 = PPCAMode_IR( 12, r_aligned16 ); /* Store r_src in low word of 16-aligned mem */ - addInstr(env, PPC32Instr_Store( 4, am_off12, r_src, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_off12, r_src, mode64 )); /* Load src to vector[low lane] */ - addInstr(env, PPC32Instr_AvLdSt( True/*load*/, 4, v_src, am_off12 )); + addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 4, v_src, am_off12 ) ); add_to_sp( env, 32 ); // Reset SP /* Finally, splat v_src[low_lane] to dst */ - addInstr(env, PPC32Instr_AvSplat(sz, dst, PPC32VI5s_Reg(v_src))); + addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Reg(v_src))); return dst; } } @@ -947,17 +950,17 @@ static HReg isNan ( ISelEnv* env, HReg vSrc ) /* 32bit float => sign(1) | expontent(8) | mantissa(23) nan => exponent all ones, mantissa > 0 */ - addInstr(env, PPC32Instr_AvBinary(Pav_AND, expt, vSrc, msk_exp)); - addInstr(env, PPC32Instr_AvBin32x4(Pav_CMPEQU, expt, expt, msk_exp)); - addInstr(env, PPC32Instr_AvBinary(Pav_AND, mnts, vSrc, msk_mnt)); - addInstr(env, PPC32Instr_AvBin32x4(Pav_CMPGTU, mnts, mnts, zeros)); - addInstr(env, PPC32Instr_AvBinary(Pav_AND, vIsNan, expt, mnts)); + addInstr(env, PPCInstr_AvBinary(Pav_AND, expt, vSrc, msk_exp)); + addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, expt, expt, msk_exp)); + addInstr(env, PPCInstr_AvBinary(Pav_AND, mnts, vSrc, msk_mnt)); + addInstr(env, PPCInstr_AvBin32x4(Pav_CMPGTU, mnts, mnts, zeros)); + addInstr(env, PPCInstr_AvBinary(Pav_AND, vIsNan, expt, mnts)); return vIsNan; } /*---------------------------------------------------------*/ -/*--- ISEL: Integer expressions (32/16/8 bit) ---*/ +/*--- ISEL: Integer expressions (64/32/16/8 bit) ---*/ /*---------------------------------------------------------*/ /* 
Select insns for an integer-typed expression, and add them to the @@ -968,10 +971,11 @@ static HReg isNan ( ISelEnv* env, HReg vSrc ) vregs to the same real register, so the copies will often disappear later in the game. - This should handle expressions of 32, 16 and 8-bit type. All - results are returned in a 32-bit register. For 16- and 8-bit - expressions, the upper 16/24 bits are arbitrary, so you should mask - or sign extend partial values if necessary. + This should handle expressions of 64, 32, 16 and 8-bit type. + All results are returned in a (mode64 ? 64bit : 32bit) register. + For 16- and 8-bit expressions, the upper (32/48/56 : 16/24) bits + are arbitrary, so you should mask or sign extend partial values + if necessary. */ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e ) @@ -1006,26 +1010,26 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) /* --------- LOAD --------- */ case Iex_Load: { HReg r_dst = newVRegI(env); - PPC32AMode* am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr); + PPCAMode* am_addr = iselIntExpr_AMode( env, e->Iex.Load.addr ); if (e->Iex.Load.end != Iend_BE) goto irreducible; - addInstr(env, PPC32Instr_Load( toUChar(sizeofIRType(ty)), - False, r_dst, am_addr, mode64 )); + addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)), + False, r_dst, am_addr, mode64 )); return r_dst; break; } /* --------- BINARY OP --------- */ case Iex_Binop: { - PPC32AluOp aluOp; - PPC32ShftOp shftOp; + PPCAluOp aluOp; + PPCShftOp shftOp; //.. /* Pattern: Sub32(0,x) */ //.. if (e->Iex.Binop.op == Iop_Sub32 && isZero32(e->Iex.Binop.arg1)) { //.. HReg dst = newVRegI(env); //.. HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg2); //.. addInstr(env, mk_iMOVsd_RR(reg,dst)); -//.. addInstr(env, PPC32Instr_Unary(Xun_NEG,PPC32RM_Reg(dst))); +//.. addInstr(env, PPCInstr_Unary(Xun_NEG,PPCRM_Reg(dst))); //.. return dst; //.. 
} @@ -1047,9 +1051,9 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) /* For commutative ops we assume any literal values are on the second operand. */ if (aluOp != Palu_INVALID) { - HReg r_dst = newVRegI(env); - HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); - PPC32RH* ri_srcR = NULL; + HReg r_dst = newVRegI(env); + HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); + PPCRH* ri_srcR = NULL; /* get right arg into an RH, in the appropriate way */ switch (aluOp) { case Palu_ADD: case Palu_SUB: @@ -1063,7 +1067,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) default: vpanic("iselIntExpr_R_wrk-aluOp-arg2"); } - addInstr(env, PPC32Instr_Alu(aluOp, r_dst, r_srcL, ri_srcR)); + addInstr(env, PPCInstr_Alu(aluOp, r_dst, r_srcL, ri_srcR)); return r_dst; } @@ -1080,9 +1084,9 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) } /* we assume any literal values are on the second operand. */ if (shftOp != Pshft_INVALID) { - HReg r_dst = newVRegI(env); - HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); - PPC32RH* ri_srcR = NULL; + HReg r_dst = newVRegI(env); + HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); + PPCRH* ri_srcR = NULL; /* get right arg into an RH, in the appropriate way */ switch (shftOp) { case Pshft_SHL: case Pshft_SHR: case Pshft_SAR: @@ -1097,12 +1101,15 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) /* widen the left arg if needed */ if (shftOp == Pshft_SHR || shftOp == Pshft_SAR) { if (ty == Ity_I8 || ty == Ity_I16) { - PPC32RH* amt = PPC32RH_Imm(False, toUShort(ty == Ity_I8 ? 24 : 16)); - HReg tmp = newVRegI(env); - addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/, - tmp, r_srcL, amt)); - addInstr(env, PPC32Instr_Shft(shftOp, True/*32bit shift*/, - tmp, tmp, amt)); + PPCRH* amt = PPCRH_Imm(False, + toUShort(ty == Ity_I8 ? 
24 : 16)); + HReg tmp = newVRegI(env); + addInstr(env, PPCInstr_Shft(Pshft_SHL, + True/*32bit shift*/, + tmp, r_srcL, amt)); + addInstr(env, PPCInstr_Shft(shftOp, + True/*32bit shift*/, + tmp, tmp, amt)); r_srcL = tmp; vassert(0); /* AWAITING TEST CASE */ } @@ -1111,11 +1118,11 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) 32bit shifts are fine for all others */ if (ty == Ity_I64) { vassert(mode64); - addInstr(env, PPC32Instr_Shft(shftOp, False/*64bit shift*/, - r_dst, r_srcL, ri_srcR)); + addInstr(env, PPCInstr_Shft(shftOp, False/*64bit shift*/, + r_dst, r_srcL, ri_srcR)); } else { - addInstr(env, PPC32Instr_Shft(shftOp, True/*32bit shift*/, - r_dst, r_srcL, ri_srcR)); + addInstr(env, PPCInstr_Shft(shftOp, True/*32bit shift*/, + r_dst, r_srcL, ri_srcR)); } return r_dst; } @@ -1127,8 +1134,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) HReg r_dst = newVRegI(env); HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_Div(syned, True/*32bit div*/, - r_dst, r_srcL, r_srcR)); + addInstr(env, PPCInstr_Div(syned, True/*32bit div*/, + r_dst, r_srcL, r_srcR)); return r_dst; } if (e->Iex.Binop.op == Iop_DivS64 || @@ -1138,8 +1145,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2); vassert(mode64); - addInstr(env, PPC32Instr_Div(syned, False/*64bit div*/, - r_dst, r_srcL, r_srcR)); + addInstr(env, PPCInstr_Div(syned, False/*64bit div*/, + r_dst, r_srcL, r_srcR)); return r_dst; } @@ -1152,8 +1159,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) HReg r_dst = newVRegI(env); HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_MulL(syned, False/*lo32*/, sz32, - r_dst, r_srcL, r_srcR)); + addInstr(env, PPCInstr_MulL(syned, False/*lo32*/, sz32, + r_dst, r_srcL, 
r_srcR)); return r_dst; } @@ -1167,45 +1174,46 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2); vassert(mode64); - addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/, - False/*lo32*/, True/*32bit mul*/, - tLo, r_srcL, r_srcR)); - addInstr(env, PPC32Instr_MulL(syned, - True/*hi32*/, True/*32bit mul*/, - tHi, r_srcL, r_srcR)); - addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/, - r_dst, tHi, PPC32RH_Imm(False,32))); - addInstr(env, PPC32Instr_Alu(Palu_OR, r_dst, r_dst, PPC32RH_Reg(tLo))); + addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, + False/*lo32*/, True/*32bit mul*/, + tLo, r_srcL, r_srcR)); + addInstr(env, PPCInstr_MulL(syned, + True/*hi32*/, True/*32bit mul*/, + tHi, r_srcL, r_srcR)); + addInstr(env, PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/, + r_dst, tHi, PPCRH_Imm(False,32))); + addInstr(env, PPCInstr_Alu(Palu_OR, + r_dst, r_dst, PPCRH_Reg(tLo))); return r_dst; } /* El-mutanto 3-way compare? 
*/ if (e->Iex.Binop.op == Iop_CmpORD32S || e->Iex.Binop.op == Iop_CmpORD32U) { - Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S); - HReg dst = newVRegI(env); - HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); - PPC32RH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_Cmp(syned, True/*32bit cmp*/, - 7/*cr*/, srcL, srcR)); - addInstr(env, PPC32Instr_MfCR(dst)); - addInstr(env, PPC32Instr_Alu(Palu_AND, dst, dst, - PPC32RH_Imm(False,7<<1))); + Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S); + HReg dst = newVRegI(env); + HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); + PPCRH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2); + addInstr(env, PPCInstr_Cmp(syned, True/*32bit cmp*/, + 7/*cr*/, srcL, srcR)); + addInstr(env, PPCInstr_MfCR(dst)); + addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst, + PPCRH_Imm(False,7<<1))); return dst; } if (e->Iex.Binop.op == Iop_CmpORD64S || e->Iex.Binop.op == Iop_CmpORD64U) { - Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD64S); - HReg dst = newVRegI(env); - HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); - PPC32RH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2); + Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD64S); + HReg dst = newVRegI(env); + HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); + PPCRH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2); vassert(mode64); - addInstr(env, PPC32Instr_Cmp(syned, False/*64bit cmp*/, - 7/*cr*/, srcL, srcR)); - addInstr(env, PPC32Instr_MfCR(dst)); - addInstr(env, PPC32Instr_Alu(Palu_AND, dst, dst, - PPC32RH_Imm(False,7<<1))); + addInstr(env, PPCInstr_Cmp(syned, False/*64bit cmp*/, + 7/*cr*/, srcL, srcR)); + addInstr(env, PPCInstr_MfCR(dst)); + addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst, + PPCRH_Imm(False,7<<1))); return dst; } @@ -1216,11 +1224,11 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) //zz HReg hi8s = iselIntExpr_R(env, e->Iex.Binop.arg1); //zz HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2); //zz 
addInstr(env, -//zz PPC32Instr_Alu(Palu_SHL, hi8, hi8s, PPC32RH_Imm(False,8))); +//zz PPCInstr_Alu(Palu_SHL, hi8, hi8s, PPCRH_Imm(False,8))); //zz addInstr(env, -//zz PPC32Instr_Alu(Palu_AND, lo8, lo8s, PPC32RH_Imm(False,0xFF))); +//zz PPCInstr_Alu(Palu_AND, lo8, lo8s, PPCRH_Imm(False,0xFF))); //zz addInstr(env, -//zz PPC32Instr_Alu(Palu_OR, hi8, hi8, PPC32RI_Reg(lo8))); +//zz PPCInstr_Alu(Palu_OR, hi8, hi8, PPCRI_Reg(lo8))); //zz return hi8; //zz } //zz @@ -1229,9 +1237,9 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) //zz HReg lo16 = newVRegI32(env); //zz HReg hi16s = iselIntExpr_R(env, e->Iex.Binop.arg1); //zz HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2); -//zz addInstr(env, mk_sh32(env, Psh_SHL, hi16, hi16s, PPC32RI_Imm(16))); -//zz addInstr(env, PPC32Instr_Alu(Palu_AND, lo16, lo16s, PPC32RI_Imm(0xFFFF))); -//zz addInstr(env, PPC32Instr_Alu(Palu_OR, hi16, hi16, PPC32RI_Reg(lo16))); +//zz addInstr(env, mk_sh32(env, Psh_SHL, hi16, hi16s, PPCRI_Imm(16))); +//zz addInstr(env, PPCInstr_Alu(Palu_AND, lo16, lo16s, PPCRI_Imm(0xFFFF))); +//zz addInstr(env, PPCInstr_Alu(Palu_OR, hi16, hi16, PPCRI_Reg(lo16))); //zz return hi16; //zz } @@ -1262,15 +1270,15 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) HReg fr_srcL = iselDblExpr(env, e->Iex.Binop.arg1); HReg fr_srcR = iselDblExpr(env, e->Iex.Binop.arg2); - HReg r_ccPPC32 = newVRegI(env); + HReg r_ccPPC = newVRegI(env); HReg r_ccIR = newVRegI(env); HReg r_ccIR_b0 = newVRegI(env); HReg r_ccIR_b2 = newVRegI(env); HReg r_ccIR_b6 = newVRegI(env); - addInstr(env, PPC32Instr_FpCmp(r_ccPPC32, fr_srcL, fr_srcR)); + addInstr(env, PPCInstr_FpCmp(r_ccPPC, fr_srcL, fr_srcR)); - /* Map compare result from PPC32 to IR, + /* Map compare result from PPC to IR, conforming to CmpF64 definition. 
*/ /* FP cmp result | PPC | IR @@ -1281,28 +1289,39 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) LT | 0x8 | 0x01 */ - // r_ccIR_b0 = r_ccPPC32[0] | r_ccPPC32[3] - addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/, - r_ccIR_b0, r_ccPPC32, PPC32RH_Imm(False,0x3))); - addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR_b0, r_ccPPC32, PPC32RH_Reg(r_ccIR_b0))); - addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b0, r_ccIR_b0, PPC32RH_Imm(False,0x1))); + // r_ccIR_b0 = r_ccPPC[0] | r_ccPPC[3] + addInstr(env, PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/, + r_ccIR_b0, r_ccPPC, + PPCRH_Imm(False,0x3))); + addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR_b0, + r_ccPPC, PPCRH_Reg(r_ccIR_b0))); + addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b0, + r_ccIR_b0, PPCRH_Imm(False,0x1))); - // r_ccIR_b2 = r_ccPPC32[0] - addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/, - r_ccIR_b2, r_ccPPC32, PPC32RH_Imm(False,0x2))); - addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b2, r_ccIR_b2, PPC32RH_Imm(False,0x4))); - - // r_ccIR_b6 = r_ccPPC32[0] | r_ccPPC32[1] - addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/, - r_ccIR_b6, r_ccPPC32, PPC32RH_Imm(False,0x1))); - addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR_b6, r_ccPPC32, PPC32RH_Reg(r_ccIR_b6))); - addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/, - r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm(False,0x6))); - addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm(False,0x40))); + // r_ccIR_b2 = r_ccPPC[0] + addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, + r_ccIR_b2, r_ccPPC, + PPCRH_Imm(False,0x2))); + addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b2, + r_ccIR_b2, PPCRH_Imm(False,0x4))); + + // r_ccIR_b6 = r_ccPPC[0] | r_ccPPC[1] + addInstr(env, PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/, + r_ccIR_b6, r_ccPPC, + PPCRH_Imm(False,0x1))); + addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR_b6, + r_ccPPC, PPCRH_Reg(r_ccIR_b6))); + addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, 
+ r_ccIR_b6, r_ccIR_b6, + PPCRH_Imm(False,0x6))); + addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b6, + r_ccIR_b6, PPCRH_Imm(False,0x40))); // r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6 - addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR, r_ccIR_b0, PPC32RH_Reg(r_ccIR_b2))); - addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR, r_ccIR, PPC32RH_Reg(r_ccIR_b6))); + addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR, + r_ccIR_b0, PPCRH_Reg(r_ccIR_b2))); + addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR, + r_ccIR, PPCRH_Reg(r_ccIR_b6))); return r_ccIR; } @@ -1313,7 +1332,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); sub_from_sp( env, 16 ); - addInstr(env, PPC32Instr_FpF64toI32(r_dst, fr_src)); + addInstr(env, PPCInstr_FpF64toI32(r_dst, fr_src)); add_to_sp( env, 16 ); /* Restore default FPU rounding. */ @@ -1328,7 +1347,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); sub_from_sp( env, 16 ); - addInstr(env, PPC32Instr_FpF64toI64(r_dst, fr_src)); + addInstr(env, PPCInstr_FpF64toI64(r_dst, fr_src)); add_to_sp( env, 16 ); /* Restore default FPU rounding. 
*/ @@ -1363,6 +1382,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) /* --------- UNARY OP --------- */ case Iex_Unop: { + IROp op_unop = e->Iex.Unop.op; + /* 1Uto8(32to1(expr32)) */ DEFINE_PATTERN(p_32to1_then_1Uto8, unop(Iop_1Uto8,unop(Iop_32to1,bind(0)))); @@ -1370,7 +1391,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) IRExpr* expr32 = mi.bindee[0]; HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, expr32); - addInstr(env, PPC32Instr_Alu(Palu_AND, r_dst, r_src, PPC32RH_Imm(False,1))); + addInstr(env, PPCInstr_Alu(Palu_AND, r_dst, + r_src, PPCRH_Imm(False,1))); return r_dst; } @@ -1382,13 +1404,13 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) IRExpr_Load(Iend_BE,Ity_I16,bind(0))) ); if (matchIRExpr(&mi,p_LDbe16_then_16Uto32,e)) { HReg r_dst = newVRegI(env); - PPC32AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] ); - addInstr(env, PPC32Instr_Load(2,False,r_dst,amode, mode64)); + PPCAMode* amode = iselIntExpr_AMode( env, mi.bindee[0] ); + addInstr(env, PPCInstr_Load(2,False,r_dst,amode, mode64)); return r_dst; } } - switch (e->Iex.Unop.op) { + switch (op_unop) { case Iop_8Uto16: case Iop_8Uto32: case Iop_8Uto64: @@ -1396,20 +1418,22 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) case Iop_16Uto64: { HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); - UShort mask = toUShort(e->Iex.Unop.op==Iop_16Uto64 ? 0xFFFF : - e->Iex.Unop.op==Iop_16Uto32 ? 0xFFFF : 0xFF); - addInstr(env, PPC32Instr_Alu(Palu_AND,r_dst,r_src, - PPC32RH_Imm(False,mask))); + UShort mask = toUShort(op_unop==Iop_16Uto64 ? 0xFFFF : + op_unop==Iop_16Uto32 ? 
0xFFFF : 0xFF); + addInstr(env, PPCInstr_Alu(Palu_AND,r_dst,r_src, + PPCRH_Imm(False,mask))); return r_dst; } case Iop_32Uto64: { HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); vassert(mode64); - addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/, - r_dst, r_src, PPC32RH_Imm(False,32))); - addInstr(env, PPC32Instr_Shft(Pshft_SHR, False/*64bit shift*/, - r_dst, r_dst, PPC32RH_Imm(False,32))); + addInstr(env, + PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/, + r_dst, r_src, PPCRH_Imm(False,32))); + addInstr(env, + PPCInstr_Shft(Pshft_SHR, False/*64bit shift*/, + r_dst, r_dst, PPCRH_Imm(False,32))); return r_dst; } case Iop_8Sto16: @@ -1417,11 +1441,13 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) case Iop_16Sto32: { HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); - UShort amt = toUShort(e->Iex.Unop.op==Iop_16Sto32 ? 16 : 24); - addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/, - r_dst, r_src, PPC32RH_Imm(False,amt))); - addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/, - r_dst, r_dst, PPC32RH_Imm(False,amt))); + UShort amt = toUShort(op_unop==Iop_16Sto32 ? 16 : 24); + addInstr(env, + PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, + r_dst, r_src, PPCRH_Imm(False,amt))); + addInstr(env, + PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, + r_dst, r_dst, PPCRH_Imm(False,amt))); return r_dst; } case Iop_8Sto64: @@ -1429,13 +1455,15 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) case Iop_32Sto64: { HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); - UShort amt = toUShort(e->Iex.Unop.op==Iop_8Sto64 ? 56 : - e->Iex.Unop.op==Iop_16Sto64 ? 48 : 32); + UShort amt = toUShort(op_unop==Iop_8Sto64 ? 56 : + op_unop==Iop_16Sto64 ? 
48 : 32); vassert(mode64); - addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/, - r_dst, r_src, PPC32RH_Imm(False,amt))); - addInstr(env, PPC32Instr_Shft(Pshft_SAR, False/*64bit shift*/, - r_dst, r_dst, PPC32RH_Imm(False,amt))); + addInstr(env, + PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/, + r_dst, r_src, PPCRH_Imm(False,amt))); + addInstr(env, + PPCInstr_Shft(Pshft_SAR, False/*64bit shift*/, + r_dst, r_dst, PPCRH_Imm(False,amt))); return r_dst; } case Iop_Not8: @@ -1444,7 +1472,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) case Iop_Not64: { HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Unary(Pun_NOT,r_dst,r_src)); + addInstr(env, PPCInstr_Unary(Pun_NOT,r_dst,r_src)); return r_dst; } case Iop_64HIto32: { @@ -1455,8 +1483,9 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) } else { HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Shft(Pshft_SHR, False/*64bit shift*/, - r_dst, r_src, PPC32RH_Imm(False,32))); + addInstr(env, + PPCInstr_Shft(Pshft_SHR, False/*64bit shift*/, + r_dst, r_src, PPCRH_Imm(False,32))); return r_dst; } } @@ -1471,8 +1500,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) //:: if (matchIRExpr(&mi,p_MullS32_then_64to32,e)) { //:: HReg r_dst = newVRegI32(env); //:: HReg r_srcL = iselIntExpr_R( env, mi.bindee[0] ); -//:: PPC32RI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] )); -//:: addInstr(env, PPC32Instr_MulL(True, 0, r_dst, r_srcL, ri_srcR)); +//:: PPCRI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] )); +//:: addInstr(env, PPCInstr_MulL(True, 0, r_dst, r_srcL, ri_srcR)); //:: return r_dst; //:: } //:: } @@ -1486,8 +1515,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) //:: if (matchIRExpr(&mi,p_MullU32_then_64to32,e)) { //:: HReg r_dst = newVRegI32(env); //:: HReg r_srcL = iselIntExpr_R( env, mi.bindee[0] ); -//:: PPC32RI* 
ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] )); -//:: addInstr(env, PPC32Instr_MulL(False, 0, r_dst, r_srcL, ri_srcR)); +//:: PPCRI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] )); +//:: addInstr(env, PPCInstr_MulL(False, 0, r_dst, r_srcL, ri_srcR)); //:: return r_dst; //:: } //:: } @@ -1513,9 +1542,10 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) case Iop_32HIto16: { HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); - UShort shift = toUShort(e->Iex.Unop.op == Iop_16HIto8 ? 8 : 16); - addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/, - r_dst, r_src, PPC32RH_Imm(False,shift))); + UShort shift = toUShort(op_unop == Iop_16HIto8 ? 8 : 16); + addInstr(env, + PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/, + r_dst, r_src, PPCRH_Imm(False,shift))); return r_dst; } case Iop_128HIto64: { @@ -1532,32 +1562,34 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) } case Iop_1Uto32: case Iop_1Uto8: { - HReg r_dst = newVRegI(env); - PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Set32(cond,r_dst)); + HReg r_dst = newVRegI(env); + PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); + addInstr(env, PPCInstr_Set(cond,r_dst)); return r_dst; } case Iop_1Sto8: case Iop_1Sto16: case Iop_1Sto32: { /* could do better than this, but for now ... 
*/ - HReg r_dst = newVRegI(env); - PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Set32(cond,r_dst)); - addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/, - r_dst, r_dst, PPC32RH_Imm(False,31))); - addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/, - r_dst, r_dst, PPC32RH_Imm(False,31))); + HReg r_dst = newVRegI(env); + PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); + addInstr(env, PPCInstr_Set(cond,r_dst)); + addInstr(env, + PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, + r_dst, r_dst, PPCRH_Imm(False,31))); + addInstr(env, + PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, + r_dst, r_dst, PPCRH_Imm(False,31))); return r_dst; } case Iop_Clz32: case Iop_Clz64: { - PPC32UnaryOp op_clz = - (e->Iex.Unop.op == Iop_Clz32) ? Pun_CLZ32 : Pun_CLZ64; + PPCUnaryOp op_clz = (op_unop == Iop_Clz32) ? Pun_CLZ32 : + Pun_CLZ64; /* Count leading zeroes. */ HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Unary(op_clz,r_dst,r_src)); + addInstr(env, PPCInstr_Unary(op_clz,r_dst,r_src)); return r_dst; } case Iop_Neg8: @@ -1566,7 +1598,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) case Iop_Neg64: { HReg r_dst = newVRegI(env); HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Unary(Pun_NEG,r_dst,r_src)); + addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src)); return r_dst; } @@ -1574,17 +1606,19 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) HReg r_aligned16; HReg dst = newVRegI(env); HReg vec = iselVecExpr(env, e->Iex.Unop.arg); - PPC32AMode *am_off0, *am_off12; + PPCAMode *am_off0, *am_off12; sub_from_sp( env, 32 ); // Move SP down 32 bytes // get a quadword aligned address within our stack space r_aligned16 = get_sp_aligned16( env ); - am_off0 = PPC32AMode_IR( 0, r_aligned16 ); - am_off12 = PPC32AMode_IR( 12,r_aligned16 ); + am_off0 = PPCAMode_IR( 0, r_aligned16 ); + am_off12 = PPCAMode_IR( 12,r_aligned16 ); // 
store vec, load low word to dst - addInstr(env, PPC32Instr_AvLdSt( False/*store*/, 16, vec, am_off0 )); - addInstr(env, PPC32Instr_Load( 4, False, dst, am_off12, mode64 )); + addInstr(env, + PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 )); + addInstr(env, + PPCInstr_Load( 4, False, dst, am_off12, mode64 )); add_to_sp( env, 32 ); // Reset SP return dst; @@ -1601,23 +1635,25 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) /* Given an IEEE754 double, produce an I64 with the same bit pattern. */ case Iop_ReinterpF64asI64: { - PPC32AMode *am_addr; + PPCAMode *am_addr; HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg); HReg r_dst = newVRegI(env); vassert(mode64); sub_from_sp( env, 16 ); // Move SP down 16 bytes - am_addr = PPC32AMode_IR(0, StackFramePtr(mode64)); + am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) ); // store as F64 - addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr )); + addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8, + fr_src, am_addr )); // load as Ity_I64 - addInstr(env, PPC32Instr_Load( 8, False, r_dst, am_addr, mode64 )); + addInstr(env, PPCInstr_Load( 8, False, + r_dst, am_addr, mode64 )); add_to_sp( env, 16 ); // Reset SP return r_dst; } - + default: break; } @@ -1629,10 +1665,10 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ((ty == Ity_I64) && mode64)) { HReg r_dst = newVRegI(env); - PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset, - GuestStatePtr(mode64) ); - addInstr(env, PPC32Instr_Load( toUChar(sizeofIRType(ty)), - False, r_dst, am_addr, mode64 )); + PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset, + GuestStatePtr(mode64) ); + addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)), + False, r_dst, am_addr, mode64 )); return r_dst; } break; @@ -1662,7 +1698,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) goto irreducible; /* Marshal args, do the call, clear stack. 
*/ - doHelperCall( env, False, NULL, e->Iex.CCall.cee, e->Iex.CCall.args ); + doHelperCall( env, False, NULL, + e->Iex.CCall.cee, e->Iex.CCall.args ); /* GPR3 now holds the destination address from Pin_Goto */ addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64))); @@ -1674,15 +1711,16 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) case Iex_Const: { Long l; HReg r_dst = newVRegI(env); - switch (e->Iex.Const.con->tag) { + IRConst* con = e->Iex.Const.con; + switch (con->tag) { case Ico_U64: vassert(mode64); - l = (Long) e->Iex.Const.con->Ico.U64; break; - case Ico_U32: l = (Long)(Int) e->Iex.Const.con->Ico.U32; break; - case Ico_U16: l = (Long)(Int)(Short)e->Iex.Const.con->Ico.U16; break; - case Ico_U8: l = (Long)(Int)(Char )e->Iex.Const.con->Ico.U8; break; - default: vpanic("iselIntExpr_R.const(ppc32)"); + l = (Long) con->Ico.U64; break; + case Ico_U32: l = (Long)(Int) con->Ico.U32; break; + case Ico_U16: l = (Long)(Int)(Short)con->Ico.U16; break; + case Ico_U8: l = (Long)(Int)(Char )con->Ico.U8; break; + default: vpanic("iselIntExpr_R.const(ppc)"); } - addInstr(env, PPC32Instr_LI(r_dst, (ULong)l, mode64)); + addInstr(env, PPCInstr_LI(r_dst, (ULong)l, mode64)); return r_dst; } @@ -1691,17 +1729,18 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) if ((ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ((ty == Ity_I64) && mode64)) && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) { - PPC32CondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); - HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond); - HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX); - PPC32RI* r0 = iselIntExpr_RI(env, e->Iex.Mux0X.expr0); - HReg r_dst = newVRegI(env); - HReg r_tmp = newVRegI(env); + PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); + HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond); + HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX); + PPCRI* r0 = iselIntExpr_RI(env, e->Iex.Mux0X.expr0); + HReg r_dst = newVRegI(env); + HReg r_tmp = newVRegI(env); 
 addInstr(env, mk_iMOVds_RR(r_dst,rX)); - addInstr(env, PPC32Instr_Alu(Palu_AND, r_tmp, r_cond, PPC32RH_Imm(False,0xFF))); - addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/, - 7/*cr*/, r_tmp, PPC32RH_Imm(False,0))); - addInstr(env, PPC32Instr_CMov(cc,r_dst,r0)); + addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp, + r_cond, PPCRH_Imm(False,0xFF))); + addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, + 7/*cr*/, r_tmp, PPCRH_Imm(False,0))); + addInstr(env, PPCInstr_CMov(cc,r_dst,r0)); return r_dst; } break; @@ -1715,7 +1754,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) /* We get here if no pattern matched. */ irreducible: ppIRExpr(e); - vpanic("iselIntExpr_R(ppc32): cannot reduce tree"); + vpanic("iselIntExpr_R(ppc): cannot reduce tree"); } @@ -1739,7 +1778,7 @@ static Bool fits16bits ( UInt u ) return toBool(u == (UInt)i); } -static Bool sane_AMode ( PPC32AMode* am ) +static Bool sane_AMode ( PPCAMode* am ) { switch (am->tag) { case Pam_IR: @@ -1752,19 +1791,19 @@ static Bool sane_AMode ( PPC32AMode* am ) hregClass(am->Pam.RR.index) == HRcIntWRDSZ && hregIsVirtual(am->Pam.IR.index) ); default: - vpanic("sane_AMode: unknown ppc32 amode tag"); + vpanic("sane_AMode: unknown ppc amode tag"); } } -static PPC32AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e ) +static PPCAMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e ) { - PPC32AMode* am = iselIntExpr_AMode_wrk(env, e); + PPCAMode* am = iselIntExpr_AMode_wrk(env, e); + vassert(sane_AMode(am)); return am; } /* DO NOT CALL THIS DIRECTLY ! */ -static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e ) +static PPCAMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e ) { IRType ty = typeOfIRExpr(env->type_env,e); vassert(ty == (mode64 ? 
Ity_I64 : Ity_I32)); @@ -1775,8 +1814,8 @@ static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e ) && e->Iex.Binop.arg2->tag == Iex_Const && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U32 && fits16bits(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)) { - return PPC32AMode_IR(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32, - iselIntExpr_R(env, e->Iex.Binop.arg1)); + return PPCAMode_IR( e->Iex.Binop.arg2->Iex.Const.con->Ico.U32, + iselIntExpr_R(env, e->Iex.Binop.arg1) ); } /* Add32(expr,expr) */ @@ -1784,14 +1823,14 @@ static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e ) && e->Iex.Binop.op == Iop_Add32) { HReg r_base = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg r_idx = iselIntExpr_R(env, e->Iex.Binop.arg2); - return PPC32AMode_RR(r_idx, r_base); + return PPCAMode_RR( r_idx, r_base ); } /* Doesn't match anything in particular. Generate it into a register and use that. */ { HReg r1 = iselIntExpr_R(env, e); - return PPC32AMode_IR(0, r1); + return PPCAMode_IR( 0, r1 ); } } @@ -1804,9 +1843,9 @@ static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e ) immediate; this guaranteed that all signed immediates that are return can have their sign inverted if need be. */ -static PPC32RH* iselIntExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e ) +static PPCRH* iselIntExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e ) { - PPC32RH* ri = iselIntExpr_RH_wrk(env, syned, e); + PPCRH* ri = iselIntExpr_RH_wrk(env, syned, e); /* sanity checks ... */ switch (ri->tag) { case Prh_Imm: @@ -1819,12 +1858,12 @@ static PPC32RH* iselIntExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e ) vassert(hregIsVirtual(ri->Prh.Reg.reg)); return ri; default: - vpanic("iselIntExpr_RH: unknown ppc32 RH tag"); + vpanic("iselIntExpr_RH: unknown ppc RH tag"); } } /* DO NOT CALL THIS DIRECTLY ! 
*/ -static PPC32RH* iselIntExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e ) +static PPCRH* iselIntExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e ) { ULong u; Long l; @@ -1834,23 +1873,24 @@ static PPC32RH* iselIntExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e ) /* special case: immediate */ if (e->tag == Iex_Const) { + IRConst* con = e->Iex.Const.con; /* What value are we aiming to generate? */ - switch (e->Iex.Const.con->tag) { + switch (con->tag) { /* Note: Not sign-extending - we carry 'syned' around */ case Ico_U64: vassert(mode64); - u = e->Iex.Const.con->Ico.U64; break; - case Ico_U32: u = 0xFFFFFFFF & e->Iex.Const.con->Ico.U32; break; - case Ico_U16: u = 0x0000FFFF & e->Iex.Const.con->Ico.U16; break; - case Ico_U8: u = 0x000000FF & e->Iex.Const.con->Ico.U8; break; - default: vpanic("iselIntExpr_RH.Iex_Const(ppc32h)"); + u = con->Ico.U64; break; + case Ico_U32: u = 0xFFFFFFFF & con->Ico.U32; break; + case Ico_U16: u = 0x0000FFFF & con->Ico.U16; break; + case Ico_U8: u = 0x000000FF & con->Ico.U8; break; + default: vpanic("iselIntExpr_RH.Iex_Const(ppch)"); } l = (Long)u; /* Now figure out if it's representable. */ if (!syned && u <= 65535) { - return PPC32RH_Imm(False/*unsigned*/, toUShort(u & 0xFFFF)); + return PPCRH_Imm(False/*unsigned*/, toUShort(u & 0xFFFF)); } if (syned && l >= -32767 && l <= 32767) { - return PPC32RH_Imm(True/*signed*/, toUShort(u & 0xFFFF)); + return PPCRH_Imm(True/*signed*/, toUShort(u & 0xFFFF)); } /* no luck; use the Slow Way. */ } @@ -1858,19 +1898,19 @@ static PPC32RH* iselIntExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e ) /* default case: calculate into a register and return that */ { HReg r = iselIntExpr_R ( env, e ); - return PPC32RH_Reg(r); + return PPCRH_Reg(r); } } /* --------------------- RIs --------------------- */ -/* Calculate an expression into an PPC32RI operand. As with +/* Calculate an expression into an PPCRI operand. As with iselIntExpr_R, the expression can have type 32, 16 or 8 bits. 
*/ -static PPC32RI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e ) +static PPCRI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e ) { - PPC32RI* ri = iselIntExpr_RI_wrk(env, e); + PPCRI* ri = iselIntExpr_RI_wrk(env, e); /* sanity checks ... */ switch (ri->tag) { case Pri_Imm: @@ -1880,12 +1920,12 @@ static PPC32RI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e ) vassert(hregIsVirtual(ri->Pri.Reg)); return ri; default: - vpanic("iselIntExpr_RI: unknown ppc32 RI tag"); + vpanic("iselIntExpr_RI: unknown ppc RI tag"); } } /* DO NOT CALL THIS DIRECTLY ! */ -static PPC32RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e ) +static PPCRI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e ) { Long l; IRType ty = typeOfIRExpr(env->type_env,e); @@ -1894,21 +1934,22 @@ static PPC32RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e ) /* special case: immediate */ if (e->tag == Iex_Const) { - switch (e->Iex.Const.con->tag) { + IRConst* con = e->Iex.Const.con; + switch (con->tag) { case Ico_U64: vassert(mode64); - l = (Long) e->Iex.Const.con->Ico.U64; break; - case Ico_U32: l = (Long)(Int) e->Iex.Const.con->Ico.U32; break; - case Ico_U16: l = (Long)(Int)(Short)e->Iex.Const.con->Ico.U16; break; - case Ico_U8: l = (Long)(Int)(Char )e->Iex.Const.con->Ico.U8; break; - default: vpanic("iselIntExpr_RI.Iex_Const(ppc32h)"); + l = (Long) con->Ico.U64; break; + case Ico_U32: l = (Long)(Int) con->Ico.U32; break; + case Ico_U16: l = (Long)(Int)(Short)con->Ico.U16; break; + case Ico_U8: l = (Long)(Int)(Char )con->Ico.U8; break; + default: vpanic("iselIntExpr_RI.Iex_Const(ppch)"); } - return PPC32RI_Imm((ULong)l); + return PPCRI_Imm((ULong)l); } /* default case: calculate into a register and return that */ { HReg r = iselIntExpr_R ( env, e ); - return PPC32RI_Reg(r); + return PPCRI_Reg(r); } } @@ -1919,9 +1960,9 @@ static PPC32RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e ) being an immediate in the range 1 .. 31 inclusive. Used for doing shift amounts. 
*/ -static PPC32RH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e ) +static PPCRH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e ) { - PPC32RH* ri = iselIntExpr_RH5u_wrk(env, e); + PPCRH* ri = iselIntExpr_RH5u_wrk(env, e); /* sanity checks ... */ switch (ri->tag) { case Prh_Imm: @@ -1933,12 +1974,12 @@ static PPC32RH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e ) vassert(hregIsVirtual(ri->Prh.Reg.reg)); return ri; default: - vpanic("iselIntExpr_RH5u: unknown ppc32 RI tag"); + vpanic("iselIntExpr_RH5u: unknown ppc RI tag"); } } /* DO NOT CALL THIS DIRECTLY ! */ -static PPC32RH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e ) +static PPCRH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e ) { IRType ty = typeOfIRExpr(env->type_env,e); vassert(ty == Ity_I8); @@ -1948,13 +1989,13 @@ static PPC32RH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e ) && e->Iex.Const.con->tag == Ico_U8 && e->Iex.Const.con->Ico.U8 >= 1 && e->Iex.Const.con->Ico.U8 <= 31) { - return PPC32RH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8); + return PPCRH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8); } /* default case: calculate into a register and return that */ { HReg r = iselIntExpr_R ( env, e ); - return PPC32RH_Reg(r); + return PPCRH_Reg(r); } } @@ -1965,9 +2006,9 @@ static PPC32RH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e ) being an immediate in the range 1 .. 63 inclusive. Used for doing shift amounts. */ -static PPC32RH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e ) +static PPCRH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e ) { - PPC32RH* ri = iselIntExpr_RH6u_wrk(env, e); + PPCRH* ri = iselIntExpr_RH6u_wrk(env, e); /* sanity checks ... */ switch (ri->tag) { case Prh_Imm: @@ -1984,7 +2025,7 @@ static PPC32RH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e ) } /* DO NOT CALL THIS DIRECTLY ! 
*/ -static PPC32RH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e ) +static PPCRH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e ) { IRType ty = typeOfIRExpr(env->type_env,e); vassert(ty == Ity_I8); @@ -1994,13 +2035,13 @@ static PPC32RH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e ) && e->Iex.Const.con->tag == Ico_U8 && e->Iex.Const.con->Ico.U8 >= 1 && e->Iex.Const.con->Ico.U8 <= 63) { - return PPC32RH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8); + return PPCRH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8); } /* default case: calculate into a register and return that */ { HReg r = iselIntExpr_R ( env, e ); - return PPC32RH_Reg(r); + return PPCRH_Reg(r); } } @@ -2011,14 +2052,14 @@ static PPC32RH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e ) condition code which would correspond when the expression would notionally have returned 1. */ -static PPC32CondCode iselCondCode ( ISelEnv* env, IRExpr* e ) +static PPCCondCode iselCondCode ( ISelEnv* env, IRExpr* e ) { /* Uh, there's nothing we can sanity check here, unfortunately. */ return iselCondCode_wrk(env,e); } /* DO NOT CALL THIS DIRECTLY ! */ -static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) +static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) { // MatchInfo mi; // DECLARE_PATTERN(p_32to1); @@ -2032,16 +2073,16 @@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) if (e->tag == Iex_Const && e->Iex.Const.con->Ico.U1 == True) { // Make a compare that will always be true: HReg r_zero = newVRegI(env); - addInstr(env, PPC32Instr_LI(r_zero, 0, mode64)); - addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/, - 7/*cr*/, r_zero, PPC32RH_Reg(r_zero))); + addInstr(env, PPCInstr_LI(r_zero, 0, mode64)); + addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, + 7/*cr*/, r_zero, PPCRH_Reg(r_zero))); return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); } /* Not1(...) 
*/ if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) { /* Generate code for the arg, and negate the test condition */ - PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg); + PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); cond.test = invertCondTest(cond.test); return cond; } @@ -2070,9 +2111,10 @@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) HReg src = iselIntExpr_R(env, e->Iex.Unop.arg); HReg tmp = newVRegI(env); /* could do better, probably -- andi. */ - addInstr(env, PPC32Instr_Alu(Palu_AND, tmp, src, PPC32RH_Imm(False,1))); - addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/, - 7/*cr*/, tmp, PPC32RH_Imm(False,1))); + addInstr(env, PPCInstr_Alu(Palu_AND, tmp, + src, PPCRH_Imm(False,1))); + addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, + 7/*cr*/, tmp, PPCRH_Imm(False,1))); return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); } @@ -2084,10 +2126,10 @@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) && e->Iex.Unop.op == Iop_CmpNEZ8) { HReg r_32 = iselIntExpr_R(env, e->Iex.Unop.arg); HReg r_l = newVRegI(env); - addInstr(env, PPC32Instr_Alu(Palu_AND, r_l, r_32, - PPC32RH_Imm(False,0xFF))); - addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/, - 7/*cr*/, r_l, PPC32RH_Imm(False,0))); + addInstr(env, PPCInstr_Alu(Palu_AND, r_l, r_32, + PPCRH_Imm(False,0xFF))); + addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, + 7/*cr*/, r_l, PPCRH_Imm(False,0))); return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); } @@ -2097,8 +2139,8 @@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_CmpNEZ32) { HReg r1 = iselIntExpr_R(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/, - 7/*cr*/, r1, PPC32RH_Imm(False,0))); + addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, + 7/*cr*/, r1, PPCRH_Imm(False,0))); return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); } @@ -2159,19 +2201,19 
@@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) || e->Iex.Binop.op == Iop_CmpLE32U)) { Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S || e->Iex.Binop.op == Iop_CmpLE32S); - HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1); - PPC32RH* ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_Cmp(syned, True/*32bit cmp*/, - 7/*cr*/, r1, ri2)); + HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1); + PPCRH* ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2); + addInstr(env, PPCInstr_Cmp(syned, True/*32bit cmp*/, + 7/*cr*/, r1, ri2)); switch (e->Iex.Binop.op) { case Iop_CmpEQ32: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); case Iop_CmpNE32: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); -// case Iop_CmpLT32S: return mk_PPCCondCode( Pct_TRUE, Pcf_LT ); +// case Iop_CmpLT32S: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT ); case Iop_CmpLT32U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT ); -// case Iop_CmpLE32S: return mk_PPCCondCode( Pct_FALSE, Pcf_GT ); +// case Iop_CmpLE32S: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT ); case Iop_CmpLE32U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT ); - default: vpanic("iselCondCode(ppc32): CmpXX32"); + default: vpanic("iselCondCode(ppc): CmpXX32"); } } @@ -2185,20 +2227,20 @@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) || e->Iex.Binop.op == Iop_CmpLE64U)) { Bool syned = (e->Iex.Binop.op == Iop_CmpLT64S || e->Iex.Binop.op == Iop_CmpLE64S); - HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1); - PPC32RH* ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2); + HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1); + PPCRH* ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2); vassert(mode64); - addInstr(env, PPC32Instr_Cmp(syned, False/*64bit cmp*/, - 7/*cr*/, r1, ri2)); + addInstr(env, PPCInstr_Cmp(syned, False/*64bit cmp*/, + 7/*cr*/, r1, ri2)); switch (e->Iex.Binop.op) { case Iop_CmpEQ64: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); case Iop_CmpNE64: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); -// 
case Iop_CmpLT64S: return mk_PPCCondCode( Pct_TRUE, Pcf_LT ); +// case Iop_CmpLT64S: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT ); case Iop_CmpLT64U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT ); -// case Iop_CmpLE64S: return mk_PPCCondCode( Pct_FALSE, Pcf_GT ); +// case Iop_CmpLE64S: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT ); case Iop_CmpLE64U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT ); - default: vpanic("iselCondCode(ppc32): CmpXX64"); + default: vpanic("iselCondCode(ppc): CmpXX64"); } } @@ -2258,14 +2300,14 @@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) HReg tmp = newVRegI(env); iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg ); addInstr(env, mk_iMOVds_RR(tmp, lo)); - addInstr(env, PPC32Instr_Alu(Palu_OR, tmp, tmp, PPC32RH_Reg(hi))); - addInstr(env, PPC32Instr_Cmp(False/*sign*/, True/*32bit cmp*/, - 7/*cr*/, tmp,PPC32RH_Imm(False,0))); + addInstr(env, PPCInstr_Alu(Palu_OR, tmp, tmp, PPCRH_Reg(hi))); + addInstr(env, PPCInstr_Cmp(False/*sign*/, True/*32bit cmp*/, + 7/*cr*/, tmp,PPCRH_Imm(False,0))); return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); } else { // mode64 HReg r_src = iselIntExpr_R(env, e->Iex.Binop.arg1); - addInstr(env, PPC32Instr_Cmp(False/*sign*/, False/*64bit cmp*/, - 7/*cr*/, r_src,PPC32RH_Imm(False,0))); + addInstr(env, PPCInstr_Cmp(False/*sign*/, False/*64bit cmp*/, + 7/*cr*/, r_src,PPCRH_Imm(False,0))); return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); } } @@ -2274,9 +2316,12 @@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) if (e->tag == Iex_Tmp) { HReg r_src = lookupIRTemp(env, e->Iex.Tmp.tmp); HReg src_masked = newVRegI(env); - addInstr(env, PPC32Instr_Alu(Palu_AND, src_masked, r_src, PPC32RH_Imm(False,1))); - addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/, - 7/*cr*/, src_masked, PPC32RH_Imm(False,1))); + addInstr(env, + PPCInstr_Alu(Palu_AND, src_masked, + r_src, PPCRH_Imm(False,1))); + addInstr(env, + PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, + 7/*cr*/, src_masked, 
PPCRH_Imm(False,1))); return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); } @@ -2295,7 +2340,8 @@ static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) either real or virtual regs; in any case they must not be changed by subsequent code emitted by the caller. */ -static void iselInt128Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) +static void iselInt128Expr ( HReg* rHi, HReg* rLo, + ISelEnv* env, IRExpr* e ) { vassert(mode64); iselInt128Expr_wrk(rHi, rLo, env, e); @@ -2309,7 +2355,8 @@ static void iselInt128Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) } /* DO NOT CALL THIS DIRECTLY ! */ -static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) +static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, + ISelEnv* env, IRExpr* e ) { vassert(e); vassert(typeOfIRExpr(env->type_env,e) == Ity_I128); @@ -2331,12 +2378,12 @@ static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) Bool syned = toBool(e->Iex.Binop.op == Iop_MullS64); HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/, - False/*lo64*/, False/*64bit mul*/, - tLo, r_srcL, r_srcR)); - addInstr(env, PPC32Instr_MulL(syned, - True/*hi64*/, False/*64bit mul*/, - tHi, r_srcL, r_srcR)); + addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, + False/*lo64*/, False/*64bit mul*/, + tLo, r_srcL, r_srcR)); + addInstr(env, PPCInstr_MulL(syned, + True/*hi64*/, False/*64bit mul*/, + tHi, r_srcL, r_srcR)); *rHi = tHi; *rLo = tLo; return; @@ -2371,7 +2418,8 @@ static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) either real or virtual regs; in any case they must not be changed by subsequent code emitted by the caller. 
*/ -static void iselInt64Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) +static void iselInt64Expr ( HReg* rHi, HReg* rLo, + ISelEnv* env, IRExpr* e ) { vassert(!mode64); iselInt64Expr_wrk(rHi, rLo, env, e); @@ -2385,7 +2433,8 @@ static void iselInt64Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) } /* DO NOT CALL THIS DIRECTLY ! */ -static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) +static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, + ISelEnv* env, IRExpr* e ) { // HWord fn = 0; /* helper fn for most SIMD64 stuff */ vassert(e); @@ -2399,8 +2448,8 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); vassert(e->Iex.Const.con->tag == Ico_U64); - addInstr(env, PPC32Instr_LI(tHi, wHi, mode64)); - addInstr(env, PPC32Instr_LI(tLo, wLo, mode64)); + addInstr(env, PPCInstr_LI(tHi, wHi, mode64)); + addInstr(env, PPCInstr_LI(tLo, wLo, mode64)); *rHi = tHi; *rLo = tLo; return; @@ -2430,13 +2479,13 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) /* 64-bit GET */ if (e->tag == Iex_Get) { - PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset, - GuestStatePtr(mode64) ); - PPC32AMode* am_addr4 = advance4(env, am_addr); + PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset, + GuestStatePtr(mode64) ); + PPCAMode* am_addr4 = advance4(env, am_addr); HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); - addInstr(env, PPC32Instr_Load( 4, False, tHi, am_addr, mode64 )); - addInstr(env, PPC32Instr_Load( 4, False, tLo, am_addr4, mode64 )); + addInstr(env, PPCInstr_Load( 4, False, tHi, am_addr, mode64 )); + addInstr(env, PPCInstr_Load( 4, False, tLo, am_addr4, mode64 )); *rHi = tHi; *rLo = tLo; return; @@ -2463,7 +2512,7 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); - PPC32CondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); + PPCCondCode cc = 
mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond); HReg r_tmp = newVRegI(env); @@ -2472,13 +2521,13 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) addInstr(env, mk_iMOVds_RR(tHi,eXHi)); addInstr(env, mk_iMOVds_RR(tLo,eXLo)); - addInstr(env, PPC32Instr_Alu(Palu_AND, - r_tmp, r_cond, PPC32RH_Imm(False,0xFF))); - addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/, - 7/*cr*/, r_tmp, PPC32RH_Imm(False,0))); + addInstr(env, PPCInstr_Alu(Palu_AND, + r_tmp, r_cond, PPCRH_Imm(False,0xFF))); + addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, + 7/*cr*/, r_tmp, PPCRH_Imm(False,0))); - addInstr(env, PPC32Instr_CMov(cc,tHi,PPC32RI_Reg(e0Hi))); - addInstr(env, PPC32Instr_CMov(cc,tLo,PPC32RI_Reg(e0Lo))); + addInstr(env, PPCInstr_CMov(cc,tHi,PPCRI_Reg(e0Hi))); + addInstr(env, PPCInstr_CMov(cc,tLo,PPCRI_Reg(e0Lo))); *rHi = tHi; *rLo = tLo; return; @@ -2486,21 +2535,22 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) /* --------- BINARY ops --------- */ if (e->tag == Iex_Binop) { - switch (e->Iex.Binop.op) { + IROp op_binop = e->Iex.Binop.op; + switch (op_binop) { /* 32 x 32 -> 64 multiply */ case Iop_MullU32: case Iop_MullS32: { HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); - Bool syned = toBool(e->Iex.Binop.op == Iop_MullS32); + Bool syned = toBool(op_binop == Iop_MullS32); HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/, - False/*lo32*/, True/*32bit mul*/, - tLo, r_srcL, r_srcR)); - addInstr(env, PPC32Instr_MulL(syned, - True/*hi32*/, True/*32bit mul*/, - tHi, r_srcL, r_srcR)); + addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, + False/*lo32*/, True/*32bit mul*/, + tLo, r_srcL, r_srcR)); + addInstr(env, PPCInstr_MulL(syned, + True/*hi32*/, True/*32bit mul*/, + tHi, r_srcL, r_srcR)); *rHi = tHi; *rLo = tLo; 
return; @@ -2514,7 +2564,7 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) //.. HReg sHi, sLo; //.. HReg tLo = newVRegI32(env); //.. HReg tHi = newVRegI32(env); -//.. Bool syned = e->Iex.Binop.op == Iop_DivModS64to32; +//.. Bool syned = op_binop == Iop_DivModS64to32; //.. X86RM* rmRight = iselIntExpr_RM(env, e->Iex.Binop.arg2); //.. iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1); //.. addInstr(env, mk_iMOVsd_RR(sHi, hregX86_EDX())); @@ -2534,13 +2584,12 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) HReg xLo, xHi, yLo, yHi; HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); - PPC32AluOp op = e->Iex.Binop.op==Iop_Or64 ? Palu_OR - : e->Iex.Binop.op==Iop_And64 ? Palu_AND - : Palu_XOR; + PPCAluOp op = (op_binop == Iop_Or64) ? Palu_OR : + (op_binop == Iop_And64) ? Palu_AND : Palu_XOR; iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1); iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_Alu(op, tHi, xHi, PPC32RH_Reg(yHi))); - addInstr(env, PPC32Instr_Alu(op, tLo, xLo, PPC32RH_Reg(yLo))); + addInstr(env, PPCInstr_Alu(op, tHi, xHi, PPCRH_Reg(yHi))); + addInstr(env, PPCInstr_Alu(op, tLo, xLo, PPCRH_Reg(yLo))); *rHi = tHi; *rLo = tLo; return; @@ -2554,11 +2603,11 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) HReg tHi = newVRegI(env); iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1); iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2); -//.. if (e->Iex.Binop.op==Iop_Add64) { - addInstr(env, PPC32Instr_AddSubC32( True/*add*/, True /*set carry*/, - tLo, xLo, yLo)); - addInstr(env, PPC32Instr_AddSubC32( True/*add*/, False/*read carry*/, - tHi, xHi, yHi)); +//.. if (op_binop==Iop_Add64) { + addInstr(env, PPCInstr_AddSubC( True/*add*/, True /*set carry*/, + tLo, xLo, yLo)); + addInstr(env, PPCInstr_AddSubC( True/*add*/, False/*read carry*/, + tHi, xHi, yHi)); //.. } else { // Sub64 //.. 
} *rHi = tHi; @@ -2868,8 +2917,8 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) case Iop_32Sto64: { HReg tHi = newVRegI(env); HReg src = iselIntExpr_R(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/, - tHi, src, PPC32RH_Imm(False,31))); + addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, + tHi, src, PPCRH_Imm(False,31))); *rHi = tHi; *rLo = src; return; @@ -2879,7 +2928,7 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) case Iop_32Uto64: { HReg tHi = newVRegI(env); HReg tLo = iselIntExpr_R(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_LI(tHi, 0, mode64)); + addInstr(env, PPCInstr_LI(tHi, 0, mode64)); *rHi = tHi; *rLo = tLo; return; @@ -2893,21 +2942,24 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); HReg vec = iselVecExpr(env, e->Iex.Unop.arg); - PPC32AMode *am_off0, *am_offLO, *am_offHI; + PPCAMode *am_off0, *am_offLO, *am_offHI; sub_from_sp( env, 32 ); // Move SP down 32 bytes // get a quadword aligned address within our stack space r_aligned16 = get_sp_aligned16( env ); - am_off0 = PPC32AMode_IR( 0, r_aligned16 ); - am_offHI = PPC32AMode_IR( off, r_aligned16 ); - am_offLO = PPC32AMode_IR( off+4, r_aligned16 ); + am_off0 = PPCAMode_IR( 0, r_aligned16 ); + am_offHI = PPCAMode_IR( off, r_aligned16 ); + am_offLO = PPCAMode_IR( off+4, r_aligned16 ); // store as Vec128 - addInstr(env, PPC32Instr_AvLdSt( False/*store*/, 16, vec, am_off0 )); + addInstr(env, + PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 )); // load hi,lo words (of hi/lo half of vec) as Ity_I32's - addInstr(env, PPC32Instr_Load( 4, False, tHi, am_offHI, mode64 )); - addInstr(env, PPC32Instr_Load( 4, False, tLo, am_offLO, mode64 )); + addInstr(env, + PPCInstr_Load( 4, False, tHi, am_offHI, mode64 )); + addInstr(env, + PPCInstr_Load( 4, False, tLo, am_offLO, mode64 )); add_to_sp( env, 32 ); // 
Reset SP *rHi = tHi; @@ -2919,12 +2971,12 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) case Iop_1Sto64: { HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); - PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_Set32(cond,tLo)); - addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/, - tLo, tLo, PPC32RH_Imm(False,31))); - addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/, - tLo, tLo, PPC32RH_Imm(False,31))); + PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); + addInstr(env, PPCInstr_Set(cond,tLo)); + addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, + tLo, tLo, PPCRH_Imm(False,31))); + addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, + tLo, tLo, PPCRH_Imm(False,31))); addInstr(env, mk_iMOVds_RR(tHi, tLo)); *rHi = tHi; *rLo = tLo; @@ -2937,11 +2989,11 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); iselInt64Expr(&yHi, &yLo, env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_LI(zero, 0, mode64)); - addInstr(env, PPC32Instr_AddSubC32( False/*sub*/, True /*set carry*/, - tLo, zero, yLo)); - addInstr(env, PPC32Instr_AddSubC32( False/*sub*/, False/*read carry*/, - tHi, zero, yHi)); + addInstr(env, PPCInstr_LI(zero, 0, mode64)); + addInstr(env, PPCInstr_AddSubC( False/*sub*/, True/*set carry*/, + tLo, zero, yLo)); + addInstr(env, PPCInstr_AddSubC( False/*sub*/, False/*read carry*/, + tHi, zero, yHi)); *rHi = tHi; *rLo = tLo; return; @@ -2966,21 +3018,24 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) /* Given an IEEE754 double, produce an I64 with the same bit pattern. 
*/ case Iop_ReinterpF64asI64: { - PPC32AMode *am_addr0, *am_addr1; + PPCAMode *am_addr0, *am_addr1; HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg); HReg r_dstLo = newVRegI(env); HReg r_dstHi = newVRegI(env); sub_from_sp( env, 16 ); // Move SP down 16 bytes - am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64)); - am_addr1 = PPC32AMode_IR(4, StackFramePtr(mode64)); + am_addr0 = PPCAMode_IR( 0, StackFramePtr(mode64) ); + am_addr1 = PPCAMode_IR( 4, StackFramePtr(mode64) ); // store as F64 - addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr0 )); + addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8, + fr_src, am_addr0 )); // load hi,lo as Ity_I32's - addInstr(env, PPC32Instr_Load( 4, False, r_dstHi, am_addr0, mode64 )); - addInstr(env, PPC32Instr_Load( 4, False, r_dstLo, am_addr1, mode64 )); + addInstr(env, PPCInstr_Load( 4, False, r_dstHi, + am_addr0, mode64 )); + addInstr(env, PPCInstr_Load( 4, False, r_dstLo, + am_addr1, mode64 )); *rHi = r_dstHi; *rLo = r_dstLo; @@ -3036,9 +3091,9 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) //.. return; //.. 
} - vex_printf("iselInt64Expr(ppc32): No such tag(%u)\n", e->tag); + vex_printf("iselInt64Expr(ppc): No such tag(%u)\n", e->tag); ppIRExpr(e); - vpanic("iselInt64Expr(ppc32)"); + vpanic("iselInt64Expr(ppc)"); } @@ -3071,11 +3126,11 @@ static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e ) } if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) { - PPC32AMode* am_addr; + PPCAMode* am_addr; HReg r_dst = newVRegF(env); vassert(e->Iex.Load.ty == Ity_F32); am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr); - addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 4, r_dst, am_addr)); + addInstr(env, PPCInstr_FpLdSt(True/*load*/, 4, r_dst, am_addr)); return r_dst; } @@ -3087,16 +3142,16 @@ static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg r_dst = newVRegF(env); HReg r_src = iselDblExpr(env, e->Iex.Binop.arg2); set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); - addInstr(env, PPC32Instr_FpF64toF32(r_dst, r_src)); + addInstr(env, PPCInstr_FpF64toF32(r_dst, r_src)); set_FPU_rounding_default( env ); return r_dst; } if (e->tag == Iex_Get) { HReg r_dst = newVRegF(env); - PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset, - GuestStatePtr(mode64) ); - addInstr(env, PPC32Instr_FpLdSt( True/*load*/, 4, r_dst, am_addr )); + PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset, + GuestStatePtr(mode64) ); + addInstr(env, PPCInstr_FpLdSt( True/*load*/, 4, r_dst, am_addr )); return r_dst; } @@ -3115,9 +3170,9 @@ static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e ) //.. return dst; //.. 
} - vex_printf("iselFltExpr(ppc32): No such tag(%u)\n", e->tag); + vex_printf("iselFltExpr(ppc): No such tag(%u)\n", e->tag); ppIRExpr(e); - vpanic("iselFltExpr_wrk(ppc32)"); + vpanic("iselFltExpr_wrk(ppc)"); } @@ -3185,35 +3240,35 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) u.u64 = e->Iex.Const.con->Ico.F64i; } else - vpanic("iselDblExpr(ppc32): const"); + vpanic("iselDblExpr(ppc): const"); if (!mode64) { HReg r_srcHi = newVRegI(env); HReg r_srcLo = newVRegI(env); - addInstr(env, PPC32Instr_LI(r_srcHi, u.u32x2[1], mode64)); - addInstr(env, PPC32Instr_LI(r_srcLo, u.u32x2[0], mode64)); + addInstr(env, PPCInstr_LI(r_srcHi, u.u32x2[1], mode64)); + addInstr(env, PPCInstr_LI(r_srcLo, u.u32x2[0], mode64)); return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo ); } else { // mode64 HReg r_src = newVRegI(env); - addInstr(env, PPC32Instr_LI(r_src, u.u64, mode64)); + addInstr(env, PPCInstr_LI(r_src, u.u64, mode64)); return mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64 } } if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) { HReg r_dst = newVRegF(env); - PPC32AMode* am_addr; + PPCAMode* am_addr; vassert(e->Iex.Load.ty == Ity_F64); am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr); - addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 8, r_dst, am_addr)); + addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_dst, am_addr)); return r_dst; } if (e->tag == Iex_Get) { HReg r_dst = newVRegF(env); - PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset, - GuestStatePtr(mode64) ); - addInstr(env, PPC32Instr_FpLdSt( True/*load*/, 8, r_dst, am_addr )); + PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset, + GuestStatePtr(mode64) ); + addInstr(env, PPCInstr_FpLdSt( True/*load*/, 8, r_dst, am_addr )); return r_dst; } @@ -3228,7 +3283,7 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) //.. 
} if (e->tag == Iex_Binop) { - PPC32FpOp fpop = Pfp_INVALID; + PPCFpOp fpop = Pfp_INVALID; switch (e->Iex.Binop.op) { case Iop_AddF64: fpop = Pfp_ADD; break; case Iop_SubF64: fpop = Pfp_SUB; break; @@ -3240,7 +3295,7 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg r_dst = newVRegF(env); HReg r_srcL = iselDblExpr(env, e->Iex.Binop.arg1); HReg r_srcR = iselDblExpr(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_FpBinary(fpop, r_dst, r_srcL, r_srcR)); + addInstr(env, PPCInstr_FpBinary(fpop, r_dst, r_srcL, r_srcR)); return r_dst; } @@ -3253,7 +3308,7 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); sub_from_sp( env, 16 ); - addInstr(env, PPC32Instr_FpI64toF64(fr_dst, r_src)); + addInstr(env, PPCInstr_FpI64toF64(fr_dst, r_src)); add_to_sp( env, 16 ); /* Restore default FPU rounding. */ @@ -3285,20 +3340,20 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) //.. HReg fr_dst = newVRegF(env); //.. HReg rHi,rLo; //.. iselInt64Expr( &rHi, &rLo, env, e->Iex.Binop.arg2); -//.. addInstr(env, PPC32Instr_Push(PPC32RMI_Reg(rHi))); -//.. addInstr(env, PPC32Instr_Push(PPC32RMI_Reg(rLo))); +//.. addInstr(env, PPCInstr_Push(PPCRMI_Reg(rHi))); +//.. addInstr(env, PPCInstr_Push(PPCRMI_Reg(rLo))); //.. //.. /* Set host rounding mode */ //.. set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); //.. -//.. PPC32AMode* am_addr = ... -//.. addInstr(env, PPC32Instr_FpLdSt( True/*load*/, 8, r_dst, -//.. PPC32AMode_IR(0, GuestStatePtr ) )); +//.. PPCAMode* am_addr = ... +//.. addInstr(env, PPCInstr_FpLdSt( True/*load*/, 8, r_dst, +//.. PPCAMode_IR(0, GuestStatePtr ) )); //.. //.. -//.. addInstr(env, PPC32Instr_FpLdStI( +//.. addInstr(env, PPCInstr_FpLdStI( //.. True/*load*/, 8, fr_dst, -//.. PPC32AMode_IR(0, hregPPC32_ESP()))); +//.. PPCAMode_IR(0, hregPPC_ESP()))); //.. //.. /* Restore default FPU rounding. */ //.. 
set_FPU_rounding_default( env ); @@ -3308,7 +3363,7 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) //.. } if (e->tag == Iex_Unop) { - PPC32FpOp fpop = Pfp_INVALID; + PPCFpOp fpop = Pfp_INVALID; switch (e->Iex.Unop.op) { case Iop_NegF64: fpop = Pfp_NEG; break; case Iop_AbsF64: fpop = Pfp_ABS; break; @@ -3322,9 +3377,9 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) if (fpop != Pfp_INVALID) { HReg fr_dst = newVRegF(env); HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg); - addInstr(env, PPC32Instr_FpUnary(fpop, fr_dst, fr_src)); -//.. if (fpop != Pfp_SQRT && fpop != Xfp_NEG && fpop != Xfp_ABS) -//.. roundToF64(env, fr_dst); + addInstr(env, PPCInstr_FpUnary(fpop, fr_dst, fr_src)); +//.. if (fpop != Pfp_SQRT && fpop != Xfp_NEG && fpop != Xfp_ABS) +//.. roundToF64(env, fr_dst); return fr_dst; } } @@ -3369,24 +3424,25 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) if (e->tag == Iex_Mux0X) { if (ty == Ity_F64 && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) { - PPC32CondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); + PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond); HReg frX = iselDblExpr(env, e->Iex.Mux0X.exprX); HReg fr0 = iselDblExpr(env, e->Iex.Mux0X.expr0); HReg fr_dst = newVRegF(env); HReg r_tmp = newVRegI(env); - addInstr(env, PPC32Instr_Alu(Palu_AND, r_tmp, r_cond, PPC32RH_Imm(False,0xFF))); - addInstr(env, PPC32Instr_FpUnary( Pfp_MOV, fr_dst, frX )); - addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/, - 7/*cr*/, r_tmp, PPC32RH_Imm(False,0))); - addInstr(env, PPC32Instr_FpCMov( cc, fr_dst, fr0 )); + addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp, + r_cond, PPCRH_Imm(False,0xFF))); + addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, frX )); + addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, + 7/*cr*/, r_tmp, PPCRH_Imm(False,0))); + addInstr(env, PPCInstr_FpCMov( cc, fr_dst, fr0 )); return fr_dst; } } - vex_printf("iselDblExpr(ppc32): No
such tag(%u)\n", e->tag); + vex_printf("iselDblExpr(ppc): No such tag(%u)\n", e->tag); ppIRExpr(e); - vpanic("iselDblExpr_wrk(ppc32)"); + vpanic("iselDblExpr_wrk(ppc)"); } @@ -3409,8 +3465,8 @@ static HReg iselVecExpr ( ISelEnv* env, IRExpr* e ) static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) { //.. Bool arg1isEReg = False; - PPC32AvOp op = Pav_INVALID; - IRType ty = typeOfIRExpr(env->type_env,e); + PPCAvOp op = Pav_INVALID; + IRType ty = typeOfIRExpr(env->type_env,e); vassert(e); vassert(ty == Ity_V128); @@ -3419,21 +3475,22 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) } if (e->tag == Iex_Get) { - /* Guest state vectors are 16byte aligned, so don't need to worry here */ + /* Guest state vectors are 16byte aligned, + so don't need to worry here */ HReg dst = newVRegV(env); addInstr(env, - PPC32Instr_AvLdSt( True/*load*/, 16, dst, - PPC32AMode_IR(e->Iex.Get.offset, - GuestStatePtr(mode64)))); + PPCInstr_AvLdSt( True/*load*/, 16, dst, + PPCAMode_IR( e->Iex.Get.offset, + GuestStatePtr(mode64) ))); return dst; } if (e->tag == Iex_Load) { - PPC32AMode* am_addr; + PPCAMode* am_addr; HReg v_dst = newVRegV(env); vassert(e->Iex.Load.ty == Ity_V128); am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr); - addInstr(env, PPC32Instr_AvLdSt( True/*load*/, 16, v_dst, am_addr)); + addInstr(env, PPCInstr_AvLdSt( True/*load*/, 16, v_dst, am_addr)); return v_dst; } @@ -3450,7 +3507,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) case Iop_NotV128: { HReg arg = iselVecExpr(env, e->Iex.Unop.arg); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, arg)); + addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, arg)); return dst; } @@ -3486,9 +3543,9 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg arg = iselVecExpr(env, e->Iex.Unop.arg); HReg zero = newVRegV(env); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvBinary(Pav_XOR, zero, zero, zero)); - addInstr(env, PPC32Instr_AvBin8x16(Pav_CMPEQU, dst, arg, zero)); - 
addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, dst)); + addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero)); + addInstr(env, PPCInstr_AvBin8x16(Pav_CMPEQU, dst, arg, zero)); + addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst)); return dst; } @@ -3496,9 +3553,9 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg arg = iselVecExpr(env, e->Iex.Unop.arg); HReg zero = newVRegV(env); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvBinary(Pav_XOR, zero, zero, zero)); - addInstr(env, PPC32Instr_AvBin16x8(Pav_CMPEQU, dst, arg, zero)); - addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, dst)); + addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero)); + addInstr(env, PPCInstr_AvBin16x8(Pav_CMPEQU, dst, arg, zero)); + addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst)); return dst; } @@ -3506,9 +3563,9 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg arg = iselVecExpr(env, e->Iex.Unop.arg); HReg zero = newVRegV(env); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvBinary(Pav_XOR, zero, zero, zero)); - addInstr(env, PPC32Instr_AvBin32x4(Pav_CMPEQU, dst, arg, zero)); - addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, dst)); + addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero)); + addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, dst, arg, zero)); + addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst)); return dst; } @@ -3552,7 +3609,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) { HReg arg = iselVecExpr(env, e->Iex.Unop.arg); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvUn32Fx4(op, dst, arg)); + addInstr(env, PPCInstr_AvUn32Fx4(op, dst, arg)); return dst; } @@ -3609,28 +3666,28 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg r_aligned16, r_zeros; HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg); HReg dst = newVRegV(env); - PPC32AMode *am_off0, *am_off4, *am_off8, *am_off12; + PPCAMode *am_off0, *am_off4, *am_off8, *am_off12; sub_from_sp( env, 32 ); // Move SP down /* Get a quadword aligned 
address within our stack space */ r_aligned16 = get_sp_aligned16( env ); - am_off0 = PPC32AMode_IR( 0, r_aligned16); - am_off4 = PPC32AMode_IR( 4, r_aligned16); - am_off8 = PPC32AMode_IR( 8, r_aligned16); - am_off12 = PPC32AMode_IR( 12, r_aligned16); + am_off0 = PPCAMode_IR( 0, r_aligned16 ); + am_off4 = PPCAMode_IR( 4, r_aligned16 ); + am_off8 = PPCAMode_IR( 8, r_aligned16 ); + am_off12 = PPCAMode_IR( 12, r_aligned16 ); /* Store zeros */ r_zeros = newVRegI(env); - addInstr(env, PPC32Instr_LI(r_zeros, 0x0, mode64)); - addInstr(env, PPC32Instr_Store( 4, am_off0, r_zeros, mode64 )); - addInstr(env, PPC32Instr_Store( 4, am_off4, r_zeros, mode64 )); - addInstr(env, PPC32Instr_Store( 4, am_off8, r_zeros, mode64 )); + addInstr(env, PPCInstr_LI(r_zeros, 0x0, mode64)); + addInstr(env, PPCInstr_Store( 4, am_off0, r_zeros, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_off4, r_zeros, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_off8, r_zeros, mode64 )); /* Store r_src in low word of quadword-aligned mem */ - addInstr(env, PPC32Instr_Store( 4, am_off12, r_src, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_off12, r_src, mode64 )); /* Load word into low word of quadword vector reg */ - addInstr(env, PPC32Instr_AvLdSt( True/*load*/, 4, dst, am_off12 )); + addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 4, dst, am_off12 )); add_to_sp( env, 32 ); // Reset SP return dst; @@ -3693,29 +3750,29 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) case Iop_64HLtoV128: { if (!mode64) { HReg r3, r2, r1, r0, r_aligned16; - PPC32AMode *am_off0, *am_off4, *am_off8, *am_off12; + PPCAMode *am_off0, *am_off4, *am_off8, *am_off12; HReg dst = newVRegV(env); /* do this via the stack (easy, convenient, etc) */ sub_from_sp( env, 32 ); // Move SP down // get a quadword aligned address within our stack space r_aligned16 = get_sp_aligned16( env ); - am_off0 = PPC32AMode_IR( 0, r_aligned16); - am_off4 = PPC32AMode_IR( 4, r_aligned16); - am_off8 = PPC32AMode_IR( 8, r_aligned16); - am_off12 = 
PPC32AMode_IR( 12, r_aligned16); + am_off0 = PPCAMode_IR( 0, r_aligned16 ); + am_off4 = PPCAMode_IR( 4, r_aligned16 ); + am_off8 = PPCAMode_IR( 8, r_aligned16 ); + am_off12 = PPCAMode_IR( 12, r_aligned16 ); /* Do the less significant 64 bits */ iselInt64Expr(&r1, &r0, env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_Store( 4, am_off12, r0, mode64 )); - addInstr(env, PPC32Instr_Store( 4, am_off8, r1, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_off12, r0, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_off8, r1, mode64 )); /* Do the more significant 64 bits */ iselInt64Expr(&r3, &r2, env, e->Iex.Binop.arg1); - addInstr(env, PPC32Instr_Store( 4, am_off4, r2, mode64 )); - addInstr(env, PPC32Instr_Store( 4, am_off0, r3, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_off4, r2, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_off0, r3, mode64 )); /* Fetch result back from stack. */ - addInstr(env, PPC32Instr_AvLdSt(True/*load*/, 16, dst, am_off0)); + addInstr(env, PPCInstr_AvLdSt(True/*ld*/, 16, dst, am_off0)); add_to_sp( env, 32 ); // Reset SP return dst; @@ -3740,7 +3797,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg argL = iselVecExpr(env, e->Iex.Binop.arg1); HReg argR = iselVecExpr(env, e->Iex.Binop.arg2); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvBin32Fx4(op, dst, argL, argR)); + addInstr(env, PPCInstr_AvBin32Fx4(op, dst, argL, argR)); return dst; } @@ -3756,11 +3813,13 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg isNanLR = newVRegV(env); HReg isNanL = isNan(env, argL); HReg isNanR = isNan(env, argR); - addInstr(env, PPC32Instr_AvBinary(Pav_OR, isNanLR, isNanL, isNanR)); + addInstr(env, PPCInstr_AvBinary(Pav_OR, isNanLR, + isNanL, isNanR)); - addInstr(env, PPC32Instr_AvBin32Fx4(Pavfp_CMPGTF, dst, argL, argR)); - addInstr(env, PPC32Instr_AvBinary(Pav_OR, dst, dst, isNanLR)); - addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, dst)); + addInstr(env, PPCInstr_AvBin32Fx4(Pavfp_CMPGTF, dst, + argL, argR)); + 
addInstr(env, PPCInstr_AvBinary(Pav_OR, dst, dst, isNanLR)); + addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst)); return dst; } @@ -3828,7 +3887,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1); HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvBinary(op, dst, arg1, arg2)); + addInstr(env, PPCInstr_AvBinary(op, dst, arg1, arg2)); return dst; } @@ -3861,7 +3920,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1); HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvBin8x16(op, dst, arg1, arg2)); + addInstr(env, PPCInstr_AvBin8x16(op, dst, arg1, arg2)); return dst; } @@ -3895,7 +3954,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1); HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvBin16x8(op, dst, arg1, arg2)); + addInstr(env, PPCInstr_AvBin16x8(op, dst, arg1, arg2)); return dst; } @@ -3927,7 +3986,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1); HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2); HReg dst = newVRegV(env); - addInstr(env, PPC32Instr_AvBin32x4(op, dst, arg1, arg2)); + addInstr(env, PPCInstr_AvBin32x4(op, dst, arg1, arg2)); return dst; } @@ -3937,7 +3996,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg r_src = iselVecExpr(env, e->Iex.Binop.arg1); HReg dst = newVRegV(env); HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_AvBin8x16(op, dst, r_src, v_shft)); + addInstr(env, PPCInstr_AvBin8x16(op, dst, r_src, v_shft)); return dst; } @@ -3948,7 +4007,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg r_src = iselVecExpr(env, e->Iex.Binop.arg1); HReg dst = newVRegV(env); HReg v_shft = 
mk_AvDuplicateRI(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_AvBin16x8(op, dst, r_src, v_shft)); + addInstr(env, PPCInstr_AvBin16x8(op, dst, r_src, v_shft)); return dst; } @@ -3959,7 +4018,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg r_src = iselVecExpr(env, e->Iex.Binop.arg1); HReg dst = newVRegV(env); HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_AvBin32x4(op, dst, r_src, v_shft)); + addInstr(env, PPCInstr_AvBin32x4(op, dst, r_src, v_shft)); return dst; } @@ -3970,7 +4029,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg r_src = iselVecExpr(env, e->Iex.Binop.arg1); HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2); /* Note: shift value gets masked by 127 */ - addInstr(env, PPC32Instr_AvBinary(op, dst, r_src, v_shft)); + addInstr(env, PPCInstr_AvBinary(op, dst, r_src, v_shft)); return dst; } @@ -3978,7 +4037,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) HReg dst = newVRegV(env); HReg v_src = iselVecExpr(env, e->Iex.Binop.arg1); HReg v_ctl = iselVecExpr(env, e->Iex.Binop.arg2); - addInstr(env, PPC32Instr_AvPerm(dst, v_src, v_src, v_ctl)); + addInstr(env, PPCInstr_AvPerm(dst, v_src, v_src, v_ctl)); return dst; } @@ -3999,10 +4058,10 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) //.. 
} // unused: vec_fail: - vex_printf("iselVecExpr(ppc32) (subarch = %s): can't reduce\n", + vex_printf("iselVecExpr(ppc) (subarch = %s): can't reduce\n", LibVEX_ppVexSubArch(env->subarch)); ppIRExpr(e); - vpanic("iselVecExpr_wrk(ppc32)"); + vpanic("iselVecExpr_wrk(ppc)"); } @@ -4022,7 +4081,7 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) /* --------- STORE --------- */ case Ist_Store: { - PPC32AMode* am_addr; + PPCAMode* am_addr; IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr); IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data); IREndness end = stmt->Ist.Store.end; @@ -4036,18 +4095,20 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 || (mode64 && (tyd == Ity_I64))) { HReg r_src = iselIntExpr_R(env, stmt->Ist.Store.data); - addInstr(env, PPC32Instr_Store( toUChar(sizeofIRType(tyd)), + addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(tyd)), am_addr, r_src, mode64 )); return; } if (tyd == Ity_F64) { HReg fr_src = iselDblExpr(env, stmt->Ist.Store.data); - addInstr(env, PPC32Instr_FpLdSt(False/*store*/, 8, fr_src, am_addr)); + addInstr(env, + PPCInstr_FpLdSt(False/*store*/, 8, fr_src, am_addr)); return; } if (tyd == Ity_F32) { HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data); - addInstr(env, PPC32Instr_FpLdSt(False/*store*/, 4, fr_src, am_addr)); + addInstr(env, + PPCInstr_FpLdSt(False/*store*/, 4, fr_src, am_addr)); return; } //.. if (tyd == Ity_I64) { @@ -4062,7 +4123,8 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) //.. 
} if (tyd == Ity_V128) { HReg v_src = iselVecExpr(env, stmt->Ist.Store.data); - addInstr(env, PPC32Instr_AvLdSt(False/*store*/, 16, v_src, am_addr)); + addInstr(env, + PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr)); return; } break; @@ -4074,28 +4136,30 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ((ty == Ity_I64) && mode64)) { HReg r_src = iselIntExpr_R(env, stmt->Ist.Put.data); - PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset, - GuestStatePtr(mode64)); - addInstr(env, PPC32Instr_Store( toUChar(sizeofIRType(ty)), - am_addr, r_src, mode64 )); + PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, + GuestStatePtr(mode64) ); + addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(ty)), + am_addr, r_src, mode64 )); return; } if (!mode64 && ty == Ity_I64) { HReg rHi, rLo; - PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset, - GuestStatePtr(mode64)); - PPC32AMode* am_addr4 = advance4(env, am_addr); + PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, + GuestStatePtr(mode64) ); + PPCAMode* am_addr4 = advance4(env, am_addr); iselInt64Expr(&rHi,&rLo, env, stmt->Ist.Put.data); - addInstr(env, PPC32Instr_Store( 4, am_addr, rHi, mode64 )); - addInstr(env, PPC32Instr_Store( 4, am_addr4, rLo, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_addr, rHi, mode64 )); + addInstr(env, PPCInstr_Store( 4, am_addr4, rLo, mode64 )); return; } if (ty == Ity_V128) { - /* Guest state vectors are 16byte aligned, so don't need to worry here */ + /* Guest state vectors are 16byte aligned, + so don't need to worry here */ HReg v_src = iselVecExpr(env, stmt->Ist.Put.data); - PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset, - GuestStatePtr(mode64)); - addInstr(env, PPC32Instr_AvLdSt(False/*store*/, 16, v_src, am_addr)); + PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, + GuestStatePtr(mode64) ); + addInstr(env, + PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr)); return; } //.. 
if (ty == Ity_F32) { @@ -4107,9 +4171,10 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) //.. } if (ty == Ity_F64) { HReg fr_src = iselDblExpr(env, stmt->Ist.Put.data); - PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset, - GuestStatePtr(mode64)); - addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr )); + PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, + GuestStatePtr(mode64) ); + addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8, + fr_src, am_addr )); return; } break; @@ -4172,27 +4237,27 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) return; } if (ty == Ity_I1) { - PPC32CondCode cond = iselCondCode(env, stmt->Ist.Tmp.data); + PPCCondCode cond = iselCondCode(env, stmt->Ist.Tmp.data); HReg r_dst = lookupIRTemp(env, tmp); - addInstr(env, PPC32Instr_Set32(cond, r_dst)); + addInstr(env, PPCInstr_Set(cond, r_dst)); return; } if (ty == Ity_F64) { HReg fr_dst = lookupIRTemp(env, tmp); HReg fr_src = iselDblExpr(env, stmt->Ist.Tmp.data); - addInstr(env, PPC32Instr_FpUnary(Pfp_MOV, fr_dst, fr_src)); + addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src)); return; } if (ty == Ity_F32) { HReg fr_dst = lookupIRTemp(env, tmp); HReg fr_src = iselFltExpr(env, stmt->Ist.Tmp.data); - addInstr(env, PPC32Instr_FpUnary(Pfp_MOV, fr_dst, fr_src)); + addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src)); return; } if (ty == Ity_V128) { HReg v_dst = lookupIRTemp(env, tmp); HReg v_src = iselVecExpr(env, stmt->Ist.Tmp.data); - addInstr(env, PPC32Instr_AvUnary(Pav_MOV, v_dst, v_src)); + addInstr(env, PPCInstr_AvUnary(Pav_MOV, v_dst, v_src)); return; } break; @@ -4239,7 +4304,7 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) /* --------- MEM FENCE --------- */ case Ist_MFence: - addInstr(env, PPC32Instr_MFence()); + addInstr(env, PPCInstr_MFence()); return; /* --------- INSTR MARK --------- */ @@ -4254,17 +4319,17 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) /* --------- EXIT --------- */ case Ist_Exit: { - PPC32RI* ri_dst; - 
PPC32CondCode cc; + PPCRI* ri_dst; + PPCCondCode cc; IRConstTag tag = stmt->Ist.Exit.dst->tag; if (!mode64 && (tag != Ico_U32)) - vpanic("iselStmt(ppc32): Ist_Exit: dst is not a 32-bit value"); + vpanic("iselStmt(ppc): Ist_Exit: dst is not a 32-bit value"); if (mode64 && (tag != Ico_U64)) vpanic("iselStmt(ppc64): Ist_Exit: dst is not a 64-bit value"); ri_dst = iselIntExpr_RI(env, IRExpr_Const(stmt->Ist.Exit.dst)); cc = iselCondCode(env,stmt->Ist.Exit.guard); - addInstr(env, PPC32Instr_RdWrLR(True, env->savedLR)); - addInstr(env, PPC32Instr_Goto(stmt->Ist.Exit.jk, cc, ri_dst)); + addInstr(env, PPCInstr_RdWrLR(True, env->savedLR)); + addInstr(env, PPCInstr_Goto(stmt->Ist.Exit.jk, cc, ri_dst)); return; } @@ -4272,7 +4337,7 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) } stmt_fail: ppIRStmt(stmt); - vpanic("iselStmt(ppc32)"); + vpanic("iselStmt(ppc)"); } @@ -4282,8 +4347,8 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) static void iselNext ( ISelEnv* env, IRExpr* next, IRJumpKind jk ) { - PPC32CondCode cond; - PPC32RI* ri; + PPCCondCode cond; + PPCRI* ri; if (vex_traceflags & VEX_TRACE_VCODE) { vex_printf("\n-- goto {"); ppIRJumpKind(jk); @@ -4293,8 +4358,8 @@ static void iselNext ( ISelEnv* env, IRExpr* next, IRJumpKind jk ) } cond = mk_PPCCondCode( Pct_ALWAYS, Pcf_7EQ ); ri = iselIntExpr_RI(env, next); - addInstr(env, PPC32Instr_RdWrLR(True, env->savedLR)); - addInstr(env, PPC32Instr_Goto(jk, cond, ri)); + addInstr(env, PPCInstr_RdWrLR(True, env->savedLR)); + addInstr(env, PPCInstr_Goto(jk, cond, ri)); } @@ -4302,9 +4367,9 @@ static void iselNext ( ISelEnv* env, IRExpr* next, IRJumpKind jk ) /*--- Insn selector top-level ---*/ /*---------------------------------------------------------*/ -/* Translate an entire BB to ppc32 code. */ +/* Translate an entire BB to ppc code. 
*/ -HInstrArray* iselBB_PPC32 ( IRBB* bb, VexArchInfo* archinfo_host ) +HInstrArray* iselBB_PPC ( IRBB* bb, VexArchInfo* archinfo_host ) { Int i, j; HReg hreg, hregHI; @@ -4323,7 +4388,7 @@ HInstrArray* iselBB_PPC32 ( IRBB* bb, VexArchInfo* archinfo_host ) mode64 = True; break; default: - vpanic("iselBB_PPC32: illegal subarch"); + vpanic("iselBB_PPC: illegal subarch"); } /* Make up an initial environment to use. */ @@ -4371,10 +4436,7 @@ HInstrArray* iselBB_PPC32 ( IRBB* bb, VexArchInfo* archinfo_host ) case Ity_V128: hreg = mkHReg(j++, HRcVec128, True); break; default: ppIRType(bb->tyenv->types[i]); - if (mode64) - vpanic("iselBB(ppc64): IRTemp type"); - else - vpanic("iselBB(ppc32): IRTemp type"); + vpanic("iselBB(ppc): IRTemp type"); } env->vregmap[i] = hreg; env->vregmapHI[i] = hregHI; @@ -4383,7 +4445,7 @@ HInstrArray* iselBB_PPC32 ( IRBB* bb, VexArchInfo* archinfo_host ) /* Keep a copy of the link reg, so helper functions don't kill it. */ env->savedLR = newVRegI(env); - addInstr(env, PPC32Instr_RdWrLR(False, env->savedLR)); + addInstr(env, PPCInstr_RdWrLR(False, env->savedLR)); /* Ok, finally we can iterate over the statements. 
*/ for (i = 0; i < bb->stmts_used; i++) diff --git a/VEX/priv/main/vex_main.c b/VEX/priv/main/vex_main.c index 1db2282928..8f5bc8efa5 100644 --- a/VEX/priv/main/vex_main.c +++ b/VEX/priv/main/vex_main.c @@ -280,17 +280,17 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta ) case VexArchPPC32: mode64 = False; - getAllocableRegs_PPC32 ( &n_available_real_regs, - &available_real_regs, mode64 ); - isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPC32Instr; - getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPC32Instr; - mapRegs = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPC32Instr; - genSpill = (HInstr*(*)(HReg,Int,Bool)) genSpill_PPC32; - genReload = (HInstr*(*)(HReg,Int,Bool)) genReload_PPC32; - ppInstr = (void(*)(HInstr*,Bool)) ppPPC32Instr; - ppReg = (void(*)(HReg)) ppHRegPPC32; - iselBB = iselBB_PPC32; - emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPC32Instr; + getAllocableRegs_PPC ( &n_available_real_regs, + &available_real_regs, mode64 ); + isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr; + getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPCInstr; + mapRegs = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPCInstr; + genSpill = (HInstr*(*)(HReg,Int,Bool)) genSpill_PPC; + genReload = (HInstr*(*)(HReg,Int,Bool)) genReload_PPC; + ppInstr = (void(*)(HInstr*,Bool)) ppPPCInstr; + ppReg = (void(*)(HReg)) ppHRegPPC; + iselBB = iselBB_PPC; + emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr; host_is_bigendian = True; host_word_type = Ity_I32; vassert(vta->archinfo_guest.subarch == VexSubArchPPC32_I @@ -301,17 +301,17 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta ) case VexArchPPC64: mode64 = True; - getAllocableRegs_PPC32 ( &n_available_real_regs, - &available_real_regs, mode64 ); - isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPC32Instr; - getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPC32Instr; - mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPC32Instr; - genSpill = 
(HInstr*(*)(HReg,Int, Bool)) genSpill_PPC32; - genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_PPC32; - ppInstr = (void(*)(HInstr*, Bool)) ppPPC32Instr; - ppReg = (void(*)(HReg)) ppHRegPPC32; - iselBB = iselBB_PPC32; - emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPC32Instr; + getAllocableRegs_PPC ( &n_available_real_regs, + &available_real_regs, mode64 ); + isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr; + getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPCInstr; + mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPCInstr; + genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_PPC; + genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_PPC; + ppInstr = (void(*)(HInstr*, Bool)) ppPPCInstr; + ppReg = (void(*)(HReg)) ppHRegPPC; + iselBB = iselBB_PPC; + emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr; host_is_bigendian = True; host_word_type = Ity_I64; vassert(vta->archinfo_guest.subarch == VexSubArchPPC64_FI @@ -372,7 +372,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta ) case VexArchPPC32: preciseMemExnsFn = guest_ppc32_state_requires_precise_mem_exns; - disInstrFn = disInstr_PPC32; + disInstrFn = disInstr_PPC; specHelper = guest_ppc32_spechelper; guest_sizeB = sizeof(VexGuestPPC32State); guest_word_type = Ity_I32; @@ -389,7 +389,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta ) case VexArchPPC64: preciseMemExnsFn = guest_ppc64_state_requires_precise_mem_exns; - disInstrFn = disInstr_PPC32; + disInstrFn = disInstr_PPC; specHelper = guest_ppc64_spechelper; guest_sizeB = sizeof(VexGuestPPC64State); guest_word_type = Ity_I64; @@ -701,8 +701,8 @@ const HChar* LibVEX_ppVexSubArch ( VexSubArch subarch ) /* Write default settings info *vai. 
*/ void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai ) { - vai->subarch = VexSubArch_INVALID; - vai->ppc32_cache_line_szB = 0; + vai->subarch = VexSubArch_INVALID; + vai->ppc_cache_line_szB = 0; } diff --git a/VEX/pub/libvex.h b/VEX/pub/libvex.h index 31b3f7d8eb..5fb9718cca 100644 --- a/VEX/pub/libvex.h +++ b/VEX/pub/libvex.h @@ -102,7 +102,7 @@ typedef /* This is the only mandatory field. */ VexSubArch subarch; /* PPC32/PPC64 only: size of cache line */ - Int ppc32_cache_line_szB; + Int ppc_cache_line_szB; } VexArchInfo; @@ -452,7 +452,7 @@ extern void LibVEX_ShowStats ( void ); ppc64 ~~~~~ - Probably the same as ppc32. + Same as ppc32. ALL GUEST ARCHITECTURES ~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/VEX/switchback/switchback.c b/VEX/switchback/switchback.c index 8e0e26bc89..1cf98ef766 100644 --- a/VEX/switchback/switchback.c +++ b/VEX/switchback/switchback.c @@ -846,7 +846,7 @@ void make_translation ( Addr64 guest_addr, Bool verbose ) LibVEX_default_VexArchInfo(&vex_archinfo); vex_archinfo.subarch = VexSubArch; - vex_archinfo.ppc32_cache_line_szB = CacheLineSize; + vex_archinfo.ppc_cache_line_szB = CacheLineSize; /* */ vta.arch_guest = VexArch; diff --git a/VEX/test_main.c b/VEX/test_main.c index c40a7f32b4..a2a63aac41 100644 --- a/VEX/test_main.c +++ b/VEX/test_main.c @@ -132,7 +132,7 @@ int main ( int argc, char** argv ) LibVEX_default_VexArchInfo(&vai_ppc32); vai_ppc32.subarch = VexSubArchPPC32_VFI; - vai_ppc32.ppc32_cache_line_szB = 128; + vai_ppc32.ppc_cache_line_szB = 128; /* ----- Set up args for LibVEX_Translate ----- */ #if 1 /* ppc32 -> ppc32 */ -- 2.47.3