Also tidied up a fair bit while I was at it.
git-svn-id: svn://svn.valgrind.org/vex/trunk@1504
/* Only to be used within the guest-ppc32 directory. */
-#ifndef __LIBVEX_GUEST_PPC32_DEFS_H
-#define __LIBVEX_GUEST_PPC32_DEFS_H
+#ifndef __LIBVEX_GUEST_PPC_DEFS_H
+#define __LIBVEX_GUEST_PPC_DEFS_H
/*---------------------------------------------------------*/
-/*--- ppc32 to IR conversion ---*/
+/*--- ppc to IR conversion ---*/
/*---------------------------------------------------------*/
-/* Convert one ppc32 insn to IR. See the type DisOneInstrFn in
+/* Convert one ppc insn to IR. See the type DisOneInstrFn in
bb_to_IR.h. */
extern
-DisResult disInstr_PPC32 ( IRBB* irbb,
- Bool put_IP,
- Bool (*resteerOkFn) ( Addr64 ),
- UChar* guest_code,
- Long delta,
- Addr64 guest_IP,
- VexArchInfo* archinfo,
- Bool host_bigendian );
+DisResult disInstr_PPC ( IRBB* irbb,
+ Bool put_IP,
+ Bool (*resteerOkFn) ( Addr64 ),
+ UChar* guest_code,
+ Long delta,
+ Addr64 guest_IP,
+ VexArchInfo* archinfo,
+ Bool host_bigendian );
/* Used by the optimiser to specialise calls to helpers. */
extern
/* FP Rounding mode - different encoding to IR */
typedef
enum {
- PPC32rm_NEAREST = 0,
- PPC32rm_NegINF = 1,
- PPC32rm_PosINF = 2,
- PPC32rm_ZERO = 3
- } PPC32RoundingMode;
+ PPCrm_NEAREST = 0,
+ PPCrm_NegINF = 1,
+ PPCrm_PosINF = 2,
+ PPCrm_ZERO = 3
+ } PPCRoundingMode;
/* Floating point comparison values - different encoding to IR */
typedef
enum {
- PPC32cr_LT = 0x8,
- PPC32cr_GT = 0x4,
- PPC32cr_EQ = 0x2,
- PPC32cr_UN = 0x1
+ PPCcr_LT = 0x8,
+ PPCcr_GT = 0x4,
+ PPCcr_EQ = 0x2,
+ PPCcr_UN = 0x1
}
- PPC32CmpF64Result;
+ PPCCmpF64Result;
/*
Enumeration for xer_ca/ov calculation helper functions
*/
enum {
- /* 0 */ PPC32G_FLAG_OP_ADD=0, // addc[o], addic
- /* 1 */ PPC32G_FLAG_OP_ADDE, // adde[o], addme[o], addze[o]
- /* 2 */ PPC32G_FLAG_OP_DIVW, // divwo
- /* 3 */ PPC32G_FLAG_OP_DIVWU, // divwuo
- /* 4 */ PPC32G_FLAG_OP_MULLW, // mullwo
- /* 5 */ PPC32G_FLAG_OP_NEG, // nego
- /* 6 */ PPC32G_FLAG_OP_SUBF, // subfo
- /* 7 */ PPC32G_FLAG_OP_SUBFC, // subfc[o]
- /* 8 */ PPC32G_FLAG_OP_SUBFE, // subfe[o], subfme[o], subfze[o]
- /* 9 */ PPC32G_FLAG_OP_SUBFI, // subfic
- /* 10 */ PPC32G_FLAG_OP_SRAW, // sraw
- /* 11 */ PPC32G_FLAG_OP_SRAWI, // srawi
- /* 12 */ PPC32G_FLAG_OP_SRAD, // srad
- /* 13 */ PPC32G_FLAG_OP_SRADI, // sradi
- PPC32G_FLAG_OP_NUMBER
+ /* 0 */ PPCG_FLAG_OP_ADD=0, // addc[o], addic
+ /* 1 */ PPCG_FLAG_OP_ADDE, // adde[o], addme[o], addze[o]
+ /* 2 */ PPCG_FLAG_OP_DIVW, // divwo
+ /* 3 */ PPCG_FLAG_OP_DIVWU, // divwuo
+ /* 4 */ PPCG_FLAG_OP_MULLW, // mullwo
+ /* 5 */ PPCG_FLAG_OP_NEG, // nego
+ /* 6 */ PPCG_FLAG_OP_SUBF, // subfo
+ /* 7 */ PPCG_FLAG_OP_SUBFC, // subfc[o]
+ /* 8 */ PPCG_FLAG_OP_SUBFE, // subfe[o], subfme[o], subfze[o]
+ /* 9 */ PPCG_FLAG_OP_SUBFI, // subfic
+ /* 10 */ PPCG_FLAG_OP_SRAW, // sraw
+ /* 11 */ PPCG_FLAG_OP_SRAWI, // srawi
+ /* 12 */ PPCG_FLAG_OP_SRAD, // srad
+ /* 13 */ PPCG_FLAG_OP_SRADI, // sradi
+ PPCG_FLAG_OP_NUMBER
};
/*---------------------------------------------------------*/
-/*--- ppc32 guest helpers ---*/
+/*--- ppc guest helpers ---*/
/*---------------------------------------------------------*/
/* --- CLEAN HELPERS --- */
/* --- DIRTY HELPERS --- */
-extern ULong ppc32g_dirtyhelper_MFTB ( void );
+extern ULong ppcg_dirtyhelper_MFTB ( void );
extern void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst,
UInt vD_idx, UInt sh,
UInt shift_right );
-#endif /* ndef __LIBVEX_GUEST_PPC32_DEFS_H */
+extern void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst,
+ UInt vD_idx, UInt sh,
+ UInt shift_right );
+
+#endif /* ndef __LIBVEX_GUEST_PPC_DEFS_H */
/*---------------------------------------------------------------*/
/*--- end guest-ppc32/gdefs.h ---*/
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack. On non-ppc32 platforms, return 1. */
/* Reads a complete, consistent 64-bit TB value. */
-ULong ppc32g_dirtyhelper_MFTB ( void )
+ULong ppcg_dirtyhelper_MFTB ( void )
{
# if defined(__powerpc__)
ULong res;
(*pU128_dst)[3] = (*pU128_src)[3];
}
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest state, writes guest mem) */
+void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst,
+ UInt vD_off, UInt sh, UInt shift_right )
+{
+ static
+ UChar ref[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F };
+ U128* pU128_src;
+ U128* pU128_dst;
+
+ vassert( vD_off <= sizeof(VexGuestPPC64State)-8 );
+ vassert( sh <= 15 );
+ vassert( shift_right <= 1 );
+ if (shift_right)
+ sh = 16-sh;
+ /* else shift left */
+
+ pU128_src = (U128*)&ref[sh];
+ pU128_dst = (U128*)( ((UChar*)gst) + vD_off );
+
+ (*pU128_dst)[0] = (*pU128_src)[0];
+ (*pU128_dst)[1] = (*pU128_src)[1];
+ (*pU128_dst)[2] = (*pU128_src)[2];
+ (*pU128_dst)[3] = (*pU128_src)[3];
+}
+
/* Helper-function specialiser. */
vex_state->guest_CR7_321 = 0;
vex_state->guest_CR7_0 = 0;
- vex_state->guest_FPROUND = (UInt)PPC32rm_NEAREST;
+ vex_state->guest_FPROUND = (UInt)PPCrm_NEAREST;
vex_state->guest_VRSAVE = 0;
vex_state->guest_CR7_321 = 0;
vex_state->guest_CR7_0 = 0;
- vex_state->guest_FPROUND = (UInt)PPC32rm_NEAREST;
+ vex_state->guest_FPROUND = (UInt)PPCrm_NEAREST;
vex_state->guest_VRSAVE = 0;
*/
-/* Translates PPC32 & PPC64 code to IR. */
+/* Translates PPC32/64 code to IR. */
/* References
/*------------------------------------------------------------*/
/* These are set at the start of the translation of an insn, right
- down in disInstr_PPC32, so that we don't have to pass them around
+ down in disInstr_PPC, so that we don't have to pass them around
endlessly. They are all constant during the translation of any
given insn. */
static IRBB* irbb;
/* Is our guest binary 32 or 64bit? Set at each call to
- disInstr_PPC32 below. */
+ disInstr_PPC below. */
static Bool mode64 = False;
/*--- Debugging output ---*/
/*------------------------------------------------------------*/
-#define PPC_TOIR_DEBUG 0
-
#define DIP(format, args...) \
if (vex_traceflags & VEX_TRACE_FE) \
vex_printf(format, ## args)
if (vex_traceflags & VEX_TRACE_FE) \
vex_sprintf(buf, format, ## args)
-#if PPC_TOIR_DEBUG
-static void vex_printf_binary( ULong x, UInt len, Bool spaces )
-{
- UInt i;
- vassert(len > 0 && len <= 64);
-
- for (i=len; i>0; i--) {
- vex_printf("%d", ((x & (((ULong)1)<<(len-1))) != 0) );
- x = x << 1;
- if (((i-1)%4)==0 && (i > 1) && spaces) {
- vex_printf(" ");
- }
- }
-}
-#endif
-
/*------------------------------------------------------------*/
/*--- Offsets of various parts of the ppc32/64 guest state ---*/
/*------------------------------------------------------------*/
-// 64-bit offsets
-#define OFFB64_CIA offsetof(VexGuestPPC64State,guest_CIA)
-#define OFFB64_LR offsetof(VexGuestPPC64State,guest_LR)
-#define OFFB64_CTR offsetof(VexGuestPPC64State,guest_CTR)
-
-#define OFFB64_XER_SO offsetof(VexGuestPPC64State,guest_XER_SO)
-#define OFFB64_XER_OV offsetof(VexGuestPPC64State,guest_XER_OV)
-#define OFFB64_XER_CA offsetof(VexGuestPPC64State,guest_XER_CA)
-#define OFFB64_XER_BC offsetof(VexGuestPPC64State,guest_XER_BC)
-
-#define OFFB64_FPROUND offsetof(VexGuestPPC64State,guest_FPROUND)
-
-#define OFFB64_VRSAVE offsetof(VexGuestPPC64State,guest_VRSAVE)
-#define OFFB64_VSCR offsetof(VexGuestPPC64State,guest_VSCR)
-
-#define OFFB64_EMWARN offsetof(VexGuestPPC64State,guest_EMWARN)
-
-#define OFFB64_TISTART offsetof(VexGuestPPC64State,guest_TISTART)
-#define OFFB64_TILEN offsetof(VexGuestPPC64State,guest_TILEN)
+#define offsetofPPCGuestState(_x) \
+ (mode64 ? offsetof(VexGuestPPC64State, _x) : \
+ offsetof(VexGuestPPC32State, _x))
-#define OFFB64_RESVN offsetof(VexGuestPPC64State,guest_RESVN)
+#define OFFB_CIA offsetofPPCGuestState(guest_CIA)
+#define OFFB_LR offsetofPPCGuestState(guest_LR)
+#define OFFB_CTR offsetofPPCGuestState(guest_CTR)
+#define OFFB_XER_SO offsetofPPCGuestState(guest_XER_SO)
+#define OFFB_XER_OV offsetofPPCGuestState(guest_XER_OV)
+#define OFFB_XER_CA offsetofPPCGuestState(guest_XER_CA)
+#define OFFB_XER_BC offsetofPPCGuestState(guest_XER_BC)
+#define OFFB_FPROUND offsetofPPCGuestState(guest_FPROUND)
+#define OFFB_VRSAVE offsetofPPCGuestState(guest_VRSAVE)
+#define OFFB_VSCR offsetofPPCGuestState(guest_VSCR)
+#define OFFB_EMWARN offsetofPPCGuestState(guest_EMWARN)
+#define OFFB_TISTART offsetofPPCGuestState(guest_TISTART)
+#define OFFB_TILEN offsetofPPCGuestState(guest_TILEN)
+#define OFFB_RESVN offsetofPPCGuestState(guest_RESVN)
-// 32-bit offsets
-#define OFFB32_CIA offsetof(VexGuestPPC32State,guest_CIA)
-#define OFFB32_LR offsetof(VexGuestPPC32State,guest_LR)
-#define OFFB32_CTR offsetof(VexGuestPPC32State,guest_CTR)
-
-#define OFFB32_XER_SO offsetof(VexGuestPPC32State,guest_XER_SO)
-#define OFFB32_XER_OV offsetof(VexGuestPPC32State,guest_XER_OV)
-#define OFFB32_XER_CA offsetof(VexGuestPPC32State,guest_XER_CA)
-#define OFFB32_XER_BC offsetof(VexGuestPPC32State,guest_XER_BC)
-
-#define OFFB32_FPROUND offsetof(VexGuestPPC32State,guest_FPROUND)
-
-#define OFFB32_VRSAVE offsetof(VexGuestPPC32State,guest_VRSAVE)
-#define OFFB32_VSCR offsetof(VexGuestPPC32State,guest_VSCR)
-
-#define OFFB32_EMWARN offsetof(VexGuestPPC32State,guest_EMWARN)
-
-#define OFFB32_TISTART offsetof(VexGuestPPC32State,guest_TISTART)
-#define OFFB32_TILEN offsetof(VexGuestPPC32State,guest_TILEN)
-
-#define OFFB32_RESVN offsetof(VexGuestPPC32State,guest_RESVN)
/*------------------------------------------------------------*/
return mask;
}
+/* ditto for 64bit mask */
static ULong MASK64( UInt begin, UInt end )
{
vassert(begin < 64);
return (UInt)((((Int)x) << 16) >> 16);
}
-#if 0
-static UInt extend_s_26to32 ( UInt x )
-{
- return (UInt)((((Int)x) << 6) >> 6);
-}
-#endif
-
static ULong extend_s_16to64 ( UInt x )
{
return (ULong)((((Long)x) << 48) >> 48);
{
vassert(typeOfIRExpr(irbb->tyenv, arg1) == Ity_I1);
vassert(typeOfIRExpr(irbb->tyenv, arg2) == Ity_I1);
- return
- unop(Iop_32to1, binop(Iop_Or32, unop(Iop_1Uto32, arg1),
- unop(Iop_1Uto32, arg2)));
+ return unop(Iop_32to1, binop(Iop_Or32, unop(Iop_1Uto32, arg1),
+ unop(Iop_1Uto32, arg2)));
}
static IRExpr* mkAND1 ( IRExpr* arg1, IRExpr* arg2 )
{
vassert(typeOfIRExpr(irbb->tyenv, arg1) == Ity_I1);
vassert(typeOfIRExpr(irbb->tyenv, arg2) == Ity_I1);
- return
- unop(Iop_32to1, binop(Iop_And32, unop(Iop_1Uto32, arg1),
- unop(Iop_1Uto32, arg2)));
+ return unop(Iop_32to1, binop(Iop_And32, unop(Iop_1Uto32, arg1),
+ unop(Iop_1Uto32, arg2)));
}
/* expand V128_8Ux16 to 2x V128_16Ux8's */
-static void expand8Ux16( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
+static void expand8Ux16( IRExpr* vIn,
+ /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
{
IRTemp ones8x16 = newTemp(Ity_V128);
}
/* expand V128_8Sx16 to 2x V128_16Sx8's */
-static void expand8Sx16( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
+static void expand8Sx16( IRExpr* vIn,
+ /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
{
IRTemp ones8x16 = newTemp(Ity_V128);
}
/* expand V128_16Uto8 to 2x V128_32Ux4's */
-static void expand16Ux8( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
+static void expand16Ux8( IRExpr* vIn,
+ /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
{
IRTemp ones16x8 = newTemp(Ity_V128);
}
/* expand V128_16Sto8 to 2x V128_32Sx4's */
-static void expand16Sx8( IRExpr* vIn, /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
+static void expand16Sx8( IRExpr* vIn,
+ /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
{
IRTemp ones16x8 = newTemp(Ity_V128);
return adj + op8;
}
-/* Make sure we get valid 32 and 64bit addresses
- CAB: do we ever get -ve addresses/offsets?
-*/
+/* Make sure we get valid 32 and 64bit addresses */
static Addr64 mkSzAddr ( IRType ty, Addr64 addr )
{
vassert(ty == Ity_I32 || ty == Ity_I64);
vassert(archreg < 32);
// jrs: probably not necessary; only matters if we reference sub-parts
- // of the ppc32 registers, but that isn't the case
+ // of the ppc registers, but that isn't the case
// later: this might affect Altivec though?
vassert(host_is_bigendian);
- if (mode64) {
- switch (archreg) {
- case 0: return offsetof(VexGuestPPC64State, guest_GPR0);
- case 1: return offsetof(VexGuestPPC64State, guest_GPR1);
- case 2: return offsetof(VexGuestPPC64State, guest_GPR2);
- case 3: return offsetof(VexGuestPPC64State, guest_GPR3);
- case 4: return offsetof(VexGuestPPC64State, guest_GPR4);
- case 5: return offsetof(VexGuestPPC64State, guest_GPR5);
- case 6: return offsetof(VexGuestPPC64State, guest_GPR6);
- case 7: return offsetof(VexGuestPPC64State, guest_GPR7);
- case 8: return offsetof(VexGuestPPC64State, guest_GPR8);
- case 9: return offsetof(VexGuestPPC64State, guest_GPR9);
- case 10: return offsetof(VexGuestPPC64State, guest_GPR10);
- case 11: return offsetof(VexGuestPPC64State, guest_GPR11);
- case 12: return offsetof(VexGuestPPC64State, guest_GPR12);
- case 13: return offsetof(VexGuestPPC64State, guest_GPR13);
- case 14: return offsetof(VexGuestPPC64State, guest_GPR14);
- case 15: return offsetof(VexGuestPPC64State, guest_GPR15);
- case 16: return offsetof(VexGuestPPC64State, guest_GPR16);
- case 17: return offsetof(VexGuestPPC64State, guest_GPR17);
- case 18: return offsetof(VexGuestPPC64State, guest_GPR18);
- case 19: return offsetof(VexGuestPPC64State, guest_GPR19);
- case 20: return offsetof(VexGuestPPC64State, guest_GPR20);
- case 21: return offsetof(VexGuestPPC64State, guest_GPR21);
- case 22: return offsetof(VexGuestPPC64State, guest_GPR22);
- case 23: return offsetof(VexGuestPPC64State, guest_GPR23);
- case 24: return offsetof(VexGuestPPC64State, guest_GPR24);
- case 25: return offsetof(VexGuestPPC64State, guest_GPR25);
- case 26: return offsetof(VexGuestPPC64State, guest_GPR26);
- case 27: return offsetof(VexGuestPPC64State, guest_GPR27);
- case 28: return offsetof(VexGuestPPC64State, guest_GPR28);
- case 29: return offsetof(VexGuestPPC64State, guest_GPR29);
- case 30: return offsetof(VexGuestPPC64State, guest_GPR30);
- case 31: return offsetof(VexGuestPPC64State, guest_GPR31);
- default: break;
- }
- } else {
- switch (archreg) {
- case 0: return offsetof(VexGuestPPC32State, guest_GPR0);
- case 1: return offsetof(VexGuestPPC32State, guest_GPR1);
- case 2: return offsetof(VexGuestPPC32State, guest_GPR2);
- case 3: return offsetof(VexGuestPPC32State, guest_GPR3);
- case 4: return offsetof(VexGuestPPC32State, guest_GPR4);
- case 5: return offsetof(VexGuestPPC32State, guest_GPR5);
- case 6: return offsetof(VexGuestPPC32State, guest_GPR6);
- case 7: return offsetof(VexGuestPPC32State, guest_GPR7);
- case 8: return offsetof(VexGuestPPC32State, guest_GPR8);
- case 9: return offsetof(VexGuestPPC32State, guest_GPR9);
- case 10: return offsetof(VexGuestPPC32State, guest_GPR10);
- case 11: return offsetof(VexGuestPPC32State, guest_GPR11);
- case 12: return offsetof(VexGuestPPC32State, guest_GPR12);
- case 13: return offsetof(VexGuestPPC32State, guest_GPR13);
- case 14: return offsetof(VexGuestPPC32State, guest_GPR14);
- case 15: return offsetof(VexGuestPPC32State, guest_GPR15);
- case 16: return offsetof(VexGuestPPC32State, guest_GPR16);
- case 17: return offsetof(VexGuestPPC32State, guest_GPR17);
- case 18: return offsetof(VexGuestPPC32State, guest_GPR18);
- case 19: return offsetof(VexGuestPPC32State, guest_GPR19);
- case 20: return offsetof(VexGuestPPC32State, guest_GPR20);
- case 21: return offsetof(VexGuestPPC32State, guest_GPR21);
- case 22: return offsetof(VexGuestPPC32State, guest_GPR22);
- case 23: return offsetof(VexGuestPPC32State, guest_GPR23);
- case 24: return offsetof(VexGuestPPC32State, guest_GPR24);
- case 25: return offsetof(VexGuestPPC32State, guest_GPR25);
- case 26: return offsetof(VexGuestPPC32State, guest_GPR26);
- case 27: return offsetof(VexGuestPPC32State, guest_GPR27);
- case 28: return offsetof(VexGuestPPC32State, guest_GPR28);
- case 29: return offsetof(VexGuestPPC32State, guest_GPR29);
- case 30: return offsetof(VexGuestPPC32State, guest_GPR30);
- case 31: return offsetof(VexGuestPPC32State, guest_GPR31);
- default: break;
- }
- }
- vpanic("integerGuestRegOffset(ppc32,be)"); /*notreached*/
+ switch (archreg) {
+ case 0: return offsetofPPCGuestState(guest_GPR0);
+ case 1: return offsetofPPCGuestState(guest_GPR1);
+ case 2: return offsetofPPCGuestState(guest_GPR2);
+ case 3: return offsetofPPCGuestState(guest_GPR3);
+ case 4: return offsetofPPCGuestState(guest_GPR4);
+ case 5: return offsetofPPCGuestState(guest_GPR5);
+ case 6: return offsetofPPCGuestState(guest_GPR6);
+ case 7: return offsetofPPCGuestState(guest_GPR7);
+ case 8: return offsetofPPCGuestState(guest_GPR8);
+ case 9: return offsetofPPCGuestState(guest_GPR9);
+ case 10: return offsetofPPCGuestState(guest_GPR10);
+ case 11: return offsetofPPCGuestState(guest_GPR11);
+ case 12: return offsetofPPCGuestState(guest_GPR12);
+ case 13: return offsetofPPCGuestState(guest_GPR13);
+ case 14: return offsetofPPCGuestState(guest_GPR14);
+ case 15: return offsetofPPCGuestState(guest_GPR15);
+ case 16: return offsetofPPCGuestState(guest_GPR16);
+ case 17: return offsetofPPCGuestState(guest_GPR17);
+ case 18: return offsetofPPCGuestState(guest_GPR18);
+ case 19: return offsetofPPCGuestState(guest_GPR19);
+ case 20: return offsetofPPCGuestState(guest_GPR20);
+ case 21: return offsetofPPCGuestState(guest_GPR21);
+ case 22: return offsetofPPCGuestState(guest_GPR22);
+ case 23: return offsetofPPCGuestState(guest_GPR23);
+ case 24: return offsetofPPCGuestState(guest_GPR24);
+ case 25: return offsetofPPCGuestState(guest_GPR25);
+ case 26: return offsetofPPCGuestState(guest_GPR26);
+ case 27: return offsetofPPCGuestState(guest_GPR27);
+ case 28: return offsetofPPCGuestState(guest_GPR28);
+ case 29: return offsetofPPCGuestState(guest_GPR29);
+ case 30: return offsetofPPCGuestState(guest_GPR30);
+ case 31: return offsetofPPCGuestState(guest_GPR31);
+ default: break;
+ }
+ vpanic("integerGuestRegOffset(ppc,be)"); /*notreached*/
}
static IRExpr* getIReg ( UInt archreg )
{
vassert(archreg < 32);
- if (mode64) {
- switch (archreg) {
- case 0: return offsetof(VexGuestPPC64State, guest_FPR0);
- case 1: return offsetof(VexGuestPPC64State, guest_FPR1);
- case 2: return offsetof(VexGuestPPC64State, guest_FPR2);
- case 3: return offsetof(VexGuestPPC64State, guest_FPR3);
- case 4: return offsetof(VexGuestPPC64State, guest_FPR4);
- case 5: return offsetof(VexGuestPPC64State, guest_FPR5);
- case 6: return offsetof(VexGuestPPC64State, guest_FPR6);
- case 7: return offsetof(VexGuestPPC64State, guest_FPR7);
- case 8: return offsetof(VexGuestPPC64State, guest_FPR8);
- case 9: return offsetof(VexGuestPPC64State, guest_FPR9);
- case 10: return offsetof(VexGuestPPC64State, guest_FPR10);
- case 11: return offsetof(VexGuestPPC64State, guest_FPR11);
- case 12: return offsetof(VexGuestPPC64State, guest_FPR12);
- case 13: return offsetof(VexGuestPPC64State, guest_FPR13);
- case 14: return offsetof(VexGuestPPC64State, guest_FPR14);
- case 15: return offsetof(VexGuestPPC64State, guest_FPR15);
- case 16: return offsetof(VexGuestPPC64State, guest_FPR16);
- case 17: return offsetof(VexGuestPPC64State, guest_FPR17);
- case 18: return offsetof(VexGuestPPC64State, guest_FPR18);
- case 19: return offsetof(VexGuestPPC64State, guest_FPR19);
- case 20: return offsetof(VexGuestPPC64State, guest_FPR20);
- case 21: return offsetof(VexGuestPPC64State, guest_FPR21);
- case 22: return offsetof(VexGuestPPC64State, guest_FPR22);
- case 23: return offsetof(VexGuestPPC64State, guest_FPR23);
- case 24: return offsetof(VexGuestPPC64State, guest_FPR24);
- case 25: return offsetof(VexGuestPPC64State, guest_FPR25);
- case 26: return offsetof(VexGuestPPC64State, guest_FPR26);
- case 27: return offsetof(VexGuestPPC64State, guest_FPR27);
- case 28: return offsetof(VexGuestPPC64State, guest_FPR28);
- case 29: return offsetof(VexGuestPPC64State, guest_FPR29);
- case 30: return offsetof(VexGuestPPC64State, guest_FPR30);
- case 31: return offsetof(VexGuestPPC64State, guest_FPR31);
- default: break;
- }
- } else {
- switch (archreg) {
- case 0: return offsetof(VexGuestPPC32State, guest_FPR0);
- case 1: return offsetof(VexGuestPPC32State, guest_FPR1);
- case 2: return offsetof(VexGuestPPC32State, guest_FPR2);
- case 3: return offsetof(VexGuestPPC32State, guest_FPR3);
- case 4: return offsetof(VexGuestPPC32State, guest_FPR4);
- case 5: return offsetof(VexGuestPPC32State, guest_FPR5);
- case 6: return offsetof(VexGuestPPC32State, guest_FPR6);
- case 7: return offsetof(VexGuestPPC32State, guest_FPR7);
- case 8: return offsetof(VexGuestPPC32State, guest_FPR8);
- case 9: return offsetof(VexGuestPPC32State, guest_FPR9);
- case 10: return offsetof(VexGuestPPC32State, guest_FPR10);
- case 11: return offsetof(VexGuestPPC32State, guest_FPR11);
- case 12: return offsetof(VexGuestPPC32State, guest_FPR12);
- case 13: return offsetof(VexGuestPPC32State, guest_FPR13);
- case 14: return offsetof(VexGuestPPC32State, guest_FPR14);
- case 15: return offsetof(VexGuestPPC32State, guest_FPR15);
- case 16: return offsetof(VexGuestPPC32State, guest_FPR16);
- case 17: return offsetof(VexGuestPPC32State, guest_FPR17);
- case 18: return offsetof(VexGuestPPC32State, guest_FPR18);
- case 19: return offsetof(VexGuestPPC32State, guest_FPR19);
- case 20: return offsetof(VexGuestPPC32State, guest_FPR20);
- case 21: return offsetof(VexGuestPPC32State, guest_FPR21);
- case 22: return offsetof(VexGuestPPC32State, guest_FPR22);
- case 23: return offsetof(VexGuestPPC32State, guest_FPR23);
- case 24: return offsetof(VexGuestPPC32State, guest_FPR24);
- case 25: return offsetof(VexGuestPPC32State, guest_FPR25);
- case 26: return offsetof(VexGuestPPC32State, guest_FPR26);
- case 27: return offsetof(VexGuestPPC32State, guest_FPR27);
- case 28: return offsetof(VexGuestPPC32State, guest_FPR28);
- case 29: return offsetof(VexGuestPPC32State, guest_FPR29);
- case 30: return offsetof(VexGuestPPC32State, guest_FPR30);
- case 31: return offsetof(VexGuestPPC32State, guest_FPR31);
- default: break;
- }
- }
- vpanic("floatGuestRegOffset(ppc32)"); /*notreached*/
+ switch (archreg) {
+ case 0: return offsetofPPCGuestState(guest_FPR0);
+ case 1: return offsetofPPCGuestState(guest_FPR1);
+ case 2: return offsetofPPCGuestState(guest_FPR2);
+ case 3: return offsetofPPCGuestState(guest_FPR3);
+ case 4: return offsetofPPCGuestState(guest_FPR4);
+ case 5: return offsetofPPCGuestState(guest_FPR5);
+ case 6: return offsetofPPCGuestState(guest_FPR6);
+ case 7: return offsetofPPCGuestState(guest_FPR7);
+ case 8: return offsetofPPCGuestState(guest_FPR8);
+ case 9: return offsetofPPCGuestState(guest_FPR9);
+ case 10: return offsetofPPCGuestState(guest_FPR10);
+ case 11: return offsetofPPCGuestState(guest_FPR11);
+ case 12: return offsetofPPCGuestState(guest_FPR12);
+ case 13: return offsetofPPCGuestState(guest_FPR13);
+ case 14: return offsetofPPCGuestState(guest_FPR14);
+ case 15: return offsetofPPCGuestState(guest_FPR15);
+ case 16: return offsetofPPCGuestState(guest_FPR16);
+ case 17: return offsetofPPCGuestState(guest_FPR17);
+ case 18: return offsetofPPCGuestState(guest_FPR18);
+ case 19: return offsetofPPCGuestState(guest_FPR19);
+ case 20: return offsetofPPCGuestState(guest_FPR20);
+ case 21: return offsetofPPCGuestState(guest_FPR21);
+ case 22: return offsetofPPCGuestState(guest_FPR22);
+ case 23: return offsetofPPCGuestState(guest_FPR23);
+ case 24: return offsetofPPCGuestState(guest_FPR24);
+ case 25: return offsetofPPCGuestState(guest_FPR25);
+ case 26: return offsetofPPCGuestState(guest_FPR26);
+ case 27: return offsetofPPCGuestState(guest_FPR27);
+ case 28: return offsetofPPCGuestState(guest_FPR28);
+ case 29: return offsetofPPCGuestState(guest_FPR29);
+ case 30: return offsetofPPCGuestState(guest_FPR30);
+ case 31: return offsetofPPCGuestState(guest_FPR31);
+ default: break;
+ }
+ vpanic("floatGuestRegOffset(ppc)"); /*notreached*/
}
static IRExpr* getFReg ( UInt archreg )
{
vassert(archreg < 32);
- if (mode64) {
- switch (archreg) {
- case 0: return offsetof(VexGuestPPC64State, guest_VR0);
- case 1: return offsetof(VexGuestPPC64State, guest_VR1);
- case 2: return offsetof(VexGuestPPC64State, guest_VR2);
- case 3: return offsetof(VexGuestPPC64State, guest_VR3);
- case 4: return offsetof(VexGuestPPC64State, guest_VR4);
- case 5: return offsetof(VexGuestPPC64State, guest_VR5);
- case 6: return offsetof(VexGuestPPC64State, guest_VR6);
- case 7: return offsetof(VexGuestPPC64State, guest_VR7);
- case 8: return offsetof(VexGuestPPC64State, guest_VR8);
- case 9: return offsetof(VexGuestPPC64State, guest_VR9);
- case 10: return offsetof(VexGuestPPC64State, guest_VR10);
- case 11: return offsetof(VexGuestPPC64State, guest_VR11);
- case 12: return offsetof(VexGuestPPC64State, guest_VR12);
- case 13: return offsetof(VexGuestPPC64State, guest_VR13);
- case 14: return offsetof(VexGuestPPC64State, guest_VR14);
- case 15: return offsetof(VexGuestPPC64State, guest_VR15);
- case 16: return offsetof(VexGuestPPC64State, guest_VR16);
- case 17: return offsetof(VexGuestPPC64State, guest_VR17);
- case 18: return offsetof(VexGuestPPC64State, guest_VR18);
- case 19: return offsetof(VexGuestPPC64State, guest_VR19);
- case 20: return offsetof(VexGuestPPC64State, guest_VR20);
- case 21: return offsetof(VexGuestPPC64State, guest_VR21);
- case 22: return offsetof(VexGuestPPC64State, guest_VR22);
- case 23: return offsetof(VexGuestPPC64State, guest_VR23);
- case 24: return offsetof(VexGuestPPC64State, guest_VR24);
- case 25: return offsetof(VexGuestPPC64State, guest_VR25);
- case 26: return offsetof(VexGuestPPC64State, guest_VR26);
- case 27: return offsetof(VexGuestPPC64State, guest_VR27);
- case 28: return offsetof(VexGuestPPC64State, guest_VR28);
- case 29: return offsetof(VexGuestPPC64State, guest_VR29);
- case 30: return offsetof(VexGuestPPC64State, guest_VR30);
- case 31: return offsetof(VexGuestPPC64State, guest_VR31);
- default: break;
- }
- } else {
- switch (archreg) {
- case 0: return offsetof(VexGuestPPC32State, guest_VR0);
- case 1: return offsetof(VexGuestPPC32State, guest_VR1);
- case 2: return offsetof(VexGuestPPC32State, guest_VR2);
- case 3: return offsetof(VexGuestPPC32State, guest_VR3);
- case 4: return offsetof(VexGuestPPC32State, guest_VR4);
- case 5: return offsetof(VexGuestPPC32State, guest_VR5);
- case 6: return offsetof(VexGuestPPC32State, guest_VR6);
- case 7: return offsetof(VexGuestPPC32State, guest_VR7);
- case 8: return offsetof(VexGuestPPC32State, guest_VR8);
- case 9: return offsetof(VexGuestPPC32State, guest_VR9);
- case 10: return offsetof(VexGuestPPC32State, guest_VR10);
- case 11: return offsetof(VexGuestPPC32State, guest_VR11);
- case 12: return offsetof(VexGuestPPC32State, guest_VR12);
- case 13: return offsetof(VexGuestPPC32State, guest_VR13);
- case 14: return offsetof(VexGuestPPC32State, guest_VR14);
- case 15: return offsetof(VexGuestPPC32State, guest_VR15);
- case 16: return offsetof(VexGuestPPC32State, guest_VR16);
- case 17: return offsetof(VexGuestPPC32State, guest_VR17);
- case 18: return offsetof(VexGuestPPC32State, guest_VR18);
- case 19: return offsetof(VexGuestPPC32State, guest_VR19);
- case 20: return offsetof(VexGuestPPC32State, guest_VR20);
- case 21: return offsetof(VexGuestPPC32State, guest_VR21);
- case 22: return offsetof(VexGuestPPC32State, guest_VR22);
- case 23: return offsetof(VexGuestPPC32State, guest_VR23);
- case 24: return offsetof(VexGuestPPC32State, guest_VR24);
- case 25: return offsetof(VexGuestPPC32State, guest_VR25);
- case 26: return offsetof(VexGuestPPC32State, guest_VR26);
- case 27: return offsetof(VexGuestPPC32State, guest_VR27);
- case 28: return offsetof(VexGuestPPC32State, guest_VR28);
- case 29: return offsetof(VexGuestPPC32State, guest_VR29);
- case 30: return offsetof(VexGuestPPC32State, guest_VR30);
- case 31: return offsetof(VexGuestPPC32State, guest_VR31);
- default: break;
- }
- }
- vpanic("vextorGuestRegOffset(ppc32)"); /*notreached*/
+ switch (archreg) {
+ case 0: return offsetofPPCGuestState(guest_VR0);
+ case 1: return offsetofPPCGuestState(guest_VR1);
+ case 2: return offsetofPPCGuestState(guest_VR2);
+ case 3: return offsetofPPCGuestState(guest_VR3);
+ case 4: return offsetofPPCGuestState(guest_VR4);
+ case 5: return offsetofPPCGuestState(guest_VR5);
+ case 6: return offsetofPPCGuestState(guest_VR6);
+ case 7: return offsetofPPCGuestState(guest_VR7);
+ case 8: return offsetofPPCGuestState(guest_VR8);
+ case 9: return offsetofPPCGuestState(guest_VR9);
+ case 10: return offsetofPPCGuestState(guest_VR10);
+ case 11: return offsetofPPCGuestState(guest_VR11);
+ case 12: return offsetofPPCGuestState(guest_VR12);
+ case 13: return offsetofPPCGuestState(guest_VR13);
+ case 14: return offsetofPPCGuestState(guest_VR14);
+ case 15: return offsetofPPCGuestState(guest_VR15);
+ case 16: return offsetofPPCGuestState(guest_VR16);
+ case 17: return offsetofPPCGuestState(guest_VR17);
+ case 18: return offsetofPPCGuestState(guest_VR18);
+ case 19: return offsetofPPCGuestState(guest_VR19);
+ case 20: return offsetofPPCGuestState(guest_VR20);
+ case 21: return offsetofPPCGuestState(guest_VR21);
+ case 22: return offsetofPPCGuestState(guest_VR22);
+ case 23: return offsetofPPCGuestState(guest_VR23);
+ case 24: return offsetofPPCGuestState(guest_VR24);
+ case 25: return offsetofPPCGuestState(guest_VR25);
+ case 26: return offsetofPPCGuestState(guest_VR26);
+ case 27: return offsetofPPCGuestState(guest_VR27);
+ case 28: return offsetofPPCGuestState(guest_VR28);
+ case 29: return offsetofPPCGuestState(guest_VR29);
+ case 30: return offsetofPPCGuestState(guest_VR30);
+ case 31: return offsetofPPCGuestState(guest_VR31);
+ default: break;
+ }
+ vpanic("vextorGuestRegOffset(ppc)"); /*notreached*/
}
static IRExpr* getVReg ( UInt archreg )
static Int guestCR321offset ( UInt cr )
{
- if (mode64) {
- switch (cr) {
- case 0: return offsetof(VexGuestPPC64State, guest_CR0_321 );
- case 1: return offsetof(VexGuestPPC64State, guest_CR1_321 );
- case 2: return offsetof(VexGuestPPC64State, guest_CR2_321 );
- case 3: return offsetof(VexGuestPPC64State, guest_CR3_321 );
- case 4: return offsetof(VexGuestPPC64State, guest_CR4_321 );
- case 5: return offsetof(VexGuestPPC64State, guest_CR5_321 );
- case 6: return offsetof(VexGuestPPC64State, guest_CR6_321 );
- case 7: return offsetof(VexGuestPPC64State, guest_CR7_321 );
- default: vpanic("guestCR321offset(ppc32)");
- }
- } else {
- switch (cr) {
- case 0: return offsetof(VexGuestPPC32State, guest_CR0_321 );
- case 1: return offsetof(VexGuestPPC32State, guest_CR1_321 );
- case 2: return offsetof(VexGuestPPC32State, guest_CR2_321 );
- case 3: return offsetof(VexGuestPPC32State, guest_CR3_321 );
- case 4: return offsetof(VexGuestPPC32State, guest_CR4_321 );
- case 5: return offsetof(VexGuestPPC32State, guest_CR5_321 );
- case 6: return offsetof(VexGuestPPC32State, guest_CR6_321 );
- case 7: return offsetof(VexGuestPPC32State, guest_CR7_321 );
- default: vpanic("guestCR321offset(ppc32)");
- }
+ switch (cr) {
+ case 0: return offsetofPPCGuestState(guest_CR0_321 );
+ case 1: return offsetofPPCGuestState(guest_CR1_321 );
+ case 2: return offsetofPPCGuestState(guest_CR2_321 );
+ case 3: return offsetofPPCGuestState(guest_CR3_321 );
+ case 4: return offsetofPPCGuestState(guest_CR4_321 );
+ case 5: return offsetofPPCGuestState(guest_CR5_321 );
+ case 6: return offsetofPPCGuestState(guest_CR6_321 );
+ case 7: return offsetofPPCGuestState(guest_CR7_321 );
+ default: vpanic("guestCR321offset(ppc)");
}
}
static Int guestCR0offset ( UInt cr )
{
- if (mode64) {
- switch (cr) {
- case 0: return offsetof(VexGuestPPC64State, guest_CR0_0 );
- case 1: return offsetof(VexGuestPPC64State, guest_CR1_0 );
- case 2: return offsetof(VexGuestPPC64State, guest_CR2_0 );
- case 3: return offsetof(VexGuestPPC64State, guest_CR3_0 );
- case 4: return offsetof(VexGuestPPC64State, guest_CR4_0 );
- case 5: return offsetof(VexGuestPPC64State, guest_CR5_0 );
- case 6: return offsetof(VexGuestPPC64State, guest_CR6_0 );
- case 7: return offsetof(VexGuestPPC64State, guest_CR7_0 );
- default: vpanic("guestCR3offset(ppc32)");
- }
- } else {
- switch (cr) {
- case 0: return offsetof(VexGuestPPC32State, guest_CR0_0 );
- case 1: return offsetof(VexGuestPPC32State, guest_CR1_0 );
- case 2: return offsetof(VexGuestPPC32State, guest_CR2_0 );
- case 3: return offsetof(VexGuestPPC32State, guest_CR3_0 );
- case 4: return offsetof(VexGuestPPC32State, guest_CR4_0 );
- case 5: return offsetof(VexGuestPPC32State, guest_CR5_0 );
- case 6: return offsetof(VexGuestPPC32State, guest_CR6_0 );
- case 7: return offsetof(VexGuestPPC32State, guest_CR7_0 );
- default: vpanic("guestCR3offset(ppc32)");
- }
+ switch (cr) {
+ case 0: return offsetofPPCGuestState(guest_CR0_0 );
+ case 1: return offsetofPPCGuestState(guest_CR1_0 );
+ case 2: return offsetofPPCGuestState(guest_CR2_0 );
+ case 3: return offsetofPPCGuestState(guest_CR3_0 );
+ case 4: return offsetofPPCGuestState(guest_CR4_0 );
+ case 5: return offsetofPPCGuestState(guest_CR5_0 );
+ case 6: return offsetofPPCGuestState(guest_CR6_0 );
+ case 7: return offsetofPPCGuestState(guest_CR7_0 );
+   default: vpanic("guestCR0offset(ppc)");
}
}
case 16: mask = ((Long)-1) << 4; break; // quad-word aligned
default:
vex_printf("addr_align: align = %u\n", align);
- vpanic("addr_align(ppc32)");
+ vpanic("addr_align(ppc)");
}
vassert(typeOfIRExpr(irbb->tyenv,addr) == ty);
zero and nonzero if the bit is 1. Write into *where the index
of where the bit will be. */
-static IRExpr* /* :: Ity_I32 */ getCRbit_anywhere ( UInt bi, Int* where )
+static
+IRExpr* /* :: Ity_I32 */ getCRbit_anywhere ( UInt bi, Int* where )
{
UInt n = bi / 4;
UInt off = bi % 4;
unop(Iop_V128to32,
binop(Iop_AndV128,
binop(Iop_AndV128, mkexpr(v0), mkexpr(v1)),
- binop(Iop_AndV128, mkexpr(v2), mkexpr(v3)))))) );
+ binop(Iop_AndV128, mkexpr(v2), mkexpr(v3)))
+ ))) );
putCR321( 6, binop(Iop_Or8,
binop(Iop_Shl8, mkexpr(rOnes), mkU8(3)),
binop(Iop_Shl8, mkexpr(rZeros), mkU8(1))) );
{
vassert(typeOfIRExpr(irbb->tyenv, e) == Ity_I8);
IRExpr* so = binop(Iop_And8, e, mkU8(1));
- stmt( IRStmt_Put( (mode64 ? OFFB64_XER_SO : OFFB32_XER_SO), so) );
+ stmt( IRStmt_Put( OFFB_XER_SO, so ) );
}
static void putXER_OV ( IRExpr* e )
{
vassert(typeOfIRExpr(irbb->tyenv, e) == Ity_I8);
IRExpr* ov = binop(Iop_And8, e, mkU8(1));
- stmt( IRStmt_Put( (mode64 ? OFFB64_XER_OV : OFFB32_XER_OV), ov) );
+ stmt( IRStmt_Put( OFFB_XER_OV, ov ) );
}
static void putXER_CA ( IRExpr* e )
{
vassert(typeOfIRExpr(irbb->tyenv, e) == Ity_I8);
IRExpr* ca = binop(Iop_And8, e, mkU8(1));
- stmt( IRStmt_Put( (mode64 ? OFFB64_XER_CA : OFFB32_XER_CA), ca) );
+ stmt( IRStmt_Put( OFFB_XER_CA, ca ) );
}
static void putXER_BC ( IRExpr* e )
{
vassert(typeOfIRExpr(irbb->tyenv, e) == Ity_I8);
IRExpr* bc = binop(Iop_And8, e, mkU8(0x7F));
- stmt( IRStmt_Put( (mode64 ? OFFB64_XER_BC : OFFB32_XER_BC), bc) );
+ stmt( IRStmt_Put( OFFB_XER_BC, bc ) );
}
static IRExpr* /* :: Ity_I8 */ getXER_SO ( void )
{
- return mode64 ? IRExpr_Get( OFFB64_XER_SO, Ity_I8 ) :
- IRExpr_Get( OFFB32_XER_SO, Ity_I8 );
+ return IRExpr_Get( OFFB_XER_SO, Ity_I8 );
}
static IRExpr* /* :: Ity_I32 */ getXER_SO32 ( void )
static IRExpr* /* :: Ity_I8 */ getXER_OV ( void )
{
- return mode64 ? IRExpr_Get( OFFB64_XER_OV, Ity_I8 ) :
- IRExpr_Get( OFFB32_XER_OV, Ity_I8 );
+ return IRExpr_Get( OFFB_XER_OV, Ity_I8 );
}
static IRExpr* /* :: Ity_I32 */ getXER_OV32 ( void )
static IRExpr* /* :: Ity_I32 */ getXER_CA32 ( void )
{
- IRExpr* ca = mode64 ? IRExpr_Get( OFFB64_XER_CA, Ity_I8 ) :
- IRExpr_Get( OFFB32_XER_CA, Ity_I8 );
+ IRExpr* ca = IRExpr_Get( OFFB_XER_CA, Ity_I8 );
return binop( Iop_And32, unop(Iop_8Uto32, ca ), mkU32(1) );
}
static IRExpr* /* :: Ity_I8 */ getXER_BC ( void )
{
- return mode64 ? IRExpr_Get( OFFB64_XER_BC, Ity_I8 ) :
- IRExpr_Get( OFFB32_XER_BC, Ity_I8 );
+ return IRExpr_Get( OFFB_XER_BC, Ity_I8 );
}
static IRExpr* /* :: Ity_I32 */ getXER_BC32 ( void )
{
- IRExpr* bc = mode64 ? IRExpr_Get( OFFB64_XER_BC, Ity_I8 ) :
- IRExpr_Get( OFFB32_XER_BC, Ity_I8 );
+ IRExpr* bc = IRExpr_Get( OFFB_XER_BC, Ity_I8 );
return binop( Iop_And32, unop(Iop_8Uto32, bc), mkU32(0x7F) );
}
{
IRTemp t64;
IRExpr* xer_ov;
- vassert(op < PPC32G_FLAG_OP_NUMBER);
+ vassert(op < PPCG_FLAG_OP_NUMBER);
vassert(typeOfIRExpr(irbb->tyenv,res) == Ity_I32);
vassert(typeOfIRExpr(irbb->tyenv,argL) == Ity_I32);
vassert(typeOfIRExpr(irbb->tyenv,argR) == Ity_I32);
unop(Iop_Not32, (_jj))
switch (op) {
- case /* 0 */ PPC32G_FLAG_OP_ADD:
- case /* 1 */ PPC32G_FLAG_OP_ADDE:
+ case /* 0 */ PPCG_FLAG_OP_ADD:
+ case /* 1 */ PPCG_FLAG_OP_ADDE:
/* (argL^argR^-1) & (argL^res) & (1<<31) ?1:0 */
// i.e. ((both_same_sign) & (sign_changed) & (sign_mask))
xer_ov
= binop(Iop_Shr32, xer_ov, mkU8(31) );
break;
- case /* 2 */ PPC32G_FLAG_OP_DIVW:
+ case /* 2 */ PPCG_FLAG_OP_DIVW:
/* (argL == INT32_MIN && argR == -1) || argR == 0 */
xer_ov
= mkOR1(
= unop(Iop_1Uto32, xer_ov);
break;
- case /* 3 */ PPC32G_FLAG_OP_DIVWU:
+ case /* 3 */ PPCG_FLAG_OP_DIVWU:
/* argR == 0 */
xer_ov
= unop(Iop_1Uto32, binop(Iop_CmpEQ32, argR, mkU32(0)));
break;
- case /* 4 */ PPC32G_FLAG_OP_MULLW:
+ case /* 4 */ PPCG_FLAG_OP_MULLW:
/* OV true if result can't be represented in 32 bits
i.e sHi != sign extension of sLo */
t64 = newTemp(Ity_I64);
= unop(Iop_1Uto32, xer_ov);
break;
- case /* 5 */ PPC32G_FLAG_OP_NEG:
+ case /* 5 */ PPCG_FLAG_OP_NEG:
/* argL == INT32_MIN */
xer_ov
= unop( Iop_1Uto32,
binop(Iop_CmpEQ32, argL, mkU32(INT32_MIN)) );
break;
- case /* 6 */ PPC32G_FLAG_OP_SUBF:
- case /* 7 */ PPC32G_FLAG_OP_SUBFC:
- case /* 8 */ PPC32G_FLAG_OP_SUBFE:
+ case /* 6 */ PPCG_FLAG_OP_SUBF:
+ case /* 7 */ PPCG_FLAG_OP_SUBFC:
+ case /* 8 */ PPCG_FLAG_OP_SUBFE:
/* ((~argL)^argR^-1) & ((~argL)^res) & (1<<31) ?1:0; */
xer_ov
= AND3( XOR3(NOT(argL),argR,mkU32(-1)),
default:
vex_printf("set_XER_OV: op = %u\n", op);
- vpanic("set_XER_OV(ppc32)");
+ vpanic("set_XER_OV(ppc)");
}
/* xer_ov MUST denote either 0 or 1, no other value allowed */
IRExpr* argL, IRExpr* argR )
{
IRExpr* xer_ov;
- vassert(op < PPC32G_FLAG_OP_NUMBER);
+ vassert(op < PPCG_FLAG_OP_NUMBER);
vassert(typeOfIRExpr(irbb->tyenv,res) == Ity_I64);
vassert(typeOfIRExpr(irbb->tyenv,argL) == Ity_I64);
vassert(typeOfIRExpr(irbb->tyenv,argR) == Ity_I64);
unop(Iop_Not64, (_jj))
switch (op) {
- case /* 0 */ PPC32G_FLAG_OP_ADD:
- case /* 1 */ PPC32G_FLAG_OP_ADDE:
+ case /* 0 */ PPCG_FLAG_OP_ADD:
+ case /* 1 */ PPCG_FLAG_OP_ADDE:
/* (argL^argR^-1) & (argL^res) & (1<<63) ? 1:0 */
// i.e. ((both_same_sign) & (sign_changed) & (sign_mask))
xer_ov
= unop(Iop_64to1, binop(Iop_Shr64, xer_ov, mkU8(63)));
break;
- case /* 2 */ PPC32G_FLAG_OP_DIVW:
+ case /* 2 */ PPCG_FLAG_OP_DIVW:
/* (argL == INT64_MIN && argR == -1) || argR == 0 */
xer_ov
= mkOR1(
);
break;
- case /* 3 */ PPC32G_FLAG_OP_DIVWU:
+ case /* 3 */ PPCG_FLAG_OP_DIVWU:
/* argR == 0 */
xer_ov
= binop(Iop_CmpEQ64, argR, mkU64(0));
break;
- case /* 4 */ PPC32G_FLAG_OP_MULLW: {
+ case /* 4 */ PPCG_FLAG_OP_MULLW: {
/* OV true if result can't be represented in 64 bits
i.e sHi != sign extension of sLo */
xer_ov
break;
}
- case /* 5 */ PPC32G_FLAG_OP_NEG:
+ case /* 5 */ PPCG_FLAG_OP_NEG:
/* argL == INT64_MIN */
xer_ov
= binop(Iop_CmpEQ64, argL, mkU64(INT64_MIN));
break;
- case /* 6 */ PPC32G_FLAG_OP_SUBF:
- case /* 7 */ PPC32G_FLAG_OP_SUBFC:
- case /* 8 */ PPC32G_FLAG_OP_SUBFE:
+ case /* 6 */ PPCG_FLAG_OP_SUBF:
+ case /* 7 */ PPCG_FLAG_OP_SUBFC:
+ case /* 8 */ PPCG_FLAG_OP_SUBFE:
/* ((~argL)^argR^-1) & ((~argL)^res) & (1<<63) ?1:0; */
xer_ov
= AND3( XOR3(NOT(argL),argR,mkU64(-1)),
default:
vex_printf("set_XER_OV: op = %u\n", op);
- vpanic("set_XER_OV(ppc32)");
+ vpanic("set_XER_OV(ppc64)");
}
/* xer_ov MUST denote either 0 or 1, no other value allowed */
IRExpr* argL, IRExpr* argR, IRExpr* oldca )
{
IRExpr* xer_ca;
- vassert(op < PPC32G_FLAG_OP_NUMBER);
+ vassert(op < PPCG_FLAG_OP_NUMBER);
vassert(typeOfIRExpr(irbb->tyenv,res) == Ity_I32);
vassert(typeOfIRExpr(irbb->tyenv,argL) == Ity_I32);
vassert(typeOfIRExpr(irbb->tyenv,argR) == Ity_I32);
if it has any other value, that invariant has been violated. */
switch (op) {
- case /* 0 */ PPC32G_FLAG_OP_ADD:
+ case /* 0 */ PPCG_FLAG_OP_ADD:
/* res <u argL */
xer_ca
= unop(Iop_1Uto32, binop(Iop_CmpLT32U, res, argL));
break;
- case /* 1 */ PPC32G_FLAG_OP_ADDE:
+ case /* 1 */ PPCG_FLAG_OP_ADDE:
/* res <u argL || (old_ca==1 && res==argL) */
xer_ca
= mkOR1(
= unop(Iop_1Uto32, xer_ca);
break;
- case /* 8 */ PPC32G_FLAG_OP_SUBFE:
+ case /* 8 */ PPCG_FLAG_OP_SUBFE:
/* res <u argR || (old_ca==1 && res==argR) */
xer_ca
= mkOR1(
= unop(Iop_1Uto32, xer_ca);
break;
- case /* 7 */ PPC32G_FLAG_OP_SUBFC:
- case /* 9 */ PPC32G_FLAG_OP_SUBFI:
+ case /* 7 */ PPCG_FLAG_OP_SUBFC:
+ case /* 9 */ PPCG_FLAG_OP_SUBFI:
/* res <=u argR */
xer_ca
= unop(Iop_1Uto32, binop(Iop_CmpLE32U, res, argR));
break;
- case /* 10 */ PPC32G_FLAG_OP_SRAW:
+ case /* 10 */ PPCG_FLAG_OP_SRAW:
/* The shift amount is guaranteed to be in 0 .. 63 inclusive.
If it is <= 31, behave like SRAWI; else XER.CA is the sign
bit of argL. */
binop( Iop_And32,
argL,
binop( Iop_Sub32,
- binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,argR)),
+ binop(Iop_Shl32, mkU32(1),
+ unop(Iop_32to8,argR)),
mkU32(1) )
)
);
);
break;
- case /* 11 */ PPC32G_FLAG_OP_SRAWI:
+ case /* 11 */ PPCG_FLAG_OP_SRAWI:
/* xer_ca is 1 iff src was negative and bits_shifted_out !=
0. Since the shift amount is known to be in the range
0 .. 31 inclusive the following seems viable:
binop( Iop_And32,
argL,
binop( Iop_Sub32,
- binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,argR)),
+ binop(Iop_Shl32, mkU32(1),
+ unop(Iop_32to8,argR)),
mkU32(1) )
)
);
default:
vex_printf("set_XER_CA: op = %u\n", op);
- vpanic("set_XER_CA(ppc32)");
+ vpanic("set_XER_CA(ppc)");
}
/* xer_ca MUST denote either 0 or 1, no other value allowed */
IRExpr* argL, IRExpr* argR, IRExpr* oldca )
{
IRExpr* xer_ca;
- vassert(op < PPC32G_FLAG_OP_NUMBER);
+ vassert(op < PPCG_FLAG_OP_NUMBER);
vassert(typeOfIRExpr(irbb->tyenv,res) == Ity_I64);
vassert(typeOfIRExpr(irbb->tyenv,argL) == Ity_I64);
vassert(typeOfIRExpr(irbb->tyenv,argR) == Ity_I64);
if it has any other value, that invariant has been violated. */
switch (op) {
- case /* 0 */ PPC32G_FLAG_OP_ADD:
+ case /* 0 */ PPCG_FLAG_OP_ADD:
/* res <u argL */
xer_ca
= unop(Iop_1Uto32, binop(Iop_CmpLT64U, res, argL));
break;
- case /* 1 */ PPC32G_FLAG_OP_ADDE:
+ case /* 1 */ PPCG_FLAG_OP_ADDE:
/* res <u argL || (old_ca==1 && res==argL) */
xer_ca
= mkOR1(
= unop(Iop_1Uto32, xer_ca);
break;
- case /* 8 */ PPC32G_FLAG_OP_SUBFE:
+ case /* 8 */ PPCG_FLAG_OP_SUBFE:
/* res <u argR || (old_ca==1 && res==argR) */
xer_ca
= mkOR1(
= unop(Iop_1Uto32, xer_ca);
break;
- case /* 7 */ PPC32G_FLAG_OP_SUBFC:
- case /* 9 */ PPC32G_FLAG_OP_SUBFI:
+ case /* 7 */ PPCG_FLAG_OP_SUBFC:
+ case /* 9 */ PPCG_FLAG_OP_SUBFI:
/* res <=u argR */
xer_ca
= unop(Iop_1Uto32, binop(Iop_CmpLE64U, res, argR));
break;
- case /* 10 */ PPC32G_FLAG_OP_SRAW:
+ case /* 10 */ PPCG_FLAG_OP_SRAW:
/* The shift amount is guaranteed to be in 0 .. 31 inclusive.
If it is <= 31, behave like SRAWI; else XER.CA is the sign
bit of argL. */
binop( Iop_And64,
argL,
binop( Iop_Sub64,
- binop(Iop_Shl64, mkU64(1), unop(Iop_64to8,argR)),
+ binop(Iop_Shl64, mkU64(1),
+ unop(Iop_64to8,argR)),
mkU64(1) )
)
);
);
break;
- case /* 11 */ PPC32G_FLAG_OP_SRAWI:
+ case /* 11 */ PPCG_FLAG_OP_SRAWI:
/* xer_ca is 1 iff src was negative and bits_shifted_out != 0.
Since the shift amount is known to be in the range 0 .. 31
inclusive the following seems viable:
binop( Iop_And64,
argL,
binop( Iop_Sub64,
- binop(Iop_Shl64, mkU64(1), unop(Iop_64to8,argR)),
+ binop(Iop_Shl64, mkU64(1),
+ unop(Iop_64to8,argR)),
mkU64(1) )
)
);
break;
- case /* 12 */ PPC32G_FLAG_OP_SRAD:
+ case /* 12 */ PPCG_FLAG_OP_SRAD:
/* The shift amount is guaranteed to be in 0 .. 63 inclusive.
If it is <= 63, behave like SRADI; else XER.CA is the sign
bit of argL. */
binop( Iop_And64,
argL,
binop( Iop_Sub64,
- binop(Iop_Shl64, mkU64(1), unop(Iop_64to8,argR)),
+ binop(Iop_Shl64, mkU64(1),
+ unop(Iop_64to8,argR)),
mkU64(1) )
)
);
break;
- case /* 13 */ PPC32G_FLAG_OP_SRADI:
+ case /* 13 */ PPCG_FLAG_OP_SRADI:
/* xer_ca is 1 iff src was negative and bits_shifted_out != 0.
Since the shift amount is known to be in the range 0 .. 63
inclusive, the following seems viable:
binop( Iop_And64,
argL,
binop( Iop_Sub64,
- binop(Iop_Shl64, mkU64(1), unop(Iop_64to8,argR)),
+ binop(Iop_Shl64, mkU64(1),
+ unop(Iop_64to8,argR)),
mkU64(1) )
)
);
IRType ty = mode64 ? Ity_I64 : Ity_I32;
switch (reg) {
case PPC_GST_LR:
- return IRExpr_Get((mode64 ? OFFB64_LR : OFFB32_LR), ty);
+ return IRExpr_Get( OFFB_LR, ty );
case PPC_GST_CTR:
- return IRExpr_Get((mode64 ? OFFB64_CTR : OFFB32_CTR), ty);
+ return IRExpr_Get( OFFB_CTR, ty );
case PPC_GST_VRSAVE:
- return
- IRExpr_Get((mode64 ? OFFB64_VRSAVE : OFFB32_VRSAVE), Ity_I32);
+ return IRExpr_Get( OFFB_VRSAVE, Ity_I32 );
case PPC_GST_VSCR:
- return
- binop(Iop_And32,
- IRExpr_Get((mode64 ? OFFB64_VSCR : OFFB32_VSCR),Ity_I32),
- mkU32(MASK_VSCR_VALID));
+ return binop(Iop_And32, IRExpr_Get( OFFB_VSCR,Ity_I32 ),
+ mkU32(MASK_VSCR_VALID));
case PPC_GST_CR: {
/* Synthesise the entire CR into a single word. Expensive. */
getXER_BC32()));
case PPC_GST_RESVN:
- return IRExpr_Get((mode64 ? OFFB64_RESVN : OFFB32_RESVN), ty);
+ return IRExpr_Get( OFFB_RESVN, ty);
default:
- vpanic("getGST(ppc32)");
+ vex_printf("getGST(ppc): reg = %u", reg);
+ vpanic("getGST(ppc)");
}
}
vassert((mask & 0xF000) == 0xF000 || (mask & 0xF000) == 0x0);
/* all masks now refer to valid fields */
- /* Vex-generated code expects to run with the FPSCR set as follows:
+ /* Vex-generated code expects the FPSCR to be set as follows:
all exceptions masked, round-to-nearest.
This corresponds to a FPSCR value of 0x0. */
/* We're only keeping track of the rounding mode,
so if the mask isn't asking for this, just return 0x0 */
if (mask & 0x3) {
- assign( val, IRExpr_Get((mode64 ? OFFB64_FPROUND :
- OFFB32_FPROUND), Ity_I32) );
+ assign( val, IRExpr_Get( OFFB_FPROUND, Ity_I32 ) );
} else {
assign( val, mkU32(0x0) );
}
}
default:
- vex_printf("getGST_masked(ppc32): %u", reg);
- vpanic("getGST_masked(ppc32)");
+ vex_printf("getGST_masked(ppc): reg = %u", reg);
+ vpanic("getGST_masked(ppc)");
}
if (mask != 0xFFFFFFFF) {
static void putGST ( PPC_GST reg, IRExpr* src )
{
- IRType ty = mode64 ? Ity_I64 : Ity_I32;
+ IRType ty = mode64 ? Ity_I64 : Ity_I32;
+ IRType ty_src = typeOfIRExpr(irbb->tyenv,src );
vassert( reg < PPC_GST_MAX );
switch (reg) {
case PPC_GST_CIA:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == ty );
- stmt( IRStmt_Put( (mode64 ? OFFB64_CIA : OFFB32_CIA), src ) );
+ vassert( ty_src == ty );
+ stmt( IRStmt_Put( OFFB_CIA, src ) );
break;
case PPC_GST_LR:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == ty );
- stmt( IRStmt_Put( (mode64 ? OFFB64_LR : OFFB32_LR), src ) );
+ vassert( ty_src == ty );
+ stmt( IRStmt_Put( OFFB_LR, src ) );
break;
case PPC_GST_CTR:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == ty );
- stmt( IRStmt_Put( (mode64 ? OFFB64_CTR : OFFB32_CTR), src ) );
+ vassert( ty_src == ty );
+ stmt( IRStmt_Put( OFFB_CTR, src ) );
break;
case PPC_GST_VRSAVE:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == Ity_I32 );
- stmt( IRStmt_Put((mode64 ? OFFB64_VRSAVE :OFFB32_VRSAVE),src));
+ vassert( ty_src == Ity_I32 );
+ stmt( IRStmt_Put( OFFB_VRSAVE,src));
break;
case PPC_GST_VSCR:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == Ity_I32 );
- stmt( IRStmt_Put( (mode64 ? OFFB64_VSCR : OFFB32_VSCR),
+ vassert( ty_src == Ity_I32 );
+ stmt( IRStmt_Put( OFFB_VSCR,
binop(Iop_And32, src,
mkU32(MASK_VSCR_VALID)) ) );
break;
case PPC_GST_XER:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == Ity_I32 );
+ vassert( ty_src == Ity_I32 );
putXER_SO( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(31))) );
putXER_OV( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(30))) );
putXER_CA( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(29))) );
break;
case PPC_GST_EMWARN:
- vassert(typeOfIRExpr(irbb->tyenv,src) == Ity_I32);
- stmt( IRStmt_Put((mode64 ? OFFB64_EMWARN : OFFB32_EMWARN),src) );
+ vassert( ty_src == Ity_I32 );
+ stmt( IRStmt_Put( OFFB_EMWARN,src) );
break;
case PPC_GST_TISTART:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == ty );
- stmt( IRStmt_Put((mode64 ? OFFB64_TISTART :
- OFFB32_TISTART), src) );
+ vassert( ty_src == ty );
+ stmt( IRStmt_Put( OFFB_TISTART, src) );
break;
case PPC_GST_TILEN:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == ty );
- stmt( IRStmt_Put((mode64 ? OFFB64_TILEN : OFFB32_TILEN), src) );
+ vassert( ty_src == ty );
+ stmt( IRStmt_Put( OFFB_TILEN, src) );
break;
case PPC_GST_RESVN:
- vassert( typeOfIRExpr(irbb->tyenv,src ) == ty );
- stmt( IRStmt_Put((mode64 ? OFFB64_RESVN : OFFB32_RESVN), src) );
+ vassert( ty_src == ty );
+ stmt( IRStmt_Put( OFFB_RESVN, src) );
break;
default:
- vpanic("putGST(ppc32)");
+ vex_printf("putGST(ppc): reg = %u", reg);
+ vpanic("putGST(ppc)");
}
}
/* Allow writes to Rounding Mode */
if (mask & 0x3) {
- stmt( IRStmt_Put( (mode64 ? OFFB64_FPROUND : OFFB32_FPROUND),
+ stmt( IRStmt_Put( OFFB_FPROUND,
binop(Iop_And32, src, mkU32(0x3)) ));
}
}
default:
- vex_printf("putGST_masked(ppc32): %u", reg);
- vpanic("putGST_masked(ppc32)");
+ vex_printf("putGST_masked(ppc): reg = %u", reg);
+ vpanic("putGST_masked(ppc)");
}
}
DIP("addic r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
mkSzExtendS16(ty, uimm16) ) );
- set_XER_CA( ty, PPC32G_FLAG_OP_ADD,
+ set_XER_CA( ty, PPCG_FLAG_OP_ADD,
mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
break;
DIP("addic. r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
mkSzExtendS16(ty, uimm16) ) );
- set_XER_CA( ty, PPC32G_FLAG_OP_ADD,
+ set_XER_CA( ty, PPCG_FLAG_OP_ADD,
mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
do_rc = True; // Always record to CR
assign( rD, binop( mkSzOp(ty, Iop_Sub8),
mkSzExtendS16(ty, uimm16),
mkexpr(rA)) );
- set_XER_CA( ty, PPC32G_FLAG_OP_SUBFI,
+ set_XER_CA( ty, PPCG_FLAG_OP_SUBFI,
mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
break;
switch (opc2) {
case 0x10A: // add (Add, PPC32 p347)
DIP("add%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
assign( rD, binop( mkSzOp(ty, Iop_Add8),
mkexpr(rA), mkexpr(rB) ) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_ADD,
+ set_XER_OV( ty, PPCG_FLAG_OP_ADD,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
case 0x00A: // addc (Add Carrying, PPC32 p348)
DIP("addc%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
assign( rD, binop( mkSzOp(ty, Iop_Add8),
mkexpr(rA), mkexpr(rB)) );
- set_XER_CA( ty, PPC32G_FLAG_OP_ADD,
+ set_XER_CA( ty, PPCG_FLAG_OP_ADD,
mkexpr(rD), mkexpr(rA), mkexpr(rB),
mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_ADD,
+ set_XER_OV( ty, PPCG_FLAG_OP_ADD,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
case 0x08A: { // adde (Add Extended, PPC32 p349)
IRTemp old_xer_ca = newTemp(ty);
DIP("adde%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
// rD = rA + rB + XER[CA]
assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
binop( mkSzOp(ty, Iop_Add8),
mkexpr(rB), mkexpr(old_xer_ca))) );
- set_XER_CA( ty, PPC32G_FLAG_OP_ADDE,
+ set_XER_CA( ty, PPCG_FLAG_OP_ADDE,
mkexpr(rD), mkexpr(rA), mkexpr(rB),
mkexpr(old_xer_ca) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_ADDE,
+ set_XER_OV( ty, PPCG_FLAG_OP_ADDE,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
}
- case 0x0EA: { // addme (Add to Minus One Extended, PPC32 p354)
+ case 0x0EA: { // addme (Add to Minus One Extended, PPC32 p354)
IRTemp old_xer_ca = newTemp(ty);
IRExpr *min_one;
if (rB_addr != 0) {
- vex_printf("dis_int_arith(PPC32)(addme,rB_addr)\n");
+ vex_printf("dis_int_arith(ppc)(addme,rB_addr)\n");
return False;
}
DIP("addme%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
// rD = rA + (-1) + XER[CA]
// => Just another form of adde
assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
binop( mkSzOp(ty, Iop_Add8),
min_one, mkexpr(old_xer_ca)) ));
- set_XER_CA( ty, PPC32G_FLAG_OP_ADDE,
+ set_XER_CA( ty, PPCG_FLAG_OP_ADDE,
mkexpr(rD), mkexpr(rA), min_one,
mkexpr(old_xer_ca) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_ADDE,
+ set_XER_OV( ty, PPCG_FLAG_OP_ADDE,
mkexpr(rD), mkexpr(rA), min_one );
}
break;
case 0x0CA: { // addze (Add to Zero Extended, PPC32 p355)
IRTemp old_xer_ca = newTemp(ty);
if (rB_addr != 0) {
- vex_printf("dis_int_arith(PPC32)(addze,rB_addr)\n");
+ vex_printf("dis_int_arith(ppc)(addze,rB_addr)\n");
return False;
}
DIP("addze%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
// rD = rA + (0) + XER[CA]
// => Just another form of adde
assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
assign( rD, binop( mkSzOp(ty, Iop_Add8),
mkexpr(rA), mkexpr(old_xer_ca)) );
- set_XER_CA( ty, PPC32G_FLAG_OP_ADDE,
+ set_XER_CA( ty, PPCG_FLAG_OP_ADDE,
mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0),
mkexpr(old_xer_ca) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_ADDE,
+ set_XER_OV( ty, PPCG_FLAG_OP_ADDE,
mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0) );
}
break;
case 0x1EB: // divw (Divide Word, PPC32 p388)
DIP("divw%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
if (mode64) {
/* Note:
- makes set_CR0 happy */
IRExpr* dividend = mk64lo32Sto64( mkexpr(rA) );
IRExpr* divisor = mk64lo32Sto64( mkexpr(rB) );
- assign( rD, mk64lo32Uto64( binop(Iop_DivS64, dividend, divisor) ) );
+ assign( rD, mk64lo32Uto64( binop(Iop_DivS64, dividend,
+ divisor) ) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_DIVW,
+ set_XER_OV( ty, PPCG_FLAG_OP_DIVW,
mkexpr(rD), dividend, divisor );
}
} else {
assign( rD, binop(Iop_DivS32, mkexpr(rA), mkexpr(rB)) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_DIVW,
+ set_XER_OV( ty, PPCG_FLAG_OP_DIVW,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
}
case 0x1CB: // divwu (Divide Word Unsigned, PPC32 p389)
DIP("divwu%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
if (mode64) {
/* Note:
*/
IRExpr* dividend = mk64lo32Uto64( mkexpr(rA) );
IRExpr* divisor = mk64lo32Uto64( mkexpr(rB) );
- assign( rD, mk64lo32Uto64( binop(Iop_DivU64, dividend, divisor) ) );
+ assign( rD, mk64lo32Uto64( binop(Iop_DivU64, dividend,
+ divisor) ) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_DIVWU,
+ set_XER_OV( ty, PPCG_FLAG_OP_DIVWU,
mkexpr(rD), dividend, divisor );
}
} else {
assign( rD, binop(Iop_DivU32, mkexpr(rA), mkexpr(rB)) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_DIVWU,
+ set_XER_OV( ty, PPCG_FLAG_OP_DIVWU,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
}
case 0x04B: // mulhw (Multiply High Word, PPC32 p488)
if (flag_OE != 0) {
- vex_printf("dis_int_arith(PPC32)(mulhw,flag_OE)\n");
+ vex_printf("dis_int_arith(ppc)(mulhw,flag_OE)\n");
return False;
}
- DIP("mulhw%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ DIP("mulhw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
if (mode64) {
/* rD[hi32] are undefined: setting them to sign of lo32
}
break;
- case 0x00B: // mulhwu (Multiply High Word Unsigned, PPC32 p489)
+ case 0x00B: // mulhwu (Multiply High Word Unsigned, PPC32 p489)
if (flag_OE != 0) {
- vex_printf("dis_int_arith(PPC32)(mulhwu,flag_OE)\n");
+ vex_printf("dis_int_arith(ppc)(mulhwu,flag_OE)\n");
return False;
}
- DIP("mulhwu%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ DIP("mulhwu%s r%u,r%u,r%u\n", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
if (mode64) {
/* rD[hi32] are undefined: setting them to sign of lo32
case 0x0EB: // mullw (Multiply Low Word, PPC32 p491)
DIP("mullw%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
if (mode64) {
/* rD[hi32] are undefined: setting them to sign of lo32
IRExpr *b = unop(Iop_64to32, mkexpr(rB) );
assign( rD, binop(Iop_MullS32, a, b) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_MULLW,
+ set_XER_OV( ty, PPCG_FLAG_OP_MULLW,
mkexpr(rD),
unop(Iop_32Uto64, a), unop(Iop_32Uto64, b) );
}
binop(Iop_MullU32,
mkexpr(rA), mkexpr(rB))) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_MULLW,
+ set_XER_OV( ty, PPCG_FLAG_OP_MULLW,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
}
case 0x068: // neg (Negate, PPC32 p493)
if (rB_addr != 0) {
- vex_printf("dis_int_arith(PPC32)(neg,rB_addr)\n");
+ vex_printf("dis_int_arith(ppc)(neg,rB_addr)\n");
return False;
}
DIP("neg%s%s r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr);
// rD = (~rA) + 1
assign( rD, binop( mkSzOp(ty, Iop_Add8),
unop( mkSzOp(ty, Iop_Not8), mkexpr(rA) ),
mkSzImm(ty, 1)) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_NEG,
+ set_XER_OV( ty, PPCG_FLAG_OP_NEG,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
case 0x028: // subf (Subtract From, PPC32 p537)
DIP("subf%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
// rD = rB - rA
assign( rD, binop( mkSzOp(ty, Iop_Sub8),
mkexpr(rB), mkexpr(rA)) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_SUBF,
+ set_XER_OV( ty, PPCG_FLAG_OP_SUBF,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
case 0x008: // subfc (Subtract from Carrying, PPC32 p538)
DIP("subfc%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
// rD = rB - rA
assign( rD, binop( mkSzOp(ty, Iop_Sub8),
mkexpr(rB), mkexpr(rA)) );
- set_XER_CA( ty, PPC32G_FLAG_OP_SUBFC,
+ set_XER_CA( ty, PPCG_FLAG_OP_SUBFC,
mkexpr(rD), mkexpr(rA), mkexpr(rB),
mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_SUBFC,
+ set_XER_OV( ty, PPCG_FLAG_OP_SUBFC,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
case 0x088: {// subfe (Subtract from Extended, PPC32 p539)
IRTemp old_xer_ca = newTemp(ty);
DIP("subfe%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
// rD = (log not)rA + rB + XER[CA]
assign( old_xer_ca, mkSzWiden32(ty, getXER_CA32(), False) );
unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)),
binop( mkSzOp(ty, Iop_Add8),
mkexpr(rB), mkexpr(old_xer_ca))) );
- set_XER_CA( ty, PPC32G_FLAG_OP_SUBFE,
+ set_XER_CA( ty, PPCG_FLAG_OP_SUBFE,
mkexpr(rD), mkexpr(rA), mkexpr(rB),
mkexpr(old_xer_ca) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_SUBFE,
+ set_XER_OV( ty, PPCG_FLAG_OP_SUBFE,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
}
- case 0x0E8: { // subfme (Subtract from Minus One Extended, PPC32 p541)
+ case 0x0E8: { // subfme (Subtract from -1 Extended, PPC32 p541)
IRTemp old_xer_ca = newTemp(ty);
IRExpr *min_one;
if (rB_addr != 0) {
- vex_printf("dis_int_arith(PPC32)(subfme,rB_addr)\n");
+ vex_printf("dis_int_arith(ppc)(subfme,rB_addr)\n");
return False;
}
DIP("subfme%s%s r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr);
// rD = (log not)rA + (-1) + XER[CA]
// => Just another form of subfe
unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)),
binop( mkSzOp(ty, Iop_Add8),
min_one, mkexpr(old_xer_ca))) );
- set_XER_CA( ty, PPC32G_FLAG_OP_SUBFE,
+ set_XER_CA( ty, PPCG_FLAG_OP_SUBFE,
mkexpr(rD), mkexpr(rA), min_one,
mkexpr(old_xer_ca) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_SUBFE,
+ set_XER_OV( ty, PPCG_FLAG_OP_SUBFE,
mkexpr(rD), mkexpr(rA), min_one );
}
break;
}
- case 0x0C8: { // subfze (Subtract from Zero Extended, PPC32 p542)
+ case 0x0C8: { // subfze (Subtract from Zero Extended, PPC32 p542)
IRTemp old_xer_ca = newTemp(ty);
if (rB_addr != 0) {
- vex_printf("dis_int_arith(PPC32)(subfze,rB_addr)\n");
+ vex_printf("dis_int_arith(ppc)(subfze,rB_addr)\n");
return False;
}
DIP("subfze%s%s r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr);
// rD = (log not)rA + (0) + XER[CA]
// => Just another form of subfe
assign( rD, binop( mkSzOp(ty, Iop_Add8),
unop( mkSzOp(ty, Iop_Not8),
mkexpr(rA)), mkexpr(old_xer_ca)) );
- set_XER_CA( ty, PPC32G_FLAG_OP_SUBFE,
+ set_XER_CA( ty, PPCG_FLAG_OP_SUBFE,
mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0),
mkexpr(old_xer_ca) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_SUBFE,
+ set_XER_OV( ty, PPCG_FLAG_OP_SUBFE,
mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0) );
}
break;
/* 64bit Arithmetic */
- case 0x49: // mulhd (Multiply High Double Word, PPC64 p539)
+ case 0x49: // mulhd (Multiply High DWord, PPC64 p539)
if (flag_OE != 0) {
- vex_printf("dis_int_arith(PPC32)(mulhd,flagOE)\n");
+ vex_printf("dis_int_arith(ppc)(mulhd,flagOE)\n");
return False;
}
- DIP("mulhd%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ DIP("mulhd%s r%u,r%u,r%u\n", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
assign( rD, unop(Iop_128HIto64,
binop(Iop_MullS64,
break;
- case 0x9: // mulhdu (Multiply High Double Word Unsigned, PPC64 p540)
+ case 0x9: // mulhdu (Multiply High DWord Unsigned, PPC64 p540)
if (flag_OE != 0) {
- vex_printf("dis_int_arith(PPC32)(mulhdu,flagOE)\n");
+ vex_printf("dis_int_arith(ppc)(mulhdu,flagOE)\n");
return False;
}
- DIP("mulhdu%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ DIP("mulhdu%s r%u,r%u,r%u\n", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
assign( rD, unop(Iop_128HIto64,
binop(Iop_MullU64,
mkexpr(rA), mkexpr(rB))) );
break;
- case 0xE9: // mulld (Multiply Low Double Word, PPC64 p543)
+ case 0xE9: // mulld (Multiply Low DWord, PPC64 p543)
DIP("mulld%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
assign( rD, binop(Iop_Mul64, mkexpr(rA), mkexpr(rB)) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_MULLW,
+ set_XER_OV( ty, PPCG_FLAG_OP_MULLW,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
- case 0x1E9: // divd (Divide Double Word, PPC64 p419)
+ case 0x1E9: // divd (Divide DWord, PPC64 p419)
DIP("divd%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
assign( rD, binop(Iop_DivS64, mkexpr(rA), mkexpr(rB)) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_DIVW,
+ set_XER_OV( ty, PPCG_FLAG_OP_DIVW,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
=> rD=undef, if(flag_rC) CR7=undef, if(flag_OE) XER_OV=1
=> But _no_ exception raised. */
- case 0x1C9: // divdu (Divide Double Word Unsigned, PPC64 p420)
+ case 0x1C9: // divdu (Divide DWord Unsigned, PPC64 p420)
DIP("divdu%s%s r%u,r%u,r%u\n",
- flag_OE ? "o" : "", flag_rC ? "." : "",
+ flag_OE ? "o" : "", flag_rC ? ".":"",
rD_addr, rA_addr, rB_addr);
assign( rD, binop(Iop_DivU64, mkexpr(rA), mkexpr(rB)) );
if (flag_OE) {
- set_XER_OV( ty, PPC32G_FLAG_OP_DIVWU,
+ set_XER_OV( ty, PPCG_FLAG_OP_DIVWU,
mkexpr(rD), mkexpr(rA), mkexpr(rB) );
}
break;
/* Note: ditto comment divd, for (x / 0) */
default:
- vex_printf("dis_int_arith(PPC32)(opc2)\n");
+ vex_printf("dis_int_arith(ppc)(opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_int_arith(PPC32)(opc1)\n");
+ vex_printf("dis_int_arith(ppc)(opc1)\n");
return False;
}
IRExpr *b;
if (!mode64 && flag_L==1) { // L==1 invalid for 32 bit.
- vex_printf("dis_int_cmp(PPC32)(flag_L)\n");
+ vex_printf("dis_int_cmp(ppc)(flag_L)\n");
return False;
}
if (b22 != 0) {
- vex_printf("dis_int_cmp(PPC32)(b22)\n");
+ vex_printf("dis_int_cmp(ppc)(b22)\n");
return False;
}
/* X Form */
case 0x1F:
if (b0 != 0) {
- vex_printf("dis_int_cmp(PPC32)(0x1F,b0)\n");
+ vex_printf("dis_int_cmp(ppc)(0x1F,b0)\n");
return False;
}
b = getIReg(rB_addr);
break;
default:
- vex_printf("dis_int_cmp(PPC32)(opc2)\n");
+ vex_printf("dis_int_cmp(ppc)(opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_int_cmp(PPC32)(opc1)\n");
+ vex_printf("dis_int_cmp(ppc)(opc1)\n");
return False;
}
switch (opc2) {
case 0x01C: // and (AND, PPC32 p356)
DIP("and%s r%u,r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
assign(rA, binop( mkSzOp(ty, Iop_And8),
mkexpr(rS), mkexpr(rB)));
break;
case 0x03C: // andc (AND with Complement, PPC32 p357)
DIP("andc%s r%u,r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
assign(rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS),
unop( mkSzOp(ty, Iop_Not8),
mkexpr(rB))));
case 0x01A: { // cntlzw (Count Leading Zeros Word, PPC32 p371)
IRExpr* lo32;
if (rB_addr!=0) {
- vex_printf("dis_int_logic(PPC32)(cntlzw,rB_addr)\n");
+ vex_printf("dis_int_logic(ppc)(cntlzw,rB_addr)\n");
return False;
}
DIP("cntlzw%s r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr);
// mode64: count in low word only
lo32 = mode64 ? unop(Iop_64to32, mkexpr(rS)) : mkexpr(rS);
// Iop_Clz32 undefined for arg==0, so deal with that case:
irx = binop(Iop_CmpNE32, lo32, mkU32(0));
- assign(rA, IRExpr_Mux0X( unop(Iop_1Uto8, irx),
- mkSzImm(ty, 32),
- mkSzWiden32(ty, unop(Iop_Clz32, lo32), False) ));
+ assign(rA, mkSzWiden32(ty,
+ IRExpr_Mux0X( unop(Iop_1Uto8, irx),
+ mkU32(32),
+ unop(Iop_Clz32, lo32)),
+ False));
+
// TODO: alternatively: assign(rA, verbose_Clz32(rS));
break;
}
case 0x11C: // eqv (Equivalent, PPC32 p396)
DIP("eqv%s r%u,r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
assign( rA, unop( mkSzOp(ty, Iop_Not8),
binop( mkSzOp(ty, Iop_Xor8),
mkexpr(rS), mkexpr(rB))) );
case 0x3BA: // extsb (Extend Sign Byte, PPC32 p397
if (rB_addr!=0) {
- vex_printf("dis_int_logic(PPC32)(extsb,rB_addr)\n");
+ vex_printf("dis_int_logic(ppc)(extsb,rB_addr)\n");
return False;
}
DIP("extsb%s r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr);
if (mode64)
assign( rA, unop(Iop_8Sto64, unop(Iop_64to8, mkexpr(rS))) );
else
case 0x39A: // extsh (Extend Sign Half Word, PPC32 p398)
if (rB_addr!=0) {
- vex_printf("dis_int_logic(PPC32)(extsh,rB_addr)\n");
+ vex_printf("dis_int_logic(ppc)(extsh,rB_addr)\n");
return False;
}
DIP("extsh%s r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr);
if (mode64)
- assign( rA, unop(Iop_16Sto64, unop(Iop_64to16, mkexpr(rS))) );
+ assign( rA, unop(Iop_16Sto64,
+ unop(Iop_64to16, mkexpr(rS))) );
else
- assign( rA, unop(Iop_16Sto32, unop(Iop_32to16, mkexpr(rS))) );
+ assign( rA, unop(Iop_16Sto32,
+ unop(Iop_32to16, mkexpr(rS))) );
break;
case 0x1DC: // nand (NAND, PPC32 p492)
DIP("nand%s r%u,r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
assign( rA, unop( mkSzOp(ty, Iop_Not8),
binop( mkSzOp(ty, Iop_And8),
mkexpr(rS), mkexpr(rB))) );
case 0x07C: // nor (NOR, PPC32 p494)
DIP("nor%s r%u,r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
assign( rA, unop( mkSzOp(ty, Iop_Not8),
binop( mkSzOp(ty, Iop_Or8),
mkexpr(rS), mkexpr(rB))) );
assign( rA, mkexpr(rS) );
} else {
DIP("or%s r%u,r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
assign( rA, binop( mkSzOp(ty, Iop_Or8),
mkexpr(rS), mkexpr(rB)) );
}
case 0x19C: // orc (OR with Complement, PPC32 p496)
DIP("orc%s r%u,r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS),
unop(mkSzOp(ty, Iop_Not8), mkexpr(rB))));
break;
case 0x13C: // xor (XOR, PPC32 p549)
DIP("xor%s r%u,r%u,r%u\n",
- flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
assign( rA, binop( mkSzOp(ty, Iop_Xor8),
mkexpr(rS), mkexpr(rB)) );
break;
/* 64bit Integer Logical Instructions */
case 0x3DA: // extsw (Extend Sign Word, PPC64 p430)
if (rB_addr!=0) {
- vex_printf("dis_int_logic(PPC32)(extsw,rB_addr)\n");
+ vex_printf("dis_int_logic(ppc)(extsw,rB_addr)\n");
return False;
}
- DIP("extsw%s r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr);
+ DIP("extsw%s r%u,r%u\n", flag_rC ? ".":"", rA_addr, rS_addr);
assign(rA, unop(Iop_32Sto64, unop(Iop_64to32, mkexpr(rS))));
break;
- case 0x03A: // cntlzd (Count Leading Zeros DW, PPC64 p401)
+ case 0x03A: // cntlzd (Count Leading Zeros DWord, PPC64 p401)
if (rB_addr!=0) {
- vex_printf("dis_int_logic(PPC32)(cntlzd,rB_addr)\n");
+ vex_printf("dis_int_logic(ppc)(cntlzd,rB_addr)\n");
return False;
}
- DIP("cntlzd%s r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr);
+ DIP("cntlzd%s r%u,r%u\n",
+ flag_rC ? ".":"", rA_addr, rS_addr);
// Iop_Clz64 undefined for arg==0, so deal with that case:
irx = binop(Iop_CmpNE64, mkexpr(rS), mkU64(0));
assign(rA, IRExpr_Mux0X( unop(Iop_1Uto8, irx),
mkU64(64),
unop(Iop_Clz64, mkexpr(rS)) ));
+ // TODO: alternatively: assign(rA, verbose_Clz64(rS));
break;
default:
- vex_printf("dis_int_logic(PPC32)(opc2)\n");
+ vex_printf("dis_int_logic(ppc)(opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_int_logic(PPC32)(opc1)\n");
+ vex_printf("dis_int_logic(ppc)(opc1)\n");
return False;
}
switch (opc1) {
case 0x14: {
- // rlwimi (Rotate Left Word Immediate then Mask Insert, PPC32 p500)
- DIP("rlwimi%s r%u,r%u,%d,%d,%d\n", flag_rC ? "." : "",
+ // rlwimi (Rotate Left Word Imm then Mask Insert, PPC32 p500)
+ DIP("rlwimi%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
if (mode64) {
// tmp32 = (ROTL(rS_Lo32, Imm)
mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
r = unop(Iop_32Uto64, r);
- assign( rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))) );
+ assign( rot, binop(Iop_Or64, r,
+ binop(Iop_Shl64, r, mkU8(32))) );
assign( rA,
binop(Iop_Or64,
binop(Iop_And64, mkexpr(rot), mkU64(mask64)),
}
case 0x15: {
- // rlwinm (Rotate Left Word Immediate then AND with Mask, PPC32 p501)
+ // rlwinm (Rotate Left Word Imm then AND with Mask, PPC32 p501)
vassert(MaskBeg < 32);
vassert(MaskEnd < 32);
vassert(sh_imm < 32);
if (mode64) {
mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
- DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? "." : "",
+ DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
// tmp32 = (ROTL(rS_Lo32, Imm)
// rA = ((tmp32 || tmp32) & mask64)
r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
r = unop(Iop_32Uto64, r);
- assign( rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))) );
+ assign( rot, binop(Iop_Or64, r,
+ binop(Iop_Shl64, r, mkU8(32))) );
assign( rA, binop(Iop_And64, mkexpr(rot), mkU64(mask64)) );
}
else {
if (MaskBeg == 0 && sh_imm+MaskEnd == 31) {
/* Special-case the ,n,0,31-n form as that is just n-bit
- shift left (PPC32 p501) */
- DIP("slwi%s r%u,r%u,%d\n", flag_rC ? "." : "",
+ shift left, PPC32 p501 */
+ DIP("slwi%s r%u,r%u,%d\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm);
assign( rA, binop(Iop_Shl32, mkexpr(rS), mkU8(sh_imm)) );
}
else if (MaskEnd == 31 && sh_imm+MaskBeg == 32) {
/* Special-case the ,32-n,n,31 form as that is just n-bit
- unsigned shift right (PPC32 p501) */
- DIP("srwi%s r%u,r%u,%d\n", flag_rC ? "." : "",
+ unsigned shift right, PPC32 p501 */
+ DIP("srwi%s r%u,r%u,%d\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm);
assign( rA, binop(Iop_Shr32, mkexpr(rS), mkU8(MaskBeg)) );
}
else {
/* General case. */
mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
- DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? "." : "",
+ DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
// rA = ROTL(rS, Imm) & mask
- assign( rA, binop(Iop_And32, ROTL(mkexpr(rS), mkU8(sh_imm)),
+ assign( rA, binop(Iop_And32,
+ ROTL(mkexpr(rS), mkU8(sh_imm)),
mkU32(mask32)) );
}
}
case 0x17: {
// rlwnm (Rotate Left Word then AND with Mask, PPC32 p503
- DIP("rlwnm%s r%u,r%u,r%u,%d,%d\n", flag_rC ? "." : "",
+ DIP("rlwnm%s r%u,r%u,r%u,%d,%d\n", flag_rC ? ".":"",
rA_addr, rS_addr, rB_addr, MaskBeg, MaskEnd);
if (mode64) {
mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
// rA = ROTL(rS, rB[0-4]) & mask
// note, ROTL does the masking, so we don't do it here
assign( rA, binop(Iop_And32,
- ROTL(mkexpr(rS), unop(Iop_32to8, mkexpr(rB))),
+ ROTL(mkexpr(rS),
+ unop(Iop_32to8, mkexpr(rB))),
mkU32(mask32)) );
}
break;
/* r = ROTL64( rS, rB_lo6) */
r = ROTL( mkexpr(rS), unop(Iop_64to8, mkexpr(rB)) );
- if (b1 == 0) { // rldcl (Rotate Left DW then Clear Left, PPC64 p555)
- DIP("rldcl%s r%u,r%u,r%u,%u\n", flag_rC ? "." : "",
+ if (b1 == 0) { // rldcl (Rotl DWord, Clear Left, PPC64 p555)
+ DIP("rldcl%s r%u,r%u,r%u,%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, rB_addr, msk_imm);
// note, ROTL does the masking, so we don't do it here
mask64 = MASK64(0, 63-msk_imm);
assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
break;
- } else { // rldcr (Rotate Left DW then Clear Right, PPC64 p556)
- DIP("rldcr%s r%u,r%u,r%u,%u\n", flag_rC ? "." : "",
+ } else { // rldcr (Rotl DWord, Clear Right, PPC64 p556)
+ DIP("rldcr%s r%u,r%u,r%u,%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, rB_addr, msk_imm);
mask64 = MASK64(63-msk_imm, 63);
assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
}
break;
}
- case 0x2: // rldic (Rotate Left DW Imm then Clear, PPC64 p557)
- DIP("rldic%s r%u,r%u,%u,%u\n", flag_rC ? "." : "",
+ case 0x2: // rldic (Rotl DWord Imm, Clear, PPC64 p557)
+ DIP("rldic%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm, msk_imm);
r = ROTL(mkexpr(rS), mkU8(sh_imm));
mask64 = MASK64(sh_imm, 63-msk_imm);
assign(rA, r & m);
*/
- case 0x0: // rldicl (Rotate Left DW Imm then Clear Left, PPC64 p558)
- DIP("rldicl%s r%u,r%u,%u,%u\n", flag_rC ? "." : "",
+ case 0x0: // rldicl (Rotl DWord Imm, Clear Left, PPC64 p558)
+ DIP("rldicl%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm, msk_imm);
r = ROTL(mkexpr(rS), mkU8(sh_imm));
mask64 = MASK64(0, 63-msk_imm);
assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
break;
- // later: deal with special case: (msk_imm + sh_imm == 63) => SHR(63 - sh_imm)
+ /* later: deal with special case:
+ (msk_imm + sh_imm == 63) => SHR(63 - sh_imm) */
- case 0x1: // rldicr (Rotate Left DW Imm then Clear Right, PPC64 p559)
- DIP("rldicr%s r%u,r%u,%u,%u\n", flag_rC ? "." : "",
+ case 0x1: // rldicr (Rotl DWord Imm, Clear Right, PPC64 p559)
+ DIP("rldicr%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm, msk_imm);
r = ROTL(mkexpr(rS), mkU8(sh_imm));
mask64 = MASK64(63-msk_imm, 63);
assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
break;
- // later: deal with special case: (msk_imm == sh_imm) => SHL(sh_imm)
+ /* later: deal with special case:
+ (msk_imm == sh_imm) => SHL(sh_imm) */
- case 0x3: { // rldimi (Rotate Left DW Imm then Mask Insert, PPC64 p560)
+ case 0x3: { // rldimi (Rotl DWord Imm, Mask Insert, PPC64 p560)
IRTemp rA_orig = newTemp(ty);
- DIP("rldimi%s r%u,r%u,%u,%u\n", flag_rC ? "." : "",
+ DIP("rldimi%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm, msk_imm);
r = ROTL(mkexpr(rS), mkU8(sh_imm));
mask64 = MASK64(sh_imm, 63-msk_imm);
assign( rA_orig, getIReg(rA_addr) );
assign( rA, binop(Iop_Or64,
binop(Iop_And64, mkU64(mask64), r),
- binop(Iop_And64, mkU64(~mask64), mkexpr(rA_orig))) );
+ binop(Iop_And64, mkU64(~mask64),
+ mkexpr(rA_orig))) );
break;
}
default:
- vex_printf("dis_int_rot(PPC32)(opc2)\n");
+ vex_printf("dis_int_rot(ppc)(opc2)\n");
return False;
}
break;
}
default:
- vex_printf("dis_int_rot(PPC32)(opc1)\n");
+ vex_printf("dis_int_rot(ppc)(opc1)\n");
return False;
}
putIReg( rD_addr, mkSzWiden8(ty, val, False) );
break;
- case 0x23: // lbzu (Load B & Zero with Update, PPC32 p434)
+ case 0x23: // lbzu (Load B & Zero, Update, PPC32 p434)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lbzu,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lbzu,rA_addr|rD_addr)\n");
return False;
}
DIP("lbzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
putIReg( rA_addr, mkexpr(EA) );
break;
- case 0x2A: // lha (Load HW Algebraic, PPC32 p445)
+ case 0x2A: // lha (Load HW Alg, PPC32 p445)
DIP("lha r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
val = loadBE(Ity_I16, mkexpr(EA));
putIReg( rD_addr, mkSzWiden16(ty, val, True) );
break;
- case 0x2B: // lhau (Load HW Algebraic with Update, PPC32 p446)
+ case 0x2B: // lhau (Load HW Alg, Update, PPC32 p446)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lhau,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lhau,rA_addr|rD_addr)\n");
return False;
}
DIP("lhau r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
putIReg( rD_addr, mkSzWiden16(ty, val, False) );
break;
- case 0x29: // lhzu (Load HW & and Zero with Update, PPC32 p451)
+ case 0x29: // lhzu (Load HW & and Zero, Update, PPC32 p451)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lhzu,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lhzu,rA_addr|rD_addr)\n");
return False;
}
DIP("lhzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
putIReg( rD_addr, mkSzWiden32(ty, val, False) );
break;
- case 0x21: // lwzu (Load W & Zero with Update, PPC32 p461))
+ case 0x21: // lwzu (Load W & Zero, Update, PPC32 p461))
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lwzu,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lwzu,rA_addr|rD_addr)\n");
return False;
}
DIP("lwzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
/* X Form */
case 0x1F:
if (b0 != 0) {
- vex_printf("dis_int_load(PPC32)(Ox1F,b0)\n");
+ vex_printf("dis_int_load(ppc)(Ox1F,b0)\n");
return False;
}
switch (opc2) {
- case 0x077: // lbzux (Load B & Zero with Update Indexed, PPC32 p435)
+ case 0x077: // lbzux (Load B & Zero, Update Indexed, PPC32 p435)
DIP("lbzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lwzux,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lwzux,rA_addr|rD_addr)\n");
return False;
}
val = loadBE(Ity_I8, mkexpr(EA));
putIReg( rA_addr, mkexpr(EA) );
break;
- case 0x057: // lbzx (Load B & Zero Indexed, PPC32 p436)
+ case 0x057: // lbzx (Load B & Zero, Indexed, PPC32 p436)
DIP("lbzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I8, mkexpr(EA));
putIReg( rD_addr, mkSzWiden8(ty, val, False) );
break;
- case 0x177: // lhaux (Load HW Algebraic with Update Indexed, PPC32 p447)
+ case 0x177: // lhaux (Load HW Alg, Update Indexed, PPC32 p447)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lhaux,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lhaux,rA_addr|rD_addr)\n");
return False;
}
DIP("lhaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
putIReg( rA_addr, mkexpr(EA) );
break;
- case 0x157: // lhax (Load HW Algebraic Indexed, PPC32 p448)
+ case 0x157: // lhax (Load HW Alg, Indexed, PPC32 p448)
DIP("lhax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I16, mkexpr(EA));
putIReg( rD_addr, mkSzWiden16(ty, val, True) );
break;
- case 0x137: // lhzux (Load HW & Zero with Update Indexed, PPC32 p452)
+ case 0x137: // lhzux (Load HW & Zero, Update Indexed, PPC32 p452)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lhzux,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lhzux,rA_addr|rD_addr)\n");
return False;
}
DIP("lhzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
putIReg( rA_addr, mkexpr(EA) );
break;
- case 0x117: // lhzx (Load HW & Zero Indexed, PPC32 p453)
+ case 0x117: // lhzx (Load HW & Zero, Indexed, PPC32 p453)
DIP("lhzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I16, mkexpr(EA));
putIReg( rD_addr, mkSzWiden16(ty, val, False) );
break;
- case 0x037: // lwzux (Load W & Zero with Update Indexed, PPC32 p462)
+ case 0x037: // lwzux (Load W & Zero, Update Indexed, PPC32 p462)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lwzux,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lwzux,rA_addr|rD_addr)\n");
return False;
}
DIP("lwzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
putIReg( rA_addr, mkexpr(EA) );
break;
- case 0x017: // lwzx (Load W & Zero Indexed, PPC32 p463)
+ case 0x017: // lwzx (Load W & Zero, Indexed, PPC32 p463)
DIP("lwzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
val = loadBE(Ity_I32, mkexpr(EA));
putIReg( rD_addr, mkSzWiden32(ty, val, False) );
/* 64bit Loads */
- case 0x035: // ldux (Load DW with Update Indexed, PPC64 p475)
+ case 0x035: // ldux (Load DWord, Update Indexed, PPC64 p475)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(ldux,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(ldux,rA_addr|rD_addr)\n");
return False;
}
DIP("ldux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
putIReg( rA_addr, mkexpr(EA) );
break;
- case 0x015: // ldx (Load DW Indexed, PPC64 p476)
+ case 0x015: // ldx (Load DWord, Indexed, PPC64 p476)
DIP("ldx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) );
break;
- case 0x175: // lwaux (Load W Algebraic with Update Indexed, PPC64 p501)
+ case 0x175: // lwaux (Load W Alg, Update Indexed, PPC64 p501)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(lwaux,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(lwaux,rA_addr|rD_addr)\n");
return False;
}
DIP("lwaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
- putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
+ putIReg( rD_addr,
+ unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
putIReg( rA_addr, mkexpr(EA) );
break;
- case 0x155: // lwax (Load W Algebraic Indexed, PPC64 p502)
+ case 0x155: // lwax (Load W Alg, Indexed, PPC64 p502)
DIP("lwax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
- putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
+ putIReg( rD_addr,
+ unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
break;
default:
- vex_printf("dis_int_load(PPC32)(opc2)\n");
+ vex_printf("dis_int_load(ppc)(opc2)\n");
return False;
}
break;
/* DS Form - 64bit Loads */
case 0x3A:
switch (b1<<1 | b0) {
- case 0x0: // ld (Load Double Word, PPC64 p472)
+ case 0x0: // ld (Load DWord, PPC64 p472)
DIP("ld r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) );
break;
- case 0x1: // ldu (Load Double Word with Update, PPC64 p474)
+ case 0x1: // ldu (Load DWord, Update, PPC64 p474)
if (rA_addr == 0 || rA_addr == rD_addr) {
- vex_printf("dis_int_load(PPC32)(ldu,rA_addr|rD_addr)\n");
+ vex_printf("dis_int_load(ppc)(ldu,rA_addr|rD_addr)\n");
return False;
}
DIP("ldu r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
putIReg( rA_addr, mkexpr(EA) );
break;
- case 0x2: // lwa (Load Word Algebraic, PPC64 p499)
+ case 0x2: // lwa (Load Word Alg, PPC64 p499)
DIP("lwa r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
- putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
+ putIReg( rD_addr,
+ unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
break;
default:
- vex_printf("dis_int_load(PPC32)(0x3A, opc2)\n");
+ vex_printf("dis_int_load(ppc)(0x3A, opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_int_load(PPC32)(opc1)\n");
+ vex_printf("dis_int_load(ppc)(opc1)\n");
return False;
}
return True;
UChar b0 = ifieldBIT0(theInstr);
Int simm16 = extend_s_16to32(uimm16);
-
IRType ty = mode64 ? Ity_I64 : Ity_I32;
IRTemp rS = newTemp(ty);
IRTemp rB = newTemp(ty);
storeBE( mkexpr(EA), mkSzNarrow8(ty, mkexpr(rS)) );
break;
- case 0x27: // stbu (Store B with Update, PPC32 p510)
+ case 0x27: // stbu (Store B, Update, PPC32 p510)
if (rA_addr == 0 ) {
- vex_printf("dis_int_store(PPC32)(stbu,rA_addr)\n");
+ vex_printf("dis_int_store(ppc)(stbu,rA_addr)\n");
return False;
}
DIP("stbu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
storeBE( mkexpr(EA), mkSzNarrow16(ty, mkexpr(rS)) );
break;
- case 0x2D: // sthu (Store HW with Update, PPC32 p524)
+ case 0x2D: // sthu (Store HW, Update, PPC32 p524)
if (rA_addr == 0) {
- vex_printf("dis_int_store(PPC32)(sthu,rA_addr)\n");
+ vex_printf("dis_int_store(ppc)(sthu,rA_addr)\n");
return False;
}
DIP("sthu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) );
break;
- case 0x25: // stwu (Store W with Update, PPC32 p534)
+ case 0x25: // stwu (Store W, Update, PPC32 p534)
if (rA_addr == 0) {
- vex_printf("dis_int_store(PPC32)(stwu,rA_addr)\n");
+ vex_printf("dis_int_store(ppc)(stwu,rA_addr)\n");
return False;
}
DIP("stwu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
/* X Form : all these use EA_indexed */
case 0x1F:
if (b0 != 0) {
- vex_printf("dis_int_store(PPC32)(0x1F,b0)\n");
+ vex_printf("dis_int_store(ppc)(0x1F,b0)\n");
return False;
}
switch (opc2) {
- case 0x0F7: // stbux (Store B with Update Indexed, PPC32 p511)
+ case 0x0F7: // stbux (Store B, Update Indexed, PPC32 p511)
if (rA_addr == 0) {
- vex_printf("dis_int_store(PPC32)(stbux,rA_addr)\n");
+ vex_printf("dis_int_store(ppc)(stbux,rA_addr)\n");
return False;
}
DIP("stbux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
storeBE( mkexpr(EA), mkSzNarrow8(ty, mkexpr(rS)) );
break;
- case 0x1B7: // sthux (Store HW with Update Indexed, PPC32 p525)
+ case 0x1B7: // sthux (Store HW, Update Indexed, PPC32 p525)
if (rA_addr == 0) {
- vex_printf("dis_int_store(PPC32)(sthux,rA_addr)\n");
+ vex_printf("dis_int_store(ppc)(sthux,rA_addr)\n");
return False;
}
DIP("sthux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
storeBE( mkexpr(EA), mkSzNarrow16(ty, mkexpr(rS)) );
break;
- case 0x0B7: // stwux (Store W with Update Indexed, PPC32 p535)
+ case 0x0B7: // stwux (Store W, Update Indexed, PPC32 p535)
if (rA_addr == 0) {
- vex_printf("dis_int_store(PPC32)(stwux,rA_addr)\n");
+ vex_printf("dis_int_store(ppc)(stwux,rA_addr)\n");
return False;
}
DIP("stwux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
/* 64bit Stores */
- case 0x0B5: // stdux (Store DW with Update Indexed, PPC64 p584)
+ case 0x0B5: // stdux (Store DWord, Update Indexed, PPC64 p584)
if (rA_addr == 0) {
- vex_printf("dis_int_store(PPC32)(stdux,rA_addr)\n");
+ vex_printf("dis_int_store(ppc)(stdux,rA_addr)\n");
return False;
}
DIP("stdux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
storeBE( mkexpr(EA), mkexpr(rS) );
break;
- case 0x095: // stdx (Store DW Indexed, PPC64 p585)
+ case 0x095: // stdx (Store DWord Indexed, PPC64 p585)
DIP("stdx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
storeBE( mkexpr(EA), mkexpr(rS) );
break;
default:
- vex_printf("dis_int_store(PPC32)(opc2)\n");
+ vex_printf("dis_int_store(ppc)(opc2)\n");
return False;
}
break;
/* DS Form - 64bit Stores */
case 0x3E:
switch (b1<<1 | b0) {
- case 0x0: // std (Store Double Word, PPC64 p580)
+ case 0x0: // std (Store DWord, PPC64 p580)
DIP("std r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
storeBE( mkexpr(EA), mkexpr(rS) );
break;
- case 0x1: // stdu (Store Double Word with Update, PPC64 p583)
+ case 0x1: // stdu (Store DWord, Update, PPC64 p583)
DIP("stdu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
putIReg( rA_addr, mkexpr(EA) );
storeBE( mkexpr(EA), mkexpr(rS) );
break;
default:
- vex_printf("dis_int_load(PPC32)(0x3A, opc2)\n");
+ vex_printf("dis_int_load(ppc)(0x3A, opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_int_store(PPC32)(opc1)\n");
+ vex_printf("dis_int_store(ppc)(opc1)\n");
return False;
}
return True;
switch (opc1) {
case 0x2E: // lmw (Load Multiple Word, PPC32 p454)
if (rA_addr >= rD_addr) {
- vex_printf("dis_int_ldst_mult(PPC32)(lmw,rA_addr)\n");
+ vex_printf("dis_int_ldst_mult(ppc)(lmw,rA_addr)\n");
return False;
}
DIP("lmw r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
for (r = rD_addr; r <= 31; r++) {
irx_addr = binop(Iop_Add32, mkexpr(EA), mkU32(ea_off));
- putIReg( r, mkSzWiden32(ty, loadBE(Ity_I32, irx_addr ), False) );
+ putIReg( r, mkSzWiden32(ty, loadBE(Ity_I32, irx_addr ),
+ False) );
ea_off += 4;
}
break;
break;
default:
- vex_printf("dis_int_ldst_mult(PPC32)(opc1)\n");
+ vex_printf("dis_int_ldst_mult(ppc)(opc1)\n");
return False;
}
return True;
*stopHere = False;
if (opc1 != 0x1F || b0 != 0) {
- vex_printf("dis_int_ldst_str(PPC32)(opc1)\n");
+ vex_printf("dis_int_ldst_str(ppc)(opc1)\n");
return False;
}
return True;
default:
- vex_printf("dis_int_ldst_str(PPC32)(opc2)\n");
+ vex_printf("dis_int_ldst_str(ppc)(opc2)\n");
return False;
}
return True;
/* We have to invert the sense of the information held in
cr_bi. For that we need to know which bit
getCRbit_anywhere regards as significant. */
- assign( res, binop(Iop_Xor32, mkexpr(cr_bi), mkU32(1<<where)) );
+ assign( res, binop(Iop_Xor32, mkexpr(cr_bi),
+ mkU32(1<<where)) );
}
}
return mkexpr(res);
case 0x13:
if (b11to15!=0) {
- vex_printf("dis_int_branch(PPC32)(0x13,b11to15)\n");
+ vex_printf("dis_int_branch(ppc)(0x13,b11to15)\n");
return False;
}
switch (opc2) {
case 0x210: // bcctr (Branch Cond. to Count Register, PPC32 p363)
- if ((BO & 0x4) == 0) { // "decrement and test CTR" option invalid
- vex_printf("dis_int_branch(PPC32)(bcctr,BO)\n");
+ if ((BO & 0x4) == 0) { // "decr and test CTR" option invalid
+ vex_printf("dis_int_branch(ppc)(bcctr,BO)\n");
return False;
}
DIP("bcctr%s 0x%x, 0x%x\n", flag_LK ? "l" : "", BO, BI);
break;
default:
- vex_printf("dis_int_branch(PPC32)(opc2)\n");
+ vex_printf("dis_int_branch(ppc)(opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_int_branch(PPC32)(opc1)\n");
+ vex_printf("dis_int_branch(ppc)(opc1)\n");
return False;
}
IRTemp crbB = newTemp(Ity_I32);
if (opc1 != 19 || b0 != 0) {
- vex_printf("dis_cond_logic(PPC32)(opc1)\n");
+ vex_printf("dis_cond_logic(ppc)(opc1)\n");
return False;
}
if (opc2 == 0) { // mcrf (Move Cond Reg Field, PPC32 p464)
if (((crbD_addr & 0x3) != 0) ||
((crbA_addr & 0x3) != 0) || (crbB_addr != 0)) {
- vex_printf("dis_cond_logic(PPC32)(crbD|crbA|crbB != 0)\n");
+ vex_printf("dis_cond_logic(ppc)(crbD|crbA|crbB != 0)\n");
return False;
}
DIP("mcrf cr%u,cr%u\n", crfD_addr, crfS_addr);
assign( crbD, binop(Iop_Xor32, mkexpr(crbA), mkexpr(crbB)) );
break;
default:
- vex_printf("dis_cond_logic(PPC32)(opc2)\n");
+ vex_printf("dis_cond_logic(ppc)(opc2)\n");
return False;
}
IRType ty = mode64 ? Ity_I64 : Ity_I32;
if (theInstr != 0x44000002) {
- vex_printf("dis_syslink(PPC32)(theInstr)\n");
+ vex_printf("dis_syslink(ppc)(theInstr)\n");
return False;
}
/* XL-Form */
case 0x13: // isync (Instruction Synchronize, PPC32 p432)
if (opc2 != 0x096) {
- vex_printf("dis_memsync(PPC32)(0x13,opc2)\n");
+ vex_printf("dis_memsync(ppc)(0x13,opc2)\n");
return False;
}
if (b11to25 != 0 || b0 != 0) {
- vex_printf("dis_memsync(PPC32)(0x13,b11to25|b0)\n");
+ vex_printf("dis_memsync(ppc)(0x13,b11to25|b0)\n");
return False;
}
DIP("isync\n");
/* X-Form */
case 0x1F:
switch (opc2) {
- case 0x356: // eieio (Enforce In-Order Execution of I/O, PPC32 p394)
+ case 0x356: // eieio (Enforce In-Order Exec of I/O, PPC32 p394)
if (b11to25 != 0 || b0 != 0) {
- vex_printf("dis_memsync(PPC32)(eiei0,b11to25|b0)\n");
+ vex_printf("dis_memsync(ppc)(eiei0,b11to25|b0)\n");
return False;
}
DIP("eieio\n");
case 0x014: // lwarx (Load Word and Reserve Indexed, PPC32 p458)
if (b0 != 0) {
- vex_printf("dis_memsync(PPC32)(lwarx,b0)\n");
+ vex_printf("dis_memsync(ppc)(lwarx,b0)\n");
return False;
}
DIP("lwarx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
- putIReg( rD_addr, mkSzWiden32(ty, loadBE(Ity_I32, mkexpr(EA)), False) );
+ putIReg( rD_addr, mkSzWiden32(ty, loadBE(Ity_I32, mkexpr(EA)),
+ False) );
/* Take a reservation */
putGST( PPC_GST_RESVN, mkexpr(EA) );
break;
// stwcx. (Store Word Conditional Indexed, PPC32 p532)
IRTemp resaddr = newTemp(ty);
if (b0 != 1) {
- vex_printf("dis_memsync(PPC32)(stwcx.,b0)\n");
+ vex_printf("dis_memsync(ppc)(stwcx.,b0)\n");
return False;
}
DIP("stwcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
case 0x256: // sync (Synchronize, PPC32 p543)
if (b11to25 != 0 || b0 != 0) {
- vex_printf("dis_memsync(PPC32)(sync,b11to25|b0)\n");
+ vex_printf("dis_memsync(ppc)(sync,b11to25|b0)\n");
return False;
}
DIP("sync\n");
/* 64bit Memsync */
- case 0x054: // ldarx (Load DW and Reserve Indexed, PPC64 p473)
+ case 0x054: // ldarx (Load DWord and Reserve Indexed, PPC64 p473)
if (b0 != 0) {
- vex_printf("dis_memsync(PPC32)(ldarx,b0)\n");
+ vex_printf("dis_memsync(ppc)(ldarx,b0)\n");
return False;
}
DIP("ldarx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
putGST( PPC_GST_RESVN, mkexpr(EA) );
break;
- case 0x0D6: { // stdcx. (Store DW Condition Indexed, PPC64 p581)
+ case 0x0D6: { // stdcx. (Store DWord Conditional Indexed, PPC64 p581)
IRTemp resaddr = newTemp(ty);
if (b0 != 1) {
- vex_printf("dis_memsync(PPC32)(stdcx.,b0)\n");
+ vex_printf("dis_memsync(ppc)(stdcx.,b0)\n");
return False;
}
DIP("stdcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
}
default:
- vex_printf("dis_memsync(PPC32)(opc2)\n");
+ vex_printf("dis_memsync(ppc)(opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_memsync(PPC32)(opc1)\n");
+ vex_printf("dis_memsync(ppc)(opc1)\n");
return False;
}
return True;
if (opc1 == 0x1F) {
switch (opc2) {
case 0x018: { // slw (Shift Left Word, PPC32 p505)
- DIP("slw%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ DIP("slw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, rB_addr);
/* rA = rS << rB */
/* ppc32 semantics are:
break;
}
- case 0x318: { // sraw (Shift Right Algebraic Word, PPC32 p506)
+ case 0x318: { // sraw (Shift Right Alg Word, PPC32 p506)
IRTemp sh_amt = newTemp(Ity_I32);
- DIP("sraw%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ DIP("sraw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, rB_addr);
/* JRS: my reading of the (poorly worded) PPC32 doc p506 is:
amt = rB & 63
rA = Sar32( rS, amt > 31 ? 31 : amt )
XER.CA = amt > 31 ? sign-of-rS : (computation as per srawi)
*/
- assign( sh_amt, binop(Iop_And32, mkU32(0x3F), mkexpr(rB_lo32)) );
+ assign( sh_amt, binop(Iop_And32, mkU32(0x3F),
+ mkexpr(rB_lo32)) );
assign( outofrange,
unop( Iop_1Uto8,
- binop(Iop_CmpLT32U, mkU32(31), mkexpr(sh_amt)) ));
+ binop(Iop_CmpLT32U, mkU32(31),
+ mkexpr(sh_amt)) ));
e_tmp = binop( Iop_Sar32,
mkexpr(rS_lo32),
unop( Iop_32to8,
mkU32(31)) ) );
assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */True) );
- set_XER_CA( ty, PPC32G_FLAG_OP_SRAW,
+ set_XER_CA( ty, PPCG_FLAG_OP_SRAW,
mkexpr(rA),
mkSzWiden32(ty, mkexpr(rS_lo32), True),
mkSzWiden32(ty, mkexpr(sh_amt), True ),
break;
}
- case 0x338: // srawi (Shift Right Algebraic Word Immediate, PPC32 p507)
- DIP("srawi%s r%u,r%u,%d\n", flag_rC ? "." : "",
+ case 0x338: // srawi (Shift Right Alg Word Immediate, PPC32 p507)
+ DIP("srawi%s r%u,r%u,%d\n", flag_rC ? ".":"",
rA_addr, rS_addr, sh_imm);
vassert(sh_imm < 32);
if (mode64) {
assign( rA, binop(Iop_Sar64,
- binop(Iop_Shl64, getIReg(rS_addr), mkU8(32)),
+ binop(Iop_Shl64, getIReg(rS_addr),
+ mkU8(32)),
mkU8(32 + sh_imm)) );
} else {
- assign( rA, binop(Iop_Sar32, mkexpr(rS_lo32), mkU8(sh_imm)) );
+ assign( rA, binop(Iop_Sar32, mkexpr(rS_lo32),
+ mkU8(sh_imm)) );
}
- set_XER_CA( ty, PPC32G_FLAG_OP_SRAWI,
+ set_XER_CA( ty, PPCG_FLAG_OP_SRAWI,
mkexpr(rA),
- mkSzWiden32(ty, mkexpr(rS_lo32), /* Signed */True),
+ mkSzWiden32(ty, mkexpr(rS_lo32), /* Syned */True),
mkSzImm(ty, sh_imm),
- mkSzWiden32(ty, getXER_CA32(), /* Signed */False) );
+ mkSzWiden32(ty, getXER_CA32(), /* Syned */False) );
break;
case 0x218: // srw (Shift Right Word, PPC32 p508)
- DIP("srw%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ DIP("srw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
rA_addr, rS_addr, rB_addr);
/* rA = rS >>u rB */
/* ppc32 semantics are:
binop( Iop_Shr32,
mkexpr(rS_lo32),
unop( Iop_32to8,
- binop(Iop_And32, mkexpr(rB_lo32), mkU32(31)))),
+ binop(Iop_And32, mkexpr(rB_lo32),
+ mkU32(31)))),
unop( Iop_Not32,
binop( Iop_Sar32,
- binop(Iop_Shl32, mkexpr(rB_lo32), mkU8(26)),
+ binop(Iop_Shl32, mkexpr(rB_lo32),
+ mkU8(26)),
mkU8(31))));
assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */False) );
break;
/* 64bit Shifts */
- case 0x01B: // sld (Shift Left DW, PPC64 p568)
- DIP("sld%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ case 0x01B: // sld (Shift Left DWord, PPC64 p568)
+ DIP("sld%s r%u,r%u,r%u\n",
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
/* rA = rS << rB */
/* ppc64 semantics are:
slw(x,y) = (x << (y & 63)) -- primary result
mkU8(63)))) );
break;
- case 0x31A: { // srad (Shift Right Algebraic DW, PPC64 p570)
+ case 0x31A: { // srad (Shift Right Alg DWord, PPC64 p570)
IRTemp sh_amt = newTemp(Ity_I64);
- DIP("srad%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ DIP("srad%s r%u,r%u,r%u\n",
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
/* amt = rB & 127
rA = Sar64( rS, amt > 63 ? 63 : amt )
XER.CA = amt > 63 ? sign-of-rS : (computation as per srawi)
assign( sh_amt, binop(Iop_And64, mkU64(0x7F), mkexpr(rB)) );
assign( outofrange,
unop( Iop_1Uto8,
- binop(Iop_CmpLT64U, mkU64(63), mkexpr(sh_amt)) ));
+ binop(Iop_CmpLT64U, mkU64(63),
+ mkexpr(sh_amt)) ));
assign( rA,
binop( Iop_Sar64,
mkexpr(rS),
mkexpr(sh_amt),
mkU64(63)) ))
);
- set_XER_CA( ty, PPC32G_FLAG_OP_SRAD,
+ set_XER_CA( ty, PPCG_FLAG_OP_SRAD,
mkexpr(rA), mkexpr(rS), mkexpr(sh_amt),
- mkSzWiden32(ty, getXER_CA32(), /* Signed */False) );
+ mkSzWiden32(ty, getXER_CA32(), /* Syned */False) );
break;
}
- case 0x33A: case 0x33B: // sradi (Shift Right Algebraic DW Imm, PPC64 p571)
+ case 0x33A: case 0x33B: // sradi (Shr Alg DWord Imm, PPC64 p571)
sh_imm |= b1<<5;
vassert(sh_imm < 64);
- DIP("sradi%s r%u,r%u,%u\n", flag_rC ? "." : "", rA_addr, rS_addr, sh_imm);
+ DIP("sradi%s r%u,r%u,%u\n",
+ flag_rC ? ".":"", rA_addr, rS_addr, sh_imm);
assign( rA, binop(Iop_Sar64, getIReg(rS_addr), mkU8(sh_imm)) );
- set_XER_CA( ty, PPC32G_FLAG_OP_SRADI,
+ set_XER_CA( ty, PPCG_FLAG_OP_SRADI,
mkexpr(rA),
getIReg(rS_addr),
mkU64(sh_imm),
- mkSzWiden32(ty, getXER_CA32(), /* Signed */False) );
+ mkSzWiden32(ty, getXER_CA32(), /* Syned */False) );
break;
- case 0x21B: // srd (Shift Right DW, PPC64 p574)
- DIP("srd%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ case 0x21B: // srd (Shift Right DWord, PPC64 p574)
+ DIP("srd%s r%u,r%u,r%u\n",
+ flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
/* rA = rS >>u rB */
/* ppc semantics are:
srw(x,y) = (x >>u (y & 63)) -- primary result
break;
default:
- vex_printf("dis_int_shift(PPC32)(opc2)\n");
+ vex_printf("dis_int_shift(ppc)(opc2)\n");
return False;
}
} else {
- vex_printf("dis_int_shift(PPC32)(opc1)\n");
+ vex_printf("dis_int_shift(ppc)(opc1)\n");
return False;
}
IRTemp w2 = newTemp(Ity_I32);
if (opc1 != 0x1F || b0 != 0) {
- vex_printf("dis_int_ldst_rev(PPC32)(opc1|b0)\n");
+ vex_printf("dis_int_ldst_rev(ppc)(opc1|b0)\n");
return False;
}
DIP("lwbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
assign( w1, loadBE(Ity_I32, mkexpr(EA)) );
assign( w2, gen_byterev32(w1) );
- putIReg( rD_addr, mkSzWiden32(ty, mkexpr(w2), /* Signed */False) );
+ putIReg( rD_addr, mkSzWiden32(ty, mkexpr(w2),
+ /* Signed */False) );
break;
//zz case 0x396: // sthbrx (Store Half Word Byte-Reverse Indexed, PPC32 p523)
//zz storeBE( mkexpr(EA), getIReg(tmp16) );
//zz break;
- case 0x296: // stwbrx (Store Word Byte-Reverse Indexed, PPC32 p531)
+ case 0x296: // stwbrx (Store Word Byte-Reverse Indxd, PPC32 p531)
DIP("stwbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
assign( w1, mkSzNarrow32(ty, getIReg(rS_addr)) );
storeBE( mkexpr(EA), gen_byterev32(w1) );
break;
default:
- vex_printf("dis_int_ldst_rev(PPC32)(opc2)\n");
+ vex_printf("dis_int_ldst_rev(ppc)(opc2)\n");
return False;
}
return True;
TBR = ((TBR & 31) << 5) | ((TBR >> 5) & 31);
if (opc1 != 0x1F || b0 != 0) {
- vex_printf("dis_proc_ctl(PPC32)(opc1|b0)\n");
+ vex_printf("dis_proc_ctl(ppc)(opc1|b0)\n");
return False;
}
switch (opc2) {
/* X-Form */
- case 0x200: { // mcrxr (Move to Condition Register from XER, PPC32 p466)
+ case 0x200: { // mcrxr (Move to Cond Register from XER, PPC32 p466)
if (b21to22 != 0 || b11to20 != 0) {
- vex_printf("dis_proc_ctl(PPC32)(mcrxr,b21to22|b11to20)\n");
+ vex_printf("dis_proc_ctl(ppc)(mcrxr,b21to22|b11to20)\n");
return False;
}
DIP("mcrxr crf%d\n", crfD);
break;
}
- case 0x013: // mfcr (Move from Condition Register, PPC32 p467)
+ case 0x013: // mfcr (Move from Cond Register, PPC32 p467)
if (b11to20 != 0) {
- vex_printf("dis_proc_ctl(PPC32)(mfcr,b11to20)\n");
+ vex_printf("dis_proc_ctl(ppc)(mfcr,b11to20)\n");
return False;
}
DIP("mfcr r%u\n", rD_addr);
break;
default:
- vex_printf("dis_proc_ctl(PPC32)(mfspr,SPR)(0x%x)\n", SPR);
+ vex_printf("dis_proc_ctl(ppc)(mfspr,SPR)(0x%x)\n", SPR);
return False;
}
break;
case 0x173: { // mftb (Move from Time Base, PPC32 p475)
IRTemp val = newTemp(Ity_I64);
IRExpr** args = mkIRExprVec_0();
- IRDirty* d = unsafeIRDirty_1_N (
- val,
- 0/*regparms*/,
- "ppc32g_dirtyhelper_MFTB",
- &ppc32g_dirtyhelper_MFTB,
- args
- );
+ IRDirty* d = unsafeIRDirty_1_N( val,
+ 0/*regparms*/,
+ "ppcg_dirtyhelper_MFTB",
+ &ppcg_dirtyhelper_MFTB,
+ args );
/* execute the dirty call, dumping the result in val. */
stmt( IRStmt_Dirty(d) );
break;
}
- case 0x090: { // mtcrf (Move to Condition Register Fields, PPC32 p477)
+ case 0x090: { // mtcrf (Move to Cond Register Fields, PPC32 p477)
Int cr;
UChar shft;
if (b11 != 0 || b20 != 0) {
- vex_printf("dis_proc_ctl(PPC32)(mtcrf,b11|b20)\n");
+ vex_printf("dis_proc_ctl(ppc)(mtcrf,b11|b20)\n");
return False;
}
DIP("mtcrf 0x%x,r%u\n", CRM, rS_addr);
break;
default:
- vex_printf("dis_proc_ctl(PPC32)(mtspr,SPR)(%u)\n", SPR);
+ vex_printf("dis_proc_ctl(ppc)(mtspr,SPR)(%u)\n", SPR);
return False;
}
break;
default:
- vex_printf("dis_proc_ctl(PPC32)(opc2)\n");
+ vex_printf("dis_proc_ctl(ppc)(opc2)\n");
return False;
}
return True;
UChar rB_addr = ifieldRegB(theInstr);
UInt opc2 = ifieldOPClo10(theInstr);
UChar b0 = ifieldBIT0(theInstr);
- UInt lineszB = guest_archinfo->ppc32_cache_line_szB;
+ UInt lineszB = guest_archinfo->ppc_cache_line_szB;
IRType ty = mode64 ? Ity_I64 : Ity_I32;
if (opc1 != 0x1F || b21to25 != 0 || b0 != 0) {
- vex_printf("dis_cache_manage(PPC32)(opc1|b21to25|b0)\n");
+ vex_printf("dis_cache_manage(ppc)(opc1|b21to25|b0)\n");
return False;
}
//zz case 0x2F6: // dcba (Data Cache Block Allocate, PPC32 p380)
//zz vassert(0); /* AWAITING TEST CASE */
//zz DIP("dcba r%u,r%u\n", rA_addr, rB_addr);
-//zz if (0) vex_printf("vex ppc32->IR: kludged dcba\n");
+//zz if (0) vex_printf("vex ppc->IR: kludged dcba\n");
//zz break;
case 0x056: // dcbf (Data Cache Block Flush, PPC32 p382)
DIP("dcbf r%u,r%u\n", rA_addr, rB_addr);
/* nop as far as vex is concerned */
- if (0) vex_printf("vex ppc32->IR: kludged dcbf\n");
+ if (0) vex_printf("vex ppc->IR: kludged dcbf\n");
break;
case 0x036: // dcbst (Data Cache Block Store, PPC32 p384)
}
default:
- vex_printf("dis_cache_manage(PPC32)(opc2)\n");
+ vex_printf("dis_cache_manage(ppc)(opc2)\n");
return False;
}
return True;
/* --------- Synthesise a 2-bit FPU rounding mode. --------- */
/* Produces a value in 0 .. 3, which is encoded as per the type
- IRRoundingMode. PPC32RoundingMode encoding is different to
+ IRRoundingMode. PPCRoundingMode encoding is different to
IRRoundingMode, so need to map it.
*/
static IRExpr* /* :: Ity_I32 */ get_roundingmode ( void )
- returns type Ity_F64 */
static IRExpr* roundToSgl ( IRExpr* src )
{
- return unop(Iop_F32toF64, binop(Iop_F64toF32, get_roundingmode(), src));
+ return unop(Iop_F32toF64,
+ binop(Iop_F64toF32, get_roundingmode(), src));
}
case 0x30: // lfs (Load Float Single, PPC32 p441)
DIP("lfs fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
assign( EA, ea_rAor0_simm(rA_addr, simm16) );
- putFReg( frD_addr, unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) );
+ putFReg( frD_addr,
+ unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) );
break;
- case 0x31: // lfsu (Load Float Single with Update, PPC32 p442)
+ case 0x31: // lfsu (Load Float Single, Update, PPC32 p442)
if (rA_addr == 0) {
- vex_printf("dis_fp_load(PPC32)(instr,lfsu)\n");
+ vex_printf("dis_fp_load(ppc)(instr,lfsu)\n");
return False;
}
DIP("lfsu fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
assign( EA, ea_rA_simm(rA_addr, simm16) );
- putFReg( frD_addr, unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) );
+ putFReg( frD_addr,
+ unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) );
putIReg( rA_addr, mkexpr(EA) );
break;
putFReg( frD_addr, loadBE(Ity_F64, mkexpr(EA)) );
break;
- case 0x33: // lfdu (Load Float Double with Update, PPC32 p438)
+ case 0x33: // lfdu (Load Float Double, Update, PPC32 p438)
if (rA_addr == 0) {
- vex_printf("dis_fp_load(PPC32)(instr,lfdu)\n");
+ vex_printf("dis_fp_load(ppc)(instr,lfdu)\n");
return False;
}
DIP("lfdu fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
case 0x1F:
if (b0 != 0) {
- vex_printf("dis_fp_load(PPC32)(instr,b0)\n");
+ vex_printf("dis_fp_load(ppc)(instr,b0)\n");
return False;
}
loadBE(Ity_F32, mkexpr(EA))) );
break;
- case 0x237: // lfsux (Load Float Single with Update Indexed, PPC32 p443)
+ case 0x237: // lfsux (Load Float Single, Update Indxd, PPC32 p443)
if (rA_addr == 0) {
- vex_printf("dis_fp_load(PPC32)(instr,lfsux)\n");
+ vex_printf("dis_fp_load(ppc)(instr,lfsux)\n");
return False;
}
DIP("lfsux fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
- putFReg( frD_addr, unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) );
+ putFReg( frD_addr,
+ unop(Iop_F32toF64, loadBE(Ity_F32, mkexpr(EA))) );
putIReg( rA_addr, mkexpr(EA) );
break;
putFReg( frD_addr, loadBE(Ity_F64, mkexpr(EA)) );
break;
- case 0x277: // lfdux (Load Float Double with Update Indexed, PPC32 p439)
+ case 0x277: // lfdux (Load Float Double, Update Indxd, PPC32 p439)
if (rA_addr == 0) {
- vex_printf("dis_fp_load(PPC32)(instr,lfdux)\n");
+ vex_printf("dis_fp_load(ppc)(instr,lfdux)\n");
return False;
}
DIP("lfdux fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
break;
default:
- vex_printf("dis_fp_load(PPC32)(opc2)\n");
+ vex_printf("dis_fp_load(ppc)(opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_fp_load(PPC32)(opc1)\n");
+ vex_printf("dis_fp_load(ppc)(opc1)\n");
return False;
}
return True;
binop(Iop_F64toF32, get_roundingmode(), mkexpr(frS)) );
break;
- case 0x35: // stfsu (Store Float Single with Update, PPC32 p519)
+ case 0x35: // stfsu (Store Float Single, Update, PPC32 p519)
if (rA_addr == 0) {
- vex_printf("dis_fp_store(PPC32)(instr,stfsu)\n");
+ vex_printf("dis_fp_store(ppc)(instr,stfsu)\n");
return False;
}
DIP("stfsu fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr);
storeBE( mkexpr(EA), mkexpr(frS) );
break;
- case 0x37: // stfdu (Store Float Double with Update, PPC32 p514)
+ case 0x37: // stfdu (Store Float Double, Update, PPC32 p514)
if (rA_addr == 0) {
- vex_printf("dis_fp_store(PPC32)(instr,stfdu)\n");
+ vex_printf("dis_fp_store(ppc)(instr,stfdu)\n");
return False;
}
DIP("stfdu fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr);
case 0x1F:
if (b0 != 0) {
- vex_printf("dis_fp_store(PPC32)(instr,b0)\n");
+ vex_printf("dis_fp_store(ppc)(instr,b0)\n");
return False;
}
DIP("stfsx fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
/* This implementation loses accuracy - see note for stfs */
- storeBE( mkexpr(EA),
- binop(Iop_F64toF32, get_roundingmode(), mkexpr(frS)) );
+ storeBE( mkexpr(EA), binop(Iop_F64toF32,
+ get_roundingmode(), mkexpr(frS)) );
break;
- case 0x2B7: // stfsux (Store Float Single with Update Indexed, PPC32 p520)
+ case 0x2B7: // stfsux (Store Float Sgl, Update Indxd, PPC32 p520)
if (rA_addr == 0) {
- vex_printf("dis_fp_store(PPC32)(instr,stfsux)\n");
+ vex_printf("dis_fp_store(ppc)(instr,stfsux)\n");
return False;
}
DIP("stfsux fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
/* This implementation loses accuracy - see note for stfs */
- storeBE( mkexpr(EA),
- binop(Iop_F64toF32, get_roundingmode(), mkexpr(frS)) );
+ storeBE( mkexpr(EA), binop(Iop_F64toF32,
+ get_roundingmode(), mkexpr(frS)) );
putIReg( rA_addr, mkexpr(EA) );
break;
storeBE( mkexpr(EA), mkexpr(frS) );
break;
- case 0x2F7: // stfdux (Store Float Double with Update Indexed, PPC32 p515)
+ case 0x2F7: // stfdux (Store Float Dbl, Update Indxd, PPC32 p515)
if (rA_addr == 0) {
- vex_printf("dis_fp_store(PPC32)(instr,stfdux)\n");
+ vex_printf("dis_fp_store(ppc)(instr,stfdux)\n");
return False;
}
DIP("stfdux fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
//zz break;
default:
- vex_printf("dis_fp_store(PPC32)(opc2)\n");
+ vex_printf("dis_fp_store(ppc)(opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_fp_store(PPC32)(opc1)\n");
+ vex_printf("dis_fp_store(ppc)(opc1)\n");
return False;
}
return True;
switch (opc2) {
case 0x12: // fdivs (Floating Divide Single, PPC32 p407)
if (frC_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fdivs)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fdivs)\n");
return False;
}
- DIP("fdivs%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fdivs%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frB_addr);
- assign( frD, roundToSgl( binop(Iop_DivF64, mkexpr(frA), mkexpr(frB)) ));
+ assign( frD, roundToSgl( binop(Iop_DivF64,
+ mkexpr(frA), mkexpr(frB)) ));
break;
case 0x14: // fsubs (Floating Subtract Single, PPC32 p430)
if (frC_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fsubs)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fsubs)\n");
return False;
}
- DIP("fsubs%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fsubs%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frB_addr);
assign( frD, roundToSgl(
binop(Iop_SubF64, mkexpr(frA), mkexpr(frB)) ));
case 0x15: // fadds (Floating Add Single, PPC32 p401)
if (frC_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fadds)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fadds)\n");
return False;
}
- DIP("fadds%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fadds%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frB_addr);
assign( frD, roundToSgl(
binop(Iop_AddF64, mkexpr(frA), mkexpr(frB)) ));
//zz case 0x16: // fsqrts (Floating SqRt (Single-Precision), PPC32 p428)
//zz if (frA_addr != 0 || frC_addr != 0) {
-//zz vex_printf("dis_fp_arith(PPC32)(instr,fsqrts)\n");
+//zz vex_printf("dis_fp_arith(ppc)(instr,fsqrts)\n");
//zz return False;
//zz }
-//zz DIP("fsqrts%s fr%u,fr%u\n", flag_rC ? "." : "",
+//zz DIP("fsqrts%s fr%u,fr%u\n", flag_rC ? ".":"",
//zz frD_addr, frB_addr);
//zz assign( frD, roundToSgl( unop(Iop_SqrtF64, mkexpr(frB)) ));
//zz break;
//zz case 0x18: // fres (Floating Reciprocal Estimate Single, PPC32 p421)
//zz if (frA_addr != 0 || frC_addr != 0) {
-//zz vex_printf("dis_fp_arith(PPC32)(instr,fres)\n");
+//zz vex_printf("dis_fp_arith(ppc)(instr,fres)\n");
//zz return False;
//zz }
-//zz DIP("fres%s fr%u,fr%u\n", flag_rC ? "." : "",
+//zz DIP("fres%s fr%u,fr%u\n", flag_rC ? ".":"",
//zz frD_addr, frB_addr);
//zz DIP(" => not implemented\n");
//zz // CAB: Can we use one of the 128 bit SIMD Iop_Recip32F ops?
case 0x19: // fmuls (Floating Multiply Single, PPC32 p414)
if (frB_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fmuls)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fmuls)\n");
return False;
}
- DIP("fmuls%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fmuls%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr);
- assign( frD, roundToSgl( binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)) ));
+ assign( frD, roundToSgl( binop(Iop_MulF64,
+ mkexpr(frA), mkexpr(frC)) ));
break;
default:
- vex_printf("dis_fp_arith(PPC32)(3B: opc2)\n");
+ vex_printf("dis_fp_arith(ppc)(3B: opc2)\n");
return False;
}
break;
case 0x3F:
switch (opc2) {
- case 0x12: // fdiv (Floating Divide (Double-Precision), PPC32 p406)
+ case 0x12: // fdiv (Floating Div (Double-Precision), PPC32 p406)
if (frC_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fdiv)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fdiv)\n");
return False;
}
- DIP("fdiv%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fdiv%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frB_addr);
assign( frD, binop( Iop_DivF64, mkexpr(frA), mkexpr(frB) ) );
break;
- case 0x14: // fsub (Floating Subtract (Double-Precision), PPC32 p429)
+ case 0x14: // fsub (Floating Sub (Double-Precision), PPC32 p429)
if (frC_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fsub)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fsub)\n");
return False;
}
- DIP("fsub%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fsub%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frB_addr);
assign( frD, binop( Iop_SubF64, mkexpr(frA), mkexpr(frB) ) );
break;
case 0x15: // fadd (Floating Add (Double-Precision), PPC32 p400)
if (frC_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fadd)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fadd)\n");
return False;
}
- DIP("fadd%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fadd%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frB_addr);
assign( frD, binop( Iop_AddF64, mkexpr(frA), mkexpr(frB) ) );
break;
case 0x16: // fsqrt (Floating SqRt (Double-Precision), PPC32 p427)
if (frA_addr != 0 || frC_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fsqrt)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fsqrt)\n");
return False;
}
- DIP("fsqrt%s fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fsqrt%s fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frB_addr);
assign( frD, unop( Iop_SqrtF64, mkexpr(frB) ) );
break;
IRTemp cc = newTemp(Ity_I32);
IRTemp cc_b0 = newTemp(Ity_I32);
- DIP("fsel%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fsel%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
// cc: UN == 0x41, LT == 0x01, GT == 0x00, EQ == 0x40
// => GT|EQ == (cc & 0x1 == 0)
- assign( cc, binop(Iop_CmpF64, mkexpr(frA), IRExpr_Const(IRConst_F64(0))) );
+ assign( cc, binop(Iop_CmpF64, mkexpr(frA),
+ IRExpr_Const(IRConst_F64(0))) );
assign( cc_b0, binop(Iop_And32, mkexpr(cc), mkU32(1)) );
// frD = (frA >= 0.0) ? frC : frB
break;
}
- case 0x19: // fmul (Floating Multiply (Double Precision), PPC32 p413)
+ case 0x19: // fmul (Floating Mult (Double Precision), PPC32 p413)
if (frB_addr != 0) {
- vex_printf("dis_fp_arith(PPC32)(instr,fmul)\n");
+ vex_printf("dis_fp_arith(ppc)(instr,fmul)\n");
return False;
}
- DIP("fmul%s fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fmul%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr);
assign( frD, binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ) );
break;
-//zz case 0x1A: // frsqrte (Floating Reciprocal SqRt Estimate, PPC32 p424)
+//zz case 0x1A: // frsqrte (Floating Recip SqRt Est., PPC32 p424)
//zz if (frA_addr != 0 || frC_addr != 0) {
-//zz vex_printf("dis_fp_arith(PPC32)(instr,frsqrte)\n");
+//zz vex_printf("dis_fp_arith(ppc)(instr,frsqrte)\n");
//zz return False;
//zz }
-//zz DIP("frsqrte%s fr%u,fr%u\n", flag_rC ? "." : "",
+//zz DIP("frsqrte%s fr%u,fr%u\n", flag_rC ? ".":"",
//zz frD_addr, frB_addr);
//zz DIP(" => not implemented\n");
//zz // CAB: Iop_SqrtF64, then one of the 128 bit SIMD Iop_Recip32F ops?
//zz return False;
default:
- vex_printf("dis_fp_arith(PPC32)(3F: opc2)\n");
+ vex_printf("dis_fp_arith(ppc)(3F: opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_fp_arith(PPC32)(opc1)\n");
+ vex_printf("dis_fp_arith(ppc)(opc1)\n");
return False;
}
case 0x3B:
switch (opc2) {
case 0x1C: // fmsubs (Floating Mult-Subtr Single, PPC32 p412)
- DIP("fmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
- assign( frD, roundToSgl(
- binop( Iop_SubF64,
- binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)),
- mkexpr(frB)) ));
+ assign( frD, roundToSgl( binop( Iop_SubF64,
+ binop(Iop_MulF64, mkexpr(frA),
+ mkexpr(frC)),
+ mkexpr(frB)) ));
break;
case 0x1D: // fmadds (Floating Mult-Add Single, PPC32 p409)
- DIP("fmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
- assign( frD, roundToSgl(
- binop( Iop_AddF64,
- binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)),
- mkexpr(frB)) ));
+ assign( frD, roundToSgl( binop( Iop_AddF64,
+ binop(Iop_MulF64, mkexpr(frA),
+ mkexpr(frC)),
+ mkexpr(frB)) ));
break;
case 0x1E: // fnmsubs (Float Neg Mult-Subtr Single, PPC32 p420)
- DIP("fnmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fnmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
assign( frD, roundToSgl(
- unop(Iop_NegF64,
- binop(Iop_SubF64,
- binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)),
- mkexpr(frB))) ));
+ unop(Iop_NegF64,
+ binop(Iop_SubF64,
+ binop(Iop_MulF64, mkexpr(frA),
+ mkexpr(frC)),
+ mkexpr(frB))) ));
break;
case 0x1F: // fnmadds (Floating Negative Multiply-Add Single, PPC32 p418)
- DIP("fnmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ DIP("fnmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
assign( frD, roundToSgl(
- unop(Iop_NegF64,
- binop(Iop_AddF64,
- binop(Iop_MulF64, mkexpr(frA), mkexpr(frC)),
- mkexpr(frB))) ));
+ unop(Iop_NegF64,
+ binop(Iop_AddF64,
+ binop(Iop_MulF64, mkexpr(frA),
+ mkexpr(frC)),
+ mkexpr(frB))) ));
break;
default:
- vex_printf("dis_fp_multadd(PPC32)(3B: opc2)\n");
+ vex_printf("dis_fp_multadd(ppc)(3B: opc2)\n");
return False;
}
break;
case 0x3F:
switch (opc2) {
- case 0x1C: // fmsub (Float Mult-Subtr (Double Precision), PPC32 p411)
- DIP("fmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ case 0x1C: // fmsub (Float Mult-Sub (Dbl Precision), PPC32 p411)
+ DIP("fmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
assign( frD, binop( Iop_SubF64,
- binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ),
+ binop( Iop_MulF64, mkexpr(frA),
+ mkexpr(frC) ),
mkexpr(frB) ));
break;
- case 0x1D: // fmadd (Float Mult-Add (Double Precision), PPC32 p408)
- DIP("fmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ case 0x1D: // fmadd (Float Mult-Add (Dbl Precision), PPC32 p408)
+ DIP("fmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
assign( frD, binop( Iop_AddF64,
- binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ),
+ binop( Iop_MulF64, mkexpr(frA),
+ mkexpr(frC) ),
mkexpr(frB) ));
break;
- case 0x1E: // fnmsub (Float Neg Mult-Subtr (Double Precision), PPC32 p419)
- DIP("fnmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ case 0x1E: // fnmsub (Float Neg Mult-Subtr (Dbl Precision), PPC32 p419)
+ DIP("fnmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
assign( frD, unop( Iop_NegF64,
binop( Iop_SubF64,
- binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ),
+ binop( Iop_MulF64, mkexpr(frA),
+ mkexpr(frC) ),
mkexpr(frB) )));
break;
- case 0x1F: // fnmadd (Float Neg Mult-Add (Double Precision), PPC32 p417)
- DIP("fnmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? "." : "",
+ case 0x1F: // fnmadd (Float Neg Mult-Add (Dbl Precision), PPC32 p417)
+ DIP("fnmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
frD_addr, frA_addr, frC_addr, frB_addr);
assign( frD, unop( Iop_NegF64,
binop( Iop_AddF64,
- binop( Iop_MulF64, mkexpr(frA), mkexpr(frC) ),
+ binop( Iop_MulF64, mkexpr(frA),
+ mkexpr(frC) ),
mkexpr(frB) )));
break;
default:
- vex_printf("dis_fp_multadd(PPC32)(3F: opc2)\n");
+ vex_printf("dis_fp_multadd(ppc)(3F: opc2)\n");
return False;
}
break;
default:
- vex_printf("dis_fp_multadd(PPC32)(opc1)\n");
+ vex_printf("dis_fp_multadd(ppc)(opc1)\n");
return False;
}
IRTemp frB = newTemp(Ity_F64);
if (opc1 != 0x3F || b21to22 != 0 || b0 != 0) {
- vex_printf("dis_fp_cmp(PPC32)(instr)\n");
+ vex_printf("dis_fp_cmp(ppc)(instr)\n");
return False;
}
DIP("fcmpo crf%d,fr%u,fr%u\n", crfD, frA_addr, frB_addr);
break;
default:
- vex_printf("dis_fp_cmp(PPC32)(opc2)\n");
+ vex_printf("dis_fp_cmp(ppc)(opc2)\n");
return False;
}
return True;
IRTemp r_tmp64 = newTemp(Ity_I64);
if (opc1 != 0x3F || b16to20 != 0) {
- vex_printf("dis_fp_round(PPC32)(instr)\n");
+ vex_printf("dis_fp_round(ppc)(instr)\n");
return False;
}
switch (opc2) {
case 0x00C: // frsp (Float Round to Single, PPC32 p423)
- DIP("frsp%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP("frsp%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
assign( frD, roundToSgl( mkexpr(frB) ));
break;
case 0x00E: // fctiw (Float Conv to Int, PPC32 p404)
- DIP("fctiw%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
- assign( r_tmp32, binop(Iop_F64toI32, get_roundingmode(), mkexpr(frB)) );
+ DIP("fctiw%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+ assign( r_tmp32,
+ binop(Iop_F64toI32, get_roundingmode(), mkexpr(frB)) );
assign( frD, unop( Iop_ReinterpI64asF64,
unop( Iop_32Uto64, mkexpr(r_tmp32))));
break;
case 0x00F: // fctiwz (Float Conv to Int, Round to Zero, PPC32 p405)
- DIP("fctiwz%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP("fctiwz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
assign( r_tmp32, binop(Iop_F64toI32, mkU32(0x3), mkexpr(frB)) );
assign( frD, unop( Iop_ReinterpI64asF64,
unop( Iop_32Uto64, mkexpr(r_tmp32))));
/* 64bit FP conversions */
- case 0x32E: // fctid (Float Conv to Int DW, PPC64 p437)
- DIP("fctid%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
- assign( r_tmp64, binop(Iop_F64toI64, get_roundingmode(), mkexpr(frB)) );
+ case 0x32E: // fctid (Float Conv to Int DWord, PPC64 p437)
+ DIP("fctid%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+ assign( r_tmp64,
+ binop(Iop_F64toI64, get_roundingmode(), mkexpr(frB)) );
assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) );
break;
- case 0x32F: // fctidz (Float Conv to Int DW, Round to Zero, PPC64 p437)
- DIP("fctidz%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ case 0x32F: // fctidz (Float Conv to Int DWord, Round to Zero, PPC64 p437)
+ DIP("fctidz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
assign( r_tmp64, binop(Iop_F64toI64, mkU32(0x3), mkexpr(frB)) );
assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) );
break;
- case 0x34E: // fcfid (Float Conv from Int DW, PPC64 p434)
- DIP("fcfid%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ case 0x34E: // fcfid (Float Conv from Int DWord, PPC64 p434)
+ DIP("fcfid%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
- assign( frD, binop(Iop_I64toF64, get_roundingmode(), mkexpr(r_tmp64)) );
+ assign( frD, binop(Iop_I64toF64, get_roundingmode(),
+ mkexpr(r_tmp64)) );
break;
default:
- vex_printf("dis_fp_round(PPC32)(opc2)\n");
+ vex_printf("dis_fp_round(ppc)(opc2)\n");
return False;
}
IRTemp frB = newTemp(Ity_F64);
if (opc1 != 0x3F || b16to20 != 0) {
- vex_printf("dis_fp_move(PPC32)(instr)\n");
+ vex_printf("dis_fp_move(ppc)(instr)\n");
return False;
}
switch (opc2) {
case 0x028: // fneg (Floating Negate, PPC32 p416)
- DIP("fneg%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP("fneg%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
assign( frD, unop( Iop_NegF64, mkexpr(frB) ));
break;
case 0x048: // fmr (Floating Move Register, PPC32 p410)
- DIP("fmr%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP("fmr%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
assign( frD, mkexpr(frB) );
break;
case 0x088: // fnabs (Floating Negative Absolute Value, PPC32 p415)
- DIP("fnabs%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP("fnabs%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
assign( frD, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr(frB) )));
break;
case 0x108: // fabs (Floating Absolute Value, PPC32 p399)
- DIP("fabs%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP("fabs%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
assign( frD, unop( Iop_AbsF64, mkexpr(frB) ));
break;
default:
- vex_printf("dis_fp_move(PPC32)(opc2)\n");
+ vex_printf("dis_fp_move(ppc)(opc2)\n");
return False;
}
UChar flag_rC = ifieldBIT0(theInstr);
if (opc1 != 0x3F) {
- vex_printf("dis_fp_scr(PPC32)(instr)\n");
+ vex_printf("dis_fp_scr(ppc)(instr)\n");
return False;
}
//zz UInt b11to20 = IFIELD(theInstr, 11, 10);
//zz
//zz if (b11to20 != 0) {
-//zz vex_printf("dis_fp_scr(PPC32)(instr,mtfsb1)\n");
+//zz vex_printf("dis_fp_scr(ppc)(instr,mtfsb1)\n");
//zz return False;
//zz }
-//zz DIP("mtfsb1%s crb%d \n", flag_rC ? "." : "", crbD);
+//zz DIP("mtfsb1%s crb%d \n", flag_rC ? ".":"", crbD);
//zz putGST_masked( PPC_GST_FPSCR, mkU32(1<<(31-crbD)), 1<<(31-crbD) );
//zz break;
//zz }
//zz IRTemp tmp = newTemp(Ity_I32);
//zz
//zz if (b21to22 != 0 || b11to17 != 0 || flag_rC != 0) {
-//zz vex_printf("dis_fp_scr(PPC32)(instr,mcrfs)\n");
+//zz vex_printf("dis_fp_scr(ppc)(instr,mcrfs)\n");
//zz return False;
//zz }
//zz DIP("mcrfs crf%d,crf%d\n", crfD, crfS);
UInt b11to20 = IFIELD(theInstr, 11, 10);
if (b11to20 != 0) {
- vex_printf("dis_fp_scr(PPC32)(instr,mtfsb0)\n");
+ vex_printf("dis_fp_scr(ppc)(instr,mtfsb0)\n");
return False;
}
- DIP("mtfsb0%s crb%d\n", flag_rC ? "." : "", crbD);
+ DIP("mtfsb0%s crb%d\n", flag_rC ? ".":"", crbD);
putGST_masked( PPC_GST_FPSCR, mkU32(0), 1<<(31-crbD) );
break;
}
UChar b11 = toUChar( IFIELD( theInstr, 11, 1 ) );
if (b16to22 != 0 || b11 != 0) {
- vex_printf("dis_fp_scr(PPC32)(instr,mtfsfi)\n");
+ vex_printf("dis_fp_scr(ppc)(instr,mtfsfi)\n");
return False;
}
- DIP("mtfsfi%s crf%d,%d\n", flag_rC ? "." : "", crfD, IMM);
+ DIP("mtfsfi%s crf%d,%d\n", flag_rC ? ".":"", crfD, IMM);
putGST_field( PPC_GST_FPSCR, mkU32(IMM), crfD );
break;
}
UInt b11to20 = IFIELD(theInstr, 11, 10);
if (b11to20 != 0) {
- vex_printf("dis_fp_scr(PPC32)(instr,mffs)\n");
+ vex_printf("dis_fp_scr(ppc)(instr,mffs)\n");
return False;
}
- DIP("mffs%s fr%u\n", flag_rC ? "." : "", frD_addr);
- putFReg( frD_addr, unop( Iop_ReinterpI64asF64,
- unop( Iop_32Uto64,
- getGST_masked( PPC_GST_FPSCR, 0x3 ) )));
+ DIP("mffs%s fr%u\n", flag_rC ? ".":"", frD_addr);
+ putFReg( frD_addr,
+ unop( Iop_ReinterpI64asF64,
+ unop( Iop_32Uto64,
+ getGST_masked( PPC_GST_FPSCR, MASK_FPSCR_RN ) )));
break;
}
Int i, mask;
if (b25 != 0 || b16 != 0) {
- vex_printf("dis_fp_scr(PPC32)(instr,mtfsf)\n");
+ vex_printf("dis_fp_scr(ppc)(instr,mtfsf)\n");
return False;
}
- DIP("mtfsf%s %d,fr%u\n", flag_rC ? "." : "", FM, frB_addr);
+ DIP("mtfsf%s %d,fr%u\n", flag_rC ? ".":"", FM, frB_addr);
assign( frB, getFReg(frB_addr));
assign( rB_32, unop( Iop_64to32,
unop( Iop_ReinterpF64asI64, mkexpr(frB) )));
}
default:
- vex_printf("dis_fp_scr(PPC32)(opc2)\n");
+ vex_printf("dis_fp_scr(ppc)(opc2)\n");
return False;
}
return True;
UChar b0 = ifieldBIT0(theInstr);
if (opc1 != 0x1F || b23to24 != 0 || b0 != 0) {
- vex_printf("dis_av_datastream(PPC32)(instr)\n");
+ vex_printf("dis_av_datastream(ppc)(instr)\n");
return False;
}
switch (opc2) {
case 0x156: // dst (Data Stream Touch, AV p115)
- DIP("dst%s r%u,r%u,%d\n", flag_T ? "t" : "", rA_addr, rB_addr, STRM);
+ DIP("dst%s r%u,r%u,%d\n", flag_T ? "t" : "",
+ rA_addr, rB_addr, STRM);
DIP(" => not implemented\n");
return False;
case 0x176: // dstst (Data Stream Touch for Store, AV p117)
- DIP("dstst%s r%u,r%u,%d\n", flag_T ? "t" : "", rA_addr, rB_addr, STRM);
+ DIP("dstst%s r%u,r%u,%d\n", flag_T ? "t" : "",
+ rA_addr, rB_addr, STRM);
DIP(" => not implemented\n");
return False;
case 0x336: // dss (Data Stream Stop, AV p114)
if (rA_addr != 0 || rB_addr != 0) {
- vex_printf("dis_av_datastream(PPC32)(opc2,dst)\n");
+ vex_printf("dis_av_datastream(ppc)(opc2,dst)\n");
return False;
}
if (flag_A == 0) {
return False;
default:
- vex_printf("dis_av_datastream(PPC32)(opc2)\n");
+ vex_printf("dis_av_datastream(ppc)(opc2)\n");
return False;
}
return True;
UInt opc2 = IFIELD( theInstr, 0, 11 );
if (opc1 != 0x4) {
- vex_printf("dis_av_procctl(PPC32)(instr)\n");
+ vex_printf("dis_av_procctl(ppc)(instr)\n");
return False;
}
switch (opc2) {
case 0x604: // mfvscr (Move from VSCR, AV p129)
if (vA_addr != 0 || vB_addr != 0) {
- vex_printf("dis_av_procctl(PPC32)(opc2,dst)\n");
+ vex_printf("dis_av_procctl(ppc)(opc2,dst)\n");
return False;
}
DIP("mfvscr v%d\n", vD_addr);
case 0x644: { // mtvscr (Move to VSCR, AV p130)
IRTemp vB = newTemp(Ity_V128);
if (vD_addr != 0 || vA_addr != 0) {
- vex_printf("dis_av_procctl(PPC32)(opc2,dst)\n");
+ vex_printf("dis_av_procctl(ppc)(opc2,dst)\n");
return False;
}
DIP("mtvscr v%d\n", vB_addr);
break;
}
default:
- vex_printf("dis_av_procctl(PPC32)(opc2)\n");
+ vex_printf("dis_av_procctl(ppc)(opc2)\n");
return False;
}
return True;
IRTemp addr_align16 = newTemp(ty);
if (opc1 != 0x1F || b0 != 0) {
- vex_printf("dis_av_load(PPC32)(instr)\n");
+ vex_printf("dis_av_load(ppc)(instr)\n");
return False;
}
mkU32(vD_off),
binop(Iop_And32, mkexpr(EA_lo32), mkU32(0xF)),
mkU32(0)/*left*/ );
- IRDirty* d = unsafeIRDirty_0_N (
- 0/*regparms*/,
- "ppc32g_dirtyhelper_LVS",
- &ppc32g_dirtyhelper_LVS,
- args );
+ IRDirty* d;
+ if (!mode64) {
+ d = unsafeIRDirty_0_N ( 0/*regparms*/,
+ "ppc32g_dirtyhelper_LVS",
+ &ppc32g_dirtyhelper_LVS,
+ args );
+ } else {
+ d = unsafeIRDirty_0_N ( 0/*regparms*/,
+ "ppc64g_dirtyhelper_LVS",
+ &ppc64g_dirtyhelper_LVS,
+ args );
+ }
DIP("lvsl v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
/* declare guest state effects */
d->needsBBP = True;
mkU32(vD_off),
binop(Iop_And32, mkexpr(EA_lo32), mkU32(0xF)),
mkU32(1)/*right*/ );
- IRDirty* d = unsafeIRDirty_0_N (
- 0/*regparms*/,
- "ppc32g_dirtyhelper_LVS",
- &ppc32g_dirtyhelper_LVS,
- args );
+ IRDirty* d;
+ if (!mode64) {
+ d = unsafeIRDirty_0_N ( 0/*regparms*/,
+ "ppc32g_dirtyhelper_LVS",
+ &ppc32g_dirtyhelper_LVS,
+ args );
+ } else {
+ d = unsafeIRDirty_0_N ( 0/*regparms*/,
+ "ppc64g_dirtyhelper_LVS",
+ &ppc64g_dirtyhelper_LVS,
+ args );
+ }
DIP("lvsr v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
/* declare guest state effects */
d->needsBBP = True;
return False;
default:
- vex_printf("dis_av_load(PPC32)(opc2)\n");
+ vex_printf("dis_av_load(ppc)(opc2)\n");
return False;
}
return True;
IRTemp idx = newTemp(Ity_I8);
if (opc1 != 0x1F || b0 != 0) {
- vex_printf("dis_av_store(PPC32)(instr)\n");
+ vex_printf("dis_av_store(ppc)(instr)\n");
return False;
}
assign( eb, binop(Iop_And8, mkU8(0xF),
unop(Iop_32to8,
mkSzNarrow32(ty, mkexpr(EA)) )) );
- assign( idx, binop(Iop_Shl8, binop(Iop_Sub8, mkU8(15), mkexpr(eb)),
+ assign( idx, binop(Iop_Shl8,
+ binop(Iop_Sub8, mkU8(15), mkexpr(eb)),
mkU8(3)) );
storeBE( mkexpr(EA),
unop(Iop_32to8, unop(Iop_V128to32,
mkSzNarrow32(ty, addr_align(mkexpr(EA), 2)) );
assign( eb, binop(Iop_And8, mkU8(0xF),
unop(Iop_32to8, mkexpr(addr_aligned) )) );
- assign( idx, binop(Iop_Shl8, binop(Iop_Sub8, mkU8(14), mkexpr(eb)),
+ assign( idx, binop(Iop_Shl8,
+ binop(Iop_Sub8, mkU8(14), mkexpr(eb)),
mkU8(3)) );
storeBE( mkexpr(addr_aligned),
unop(Iop_32to16, unop(Iop_V128to32,
mkSzNarrow32(ty, addr_align(mkexpr(EA), 4)) );
assign( eb, binop(Iop_And8, mkU8(0xF),
unop(Iop_32to8, mkexpr(addr_aligned) )) );
- assign( idx, binop(Iop_Shl8, binop(Iop_Sub8, mkU8(12), mkexpr(eb)),
+ assign( idx, binop(Iop_Shl8,
+ binop(Iop_Sub8, mkU8(12), mkexpr(eb)),
mkU8(3)) );
storeBE( mkexpr(addr_aligned),
unop(Iop_V128to32,
DIP("stvxl v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
DIP(" => not implemented\n");
return False;
-
// STORE(vS, 16, addr_align( mkexpr(EA), 16 ));
+// break;
default:
- vex_printf("dis_av_store(PPC32)(opc2)\n");
+ vex_printf("dis_av_store(ppc)(opc2)\n");
return False;
}
return True;
assign( vB, getVReg(vB_addr));
if (opc1 != 0x4) {
- vex_printf("dis_av_arith(PPC32)(opc1 != 0x4)\n");
+ vex_printf("dis_av_arith(ppc)(opc1 != 0x4)\n");
return False;
}
// TODO: set VSCR[SAT]
break;
- case 0x640: // vsubuhs (Subtract Unsigned Half Word Saturate, AV p268)
+ case 0x640: // vsubuhs (Subtract Unsigned HWord Saturate, AV p268)
DIP("vsubuhs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
putVReg( vD_addr, binop(Iop_QSub16Ux8, mkexpr(vA), mkexpr(vB)) );
// TODO: set VSCR[SAT]
/* Multiply */
case 0x008: // vmuloub (Multiply Odd Unsigned Byte, AV p213)
DIP("vmuloub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_MullEven8Ux16, mkexpr(vA), mkexpr(vB)));
+ putVReg( vD_addr,
+ binop(Iop_MullEven8Ux16, mkexpr(vA), mkexpr(vB)));
break;
case 0x048: // vmulouh (Multiply Odd Unsigned Half Word, AV p214)
DIP("vmulouh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)));
+ putVReg( vD_addr,
+ binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)));
break;
case 0x108: // vmulosb (Multiply Odd Signed Byte, AV p211)
DIP("vmulosb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_MullEven8Sx16, mkexpr(vA), mkexpr(vB)));
+ putVReg( vD_addr,
+ binop(Iop_MullEven8Sx16, mkexpr(vA), mkexpr(vB)));
break;
case 0x148: // vmulosh (Multiply Odd Signed Half Word, AV p212)
DIP("vmulosh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)));
+ putVReg( vD_addr,
+ binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)));
break;
case 0x208: // vmuleub (Multiply Even Unsigned Byte, AV p209)
/* add lanes */
assign( z3, binop(Iop_Add64, mkexpr(b3),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a15), mkexpr(a14)),
- binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a15), mkexpr(a14)),
+ binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) );
assign( z2, binop(Iop_Add64, mkexpr(b2),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a11), mkexpr(a10)),
- binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a11), mkexpr(a10)),
+ binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) );
assign( z1, binop(Iop_Add64, mkexpr(b1),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a7), mkexpr(a6)),
- binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a7), mkexpr(a6)),
+ binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) );
assign( z0, binop(Iop_Add64, mkexpr(b0),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
- binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
+ binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
/* saturate-narrow to 32bit, and combine to V128 */
putVReg( vD_addr, mkV128from4x64U( mkexpr(z3), mkexpr(z2),
/* add lanes */
assign( z3, binop(Iop_Add64, mkexpr(b3),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a15), mkexpr(a14)),
- binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a15), mkexpr(a14)),
+ binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) );
assign( z2, binop(Iop_Add64, mkexpr(b2),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a11), mkexpr(a10)),
- binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a11), mkexpr(a10)),
+ binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) );
assign( z1, binop(Iop_Add64, mkexpr(b1),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a7), mkexpr(a6)),
- binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a7), mkexpr(a6)),
+ binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) );
assign( z0, binop(Iop_Add64, mkexpr(b0),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
- binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
+ binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
/* saturate-narrow to 32bit, and combine to V128 */
putVReg( vD_addr, mkV128from4x64S( mkexpr(z3), mkexpr(z2),
/* add lanes */
assign( z0, binop(Iop_Add64, mkexpr(b0),
- binop(Iop_Add64,
- binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
- binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
+ binop(Iop_Add64,
+ binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
+ binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
/* saturate-narrow to 32bit, and combine to V128 */
putVReg( vD_addr, mkV128from4x64S( mkU64(0), mkU64(0),
break;
}
default:
- vex_printf("dis_av_arith(PPC32)(opc2=0x%x)\n", opc2);
+ vex_printf("dis_av_arith(ppc)(opc2=0x%x)\n", opc2);
return False;
}
return True;
assign( vB, getVReg(vB_addr));
if (opc1 != 0x4) {
- vex_printf("dis_av_logic(PPC32)(opc1 != 0x4)\n");
+ vex_printf("dis_av_logic(ppc)(opc1 != 0x4)\n");
return False;
}
break;
default:
- vex_printf("dis_av_logic(PPC32)(opc2=0x%x)\n", opc2);
+ vex_printf("dis_av_logic(ppc)(opc2=0x%x)\n", opc2);
return False;
}
return True;
assign( vB, getVReg(vB_addr));
if (opc1 != 0x4) {
- vex_printf("dis_av_cmp(PPC32)(instr)\n");
+ vex_printf("dis_av_cmp(ppc)(instr)\n");
return False;
}
switch (opc2) {
case 0x006: // vcmpequb (Compare Equal-to Unsigned B, AV p160)
- DIP("vcmpequb%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpequb%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpEQ8x16, mkexpr(vA), mkexpr(vB)) );
break;
case 0x046: // vcmpequh (Compare Equal-to Unsigned HW, AV p161)
- DIP("vcmpequh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpequh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpEQ16x8, mkexpr(vA), mkexpr(vB)) );
break;
case 0x086: // vcmpequw (Compare Equal-to Unsigned W, AV p162)
- DIP("vcmpequw%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpequw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpEQ32x4, mkexpr(vA), mkexpr(vB)) );
break;
case 0x206: // vcmpgtub (Compare Greater-than Unsigned B, AV p168)
- DIP("vcmpgtub%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpgtub%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpGT8Ux16, mkexpr(vA), mkexpr(vB)) );
break;
case 0x246: // vcmpgtuh (Compare Greater-than Unsigned HW, AV p169)
- DIP("vcmpgtuh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpgtuh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpGT16Ux8, mkexpr(vA), mkexpr(vB)) );
break;
case 0x286: // vcmpgtuw (Compare Greater-than Unsigned W, AV p170)
- DIP("vcmpgtuw%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpgtuw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpGT32Ux4, mkexpr(vA), mkexpr(vB)) );
break;
case 0x306: // vcmpgtsb (Compare Greater-than Signed B, AV p165)
- DIP("vcmpgtsb%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpgtsb%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpGT8Sx16, mkexpr(vA), mkexpr(vB)) );
break;
case 0x346: // vcmpgtsh (Compare Greater-than Signed HW, AV p166)
- DIP("vcmpgtsh%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpgtsh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpGT16Sx8, mkexpr(vA), mkexpr(vB)) );
break;
case 0x386: // vcmpgtsw (Compare Greater-than Signed W, AV p167)
- DIP("vcmpgtsw%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpgtsw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpGT32Sx4, mkexpr(vA), mkexpr(vB)) );
break;
default:
- vex_printf("dis_av_cmp(PPC32)(opc2)\n");
+ vex_printf("dis_av_cmp(ppc)(opc2)\n");
return False;
}
assign( zeros, unop(Iop_Dup32x4, mkU32(0)) );
if (opc1 != 0x4) {
- vex_printf("dis_av_multarith(PPC32)(instr)\n");
+ vex_printf("dis_av_multarith(ppc)(instr)\n");
return False;
}
switch (opc2) {
/* Multiply-Add */
- case 0x20: { // vmhaddshs (Multiply High, Add Signed HW Saturate, AV p185)
+ case 0x20: { // vmhaddshs (Mult Hi, Add Signed HW Saturate, AV p185)
IRTemp cSigns = newTemp(Ity_V128);
- DIP("vmhaddshs v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
- assign( cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)) );
- assign( aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)) );
- assign( bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)) );
- assign( cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns), mkexpr(vC)) );
- assign( aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)) );
- assign( bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)) );
- assign( cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns), mkexpr(vC)) );
+ DIP("vmhaddshs v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
+ assign(cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)));
+ assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
+ assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
+ assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns),mkexpr(vC)));
+ assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
+ assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
+ assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns),mkexpr(vC)));
assign( zLo, binop(Iop_Add32x4, mkexpr(cLo),
binop(Iop_SarN32x4,
mkexpr(aHi), mkexpr(bHi)),
mkU8(15))) );
- putVReg( vD_addr, binop(Iop_QNarrow32Sx4, mkexpr(zHi), mkexpr(zLo)) );
+ putVReg( vD_addr,
+ binop(Iop_QNarrow32Sx4, mkexpr(zHi), mkexpr(zLo)) );
break;
}
- case 0x21: { // vmhraddshs (Multiply High Round, Add Signed HW Saturate, AV p186)
+      case 0x21: { // vmhraddshs (Mult Hi Round, Add Signed HW Saturate, AV p186)
IRTemp zKonst = newTemp(Ity_V128);
IRTemp cSigns = newTemp(Ity_V128);
- DIP("vmhraddshs v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
- assign( cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)) );
- assign( aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)) );
- assign( bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)) );
- assign( cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns), mkexpr(vC)) );
- assign( aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)) );
- assign( bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)) );
- assign( cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns), mkexpr(vC)) );
+ DIP("vmhraddshs v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
+ assign(cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)) );
+ assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
+ assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
+ assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns),mkexpr(vC)));
+ assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
+ assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
+ assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns),mkexpr(vC)));
/* shifting our const avoids store/load version of Dup */
assign( zKonst, binop(Iop_ShlN32x4, unop(Iop_Dup32x4, mkU32(0x1)),
putVReg( vD_addr, binop(Iop_QNarrow32Sx4, mkexpr(zHi), mkexpr(zLo)) );
break;
}
- case 0x22: { // vmladduhm (Multiply Low, Add Unsigned HW Modulo, AV p194)
- DIP("vmladduhm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
- assign( aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)) );
- assign( bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)) );
- assign( cLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vC)) );
- assign( aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)) );
- assign( bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)) );
- assign( cHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vC)) );
- assign( zLo, binop(Iop_Add32x4,
- binop(Iop_MullEven16Ux8, mkexpr(aLo), mkexpr(bLo) ),
- mkexpr(cLo)) );
- assign( zHi, binop(Iop_Add32x4,
- binop(Iop_MullEven16Ux8, mkexpr(aHi), mkexpr(bHi) ),
- mkexpr(cHi)) );
- putVReg( vD_addr, binop(Iop_Narrow32x4, mkexpr(zHi), mkexpr(zLo)) );
+ case 0x22: { // vmladduhm (Mult Low, Add Unsigned HW Modulo, AV p194)
+ DIP("vmladduhm v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
+ assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
+ assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
+ assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vC)));
+ assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
+ assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
+ assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vC)));
+ assign(zLo, binop(Iop_Add32x4,
+ binop(Iop_MullEven16Ux8, mkexpr(aLo), mkexpr(bLo)),
+ mkexpr(cLo)) );
+ assign(zHi, binop(Iop_Add32x4,
+ binop(Iop_MullEven16Ux8, mkexpr(aHi), mkexpr(bHi)),
+ mkexpr(cHi)));
+ putVReg(vD_addr, binop(Iop_Narrow32x4, mkexpr(zHi), mkexpr(zLo)));
break;
}
case 0x24: { // vmsumubm (Multiply Sum Unsigned B Modulo, AV p204)
IRTemp abEE, abEO, abOE, abOO;
abEE = abEO = abOE = abOO = IRTemp_INVALID;
- DIP("vmsumubm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
+ DIP("vmsumubm v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
/* multiply vA,vB (unsigned, widening) */
assign( abEvn, MK_Iop_MullOdd8Ux16( mkexpr(vA), mkexpr(vB) ));
expand16Ux8( mkexpr(abOdd), &abOE, &abOO );
putVReg( vD_addr,
- binop(Iop_Add32x4, mkexpr(vC),
- binop(Iop_Add32x4,
- binop(Iop_Add32x4, mkexpr(abEE), mkexpr(abEO)),
- binop(Iop_Add32x4, mkexpr(abOE), mkexpr(abOO)))) );
+ binop(Iop_Add32x4, mkexpr(vC),
+ binop(Iop_Add32x4,
+ binop(Iop_Add32x4, mkexpr(abEE), mkexpr(abEO)),
+ binop(Iop_Add32x4, mkexpr(abOE), mkexpr(abOO)))) );
break;
}
case 0x25: { // vmsummbm (Multiply Sum Mixed-Sign B Modulo, AV p201)
IRTemp abOE = newTemp(Ity_V128);
IRTemp abOO = newTemp(Ity_V128);
aEvn = aOdd = bEvn = bOdd = IRTemp_INVALID;
- DIP("vmsummbm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
+ DIP("vmsummbm v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
/* sign-extend vA, zero-extend vB, for mixed-sign multiply
(separating out adjacent lanes to different vectors) */
/* add results together, + vC */
putVReg( vD_addr,
- binop(Iop_QAdd32Sx4, mkexpr(vC),
- binop(Iop_QAdd32Sx4,
- binop(Iop_QAdd32Sx4, mkexpr(abEE), mkexpr(abEO)),
- binop(Iop_QAdd32Sx4, mkexpr(abOE), mkexpr(abOO)))) );
+ binop(Iop_QAdd32Sx4, mkexpr(vC),
+ binop(Iop_QAdd32Sx4,
+ binop(Iop_QAdd32Sx4, mkexpr(abEE), mkexpr(abEO)),
+ binop(Iop_QAdd32Sx4, mkexpr(abOE), mkexpr(abOO)))) );
break;
}
case 0x26: { // vmsumuhm (Multiply Sum Unsigned HW Modulo, AV p205)
- DIP("vmsumuhm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
+ DIP("vmsumuhm v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
assign( abEvn, MK_Iop_MullOdd16Ux8( mkexpr(vA), mkexpr(vB) ));
assign( abOdd, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)) );
putVReg( vD_addr,
- binop(Iop_Add32x4, mkexpr(vC),
- binop(Iop_Add32x4, mkexpr(abEvn), mkexpr(abOdd))) );
+ binop(Iop_Add32x4, mkexpr(vC),
+ binop(Iop_Add32x4, mkexpr(abEvn), mkexpr(abOdd))) );
break;
}
case 0x27: { // vmsumuhs (Multiply Sum Unsigned HW Saturate, AV p206)
- DIP("vmsumuhs v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
+ DIP("vmsumuhs v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
/* widening multiply, separating lanes */
assign( abEvn, MK_Iop_MullOdd16Ux8(mkexpr(vA), mkexpr(vB) ));
assign( abOdd, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)) );
break;
}
case 0x28: { // vmsumshm (Multiply Sum Signed HW Modulo, AV p202)
- DIP("vmsumshm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
+ DIP("vmsumshm v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
assign( abEvn, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) ));
assign( abOdd, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)) );
putVReg( vD_addr,
- binop(Iop_Add32x4, mkexpr(vC),
- binop(Iop_Add32x4, mkexpr(abOdd), mkexpr(abEvn))) );
+ binop(Iop_Add32x4, mkexpr(vC),
+ binop(Iop_Add32x4, mkexpr(abOdd), mkexpr(abEvn))) );
break;
}
case 0x29: { // vmsumshs (Multiply Sum Signed HW Saturate, AV p203)
- DIP("vmsumshs v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
+ DIP("vmsumshs v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
/* widening multiply, separating lanes */
assign( abEvn, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) ));
assign( abOdd, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)) );
break;
}
default:
- vex_printf("dis_av_multarith(PPC32)(opc2)\n");
+ vex_printf("dis_av_multarith(ppc)(opc2)\n");
return False;
}
return True;
assign( vB, getVReg(vB_addr));
if (opc1 != 0x4){
- vex_printf("dis_av_shift(PPC32)(instr)\n");
+ vex_printf("dis_av_shift(ppc)(instr)\n");
return False;
}
binop(Iop_ShrV128, mkexpr(vA), mkexpr(sh)) );
break;
}
- case 0x304: // vsrab (Shift Right Algebraic B, AV p253)
+ case 0x304: // vsrab (Shift Right Alg B, AV p253)
DIP("vsrab v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
putVReg( vD_addr, binop(Iop_Sar8x16, mkexpr(vA), mkexpr(vB)) );
break;
- case 0x344: // vsrah (Shift Right Algebraic HW, AV p254)
+ case 0x344: // vsrah (Shift Right Alg HW, AV p254)
DIP("vsrah v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
putVReg( vD_addr, binop(Iop_Sar16x8, mkexpr(vA), mkexpr(vB)) );
break;
- case 0x384: // vsraw (Shift Right Algebraic W, AV p255)
+ case 0x384: // vsraw (Shift Right Alg W, AV p255)
DIP("vsraw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
putVReg( vD_addr, binop(Iop_Sar32x4, mkexpr(vA), mkexpr(vB)) );
break;
}
default:
- vex_printf("dis_av_shift(PPC32)(opc2)\n");
+ vex_printf("dis_av_shift(ppc)(opc2)\n");
return False;
}
return True;
assign( vC, getVReg(vC_addr));
if (opc1 != 0x4) {
- vex_printf("dis_av_permute(PPC32)(instr)\n");
+ vex_printf("dis_av_permute(ppc)(instr)\n");
return False;
}
IRTemp b_perm = newTemp(Ity_V128);
IRTemp mask = newTemp(Ity_V128);
IRTemp vC_andF = newTemp(Ity_V128);
- DIP("vperm v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
+ DIP("vperm v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vB_addr, vC_addr);
/* Limit the Perm8x16 steering values to 0 .. 15 as that is what
IR specifies, and also to hide irrelevant bits from
memcheck */
- assign( vC_andF, binop(Iop_AndV128, mkexpr(vC),
- unop(Iop_Dup8x16, mkU8(0xF))) );
- assign( a_perm, binop(Iop_Perm8x16, mkexpr(vA), mkexpr(vC_andF)) );
- assign( b_perm, binop(Iop_Perm8x16, mkexpr(vB), mkexpr(vC_andF)) );
+ assign( vC_andF,
+ binop(Iop_AndV128, mkexpr(vC),
+ unop(Iop_Dup8x16, mkU8(0xF))) );
+ assign( a_perm,
+ binop(Iop_Perm8x16, mkexpr(vA), mkexpr(vC_andF)) );
+ assign( b_perm,
+ binop(Iop_Perm8x16, mkexpr(vB), mkexpr(vC_andF)) );
// mask[i8] = (vC[i8]_4 == 1) ? 0xFF : 0x0
assign( mask, binop(Iop_SarN8x16,
binop(Iop_ShlN8x16, mkexpr(vC), mkU8(3)),
}
case 0x2C: // vsldoi (Shift Left Double by Octet Imm, AV p241)
if (b10 != 0) {
- vex_printf("dis_av_permute(PPC32)(vsldoi)\n");
+ vex_printf("dis_av_permute(ppc)(vsldoi)\n");
return False;
}
- DIP("vsldoi v%d,v%d,v%d,%d\n", vD_addr, vA_addr, vB_addr, SHB_uimm4);
+ DIP("vsldoi v%d,v%d,v%d,%d\n",
+ vD_addr, vA_addr, vB_addr, SHB_uimm4);
if (SHB_uimm4 == 0)
putVReg( vD_addr, mkexpr(vA) );
else
putVReg( vD_addr,
- binop(Iop_OrV128,
- binop(Iop_ShlV128, mkexpr(vA), mkU8(SHB_uimm4*8)),
- binop(Iop_ShrV128, mkexpr(vB), mkU8((16-SHB_uimm4)*8))) );
+ binop(Iop_OrV128,
+ binop(Iop_ShlV128, mkexpr(vA), mkU8(SHB_uimm4*8)),
+ binop(Iop_ShrV128, mkexpr(vB), mkU8((16-SHB_uimm4)*8))) );
return True;
default:
case 0x34C: // vspltish (Splat Immediate Signed HW, AV p248)
DIP("vspltish v%d,%d\n", vD_addr, (Char)SIMM_8);
- putVReg( vD_addr, unop(Iop_Dup16x8, mkU16(extend_s_8to32(SIMM_8))) );
+ putVReg( vD_addr,
+ unop(Iop_Dup16x8, mkU16(extend_s_8to32(SIMM_8))) );
break;
case 0x38C: // vspltisw (Splat Immediate Signed W, AV p249)
DIP("vspltisw v%d,%d\n", vD_addr, (Char)SIMM_8);
- putVReg( vD_addr, unop(Iop_Dup32x4, mkU32(extend_s_8to32(SIMM_8))) );
+ putVReg( vD_addr,
+ unop(Iop_Dup32x4, mkU32(extend_s_8to32(SIMM_8))) );
break;
default:
- vex_printf("dis_av_permute(PPC32)(opc2)\n");
+ vex_printf("dis_av_permute(ppc)(opc2)\n");
return False;
}
return True;
assign( vB, getVReg(vB_addr));
if (opc1 != 0x4) {
- vex_printf("dis_av_pack(PPC32)(instr)\n");
+ vex_printf("dis_av_pack(ppc)(instr)\n");
return False;
}
case 0x08E: // vpkuhus (Pack Unsigned HW Unsigned Saturate, AV p225)
DIP("vpkuhus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_QNarrow16Ux8, mkexpr(vA), mkexpr(vB)) );
+ putVReg( vD_addr,
+ binop(Iop_QNarrow16Ux8, mkexpr(vA), mkexpr(vB)) );
// TODO: set VSCR[SAT]
return True;
case 0x0CE: // vpkuwus (Pack Unsigned W Unsigned Saturate, AV p227)
DIP("vpkuwus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_QNarrow32Ux4, mkexpr(vA), mkexpr(vB)) );
+ putVReg( vD_addr,
+ binop(Iop_QNarrow32Ux4, mkexpr(vA), mkexpr(vB)) );
// TODO: set VSCR[SAT]
return True;
}
case 0x18E: // vpkshss (Pack Signed HW Signed Saturate, AV p220)
DIP("vpkshss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_QNarrow16Sx8, mkexpr(vA), mkexpr(vB)) );
+ putVReg( vD_addr,
+ binop(Iop_QNarrow16Sx8, mkexpr(vA), mkexpr(vB)) );
// TODO: set VSCR[SAT]
return True;
case 0x1CE: // vpkswss (Pack Signed W Signed Saturate, AV p222)
DIP("vpkswss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_QNarrow32Sx4, mkexpr(vA), mkexpr(vB)) );
+ putVReg( vD_addr,
+ binop(Iop_QNarrow32Sx4, mkexpr(vA), mkexpr(vB)) );
// TODO: set VSCR[SAT]
return True;
case 0x30E: { // vpkpx (Pack Pixel, AV p219)
/* CAB: Worth a new primop? */
- /* Using shifts to compact pixel elements, then packing them them */
+ /* Using shifts to compact pixel elements, then packing them */
IRTemp a1 = newTemp(Ity_V128);
IRTemp a2 = newTemp(Ity_V128);
IRTemp a3 = newTemp(Ity_V128);
if (vA_addr != 0) {
- vex_printf("dis_av_pack(PPC32)(vA_addr)\n");
+ vex_printf("dis_av_pack(ppc)(vA_addr)\n");
return False;
}
case 0x20E: { // vupkhsb (Unpack High Signed B, AV p277)
DIP("vupkhsb v%d,v%d\n", vD_addr, vB_addr);
assign( signs, binop(Iop_CmpGT8Sx16, mkexpr(zeros), mkexpr(vB)) );
- putVReg( vD_addr, binop(Iop_InterleaveHI8x16, mkexpr(signs), mkexpr(vB)) );
+ putVReg( vD_addr,
+ binop(Iop_InterleaveHI8x16, mkexpr(signs), mkexpr(vB)) );
break;
}
case 0x24E: { // vupkhsh (Unpack High Signed HW, AV p278)
DIP("vupkhsh v%d,v%d\n", vD_addr, vB_addr);
assign( signs, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vB)) );
- putVReg( vD_addr, binop(Iop_InterleaveHI16x8, mkexpr(signs), mkexpr(vB)) );
+ putVReg( vD_addr,
+ binop(Iop_InterleaveHI16x8, mkexpr(signs), mkexpr(vB)) );
break;
}
case 0x28E: { // vupklsb (Unpack Low Signed B, AV p280)
DIP("vupklsb v%d,v%d\n", vD_addr, vB_addr);
assign( signs, binop(Iop_CmpGT8Sx16, mkexpr(zeros), mkexpr(vB)) );
- putVReg( vD_addr, binop(Iop_InterleaveLO8x16, mkexpr(signs), mkexpr(vB)) );
+ putVReg( vD_addr,
+ binop(Iop_InterleaveLO8x16, mkexpr(signs), mkexpr(vB)) );
break;
}
case 0x2CE: { // vupklsh (Unpack Low Signed HW, AV p281)
DIP("vupklsh v%d,v%d\n", vD_addr, vB_addr);
assign( signs, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vB)) );
- putVReg( vD_addr, binop(Iop_InterleaveLO16x8, mkexpr(signs), mkexpr(vB)) );
+ putVReg( vD_addr,
+ binop(Iop_InterleaveLO16x8, mkexpr(signs), mkexpr(vB)) );
break;
}
case 0x34E: { // vupkhpx (Unpack High Pixel16, AV p276)
mkU8(11)) );
assign( z23, binop(Iop_InterleaveHI16x8, mkexpr(zeros),
binop(Iop_OrV128, mkexpr(z2), mkexpr(z3))) );
- putVReg( vD_addr, binop(Iop_OrV128,
- binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)),
- mkexpr(z23)) );
+ putVReg( vD_addr,
+ binop(Iop_OrV128,
+ binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)),
+ mkexpr(z23)) );
break;
}
case 0x3CE: { // vupklpx (Unpack Low Pixel16, AV p279)
mkU8(11)) );
assign( z23, binop(Iop_InterleaveLO16x8, mkexpr(zeros),
binop(Iop_OrV128, mkexpr(z2), mkexpr(z3))) );
- putVReg( vD_addr, binop(Iop_OrV128,
- binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)),
- mkexpr(z23)) );
+ putVReg( vD_addr,
+ binop(Iop_OrV128,
+ binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)),
+ mkexpr(z23)) );
break;
}
default:
- vex_printf("dis_av_pack(PPC32)(opc2)\n");
+ vex_printf("dis_av_pack(ppc)(opc2)\n");
return False;
}
return True;
assign( vC, getVReg(vC_addr));
if (opc1 != 0x4) {
- vex_printf("dis_av_fp_arith(PPC32)(instr)\n");
+ vex_printf("dis_av_fp_arith(ppc)(instr)\n");
return False;
}
opc2 = IFIELD( theInstr, 0, 6 );
switch (opc2) {
case 0x2E: // vmaddfp (Multiply Add FP, AV p177)
- DIP("vmaddfp v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vC_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_Add32Fx4, mkexpr(vB),
- binop(Iop_Mul32Fx4, mkexpr(vA), mkexpr(vC))) );
+ DIP("vmaddfp v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vC_addr, vB_addr);
+ putVReg( vD_addr,
+ binop(Iop_Add32Fx4, mkexpr(vB),
+ binop(Iop_Mul32Fx4, mkexpr(vA), mkexpr(vC))) );
return True;
case 0x2F: { // vnmsubfp (Negative Multiply-Subtract FP, AV p215)
- DIP("vnmsubfp v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vC_addr, vB_addr);
- putVReg( vD_addr, binop(Iop_Sub32Fx4,
- mkexpr(vB),
- binop(Iop_Mul32Fx4, mkexpr(vA), mkexpr(vC))) );
+ DIP("vnmsubfp v%d,v%d,v%d,v%d\n",
+ vD_addr, vA_addr, vC_addr, vB_addr);
+ putVReg( vD_addr,
+ binop(Iop_Sub32Fx4,
+ mkexpr(vB),
+ binop(Iop_Mul32Fx4, mkexpr(vA), mkexpr(vC))) );
return True;
}
if (vA_addr != 0) {
- vex_printf("dis_av_fp_arith(PPC32)(vA_addr)\n");
+ vex_printf("dis_av_fp_arith(ppc)(vA_addr)\n");
return False;
}
putVReg( vD_addr, unop(Iop_Recip32Fx4, mkexpr(vB)) );
return True;
- case 0x14A: // vrsqrtefp (Reciprocal Square Root Estimate FP, AV p237)
+ case 0x14A: // vrsqrtefp (Reciprocal Sqrt Estimate FP, AV p237)
DIP("vrsqrtefp v%d,v%d\n", vD_addr, vB_addr);
putVReg( vD_addr, unop(Iop_RSqrt32Fx4, mkexpr(vB)) );
return True;
return False;
default:
- vex_printf("dis_av_fp_arith(PPC32)(opc2=0x%x)\n",opc2);
+ vex_printf("dis_av_fp_arith(ppc)(opc2=0x%x)\n",opc2);
return False;
}
return True;
assign( vB, getVReg(vB_addr));
if (opc1 != 0x4) {
- vex_printf("dis_av_fp_cmp(PPC32)(instr)\n");
+ vex_printf("dis_av_fp_cmp(ppc)(instr)\n");
return False;
}
switch (opc2) {
case 0x0C6: // vcmpeqfp (Compare Equal-to FP, AV p159)
- DIP("vcmpeqfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpeqfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpEQ32Fx4, mkexpr(vA), mkexpr(vB)) );
break;
- case 0x1C6: // vcmpgefp (Compare Greater-than-or-Equal-to FP, AV p163)
- DIP("vcmpgefp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ case 0x1C6: // vcmpgefp (Compare Greater-than-or-Equal-to, AV p163)
+ DIP("vcmpgefp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpGE32Fx4, mkexpr(vA), mkexpr(vB)) );
break;
case 0x2C6: // vcmpgtfp (Compare Greater-than FP, AV p164)
- DIP("vcmpgtfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpgtfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
assign( vD, binop(Iop_CmpGT32Fx4, mkexpr(vA), mkexpr(vB)) );
break;
IRTemp gt = newTemp(Ity_V128);
IRTemp lt = newTemp(Ity_V128);
IRTemp zeros = newTemp(Ity_V128);
- DIP("vcmpbfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""), vD_addr, vA_addr, vB_addr);
+ DIP("vcmpbfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+ vD_addr, vA_addr, vB_addr);
cmp_bounds = True;
assign( zeros, unop(Iop_Dup32x4, mkU32(0)) );
/* Note: making use of fact that the ppc backend for compare insns
- return zero'd lanes if either of the corresponding arg lanes is a nan.
+ return zero'd lanes if either of the corresponding arg lanes is
+ a nan.
Perhaps better to have an irop Iop_isNan32Fx4, but then we'd
need this for the other compares too (vcmpeqfp etc)...
binop(Iop_CmpLE32Fx4, mkexpr(vA), mkexpr(vB))) );
assign( lt, unop(Iop_NotV128,
binop(Iop_CmpGE32Fx4, mkexpr(vA),
- binop(Iop_Sub32Fx4, mkexpr(zeros), mkexpr(vB)))) );
+ binop(Iop_Sub32Fx4, mkexpr(zeros),
+ mkexpr(vB)))) );
// finally, just shift gt,lt to correct position
assign( vD, binop(Iop_ShlN32x4,
}
default:
- vex_printf("dis_av_fp_cmp(PPC32)(opc2)\n");
+ vex_printf("dis_av_fp_cmp(ppc)(opc2)\n");
return False;
}
scale = (float)( (unsigned int) 1<<UIMM_5 );
assign( vScale, unop(Iop_Dup32x4, mkU32( float_to_bits(scale) )) );
inv_scale = 1/scale;
- assign( vInvScale, unop(Iop_Dup32x4, mkU32( float_to_bits(inv_scale) )) );
+ assign( vInvScale,
+ unop(Iop_Dup32x4, mkU32( float_to_bits(inv_scale) )) );
if (opc1 != 0x4) {
- vex_printf("dis_av_fp_convert(PPC32)(instr)\n");
+ vex_printf("dis_av_fp_convert(ppc)(instr)\n");
return False;
}
}
if (UIMM_5 != 0) {
- vex_printf("dis_av_fp_convert(PPC32)(UIMM_5)\n");
+ vex_printf("dis_av_fp_convert(ppc)(UIMM_5)\n");
return False;
}
break;
default:
- vex_printf("dis_av_fp_convert(PPC32)(opc2)\n");
+ vex_printf("dis_av_fp_convert(ppc)(opc2)\n");
return False;
}
return True;
is located in host memory at &guest_code[delta]. */
static
-DisResult disInstr_PPC32_WRK (
+DisResult disInstr_PPC_WRK (
Bool put_IP,
Bool (*resteerOkFn) ( Addr64 ),
Long delta64,
and have done. */
theInstr = getUIntBigendianly( (UChar*)(&guest_code[delta]) );
-#if 0
- vex_printf("disInstr(ppc32): instr: ");
- vex_printf_binary( theInstr, 32, True );
- vex_printf("\n");
-#endif
-
// vex_printf("insn: 0x%x\n", theInstr);
if (mode64) {
/* Floating Point Load Instructions */
case 0x30: case 0x31: case 0x32: // lfs, lfsu, lfd
case 0x33: // lfdu
- if (!allow_FP) goto decode_failure;
+ if (!allow_FP) goto decode_noFP;
if (dis_fp_load( theInstr )) goto decode_success;
goto decode_failure;
/* Floating Point Store Instructions */
case 0x34: case 0x35: case 0x36: // stfsx, stfsux, stfdx
case 0x37: // stfdux
- if (!allow_FP) goto decode_failure;
+ if (!allow_FP) goto decode_noFP;
if (dis_fp_store( theInstr )) goto decode_success;
goto decode_failure;
goto decode_failure;
case 0x3B:
- if (!allow_FP) goto decode_failure;
+ if (!allow_FP) goto decode_noFP;
opc2 = IFIELD(theInstr, 1, 5);
switch (opc2) {
goto decode_failure;
case 0x3F:
- if (!allow_FP) goto decode_failure;
- /* Instrs using opc[1:5] never overlap with instrs using opc[1:10],
+ if (!allow_FP) goto decode_noFP;
+ /* Instrs using opc[1:5] never overlap instrs using opc[1:10],
so we can simply fall through the first switch statement */
opc2 = IFIELD(theInstr, 1, 5);
/* Floating Point Load Instructions */
case 0x217: case 0x237: case 0x257: // lfsx, lfsux, lfdx
case 0x277: // lfdux
- if (!allow_FP) goto decode_failure;
+ if (!allow_FP) goto decode_noFP;
if (dis_fp_load( theInstr )) goto decode_success;
goto decode_failure;
/* Floating Point Store Instructions */
case 0x297: case 0x2B7: case 0x2D7: // stfs, stfsu, stfd
case 0x2F7: case 0x3D7: // stfdu, stfiwx
- if (!allow_FP) goto decode_failure;
+ if (!allow_FP) goto decode_noFP;
if (dis_fp_store( theInstr )) goto decode_success;
goto decode_failure;
/* AV Cache Control - Data streams */
case 0x156: case 0x176: case 0x336: // dst, dstst, dss
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_datastream( theInstr )) goto decode_success;
goto decode_failure;
case 0x006: case 0x026: // lvsl, lvsr
case 0x007: case 0x027: case 0x047: // lvebx, lvehx, lvewx
case 0x067: case 0x167: // lvx, lvxl
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_load( theInstr )) goto decode_success;
goto decode_failure;
/* AV Store */
case 0x087: case 0x0A7: case 0x0C7: // stvebx, stvehx, stvewx
case 0x0E7: case 0x1E7: // stvx, stvxl
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_store( theInstr )) goto decode_success;
goto decode_failure;
case 0x20: case 0x21: case 0x22: // vmhaddshs, vmhraddshs, vmladduhm
case 0x24: case 0x25: case 0x26: // vmsumubm, vmsummbm, vmsumuhm
case 0x27: case 0x28: case 0x29: // vmsumuhs, vmsumshm, vmsumshs
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_multarith( theInstr )) goto decode_success;
goto decode_failure;
case 0x2A: // vsel
case 0x2B: // vperm
case 0x2C: // vsldoi
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_permute( theInstr )) goto decode_success;
goto decode_failure;
/* AV Floating Point Mult-Add/Sub */
case 0x2E: case 0x2F: // vmaddfp, vnmsubfp
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_fp_arith( theInstr )) goto decode_success;
goto decode_failure;
case 0x308: case 0x348: // vmulesb, vmulesh
case 0x608: case 0x708: case 0x648: // vsum4ubs, vsum4sbs, vsum4shs
case 0x688: case 0x788: // vsum2sws, vsumsws
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_arith( theInstr )) goto decode_success;
goto decode_failure;
case 0x304: case 0x344: case 0x384: // vsrab, vsrah, vsraw
case 0x1C4: case 0x2C4: // vsl, vsr
case 0x40C: case 0x44C: // vslo, vsro
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_shift( theInstr )) goto decode_success;
goto decode_failure;
/* AV Logic */
case 0x404: case 0x444: case 0x484: // vand, vandc, vor
case 0x4C4: case 0x504: // vxor, vnor
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_logic( theInstr )) goto decode_success;
goto decode_failure;
/* AV Processor Control */
case 0x604: case 0x644: // mfvscr, mtvscr
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_procctl( theInstr )) goto decode_success;
goto decode_failure;
case 0x10A: case 0x14A: case 0x18A: // vrefp, vrsqrtefp, vexptefp
case 0x1CA: // vlogefp
case 0x40A: case 0x44A: // vmaxfp, vminfp
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_fp_arith( theInstr )) goto decode_success;
goto decode_failure;
case 0x2CA: // vrfim
case 0x30A: case 0x34A: case 0x38A: // vcfux, vcfsx, vctuxs
case 0x3CA: // vctsxs
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_fp_convert( theInstr )) goto decode_success;
goto decode_failure;
case 0x10C: case 0x14C: case 0x18C: // vmrglb, vmrglh, vmrglw
case 0x20C: case 0x24C: case 0x28C: // vspltb, vsplth, vspltw
case 0x30C: case 0x34C: case 0x38C: // vspltisb, vspltish, vspltisw
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_permute( theInstr )) goto decode_success;
goto decode_failure;
case 0x20E: case 0x24E: case 0x28E: // vupkhsb, vupkhsh, vupklsb
case 0x2CE: // vupklsh
case 0x30E: case 0x34E: case 0x3CE: // vpkpx, vupkhpx, vupklpx
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_pack( theInstr )) goto decode_success;
goto decode_failure;
case 0x006: case 0x046: case 0x086: // vcmpequb, vcmpequh, vcmpequw
case 0x206: case 0x246: case 0x286: // vcmpgtub, vcmpgtuh, vcmpgtuw
case 0x306: case 0x346: case 0x386: // vcmpgtsb, vcmpgtsh, vcmpgtsw
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_cmp( theInstr )) goto decode_success;
goto decode_failure;
/* AV Floating Point Compare */
case 0x0C6: case 0x1C6: case 0x2C6: // vcmpeqfp, vcmpgefp, vcmpgtfp
case 0x3C6: // vcmpbfp
- if (!allow_VMX) goto decode_failure;
+ if (!allow_VMX) goto decode_noVMX;
if (dis_av_fp_cmp( theInstr )) goto decode_success;
goto decode_failure;
break;
default:
+ decode_noFP:
+ vassert(!allow_FP);
+ vex_printf("disInstr(ppc): Floating Point insns disabled for this arch.\n");
+ goto decode_failure;
+
+ decode_noVMX:
+ vassert(!allow_VMX);
+ vex_printf("disInstr(ppc): AltiVec insns disabled for this arch.\n");
+ goto decode_failure;
+
decode_failure:
/* All decode failures end up here. */
opc2 = (theInstr) & 0x7FF;
- vex_printf("disInstr(ppc32): unhandled instruction: "
+ vex_printf("disInstr(ppc): unhandled instruction: "
"0x%x\n", theInstr);
vex_printf(" primary %d(0x%x), secondary %u(0x%x)\n",
opc1, opc1, opc2, opc2);
-
-#if PPC_TOIR_DEBUG
- vex_printf("disInstr(ppc32): instr: ");
- vex_printf_binary( theInstr, 32, True );
- vex_printf("\n");
-
- vex_printf("disInstr(ppc32): opcode1: ");
- vex_printf_binary( opc1, 6, False );
- vex_printf("\n");
-
- vex_printf("disInstr(ppc32): opcode2: ");
- vex_printf_binary( opc2, 10, False );
- vex_printf("\n\n");
-#endif
-
/* Tell the dispatcher that this insn cannot be decoded, and so has
not been executed, and (is currently) the next to be executed.
/* Disassemble a single instruction into IR. The instruction
is located in host memory at &guest_code[delta]. */
-DisResult disInstr_PPC32 ( IRBB* irbb_IN,
- Bool put_IP,
- Bool (*resteerOkFn) ( Addr64 ),
- UChar* guest_code_IN,
- Long delta,
- Addr64 guest_IP,
- VexArchInfo* archinfo,
- Bool host_bigendian_IN )
+DisResult disInstr_PPC ( IRBB* irbb_IN,
+ Bool put_IP,
+ Bool (*resteerOkFn) ( Addr64 ),
+ UChar* guest_code_IN,
+ Long delta,
+ Addr64 guest_IP,
+ VexArchInfo* archinfo,
+ Bool host_bigendian_IN )
{
IRType ty;
DisResult dres;
mode64 = True;
break;
default:
- vpanic("disInstr_PPC32: illegal subarch");
+ vpanic("disInstr_PPC(): illegal subarch");
}
ty = mode64 ? Ity_I64 : Ity_I32;
guest_CIA_curr_instr = mkSzAddr(ty, guest_IP);
guest_CIA_bbstart = mkSzAddr(ty, guest_IP - delta);
- dres = disInstr_PPC32_WRK ( put_IP, resteerOkFn,
- delta, archinfo );
+ dres = disInstr_PPC_WRK ( put_IP, resteerOkFn,
+ delta, archinfo );
return dres;
}
/* --------- Registers. --------- */
-void ppHRegPPC32 ( HReg reg )
+void ppHRegPPC ( HReg reg )
{
Int r;
static HChar* ireg32_names[32]
- = { "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
- "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
- "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
- "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31" };
+ = { "%r0", "%r1", "%r2", "%r3",
+ "%r4", "%r5", "%r6", "%r7",
+ "%r8", "%r9", "%r10", "%r11",
+ "%r12", "%r13", "%r14", "%r15",
+ "%r16", "%r17", "%r18", "%r19",
+ "%r20", "%r21", "%r22", "%r23",
+ "%r24", "%r25", "%r26", "%r27",
+ "%r28", "%r29", "%r30", "%r31" };
/* Be generic for all virtual regs. */
if (hregIsVirtual(reg)) {
ppHReg(reg);
vex_printf("%%v%d", r);
return;
default:
- vpanic("ppHRegPPC32");
+ vpanic("ppHRegPPC");
}
}
#undef MK_INT_HREG
-HReg hregPPC32_FPR0 ( void ) { return mkHReg( 0, HRcFlt64, False); }
-HReg hregPPC32_FPR1 ( void ) { return mkHReg( 1, HRcFlt64, False); }
-HReg hregPPC32_FPR2 ( void ) { return mkHReg( 2, HRcFlt64, False); }
-HReg hregPPC32_FPR3 ( void ) { return mkHReg( 3, HRcFlt64, False); }
-HReg hregPPC32_FPR4 ( void ) { return mkHReg( 4, HRcFlt64, False); }
-HReg hregPPC32_FPR5 ( void ) { return mkHReg( 5, HRcFlt64, False); }
-HReg hregPPC32_FPR6 ( void ) { return mkHReg( 6, HRcFlt64, False); }
-HReg hregPPC32_FPR7 ( void ) { return mkHReg( 7, HRcFlt64, False); }
-HReg hregPPC32_FPR8 ( void ) { return mkHReg( 8, HRcFlt64, False); }
-HReg hregPPC32_FPR9 ( void ) { return mkHReg( 9, HRcFlt64, False); }
-HReg hregPPC32_FPR10 ( void ) { return mkHReg(10, HRcFlt64, False); }
-HReg hregPPC32_FPR11 ( void ) { return mkHReg(11, HRcFlt64, False); }
-HReg hregPPC32_FPR12 ( void ) { return mkHReg(12, HRcFlt64, False); }
-HReg hregPPC32_FPR13 ( void ) { return mkHReg(13, HRcFlt64, False); }
-HReg hregPPC32_FPR14 ( void ) { return mkHReg(14, HRcFlt64, False); }
-HReg hregPPC32_FPR15 ( void ) { return mkHReg(15, HRcFlt64, False); }
-HReg hregPPC32_FPR16 ( void ) { return mkHReg(16, HRcFlt64, False); }
-HReg hregPPC32_FPR17 ( void ) { return mkHReg(17, HRcFlt64, False); }
-HReg hregPPC32_FPR18 ( void ) { return mkHReg(18, HRcFlt64, False); }
-HReg hregPPC32_FPR19 ( void ) { return mkHReg(19, HRcFlt64, False); }
-HReg hregPPC32_FPR20 ( void ) { return mkHReg(20, HRcFlt64, False); }
-HReg hregPPC32_FPR21 ( void ) { return mkHReg(21, HRcFlt64, False); }
-HReg hregPPC32_FPR22 ( void ) { return mkHReg(22, HRcFlt64, False); }
-HReg hregPPC32_FPR23 ( void ) { return mkHReg(23, HRcFlt64, False); }
-HReg hregPPC32_FPR24 ( void ) { return mkHReg(24, HRcFlt64, False); }
-HReg hregPPC32_FPR25 ( void ) { return mkHReg(25, HRcFlt64, False); }
-HReg hregPPC32_FPR26 ( void ) { return mkHReg(26, HRcFlt64, False); }
-HReg hregPPC32_FPR27 ( void ) { return mkHReg(27, HRcFlt64, False); }
-HReg hregPPC32_FPR28 ( void ) { return mkHReg(28, HRcFlt64, False); }
-HReg hregPPC32_FPR29 ( void ) { return mkHReg(29, HRcFlt64, False); }
-HReg hregPPC32_FPR30 ( void ) { return mkHReg(30, HRcFlt64, False); }
-HReg hregPPC32_FPR31 ( void ) { return mkHReg(31, HRcFlt64, False); }
-
-HReg hregPPC32_VR0 ( void ) { return mkHReg( 0, HRcVec128, False); }
-HReg hregPPC32_VR1 ( void ) { return mkHReg( 1, HRcVec128, False); }
-HReg hregPPC32_VR2 ( void ) { return mkHReg( 2, HRcVec128, False); }
-HReg hregPPC32_VR3 ( void ) { return mkHReg( 3, HRcVec128, False); }
-HReg hregPPC32_VR4 ( void ) { return mkHReg( 4, HRcVec128, False); }
-HReg hregPPC32_VR5 ( void ) { return mkHReg( 5, HRcVec128, False); }
-HReg hregPPC32_VR6 ( void ) { return mkHReg( 6, HRcVec128, False); }
-HReg hregPPC32_VR7 ( void ) { return mkHReg( 7, HRcVec128, False); }
-HReg hregPPC32_VR8 ( void ) { return mkHReg( 8, HRcVec128, False); }
-HReg hregPPC32_VR9 ( void ) { return mkHReg( 9, HRcVec128, False); }
-HReg hregPPC32_VR10 ( void ) { return mkHReg(10, HRcVec128, False); }
-HReg hregPPC32_VR11 ( void ) { return mkHReg(11, HRcVec128, False); }
-HReg hregPPC32_VR12 ( void ) { return mkHReg(12, HRcVec128, False); }
-HReg hregPPC32_VR13 ( void ) { return mkHReg(13, HRcVec128, False); }
-HReg hregPPC32_VR14 ( void ) { return mkHReg(14, HRcVec128, False); }
-HReg hregPPC32_VR15 ( void ) { return mkHReg(15, HRcVec128, False); }
-HReg hregPPC32_VR16 ( void ) { return mkHReg(16, HRcVec128, False); }
-HReg hregPPC32_VR17 ( void ) { return mkHReg(17, HRcVec128, False); }
-HReg hregPPC32_VR18 ( void ) { return mkHReg(18, HRcVec128, False); }
-HReg hregPPC32_VR19 ( void ) { return mkHReg(19, HRcVec128, False); }
-HReg hregPPC32_VR20 ( void ) { return mkHReg(20, HRcVec128, False); }
-HReg hregPPC32_VR21 ( void ) { return mkHReg(21, HRcVec128, False); }
-HReg hregPPC32_VR22 ( void ) { return mkHReg(22, HRcVec128, False); }
-HReg hregPPC32_VR23 ( void ) { return mkHReg(23, HRcVec128, False); }
-HReg hregPPC32_VR24 ( void ) { return mkHReg(24, HRcVec128, False); }
-HReg hregPPC32_VR25 ( void ) { return mkHReg(25, HRcVec128, False); }
-HReg hregPPC32_VR26 ( void ) { return mkHReg(26, HRcVec128, False); }
-HReg hregPPC32_VR27 ( void ) { return mkHReg(27, HRcVec128, False); }
-HReg hregPPC32_VR28 ( void ) { return mkHReg(28, HRcVec128, False); }
-HReg hregPPC32_VR29 ( void ) { return mkHReg(29, HRcVec128, False); }
-HReg hregPPC32_VR30 ( void ) { return mkHReg(30, HRcVec128, False); }
-HReg hregPPC32_VR31 ( void ) { return mkHReg(31, HRcVec128, False); }
-
-void getAllocableRegs_PPC32 ( Int* nregs, HReg** arr, Bool mode64 )
+HReg hregPPC_FPR0 ( void ) { return mkHReg( 0, HRcFlt64, False); }
+HReg hregPPC_FPR1 ( void ) { return mkHReg( 1, HRcFlt64, False); }
+HReg hregPPC_FPR2 ( void ) { return mkHReg( 2, HRcFlt64, False); }
+HReg hregPPC_FPR3 ( void ) { return mkHReg( 3, HRcFlt64, False); }
+HReg hregPPC_FPR4 ( void ) { return mkHReg( 4, HRcFlt64, False); }
+HReg hregPPC_FPR5 ( void ) { return mkHReg( 5, HRcFlt64, False); }
+HReg hregPPC_FPR6 ( void ) { return mkHReg( 6, HRcFlt64, False); }
+HReg hregPPC_FPR7 ( void ) { return mkHReg( 7, HRcFlt64, False); }
+HReg hregPPC_FPR8 ( void ) { return mkHReg( 8, HRcFlt64, False); }
+HReg hregPPC_FPR9 ( void ) { return mkHReg( 9, HRcFlt64, False); }
+HReg hregPPC_FPR10 ( void ) { return mkHReg(10, HRcFlt64, False); }
+HReg hregPPC_FPR11 ( void ) { return mkHReg(11, HRcFlt64, False); }
+HReg hregPPC_FPR12 ( void ) { return mkHReg(12, HRcFlt64, False); }
+HReg hregPPC_FPR13 ( void ) { return mkHReg(13, HRcFlt64, False); }
+HReg hregPPC_FPR14 ( void ) { return mkHReg(14, HRcFlt64, False); }
+HReg hregPPC_FPR15 ( void ) { return mkHReg(15, HRcFlt64, False); }
+HReg hregPPC_FPR16 ( void ) { return mkHReg(16, HRcFlt64, False); }
+HReg hregPPC_FPR17 ( void ) { return mkHReg(17, HRcFlt64, False); }
+HReg hregPPC_FPR18 ( void ) { return mkHReg(18, HRcFlt64, False); }
+HReg hregPPC_FPR19 ( void ) { return mkHReg(19, HRcFlt64, False); }
+HReg hregPPC_FPR20 ( void ) { return mkHReg(20, HRcFlt64, False); }
+HReg hregPPC_FPR21 ( void ) { return mkHReg(21, HRcFlt64, False); }
+HReg hregPPC_FPR22 ( void ) { return mkHReg(22, HRcFlt64, False); }
+HReg hregPPC_FPR23 ( void ) { return mkHReg(23, HRcFlt64, False); }
+HReg hregPPC_FPR24 ( void ) { return mkHReg(24, HRcFlt64, False); }
+HReg hregPPC_FPR25 ( void ) { return mkHReg(25, HRcFlt64, False); }
+HReg hregPPC_FPR26 ( void ) { return mkHReg(26, HRcFlt64, False); }
+HReg hregPPC_FPR27 ( void ) { return mkHReg(27, HRcFlt64, False); }
+HReg hregPPC_FPR28 ( void ) { return mkHReg(28, HRcFlt64, False); }
+HReg hregPPC_FPR29 ( void ) { return mkHReg(29, HRcFlt64, False); }
+HReg hregPPC_FPR30 ( void ) { return mkHReg(30, HRcFlt64, False); }
+HReg hregPPC_FPR31 ( void ) { return mkHReg(31, HRcFlt64, False); }
+
+HReg hregPPC_VR0 ( void ) { return mkHReg( 0, HRcVec128, False); }
+HReg hregPPC_VR1 ( void ) { return mkHReg( 1, HRcVec128, False); }
+HReg hregPPC_VR2 ( void ) { return mkHReg( 2, HRcVec128, False); }
+HReg hregPPC_VR3 ( void ) { return mkHReg( 3, HRcVec128, False); }
+HReg hregPPC_VR4 ( void ) { return mkHReg( 4, HRcVec128, False); }
+HReg hregPPC_VR5 ( void ) { return mkHReg( 5, HRcVec128, False); }
+HReg hregPPC_VR6 ( void ) { return mkHReg( 6, HRcVec128, False); }
+HReg hregPPC_VR7 ( void ) { return mkHReg( 7, HRcVec128, False); }
+HReg hregPPC_VR8 ( void ) { return mkHReg( 8, HRcVec128, False); }
+HReg hregPPC_VR9 ( void ) { return mkHReg( 9, HRcVec128, False); }
+HReg hregPPC_VR10 ( void ) { return mkHReg(10, HRcVec128, False); }
+HReg hregPPC_VR11 ( void ) { return mkHReg(11, HRcVec128, False); }
+HReg hregPPC_VR12 ( void ) { return mkHReg(12, HRcVec128, False); }
+HReg hregPPC_VR13 ( void ) { return mkHReg(13, HRcVec128, False); }
+HReg hregPPC_VR14 ( void ) { return mkHReg(14, HRcVec128, False); }
+HReg hregPPC_VR15 ( void ) { return mkHReg(15, HRcVec128, False); }
+HReg hregPPC_VR16 ( void ) { return mkHReg(16, HRcVec128, False); }
+HReg hregPPC_VR17 ( void ) { return mkHReg(17, HRcVec128, False); }
+HReg hregPPC_VR18 ( void ) { return mkHReg(18, HRcVec128, False); }
+HReg hregPPC_VR19 ( void ) { return mkHReg(19, HRcVec128, False); }
+HReg hregPPC_VR20 ( void ) { return mkHReg(20, HRcVec128, False); }
+HReg hregPPC_VR21 ( void ) { return mkHReg(21, HRcVec128, False); }
+HReg hregPPC_VR22 ( void ) { return mkHReg(22, HRcVec128, False); }
+HReg hregPPC_VR23 ( void ) { return mkHReg(23, HRcVec128, False); }
+HReg hregPPC_VR24 ( void ) { return mkHReg(24, HRcVec128, False); }
+HReg hregPPC_VR25 ( void ) { return mkHReg(25, HRcVec128, False); }
+HReg hregPPC_VR26 ( void ) { return mkHReg(26, HRcVec128, False); }
+HReg hregPPC_VR27 ( void ) { return mkHReg(27, HRcVec128, False); }
+HReg hregPPC_VR28 ( void ) { return mkHReg(28, HRcVec128, False); }
+HReg hregPPC_VR29 ( void ) { return mkHReg(29, HRcVec128, False); }
+HReg hregPPC_VR30 ( void ) { return mkHReg(30, HRcVec128, False); }
+HReg hregPPC_VR31 ( void ) { return mkHReg(31, HRcVec128, False); }
+
+void getAllocableRegs_PPC ( Int* nregs, HReg** arr, Bool mode64 )
{
UInt i=0;
if (mode64)
else
*nregs = (32-7) + (32-24) + (32-24);
*arr = LibVEX_Alloc(*nregs * sizeof(HReg));
- // GPR0 = scratch reg where possible - some ops interpret as value zero
+ // GPR0 = scratch reg where poss. - some ops interpret as value zero
// GPR1 = stack pointer
// GPR2 = TOC pointer
(*arr)[i++] = hregPPC_GPR3(mode64);
/* Don't waste the reg-allocs's time trawling through zillions of
FP registers - they mostly will never be used. We'll tolerate
the occasional extra spill instead. */
- (*arr)[i++] = hregPPC32_FPR0();
- (*arr)[i++] = hregPPC32_FPR1();
- (*arr)[i++] = hregPPC32_FPR2();
- (*arr)[i++] = hregPPC32_FPR3();
- (*arr)[i++] = hregPPC32_FPR4();
- (*arr)[i++] = hregPPC32_FPR5();
- (*arr)[i++] = hregPPC32_FPR6();
- (*arr)[i++] = hregPPC32_FPR7();
+ (*arr)[i++] = hregPPC_FPR0();
+ (*arr)[i++] = hregPPC_FPR1();
+ (*arr)[i++] = hregPPC_FPR2();
+ (*arr)[i++] = hregPPC_FPR3();
+ (*arr)[i++] = hregPPC_FPR4();
+ (*arr)[i++] = hregPPC_FPR5();
+ (*arr)[i++] = hregPPC_FPR6();
+ (*arr)[i++] = hregPPC_FPR7();
/* Same deal re Altivec */
/* NB, vr29 is used as a scratch temporary -- do not allocate */
- (*arr)[i++] = hregPPC32_VR0();
- (*arr)[i++] = hregPPC32_VR1();
- (*arr)[i++] = hregPPC32_VR2();
- (*arr)[i++] = hregPPC32_VR3();
- (*arr)[i++] = hregPPC32_VR4();
- (*arr)[i++] = hregPPC32_VR5();
- (*arr)[i++] = hregPPC32_VR6();
- (*arr)[i++] = hregPPC32_VR7();
+ (*arr)[i++] = hregPPC_VR0();
+ (*arr)[i++] = hregPPC_VR1();
+ (*arr)[i++] = hregPPC_VR2();
+ (*arr)[i++] = hregPPC_VR3();
+ (*arr)[i++] = hregPPC_VR4();
+ (*arr)[i++] = hregPPC_VR5();
+ (*arr)[i++] = hregPPC_VR6();
+ (*arr)[i++] = hregPPC_VR7();
vassert(i == *nregs);
}
/* --------- Condition codes, Intel encoding. --------- */
-HChar* showPPC32CondCode ( PPC32CondCode cond )
+HChar* showPPCCondCode ( PPCCondCode cond )
{
if (cond.test == Pct_ALWAYS) return "always";
switch (cond.flag) {
- case Pcf_7SO: return (cond.test == Pct_TRUE) ? "cr7.so=1" : "cr7.so=0";
- case Pcf_7EQ: return (cond.test == Pct_TRUE) ? "cr7.eq=1" : "cr7.eq=0";
- case Pcf_7GT: return (cond.test == Pct_TRUE) ? "cr7.gt=1" : "cr7.gt=0";
- case Pcf_7LT: return (cond.test == Pct_TRUE) ? "cr7.lt=1" : "cr7.lt=0";
- default: vpanic("ppPPC32CondCode");
+ case Pcf_7SO:
+ return (cond.test == Pct_TRUE) ? "cr7.so=1" : "cr7.so=0";
+ case Pcf_7EQ:
+ return (cond.test == Pct_TRUE) ? "cr7.eq=1" : "cr7.eq=0";
+ case Pcf_7GT:
+ return (cond.test == Pct_TRUE) ? "cr7.gt=1" : "cr7.gt=0";
+ case Pcf_7LT:
+ return (cond.test == Pct_TRUE) ? "cr7.lt=1" : "cr7.lt=0";
+ default: vpanic("ppPPCCondCode");
}
}
/* construct condition code */
-PPC32CondCode mk_PPCCondCode ( PPC32CondTest test, PPC32CondFlag flag )
+PPCCondCode mk_PPCCondCode ( PPCCondTest test, PPCCondFlag flag )
{
- PPC32CondCode cc;
+ PPCCondCode cc;
cc.flag = flag;
cc.test = test;
return cc;
}
/* false->true, true->false */
-PPC32CondTest invertCondTest ( PPC32CondTest ct )
+PPCCondTest invertCondTest ( PPCCondTest ct )
{
vassert(ct != Pct_ALWAYS);
return (ct == Pct_TRUE) ? Pct_FALSE : Pct_TRUE;
/* --------- PPCAMode: memory address expressions. --------- */
-PPC32AMode* PPC32AMode_IR ( Int idx, HReg base ) {
- PPC32AMode* am = LibVEX_Alloc(sizeof(PPC32AMode));
+PPCAMode* PPCAMode_IR ( Int idx, HReg base ) {
+ PPCAMode* am = LibVEX_Alloc(sizeof(PPCAMode));
vassert(idx >= -0x8000 && idx < 0x8000);
am->tag = Pam_IR;
am->Pam.IR.base = base;
am->Pam.IR.index = idx;
return am;
}
-PPC32AMode* PPC32AMode_RR ( HReg idx, HReg base ) {
- PPC32AMode* am = LibVEX_Alloc(sizeof(PPC32AMode));
+PPCAMode* PPCAMode_RR ( HReg idx, HReg base ) {
+ PPCAMode* am = LibVEX_Alloc(sizeof(PPCAMode));
am->tag = Pam_RR;
am->Pam.RR.base = base;
am->Pam.RR.index = idx;
return am;
}
-PPC32AMode* dopyPPC32AMode ( PPC32AMode* am ) {
+PPCAMode* dopyPPCAMode ( PPCAMode* am ) {
switch (am->tag) {
case Pam_IR:
- return PPC32AMode_IR( am->Pam.IR.index, am->Pam.IR.base );
+ return PPCAMode_IR( am->Pam.IR.index, am->Pam.IR.base );
case Pam_RR:
- return PPC32AMode_RR( am->Pam.RR.index, am->Pam.RR.base );
+ return PPCAMode_RR( am->Pam.RR.index, am->Pam.RR.base );
default:
- vpanic("dopyPPC32AMode");
+ vpanic("dopyPPCAMode");
}
}
-void ppPPC32AMode ( PPC32AMode* am ) {
+void ppPPCAMode ( PPCAMode* am ) {
switch (am->tag) {
case Pam_IR:
if (am->Pam.IR.index == 0)
vex_printf("0(");
else
vex_printf("%d(", (Int)am->Pam.IR.index);
- ppHRegPPC32(am->Pam.IR.base);
+ ppHRegPPC(am->Pam.IR.base);
vex_printf(")");
return;
case Pam_RR:
- ppHRegPPC32(am->Pam.RR.base);
+ ppHRegPPC(am->Pam.RR.base);
vex_printf(",");
- ppHRegPPC32(am->Pam.RR.index);
+ ppHRegPPC(am->Pam.RR.index);
return;
default:
- vpanic("ppPPC32AMode");
+ vpanic("ppPPCAMode");
}
}
-static void addRegUsage_PPC32AMode ( HRegUsage* u, PPC32AMode* am ) {
+static void addRegUsage_PPCAMode ( HRegUsage* u, PPCAMode* am ) {
switch (am->tag) {
case Pam_IR:
addHRegUse(u, HRmRead, am->Pam.IR.base);
addHRegUse(u, HRmRead, am->Pam.RR.index);
return;
default:
- vpanic("addRegUsage_PPC32AMode");
+ vpanic("addRegUsage_PPCAMode");
}
}
-static void mapRegs_PPC32AMode ( HRegRemap* m, PPC32AMode* am ) {
+static void mapRegs_PPCAMode ( HRegRemap* m, PPCAMode* am ) {
switch (am->tag) {
case Pam_IR:
am->Pam.IR.base = lookupHRegRemap(m, am->Pam.IR.base);
am->Pam.RR.index = lookupHRegRemap(m, am->Pam.RR.index);
return;
default:
- vpanic("mapRegs_PPC32AMode");
+ vpanic("mapRegs_PPCAMode");
}
}
/* --------- Operand, which can be a reg or a u16/s16. --------- */
-PPC32RH* PPC32RH_Imm ( Bool syned, UShort imm16 ) {
- PPC32RH* op = LibVEX_Alloc(sizeof(PPC32RH));
+PPCRH* PPCRH_Imm ( Bool syned, UShort imm16 ) {
+ PPCRH* op = LibVEX_Alloc(sizeof(PPCRH));
op->tag = Prh_Imm;
op->Prh.Imm.syned = syned;
op->Prh.Imm.imm16 = imm16;
vassert(syned == True || syned == False);
return op;
}
-PPC32RH* PPC32RH_Reg ( HReg reg ) {
- PPC32RH* op = LibVEX_Alloc(sizeof(PPC32RH));
+PPCRH* PPCRH_Reg ( HReg reg ) {
+ PPCRH* op = LibVEX_Alloc(sizeof(PPCRH));
op->tag = Prh_Reg;
op->Prh.Reg.reg = reg;
return op;
}
-void ppPPC32RH ( PPC32RH* op ) {
+void ppPPCRH ( PPCRH* op ) {
switch (op->tag) {
case Prh_Imm:
if (op->Prh.Imm.syned)
vex_printf("%u", (UInt)(UShort)op->Prh.Imm.imm16);
return;
case Prh_Reg:
- ppHRegPPC32(op->Prh.Reg.reg);
+ ppHRegPPC(op->Prh.Reg.reg);
return;
default:
- vpanic("ppPPC32RH");
+ vpanic("ppPPCRH");
}
}
-/* An PPC32RH can only be used in a "read" context (what would it mean
+/* A PPCRH can only be used in a "read" context (what would it mean
to write or modify a literal?) and so we enumerate its registers
accordingly. */
-static void addRegUsage_PPC32RH ( HRegUsage* u, PPC32RH* op ) {
+static void addRegUsage_PPCRH ( HRegUsage* u, PPCRH* op ) {
switch (op->tag) {
case Prh_Imm:
return;
addHRegUse(u, HRmRead, op->Prh.Reg.reg);
return;
default:
- vpanic("addRegUsage_PPC32RH");
+ vpanic("addRegUsage_PPCRH");
}
}
-static void mapRegs_PPC32RH ( HRegRemap* m, PPC32RH* op ) {
+static void mapRegs_PPCRH ( HRegRemap* m, PPCRH* op ) {
switch (op->tag) {
case Prh_Imm:
return;
op->Prh.Reg.reg = lookupHRegRemap(m, op->Prh.Reg.reg);
return;
default:
- vpanic("mapRegs_PPC32RH");
+ vpanic("mapRegs_PPCRH");
}
}
/* --------- Operand, which can be a reg or a u32/64. --------- */
-PPC32RI* PPC32RI_Imm ( ULong imm64 ) {
- PPC32RI* op = LibVEX_Alloc(sizeof(PPC32RI));
+PPCRI* PPCRI_Imm ( ULong imm64 ) {
+ PPCRI* op = LibVEX_Alloc(sizeof(PPCRI));
op->tag = Pri_Imm;
op->Pri.Imm = imm64;
return op;
}
-PPC32RI* PPC32RI_Reg ( HReg reg ) {
- PPC32RI* op = LibVEX_Alloc(sizeof(PPC32RI));
+PPCRI* PPCRI_Reg ( HReg reg ) {
+ PPCRI* op = LibVEX_Alloc(sizeof(PPCRI));
op->tag = Pri_Reg;
op->Pri.Reg = reg;
return op;
}
-void ppPPC32RI ( PPC32RI* dst ) {
+void ppPPCRI ( PPCRI* dst ) {
switch (dst->tag) {
case Pri_Imm:
vex_printf("0x%llx", dst->Pri.Imm);
break;
case Pri_Reg:
- ppHRegPPC32(dst->Pri.Reg);
+ ppHRegPPC(dst->Pri.Reg);
break;
default:
- vpanic("ppPPC32RI");
+ vpanic("ppPPCRI");
}
}
-/* An PPC32RI can only be used in a "read" context (what would it
+/* A PPCRI can only be used in a "read" context (what would it
mean to write or modify a literal?) and so we enumerate its
registers accordingly. */
-static void addRegUsage_PPC32RI ( HRegUsage* u, PPC32RI* dst ) {
+static void addRegUsage_PPCRI ( HRegUsage* u, PPCRI* dst ) {
switch (dst->tag) {
case Pri_Imm:
return;
addHRegUse(u, HRmRead, dst->Pri.Reg);
return;
default:
- vpanic("addRegUsage_PPC32RI");
+ vpanic("addRegUsage_PPCRI");
}
}
-static void mapRegs_PPC32RI ( HRegRemap* m, PPC32RI* dst ) {
+static void mapRegs_PPCRI ( HRegRemap* m, PPCRI* dst ) {
switch (dst->tag) {
case Pri_Imm:
return;
dst->Pri.Reg = lookupHRegRemap(m, dst->Pri.Reg);
return;
default:
- vpanic("mapRegs_PPC32RI");
+ vpanic("mapRegs_PPCRI");
}
}
/* --------- Operand, which can be a vector reg or a simm5. --------- */
-PPC32VI5s* PPC32VI5s_Imm ( Char simm5 ) {
- PPC32VI5s* op = LibVEX_Alloc(sizeof(PPC32VI5s));
+PPCVI5s* PPCVI5s_Imm ( Char simm5 ) {
+ PPCVI5s* op = LibVEX_Alloc(sizeof(PPCVI5s));
op->tag = Pvi_Imm;
op->Pvi.Imm5s = simm5;
vassert(simm5 >= -16 && simm5 <= 15);
return op;
}
-PPC32VI5s* PPC32VI5s_Reg ( HReg reg ) {
- PPC32VI5s* op = LibVEX_Alloc(sizeof(PPC32VI5s));
- op->tag = Pvi_Reg;
- op->Pvi.Reg = reg;
+PPCVI5s* PPCVI5s_Reg ( HReg reg ) {
+ PPCVI5s* op = LibVEX_Alloc(sizeof(PPCVI5s));
+ op->tag = Pvi_Reg;
+ op->Pvi.Reg = reg;
vassert(hregClass(reg) == HRcVec128);
return op;
}
-void ppPPC32VI5s ( PPC32VI5s* src ) {
+void ppPPCVI5s ( PPCVI5s* src ) {
switch (src->tag) {
case Pvi_Imm:
vex_printf("%d", (Int)src->Pvi.Imm5s);
break;
case Pvi_Reg:
- ppHRegPPC32(src->Pvi.Reg);
+ ppHRegPPC(src->Pvi.Reg);
break;
default:
- vpanic("ppPPC32VI5s");
+ vpanic("ppPPCVI5s");
}
}
-/* An PPC32VI5s can only be used in a "read" context (what would it
+/* A PPCVI5s can only be used in a "read" context (what would it
mean to write or modify a literal?) and so we enumerate its
registers accordingly. */
-static void addRegUsage_PPC32VI5s ( HRegUsage* u, PPC32VI5s* dst ) {
+static void addRegUsage_PPCVI5s ( HRegUsage* u, PPCVI5s* dst ) {
switch (dst->tag) {
case Pvi_Imm:
return;
addHRegUse(u, HRmRead, dst->Pvi.Reg);
return;
default:
- vpanic("addRegUsage_PPC32VI5s");
+ vpanic("addRegUsage_PPCVI5s");
}
}
-static void mapRegs_PPC32VI5s ( HRegRemap* m, PPC32VI5s* dst ) {
+static void mapRegs_PPCVI5s ( HRegRemap* m, PPCVI5s* dst ) {
switch (dst->tag) {
case Pvi_Imm:
return;
dst->Pvi.Reg = lookupHRegRemap(m, dst->Pvi.Reg);
return;
default:
- vpanic("mapRegs_PPC32VI5s");
+ vpanic("mapRegs_PPCVI5s");
}
}
/* --------- Instructions. --------- */
-HChar* showPPC32UnaryOp ( PPC32UnaryOp op ) {
+HChar* showPPCUnaryOp ( PPCUnaryOp op ) {
switch (op) {
- case Pun_NOT: return "not";
- case Pun_NEG: return "neg";
+ case Pun_NOT: return "not";
+ case Pun_NEG: return "neg";
case Pun_CLZ32: return "cntlzw";
case Pun_CLZ64: return "cntlzd";
- default: vpanic("showPPC32UnaryOp");
+ default: vpanic("showPPCUnaryOp");
}
}
-HChar* showPPC32AluOp ( PPC32AluOp op, Bool immR ) {
+HChar* showPPCAluOp ( PPCAluOp op, Bool immR ) {
switch (op) {
case Palu_ADD: return immR ? "addi" : "add";
case Palu_SUB: return immR ? "subi" : "sub";
case Palu_AND: return immR ? "andi." : "and";
case Palu_OR: return immR ? "ori" : "or";
case Palu_XOR: return immR ? "xori" : "xor";
- default: vpanic("showPPC32AluOp");
+ default: vpanic("showPPCAluOp");
}
}
-HChar* showPPC32ShftOp ( PPC32ShftOp op, Bool immR, Bool sz32 ) {
+HChar* showPPCShftOp ( PPCShftOp op, Bool immR, Bool sz32 ) {
switch (op) {
case Pshft_SHL: return sz32 ? (immR ? "slwi" : "slw") :
(immR ? "sldi" : "sld");
(immR ? "srdi" : "srd");
case Pshft_SAR: return sz32 ? (immR ? "srawi" : "sraw") :
(immR ? "sradi" : "srad");
- default: vpanic("showPPC32ShftOp");
+ default: vpanic("showPPCShftOp");
}
}
-HChar* showPPC32FpOp ( PPC32FpOp op ) {
+HChar* showPPCFpOp ( PPCFpOp op ) {
switch (op) {
case Pfp_ADD: return "fadd";
case Pfp_SUB: return "fsub";
case Pfp_ABS: return "fabs";
case Pfp_NEG: return "fneg";
case Pfp_MOV: return "fmr";
- default: vpanic("showPPC32FpOp");
+ default: vpanic("showPPCFpOp");
}
}
-HChar* showPPC32AvOp ( PPC32AvOp op ) {
+HChar* showPPCAvOp ( PPCAvOp op ) {
switch (op) {
/* Unary */
case Pav_MRGHI: return "vmrgh"; // b,h,w
case Pav_MRGLO: return "vmrgl"; // b,h,w
- default: vpanic("showPPC32AvOp");
+ default: vpanic("showPPCAvOp");
}
}
-HChar* showPPC32AvFpOp ( PPC32AvFpOp op ) {
+HChar* showPPCAvFpOp ( PPCAvFpOp op ) {
switch (op) {
/* Floating Point Binary */
case Pavfp_ADDF: return "vaddfp";
case Pavfp_ROUNDN: return "vrfin";
case Pavfp_ROUNDZ: return "vrfiz";
- default: vpanic("showPPC32AvFpOp");
+ default: vpanic("showPPCAvFpOp");
}
}
-PPC32Instr* PPC32Instr_LI ( HReg dst, ULong imm64, Bool mode64 )
+PPCInstr* PPCInstr_LI ( HReg dst, ULong imm64, Bool mode64 )
{
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_LI;
i->Pin.LI.dst = dst;
i->Pin.LI.imm64 = imm64;
vassert( (Long)imm64 == (Long)(Int)(UInt)imm64 );
return i;
}
-PPC32Instr* PPC32Instr_Alu ( PPC32AluOp op, HReg dst,
- HReg srcL, PPC32RH* srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_Alu ( PPCAluOp op, HReg dst,
+ HReg srcL, PPCRH* srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_Alu;
i->Pin.Alu.op = op;
i->Pin.Alu.dst = dst;
i->Pin.Alu.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_Shft ( PPC32ShftOp op, Bool sz32,
- HReg dst, HReg srcL, PPC32RH* srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_Shft ( PPCShftOp op, Bool sz32,
+ HReg dst, HReg srcL, PPCRH* srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_Shft;
i->Pin.Shft.op = op;
i->Pin.Shft.sz32 = sz32;
i->Pin.Shft.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_AddSubC32 ( Bool isAdd, Bool setC,
- HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_AddSubC32;
- i->Pin.AddSubC32.isAdd = isAdd;
- i->Pin.AddSubC32.setC = setC;
- i->Pin.AddSubC32.dst = dst;
- i->Pin.AddSubC32.srcL = srcL;
- i->Pin.AddSubC32.srcR = srcR;
+PPCInstr* PPCInstr_AddSubC ( Bool isAdd, Bool setC,
+ HReg dst, HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_AddSubC;
+ i->Pin.AddSubC.isAdd = isAdd;
+ i->Pin.AddSubC.setC = setC;
+ i->Pin.AddSubC.dst = dst;
+ i->Pin.AddSubC.srcL = srcL;
+ i->Pin.AddSubC.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_Cmp ( Bool syned, Bool sz32,
- UInt crfD, HReg srcL, PPC32RH* srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_Cmp ( Bool syned, Bool sz32,
+ UInt crfD, HReg srcL, PPCRH* srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_Cmp;
i->Pin.Cmp.syned = syned;
i->Pin.Cmp.sz32 = sz32;
i->Pin.Cmp.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_Unary ( PPC32UnaryOp op, HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_Unary;
- i->Pin.Unary32.op = op;
- i->Pin.Unary32.dst = dst;
- i->Pin.Unary32.src = src;
+PPCInstr* PPCInstr_Unary ( PPCUnaryOp op, HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_Unary;
+ i->Pin.Unary.op = op;
+ i->Pin.Unary.dst = dst;
+ i->Pin.Unary.src = src;
return i;
}
-PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi, Bool sz32,
- HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_MulL ( Bool syned, Bool hi, Bool sz32,
+ HReg dst, HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_MulL;
i->Pin.MulL.syned = syned;
i->Pin.MulL.hi = hi;
if (!hi) vassert(!syned);
return i;
}
-PPC32Instr* PPC32Instr_Div ( Bool syned, Bool sz32,
- HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_Div;
- i->Pin.Div.syned = syned;
- i->Pin.Div.sz32 = sz32;
- i->Pin.Div.dst = dst;
- i->Pin.Div.srcL = srcL;
- i->Pin.Div.srcR = srcR;
+PPCInstr* PPCInstr_Div ( Bool syned, Bool sz32,
+ HReg dst, HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_Div;
+ i->Pin.Div.syned = syned;
+ i->Pin.Div.sz32 = sz32;
+ i->Pin.Div.dst = dst;
+ i->Pin.Div.srcL = srcL;
+ i->Pin.Div.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_Call ( PPC32CondCode cond,
- Addr64 target, UInt argiregs ) {
+PPCInstr* PPCInstr_Call ( PPCCondCode cond,
+ Addr64 target, UInt argiregs ) {
UInt mask;
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_Call;
i->Pin.Call.cond = cond;
i->Pin.Call.target = target;
vassert(0 == (argiregs & ~mask));
return i;
}
-PPC32Instr* PPC32Instr_Goto ( IRJumpKind jk,
- PPC32CondCode cond, PPC32RI* dst ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_Goto ( IRJumpKind jk,
+ PPCCondCode cond, PPCRI* dst ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_Goto;
i->Pin.Goto.cond = cond;
i->Pin.Goto.dst = dst;
i->Pin.Goto.jk = jk;
return i;
}
-PPC32Instr* PPC32Instr_CMov ( PPC32CondCode cond,
- HReg dst, PPC32RI* src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_CMov ( PPCCondCode cond,
+ HReg dst, PPCRI* src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_CMov;
i->Pin.CMov.cond = cond;
i->Pin.CMov.src = src;
vassert(cond.test != Pct_ALWAYS);
return i;
}
-PPC32Instr* PPC32Instr_Load ( UChar sz, Bool syned,
- HReg dst, PPC32AMode* src, Bool mode64 ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_Load ( UChar sz, Bool syned,
+ HReg dst, PPCAMode* src, Bool mode64 ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_Load;
i->Pin.Load.sz = sz;
i->Pin.Load.syned = syned;
if (sz == 8) vassert(mode64);
return i;
}
-PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst, HReg src,
- Bool mode64 ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst, HReg src,
+ Bool mode64 ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_Store;
i->Pin.Store.sz = sz;
i->Pin.Store.src = src;
if (sz == 8) vassert(mode64);
return i;
}
-PPC32Instr* PPC32Instr_Set32 ( PPC32CondCode cond, HReg dst ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_Set32;
- i->Pin.Set32.cond = cond;
- i->Pin.Set32.dst = dst;
+PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_Set;
+ i->Pin.Set.cond = cond;
+ i->Pin.Set.dst = dst;
return i;
}
-PPC32Instr* PPC32Instr_MfCR ( HReg dst )
+PPCInstr* PPCInstr_MfCR ( HReg dst )
{
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_MfCR;
- i->Pin.MfCR.dst = dst;
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_MfCR;
+ i->Pin.MfCR.dst = dst;
return i;
}
-PPC32Instr* PPC32Instr_MFence ( void )
+PPCInstr* PPCInstr_MFence ( void )
{
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_MFence;
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_MFence;
return i;
}
-PPC32Instr* PPC32Instr_FpUnary ( PPC32FpOp op, HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpUnary ( PPCFpOp op, HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpUnary;
i->Pin.FpUnary.op = op;
i->Pin.FpUnary.dst = dst;
i->Pin.FpUnary.src = src;
return i;
}
-PPC32Instr* PPC32Instr_FpBinary ( PPC32FpOp op, HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpBinary ( PPCFpOp op, HReg dst,
+ HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpBinary;
i->Pin.FpBinary.op = op;
i->Pin.FpBinary.dst = dst;
i->Pin.FpBinary.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, PPC32AMode* addr ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpLdSt ( Bool isLoad, UChar sz,
+ HReg reg, PPCAMode* addr ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpLdSt;
i->Pin.FpLdSt.isLoad = isLoad;
i->Pin.FpLdSt.sz = sz;
vassert(sz == 4 || sz == 8);
return i;
}
-PPC32Instr* PPC32Instr_FpF64toF32 ( HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpF64toF32 ( HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpF64toF32;
i->Pin.FpF64toF32.dst = dst;
i->Pin.FpF64toF32.src = src;
return i;
}
-PPC32Instr* PPC32Instr_FpF64toI32 ( HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpF64toI32 ( HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpF64toI32;
i->Pin.FpF64toI32.dst = dst;
i->Pin.FpF64toI32.src = src;
return i;
}
-PPC32Instr* PPC32Instr_FpF64toI64 ( HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpF64toI64 ( HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpF64toI64;
i->Pin.FpF64toI64.dst = dst;
i->Pin.FpF64toI64.src = src;
return i;
}
-PPC32Instr* PPC32Instr_FpI64toF64 ( HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpI64toF64 ( HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpI64toF64;
i->Pin.FpI64toF64.dst = dst;
i->Pin.FpI64toF64.src = src;
return i;
}
-PPC32Instr* PPC32Instr_FpCMov ( PPC32CondCode cond, HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpCMov ( PPCCondCode cond, HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpCMov;
i->Pin.FpCMov.cond = cond;
i->Pin.FpCMov.dst = dst;
vassert(cond.test != Pct_ALWAYS);
return i;
}
-PPC32Instr* PPC32Instr_FpLdFPSCR ( HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpLdFPSCR ( HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpLdFPSCR;
i->Pin.FpLdFPSCR.src = src;
return i;
}
-PPC32Instr* PPC32Instr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_FpCmp;
i->Pin.FpCmp.dst = dst;
i->Pin.FpCmp.srcL = srcL;
}
/* Read/Write Link Register */
-PPC32Instr* PPC32Instr_RdWrLR ( Bool wrLR, HReg gpr ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_RdWrLR ( Bool wrLR, HReg gpr ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_RdWrLR;
i->Pin.RdWrLR.wrLR = wrLR;
i->Pin.RdWrLR.gpr = gpr;
}
/* AltiVec */
-PPC32Instr* PPC32Instr_AvLdSt ( Bool isLoad, UChar sz, HReg reg, PPC32AMode* addr ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvLdSt ( Bool isLoad, UChar sz,
+ HReg reg, PPCAMode* addr ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvLdSt;
i->Pin.AvLdSt.isLoad = isLoad;
i->Pin.AvLdSt.sz = sz;
i->Pin.AvLdSt.addr = addr;
return i;
}
-PPC32Instr* PPC32Instr_AvUnary ( PPC32AvOp op, HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvUnary;
i->Pin.AvUnary.op = op;
i->Pin.AvUnary.dst = dst;
i->Pin.AvUnary.src = src;
return i;
}
-PPC32Instr* PPC32Instr_AvBinary ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst,
+ HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvBinary;
i->Pin.AvBinary.op = op;
i->Pin.AvBinary.dst = dst;
i->Pin.AvBinary.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_AvBin8x16 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_AvBin8x16;
+PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst,
+ HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_AvBin8x16;
i->Pin.AvBin8x16.op = op;
i->Pin.AvBin8x16.dst = dst;
i->Pin.AvBin8x16.srcL = srcL;
i->Pin.AvBin8x16.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_AvBin16x8 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_AvBin16x8;
+PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst,
+ HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_AvBin16x8;
i->Pin.AvBin16x8.op = op;
i->Pin.AvBin16x8.dst = dst;
i->Pin.AvBin16x8.srcL = srcL;
i->Pin.AvBin16x8.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_AvBin32x4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_AvBin32x4;
+PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst,
+ HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ i->tag = Pin_AvBin32x4;
i->Pin.AvBin32x4.op = op;
i->Pin.AvBin32x4.dst = dst;
i->Pin.AvBin32x4.srcL = srcL;
i->Pin.AvBin32x4.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_AvBin32Fx4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvOp op, HReg dst,
+ HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvBin32Fx4;
i->Pin.AvBin32Fx4.op = op;
i->Pin.AvBin32Fx4.dst = dst;
i->Pin.AvBin32Fx4.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_AvUn32Fx4 ( PPC32AvOp op, HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvUn32Fx4 ( PPCAvOp op, HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvUn32Fx4;
i->Pin.AvUn32Fx4.op = op;
i->Pin.AvUn32Fx4.dst = dst;
i->Pin.AvUn32Fx4.src = src;
return i;
}
-PPC32Instr* PPC32Instr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvPerm;
i->Pin.AvPerm.dst = dst;
i->Pin.AvPerm.srcL = srcL;
i->Pin.AvPerm.ctl = ctl;
return i;
}
-PPC32Instr* PPC32Instr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvSel;
i->Pin.AvSel.ctl = ctl;
i->Pin.AvSel.dst = dst;
i->Pin.AvSel.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_AvShlDbl ( UChar shift, HReg dst, HReg srcL, HReg srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvShlDbl ( UChar shift, HReg dst,
+ HReg srcL, HReg srcR ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvShlDbl;
i->Pin.AvShlDbl.shift = shift;
i->Pin.AvShlDbl.dst = dst;
i->Pin.AvShlDbl.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_AvSplat ( UChar sz, HReg dst, PPC32VI5s* src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvSplat ( UChar sz, HReg dst, PPCVI5s* src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvSplat;
i->Pin.AvSplat.sz = sz;
i->Pin.AvSplat.dst = dst;
i->Pin.AvSplat.src = src;
return i;
}
-PPC32Instr* PPC32Instr_AvCMov ( PPC32CondCode cond, HReg dst, HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvCMov ( PPCCondCode cond, HReg dst, HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvCMov;
i->Pin.AvCMov.cond = cond;
i->Pin.AvCMov.dst = dst;
vassert(cond.test != Pct_ALWAYS);
return i;
}
-PPC32Instr* PPC32Instr_AvLdVSCR ( HReg src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+PPCInstr* PPCInstr_AvLdVSCR ( HReg src ) {
+ PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
i->tag = Pin_AvLdVSCR;
i->Pin.AvLdVSCR.src = src;
return i;
static void ppLoadImm ( HReg dst, ULong imm, Bool mode64 ) {
#if 1
vex_printf("li_word ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
if (!mode64) {
vassert(imm == (ULong)(Long)(Int)(UInt)imm);
vex_printf(",0x%08x", (UInt)imm);
vex_printf(",0x%016llx", imm);
}
#else
-// if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
- if (imm == (ULong)(Long)(Int)(Short)(UShort)imm) {
+ if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
// sign-extendable from 16 bits
vex_printf("li ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",0x%x", (UInt)imm);
} else {
-// if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
- if (imm == (ULong)(Long)(Int)(UInt)imm) {
+ if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
// sign-extendable from 32 bits
vex_printf("lis ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",0x%x ; ", (UInt)(imm >> 16));
vex_printf("ori ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",0x%x", (UInt)(imm & 0xFFFF));
} else {
// full 64bit immediate load: 5 (five!) insns.
// load high word
vex_printf("lis ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",0x%x ; ", (UInt)(imm >> 48) & 0xFFFF);
vex_printf("ori ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",0x%x ; ", (UInt)(imm >> 32) & 0xFFFF);
// shift r_dst low word to high word => rldicr
vex_printf("rldicr ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",32,31 ; ");
// load low word
vex_printf("oris ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",0x%x ; ", (UInt)(imm >> 16) & 0xFFFF);
vex_printf("ori ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",0x%x", (UInt)(imm >> 0) & 0xFFFF);
}
}
static void ppMovReg ( HReg dst, HReg src ) {
if (hregNumber(dst) != hregNumber(src)) {
vex_printf("mr ");
- ppHRegPPC32(dst);
+ ppHRegPPC(dst);
vex_printf(",");
- ppHRegPPC32(src);
+ ppHRegPPC(src);
}
}
-void ppPPC32Instr ( PPC32Instr* i, Bool mode64 )
+void ppPPCInstr ( PPCInstr* i, Bool mode64 )
{
switch (i->tag) {
case Pin_LI:
ppLoadImm(i->Pin.LI.dst, i->Pin.LI.imm64, mode64);
break;
case Pin_Alu: {
- HReg r_srcL = i->Pin.Alu.srcL;
- PPC32RH* rh_srcR = i->Pin.Alu.srcR;
+ HReg r_srcL = i->Pin.Alu.srcL;
+ PPCRH* rh_srcR = i->Pin.Alu.srcR;
/* special-case "mr" */
if (i->Pin.Alu.op == Palu_OR && // or Rd,Rs,Rs == mr Rd,Rs
rh_srcR->tag == Prh_Reg &&
rh_srcR->Prh.Reg.reg == r_srcL) {
vex_printf("mr ");
- ppHRegPPC32(i->Pin.Alu.dst);
+ ppHRegPPC(i->Pin.Alu.dst);
vex_printf(",");
- ppHRegPPC32(r_srcL);
+ ppHRegPPC(r_srcL);
return;
}
/* special-case "li" */
rh_srcR->tag == Prh_Imm &&
hregNumber(r_srcL) == 0) {
vex_printf("li ");
- ppHRegPPC32(i->Pin.Alu.dst);
+ ppHRegPPC(i->Pin.Alu.dst);
vex_printf(",");
- ppPPC32RH(rh_srcR);
+ ppPPCRH(rh_srcR);
return;
}
/* generic */
- vex_printf("%s ", showPPC32AluOp(i->Pin.Alu.op,
- toBool(rh_srcR->tag == Prh_Imm)));
- ppHRegPPC32(i->Pin.Alu.dst);
+ vex_printf("%s ", showPPCAluOp(i->Pin.Alu.op,
+ toBool(rh_srcR->tag == Prh_Imm)));
+ ppHRegPPC(i->Pin.Alu.dst);
vex_printf(",");
- ppHRegPPC32(r_srcL);
+ ppHRegPPC(r_srcL);
vex_printf(",");
- ppPPC32RH(rh_srcR);
+ ppPPCRH(rh_srcR);
return;
}
case Pin_Shft: {
- HReg r_srcL = i->Pin.Shft.srcL;
- PPC32RH* rh_srcR = i->Pin.Shft.srcR;
- vex_printf("%s ", showPPC32ShftOp(i->Pin.Shft.op,
- toBool(rh_srcR->tag == Prh_Imm),
- i->Pin.Shft.sz32));
- ppHRegPPC32(i->Pin.Shft.dst);
+ HReg r_srcL = i->Pin.Shft.srcL;
+ PPCRH* rh_srcR = i->Pin.Shft.srcR;
+ vex_printf("%s ", showPPCShftOp(i->Pin.Shft.op,
+ toBool(rh_srcR->tag == Prh_Imm),
+ i->Pin.Shft.sz32));
+ ppHRegPPC(i->Pin.Shft.dst);
vex_printf(",");
- ppHRegPPC32(r_srcL);
+ ppHRegPPC(r_srcL);
vex_printf(",");
- ppPPC32RH(rh_srcR);
+ ppPPCRH(rh_srcR);
return;
}
- case Pin_AddSubC32:
+ case Pin_AddSubC:
vex_printf("%s%s ",
- i->Pin.AddSubC32.isAdd ? "add" : "sub",
- i->Pin.AddSubC32.setC ? "c" : "e");
- ppHRegPPC32(i->Pin.AddSubC32.dst);
+ i->Pin.AddSubC.isAdd ? "add" : "sub",
+ i->Pin.AddSubC.setC ? "c" : "e");
+ ppHRegPPC(i->Pin.AddSubC.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AddSubC32.srcL);
+ ppHRegPPC(i->Pin.AddSubC.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AddSubC32.srcR);
+ ppHRegPPC(i->Pin.AddSubC.srcR);
return;
case Pin_Cmp:
vex_printf("%s%c%s %%cr%u,",
i->Pin.Cmp.sz32 ? 'w' : 'd',
i->Pin.Cmp.srcR->tag == Prh_Imm ? "i" : "",
i->Pin.Cmp.crfD);
- ppHRegPPC32(i->Pin.Cmp.srcL);
+ ppHRegPPC(i->Pin.Cmp.srcL);
vex_printf(",");
- ppPPC32RH(i->Pin.Cmp.srcR);
+ ppPPCRH(i->Pin.Cmp.srcR);
return;
case Pin_Unary:
- vex_printf("%s ", showPPC32UnaryOp(i->Pin.Unary32.op));
- ppHRegPPC32(i->Pin.Unary32.dst);
+ vex_printf("%s ", showPPCUnaryOp(i->Pin.Unary.op));
+ ppHRegPPC(i->Pin.Unary.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.Unary32.src);
+ ppHRegPPC(i->Pin.Unary.src);
return;
case Pin_MulL:
vex_printf("mul%c%c%s ",
i->Pin.MulL.hi ? 'h' : 'l',
i->Pin.MulL.sz32 ? 'w' : 'd',
i->Pin.MulL.hi ? (i->Pin.MulL.syned ? "s" : "u") : "");
- ppHRegPPC32(i->Pin.MulL.dst);
+ ppHRegPPC(i->Pin.MulL.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.MulL.srcL);
+ ppHRegPPC(i->Pin.MulL.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.MulL.srcR);
+ ppHRegPPC(i->Pin.MulL.srcR);
return;
case Pin_Div:
vex_printf("div%c%s ",
i->Pin.Div.sz32 ? 'w' : 'd',
i->Pin.Div.syned ? "" : "u");
- ppHRegPPC32(i->Pin.Div.dst);
+ ppHRegPPC(i->Pin.Div.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.Div.srcL);
+ ppHRegPPC(i->Pin.Div.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.Div.srcR);
+ ppHRegPPC(i->Pin.Div.srcR);
return;
case Pin_Call: {
Int n;
vex_printf("call: ");
if (i->Pin.Call.cond.test != Pct_ALWAYS) {
- vex_printf("if (%s) ", showPPC32CondCode(i->Pin.Call.cond));
+ vex_printf("if (%s) ", showPPCCondCode(i->Pin.Call.cond));
}
vex_printf("{ ");
ppLoadImm(hregPPC_GPR10(mode64), i->Pin.Call.target, mode64);
case Pin_Goto:
vex_printf("goto: ");
if (i->Pin.Goto.cond.test != Pct_ALWAYS) {
- vex_printf("if (%s) ", showPPC32CondCode(i->Pin.Goto.cond));
+ vex_printf("if (%s) ", showPPCCondCode(i->Pin.Goto.cond));
}
vex_printf("{ ");
if (i->Pin.Goto.jk != Ijk_Boring
vex_printf(" ; ");
}
if (i->Pin.Goto.dst->tag == Pri_Imm) {
- ppLoadImm(hregPPC_GPR3(mode64), i->Pin.Goto.dst->Pri.Imm, mode64);
+ ppLoadImm(hregPPC_GPR3(mode64), i->Pin.Goto.dst->Pri.Imm,
+ mode64);
} else {
ppMovReg(hregPPC_GPR3(mode64), i->Pin.Goto.dst->Pri.Reg);
}
vex_printf(" ; blr }");
return;
case Pin_CMov:
- vex_printf("cmov (%s) ", showPPC32CondCode(i->Pin.CMov.cond));
- ppHRegPPC32(i->Pin.CMov.dst);
+ vex_printf("cmov (%s) ", showPPCCondCode(i->Pin.CMov.cond));
+ ppHRegPPC(i->Pin.CMov.dst);
vex_printf(",");
- ppPPC32RI(i->Pin.CMov.src);
+ ppPPCRI(i->Pin.CMov.src);
vex_printf(": ");
if (i->Pin.CMov.cond.test != Pct_ALWAYS) {
- vex_printf("if (%s) ", showPPC32CondCode(i->Pin.CMov.cond));
+ vex_printf("if (%s) ", showPPCCondCode(i->Pin.CMov.cond));
}
vex_printf("{ ");
if (i->Pin.CMov.src->tag == Pri_Imm) {
UChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : 'd';
HChar* s_syned = i->Pin.Load.syned ? "a" : sz==8 ? "" : "z";
vex_printf("l%c%s%s ", c_sz, s_syned, idxd ? "x" : "" );
- ppHRegPPC32(i->Pin.Load.dst);
+ ppHRegPPC(i->Pin.Load.dst);
vex_printf(",");
- ppPPC32AMode(i->Pin.Load.src);
+ ppPPCAMode(i->Pin.Load.src);
return;
}
case Pin_Store: {
Bool idxd = toBool(i->Pin.Store.dst->tag == Pam_RR);
UChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : /*8*/ 'd';
vex_printf("st%c%s ", c_sz, idxd ? "x" : "" );
- ppHRegPPC32(i->Pin.Store.src);
+ ppHRegPPC(i->Pin.Store.src);
vex_printf(",");
- ppPPC32AMode(i->Pin.Store.dst);
+ ppPPCAMode(i->Pin.Store.dst);
return;
}
- case Pin_Set32: {
- PPC32CondCode cc = i->Pin.Set32.cond;
- vex_printf("set32 (%s),", showPPC32CondCode(cc));
- ppHRegPPC32(i->Pin.Set32.dst);
+ case Pin_Set: {
+ PPCCondCode cc = i->Pin.Set.cond;
+ vex_printf("set (%s),", showPPCCondCode(cc));
+ ppHRegPPC(i->Pin.Set.dst);
if (cc.test == Pct_ALWAYS) {
vex_printf(": { li ");
- ppHRegPPC32(i->Pin.Set32.dst);
+ ppHRegPPC(i->Pin.Set.dst);
vex_printf(",1 }");
} else {
vex_printf(": { mfcr r0 ; rlwinm ");
- ppHRegPPC32(i->Pin.Set32.dst);
+ ppHRegPPC(i->Pin.Set.dst);
vex_printf(",r0,%u,31,31", cc.flag+1);
if (cc.test == Pct_FALSE) {
vex_printf("; xori ");
- ppHRegPPC32(i->Pin.Set32.dst);
+ ppHRegPPC(i->Pin.Set.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.Set32.dst);
+ ppHRegPPC(i->Pin.Set.dst);
vex_printf(",1");
}
vex_printf(" }");
}
case Pin_MfCR:
vex_printf("mfcr ");
- ppHRegPPC32(i->Pin.MfCR.dst);
+ ppHRegPPC(i->Pin.MfCR.dst);
break;
case Pin_MFence:
vex_printf("mfence (=sync)");
return;
case Pin_FpUnary:
- vex_printf("%s ", showPPC32FpOp(i->Pin.FpUnary.op));
- ppHRegPPC32(i->Pin.FpUnary.dst);
+ vex_printf("%s ", showPPCFpOp(i->Pin.FpUnary.op));
+ ppHRegPPC(i->Pin.FpUnary.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.FpUnary.src);
+ ppHRegPPC(i->Pin.FpUnary.src);
return;
case Pin_FpBinary:
- vex_printf("%s ", showPPC32FpOp(i->Pin.FpBinary.op));
- ppHRegPPC32(i->Pin.FpBinary.dst);
+ vex_printf("%s ", showPPCFpOp(i->Pin.FpBinary.op));
+ ppHRegPPC(i->Pin.FpBinary.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.FpBinary.srcL);
+ ppHRegPPC(i->Pin.FpBinary.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.FpBinary.srcR);
+ ppHRegPPC(i->Pin.FpBinary.srcR);
return;
case Pin_FpLdSt: {
UChar sz = i->Pin.FpLdSt.sz;
vex_printf("lf%c%s ",
(sz==4 ? 's' : 'd'),
idxd ? "x" : "" );
- ppHRegPPC32(i->Pin.FpLdSt.reg);
+ ppHRegPPC(i->Pin.FpLdSt.reg);
vex_printf(",");
- ppPPC32AMode(i->Pin.FpLdSt.addr);
+ ppPPCAMode(i->Pin.FpLdSt.addr);
} else {
vex_printf("stf%c%s ",
(sz==4 ? 's' : 'd'),
idxd ? "x" : "" );
- ppHRegPPC32(i->Pin.FpLdSt.reg);
+ ppHRegPPC(i->Pin.FpLdSt.reg);
vex_printf(",");
- ppPPC32AMode(i->Pin.FpLdSt.addr);
+ ppPPCAMode(i->Pin.FpLdSt.addr);
}
return;
}
case Pin_FpF64toF32:
vex_printf("frsp ");
- ppHRegPPC32(i->Pin.FpF64toF32.dst);
+ ppHRegPPC(i->Pin.FpF64toF32.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.FpF64toF32.src);
+ ppHRegPPC(i->Pin.FpF64toF32.src);
return;
case Pin_FpF64toI32:
vex_printf("fctiw %%fr7,");
- ppHRegPPC32(i->Pin.FpF64toI32.src);
+ ppHRegPPC(i->Pin.FpF64toI32.src);
vex_printf("; stfiwx %%fr7,%%r0,%%r1");
vex_printf("; lwzx ");
- ppHRegPPC32(i->Pin.FpF64toI32.dst);
+ ppHRegPPC(i->Pin.FpF64toI32.dst);
vex_printf(",%%r0,%%r1");
return;
case Pin_FpF64toI64:
vex_printf("fctid %%fr7,");
- ppHRegPPC32(i->Pin.FpF64toI64.src);
+ ppHRegPPC(i->Pin.FpF64toI64.src);
vex_printf("; stfdx %%fr7,%%r0,%%r1");
vex_printf("; ldx ");
- ppHRegPPC32(i->Pin.FpF64toI64.dst);
+ ppHRegPPC(i->Pin.FpF64toI64.dst);
vex_printf(",%%r0,%%r1");
return;
case Pin_FpI64toF64:
vex_printf("stdx ");
- ppHRegPPC32(i->Pin.FpI64toF64.src);
+ ppHRegPPC(i->Pin.FpI64toF64.src);
vex_printf(",%%r0,%%r1");
vex_printf("; lfdx %%fr7,%%r0,%%r1");
vex_printf("; fcfid ");
- ppHRegPPC32(i->Pin.FpI64toF64.dst);
+ ppHRegPPC(i->Pin.FpI64toF64.dst);
vex_printf(",%%r7");
return;
case Pin_FpCMov:
- vex_printf("fpcmov (%s) ", showPPC32CondCode(i->Pin.FpCMov.cond));
- ppHRegPPC32(i->Pin.FpCMov.dst);
+ vex_printf("fpcmov (%s) ", showPPCCondCode(i->Pin.FpCMov.cond));
+ ppHRegPPC(i->Pin.FpCMov.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.FpCMov.src);
+ ppHRegPPC(i->Pin.FpCMov.src);
vex_printf(": ");
vex_printf("if (fr_dst != fr_src) { ");
if (i->Pin.FpCMov.cond.test != Pct_ALWAYS) {
- vex_printf("if (%s) { ", showPPC32CondCode(i->Pin.FpCMov.cond));
+ vex_printf("if (%s) { ", showPPCCondCode(i->Pin.FpCMov.cond));
}
vex_printf("fmr ");
- ppHRegPPC32(i->Pin.FpCMov.dst);
+ ppHRegPPC(i->Pin.FpCMov.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.FpCMov.src);
+ ppHRegPPC(i->Pin.FpCMov.src);
if (i->Pin.FpCMov.cond.test != Pct_ALWAYS)
vex_printf(" }");
vex_printf(" }");
return;
case Pin_FpLdFPSCR:
vex_printf("mtfsf 0xFF,");
- ppHRegPPC32(i->Pin.FpLdFPSCR.src);
+ ppHRegPPC(i->Pin.FpLdFPSCR.src);
return;
case Pin_FpCmp:
vex_printf("fcmpo %%cr1,");
- ppHRegPPC32(i->Pin.FpCmp.srcL);
+ ppHRegPPC(i->Pin.FpCmp.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.FpCmp.srcR);
+ ppHRegPPC(i->Pin.FpCmp.srcR);
vex_printf("; mfcr ");
- ppHRegPPC32(i->Pin.FpCmp.dst);
+ ppHRegPPC(i->Pin.FpCmp.dst);
vex_printf("; rlwinm ");
- ppHRegPPC32(i->Pin.FpCmp.dst);
+ ppHRegPPC(i->Pin.FpCmp.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.FpCmp.dst);
+ ppHRegPPC(i->Pin.FpCmp.dst);
vex_printf(",8,28,31");
return;
case Pin_RdWrLR:
vex_printf("%s ", i->Pin.RdWrLR.wrLR ? "mtlr" : "mflr");
- ppHRegPPC32(i->Pin.RdWrLR.gpr);
+ ppHRegPPC(i->Pin.RdWrLR.gpr);
return;
case Pin_AvLdSt: {
vex_printf("lv%sx ", str_size);
else
vex_printf("stv%sx ", str_size);
- ppHRegPPC32(i->Pin.AvLdSt.reg);
+ ppHRegPPC(i->Pin.AvLdSt.reg);
vex_printf(",");
if (i->Pin.AvLdSt.addr->tag == Pam_IR)
vex_printf("%%r30");
else
- ppHRegPPC32(i->Pin.AvLdSt.addr->Pam.RR.index);
+ ppHRegPPC(i->Pin.AvLdSt.addr->Pam.RR.index);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvLdSt.addr->Pam.RR.base);
+ ppHRegPPC(i->Pin.AvLdSt.addr->Pam.RR.base);
return;
}
case Pin_AvUnary:
- vex_printf("%s ", showPPC32AvOp(i->Pin.AvUnary.op));
- ppHRegPPC32(i->Pin.AvUnary.dst);
+ vex_printf("%s ", showPPCAvOp(i->Pin.AvUnary.op));
+ ppHRegPPC(i->Pin.AvUnary.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvUnary.src);
+ ppHRegPPC(i->Pin.AvUnary.src);
return;
case Pin_AvBinary:
- vex_printf("%s ", showPPC32AvOp(i->Pin.AvBinary.op));
- ppHRegPPC32(i->Pin.AvBinary.dst);
+ vex_printf("%s ", showPPCAvOp(i->Pin.AvBinary.op));
+ ppHRegPPC(i->Pin.AvBinary.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBinary.srcL);
+ ppHRegPPC(i->Pin.AvBinary.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBinary.srcR);
+ ppHRegPPC(i->Pin.AvBinary.srcR);
return;
case Pin_AvBin8x16:
- vex_printf("%s(b) ", showPPC32AvOp(i->Pin.AvBin8x16.op));
- ppHRegPPC32(i->Pin.AvBin8x16.dst);
+ vex_printf("%s(b) ", showPPCAvOp(i->Pin.AvBin8x16.op));
+ ppHRegPPC(i->Pin.AvBin8x16.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBin8x16.srcL);
+ ppHRegPPC(i->Pin.AvBin8x16.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBin8x16.srcR);
+ ppHRegPPC(i->Pin.AvBin8x16.srcR);
return;
case Pin_AvBin16x8:
- vex_printf("%s(h) ", showPPC32AvOp(i->Pin.AvBin16x8.op));
- ppHRegPPC32(i->Pin.AvBin16x8.dst);
+ vex_printf("%s(h) ", showPPCAvOp(i->Pin.AvBin16x8.op));
+ ppHRegPPC(i->Pin.AvBin16x8.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBin16x8.srcL);
+ ppHRegPPC(i->Pin.AvBin16x8.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBin16x8.srcR);
+ ppHRegPPC(i->Pin.AvBin16x8.srcR);
return;
case Pin_AvBin32x4:
- vex_printf("%s(w) ", showPPC32AvOp(i->Pin.AvBin32x4.op));
- ppHRegPPC32(i->Pin.AvBin32x4.dst);
+ vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvBin32x4.op));
+ ppHRegPPC(i->Pin.AvBin32x4.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBin32x4.srcL);
+ ppHRegPPC(i->Pin.AvBin32x4.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBin32x4.srcR);
+ ppHRegPPC(i->Pin.AvBin32x4.srcR);
return;
case Pin_AvBin32Fx4:
- vex_printf("%s ", showPPC32AvFpOp(i->Pin.AvBin32Fx4.op));
- ppHRegPPC32(i->Pin.AvBin32Fx4.dst);
+ vex_printf("%s ", showPPCAvFpOp(i->Pin.AvBin32Fx4.op));
+ ppHRegPPC(i->Pin.AvBin32Fx4.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBin32Fx4.srcL);
+ ppHRegPPC(i->Pin.AvBin32Fx4.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvBin32Fx4.srcR);
+ ppHRegPPC(i->Pin.AvBin32Fx4.srcR);
return;
case Pin_AvUn32Fx4:
- vex_printf("%s ", showPPC32AvFpOp(i->Pin.AvUn32Fx4.op));
- ppHRegPPC32(i->Pin.AvUn32Fx4.dst);
+ vex_printf("%s ", showPPCAvFpOp(i->Pin.AvUn32Fx4.op));
+ ppHRegPPC(i->Pin.AvUn32Fx4.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvUn32Fx4.src);
+ ppHRegPPC(i->Pin.AvUn32Fx4.src);
return;
case Pin_AvPerm:
vex_printf("vperm ");
- ppHRegPPC32(i->Pin.AvPerm.dst);
+ ppHRegPPC(i->Pin.AvPerm.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvPerm.srcL);
+ ppHRegPPC(i->Pin.AvPerm.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvPerm.srcR);
+ ppHRegPPC(i->Pin.AvPerm.srcR);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvPerm.ctl);
+ ppHRegPPC(i->Pin.AvPerm.ctl);
return;
case Pin_AvSel:
vex_printf("vsel ");
- ppHRegPPC32(i->Pin.AvSel.dst);
+ ppHRegPPC(i->Pin.AvSel.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvSel.srcL);
+ ppHRegPPC(i->Pin.AvSel.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvSel.srcR);
+ ppHRegPPC(i->Pin.AvSel.srcR);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvSel.ctl);
+ ppHRegPPC(i->Pin.AvSel.ctl);
return;
case Pin_AvShlDbl:
vex_printf("vsldoi ");
- ppHRegPPC32(i->Pin.AvShlDbl.dst);
+ ppHRegPPC(i->Pin.AvShlDbl.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvShlDbl.srcL);
+ ppHRegPPC(i->Pin.AvShlDbl.srcL);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvShlDbl.srcR);
+ ppHRegPPC(i->Pin.AvShlDbl.srcR);
vex_printf(",%d", i->Pin.AvShlDbl.shift);
return;
UChar ch_sz = toUChar( (sz == 8) ? 'b' : (sz == 16) ? 'h' : 'w' );
vex_printf("vsplt%s%c ",
i->Pin.AvSplat.src->tag == Pvi_Imm ? "is" : "", ch_sz);
- ppHRegPPC32(i->Pin.AvSplat.dst);
+ ppHRegPPC(i->Pin.AvSplat.dst);
vex_printf(",");
- ppPPC32VI5s(i->Pin.AvSplat.src);
+ ppPPCVI5s(i->Pin.AvSplat.src);
if (i->Pin.AvSplat.src->tag == Pvi_Reg)
vex_printf(", %d", (128/sz)-1); /* louis lane */
return;
}
case Pin_AvCMov:
- vex_printf("avcmov (%s) ", showPPC32CondCode(i->Pin.AvCMov.cond));
- ppHRegPPC32(i->Pin.AvCMov.dst);
+ vex_printf("avcmov (%s) ", showPPCCondCode(i->Pin.AvCMov.cond));
+ ppHRegPPC(i->Pin.AvCMov.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvCMov.src);
+ ppHRegPPC(i->Pin.AvCMov.src);
vex_printf(": ");
vex_printf("if (v_dst != v_src) { ");
if (i->Pin.AvCMov.cond.test != Pct_ALWAYS) {
- vex_printf("if (%s) { ", showPPC32CondCode(i->Pin.AvCMov.cond));
+ vex_printf("if (%s) { ", showPPCCondCode(i->Pin.AvCMov.cond));
}
vex_printf("vmr ");
- ppHRegPPC32(i->Pin.AvCMov.dst);
+ ppHRegPPC(i->Pin.AvCMov.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.AvCMov.src);
+ ppHRegPPC(i->Pin.AvCMov.src);
if (i->Pin.FpCMov.cond.test != Pct_ALWAYS)
vex_printf(" }");
vex_printf(" }");
case Pin_AvLdVSCR:
vex_printf("mtvscr ");
- ppHRegPPC32(i->Pin.AvLdVSCR.src);
+ ppHRegPPC(i->Pin.AvLdVSCR.src);
return;
default:
- vex_printf("\nppPPC32Instr(ppc32): No such tag(%d)\n", (Int)i->tag);
- vpanic("ppPPC32Instr(ppc32)");
+ vex_printf("\nppPPCInstr: No such tag(%d)\n", (Int)i->tag);
+ vpanic("ppPPCInstr");
}
}
/* --------- Helpers for register allocation. --------- */
-void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 )
+void getRegUsage_PPCInstr ( HRegUsage* u, PPCInstr* i, Bool mode64 )
{
initHRegUsage(u);
switch (i->tag) {
addHRegUse(u, HRmWrite, i->Pin.LI.dst);
break;
case Pin_Alu:
- addHRegUse(u, HRmRead, i->Pin.Alu.srcL);
- addRegUsage_PPC32RH(u, i->Pin.Alu.srcR);
+ addHRegUse(u, HRmRead, i->Pin.Alu.srcL);
+ addRegUsage_PPCRH(u, i->Pin.Alu.srcR);
addHRegUse(u, HRmWrite, i->Pin.Alu.dst);
return;
case Pin_Shft:
addHRegUse(u, HRmRead, i->Pin.Shft.srcL);
- addRegUsage_PPC32RH(u, i->Pin.Shft.srcR);
+ addRegUsage_PPCRH(u, i->Pin.Shft.srcR);
addHRegUse(u, HRmWrite, i->Pin.Shft.dst);
return;
- case Pin_AddSubC32:
- addHRegUse(u, HRmWrite, i->Pin.AddSubC32.dst);
- addHRegUse(u, HRmRead, i->Pin.AddSubC32.srcL);
- addHRegUse(u, HRmRead, i->Pin.AddSubC32.srcR);
+ case Pin_AddSubC:
+ addHRegUse(u, HRmWrite, i->Pin.AddSubC.dst);
+ addHRegUse(u, HRmRead, i->Pin.AddSubC.srcL);
+ addHRegUse(u, HRmRead, i->Pin.AddSubC.srcR);
return;
case Pin_Cmp:
addHRegUse(u, HRmRead, i->Pin.Cmp.srcL);
- addRegUsage_PPC32RH(u, i->Pin.Cmp.srcR);
+ addRegUsage_PPCRH(u, i->Pin.Cmp.srcR);
return;
case Pin_Unary:
- addHRegUse(u, HRmWrite, i->Pin.Unary32.dst);
- addHRegUse(u, HRmRead, i->Pin.Unary32.src);
+ addHRegUse(u, HRmWrite, i->Pin.Unary.dst);
+ addHRegUse(u, HRmRead, i->Pin.Unary.src);
return;
case Pin_MulL:
addHRegUse(u, HRmWrite, i->Pin.MulL.dst);
- addHRegUse(u, HRmRead, i->Pin.MulL.srcL);
- addHRegUse(u, HRmRead, i->Pin.MulL.srcR);
+ addHRegUse(u, HRmRead, i->Pin.MulL.srcL);
+ addHRegUse(u, HRmRead, i->Pin.MulL.srcR);
return;
case Pin_Div:
addHRegUse(u, HRmWrite, i->Pin.Div.dst);
- addHRegUse(u, HRmRead, i->Pin.Div.srcL);
- addHRegUse(u, HRmRead, i->Pin.Div.srcR);
+ addHRegUse(u, HRmRead, i->Pin.Div.srcL);
+ addHRegUse(u, HRmRead, i->Pin.Div.srcR);
return;
case Pin_Call: {
UInt argir;
/* Finally, there is the issue that the insn trashes a
register because the literal target address has to be
loaded into a register. %r10 seems a suitable victim.
- (Can't use %r0, as use ops that interpret it as value zero). */
+ (Can't use %r0, as use ops that interpret it as value zero). */
addHRegUse(u, HRmWrite, hregPPC_GPR10(mode64));
/* Upshot of this is that the assembler really must use %r10,
and no other, as a destination temporary. */
return;
}
case Pin_Goto:
- addRegUsage_PPC32RI(u, i->Pin.Goto.dst);
+ addRegUsage_PPCRI(u, i->Pin.Goto.dst);
/* GPR3 holds destination address from Pin_Goto */
addHRegUse(u, HRmWrite, hregPPC_GPR3(mode64));
if (i->Pin.Goto.jk != Ijk_Boring
addHRegUse(u, HRmWrite, GuestStatePtr(mode64));
return;
case Pin_CMov:
- addRegUsage_PPC32RI(u, i->Pin.CMov.src);
+ addRegUsage_PPCRI(u, i->Pin.CMov.src);
addHRegUse(u, HRmWrite, i->Pin.CMov.dst);
return;
case Pin_Load:
- addRegUsage_PPC32AMode(u, i->Pin.Load.src);
+ addRegUsage_PPCAMode(u, i->Pin.Load.src);
addHRegUse(u, HRmWrite, i->Pin.Load.dst);
return;
case Pin_Store:
- addHRegUse(u, HRmRead, i->Pin.Store.src);
- addRegUsage_PPC32AMode(u, i->Pin.Store.dst);
+ addHRegUse(u, HRmRead, i->Pin.Store.src);
+ addRegUsage_PPCAMode(u, i->Pin.Store.dst);
return;
- case Pin_Set32:
- addHRegUse(u, HRmWrite, i->Pin.Set32.dst);
+ case Pin_Set:
+ addHRegUse(u, HRmWrite, i->Pin.Set.dst);
return;
case Pin_MfCR:
addHRegUse(u, HRmWrite, i->Pin.MfCR.dst);
case Pin_FpLdSt:
addHRegUse(u, (i->Pin.FpLdSt.isLoad ? HRmWrite : HRmRead),
i->Pin.FpLdSt.reg);
- addRegUsage_PPC32AMode(u, i->Pin.FpLdSt.addr);
+ addRegUsage_PPCAMode(u, i->Pin.FpLdSt.addr);
return;
case Pin_FpF64toF32:
addHRegUse(u, HRmWrite, i->Pin.FpF64toF32.dst);
addHRegUse(u, HRmRead, i->Pin.FpF64toF32.src);
return;
case Pin_FpF64toI32:
- addHRegUse(u, HRmWrite, i->Pin.FpF64toI32.dst);
- addHRegUse(u, HRmWrite, hregPPC32_FPR7());
- addHRegUse(u, HRmRead, i->Pin.FpF64toI32.src);
+ addHRegUse(u, HRmWrite, i->Pin.FpF64toI32.dst);
+ addHRegUse(u, HRmWrite, hregPPC_FPR7());
+ addHRegUse(u, HRmRead, i->Pin.FpF64toI32.src);
return;
case Pin_FpF64toI64:
addHRegUse(u, HRmWrite, i->Pin.FpF64toI64.dst);
- addHRegUse(u, HRmWrite, hregPPC32_FPR7());
+ addHRegUse(u, HRmWrite, hregPPC_FPR7());
addHRegUse(u, HRmRead, i->Pin.FpF64toI64.src);
return;
case Pin_FpI64toF64:
addHRegUse(u, HRmWrite, i->Pin.FpI64toF64.dst);
- addHRegUse(u, HRmWrite, hregPPC32_FPR7());
+ addHRegUse(u, HRmWrite, hregPPC_FPR7());
addHRegUse(u, HRmRead, i->Pin.FpI64toF64.src);
return;
case Pin_FpCMov:
addHRegUse(u, HRmModify, i->Pin.FpCMov.dst);
- addHRegUse(u, HRmRead, i->Pin.FpCMov.src);
+ addHRegUse(u, HRmRead, i->Pin.FpCMov.src);
return;
case Pin_FpLdFPSCR:
addHRegUse(u, HRmRead, i->Pin.FpLdFPSCR.src);
return;
case Pin_FpCmp:
addHRegUse(u, HRmWrite, i->Pin.FpCmp.dst);
- addHRegUse(u, HRmRead, i->Pin.FpCmp.srcL);
- addHRegUse(u, HRmRead, i->Pin.FpCmp.srcR);
+ addHRegUse(u, HRmRead, i->Pin.FpCmp.srcL);
+ addHRegUse(u, HRmRead, i->Pin.FpCmp.srcR);
return;
case Pin_RdWrLR:
i->Pin.AvLdSt.reg);
if (i->Pin.AvLdSt.addr->tag == Pam_IR)
addHRegUse(u, HRmWrite, hregPPC_GPR30(mode64));
- addRegUsage_PPC32AMode(u, i->Pin.AvLdSt.addr);
+ addRegUsage_PPCAMode(u, i->Pin.AvLdSt.addr);
return;
case Pin_AvUnary:
addHRegUse(u, HRmWrite, i->Pin.AvUnary.dst);
addHRegUse(u, HRmRead, i->Pin.AvBin32Fx4.srcL);
addHRegUse(u, HRmRead, i->Pin.AvBin32Fx4.srcR);
if (i->Pin.AvBin32Fx4.op == Pavfp_MULF)
- addHRegUse(u, HRmWrite, hregPPC32_VR29());
+ addHRegUse(u, HRmWrite, hregPPC_VR29());
return;
case Pin_AvUn32Fx4:
addHRegUse(u, HRmWrite, i->Pin.AvUn32Fx4.dst);
addHRegUse(u, HRmRead, i->Pin.AvShlDbl.srcR);
return;
case Pin_AvSplat:
- addHRegUse(u, HRmWrite, i->Pin.AvSplat.dst);
- addRegUsage_PPC32VI5s(u, i->Pin.AvSplat.src);
+ addHRegUse(u, HRmWrite, i->Pin.AvSplat.dst);
+ addRegUsage_PPCVI5s(u, i->Pin.AvSplat.src);
return;
case Pin_AvCMov:
addHRegUse(u, HRmModify, i->Pin.AvCMov.dst);
- addHRegUse(u, HRmRead, i->Pin.AvCMov.src);
+ addHRegUse(u, HRmRead, i->Pin.AvCMov.src);
return;
case Pin_AvLdVSCR:
addHRegUse(u, HRmRead, i->Pin.AvLdVSCR.src);
return;
default:
- ppPPC32Instr(i, mode64);
- vpanic("getRegUsage_PPC32Instr");
+ ppPPCInstr(i, mode64);
+ vpanic("getRegUsage_PPCInstr");
}
}
*r = lookupHRegRemap(m, *r);
}
-void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 )
+void mapRegs_PPCInstr ( HRegRemap* m, PPCInstr* i, Bool mode64 )
{
switch (i->tag) {
case Pin_LI:
case Pin_Alu:
mapReg(m, &i->Pin.Alu.dst);
mapReg(m, &i->Pin.Alu.srcL);
- mapRegs_PPC32RH(m, i->Pin.Alu.srcR);
+ mapRegs_PPCRH(m, i->Pin.Alu.srcR);
return;
case Pin_Shft:
mapReg(m, &i->Pin.Shft.dst);
mapReg(m, &i->Pin.Shft.srcL);
- mapRegs_PPC32RH(m, i->Pin.Shft.srcR);
+ mapRegs_PPCRH(m, i->Pin.Shft.srcR);
return;
- case Pin_AddSubC32:
- mapReg(m, &i->Pin.AddSubC32.dst);
- mapReg(m, &i->Pin.AddSubC32.srcL);
- mapReg(m, &i->Pin.AddSubC32.srcR);
+ case Pin_AddSubC:
+ mapReg(m, &i->Pin.AddSubC.dst);
+ mapReg(m, &i->Pin.AddSubC.srcL);
+ mapReg(m, &i->Pin.AddSubC.srcR);
return;
case Pin_Cmp:
mapReg(m, &i->Pin.Cmp.srcL);
- mapRegs_PPC32RH(m, i->Pin.Cmp.srcR);
+ mapRegs_PPCRH(m, i->Pin.Cmp.srcR);
return;
case Pin_Unary:
- mapReg(m, &i->Pin.Unary32.dst);
- mapReg(m, &i->Pin.Unary32.src);
+ mapReg(m, &i->Pin.Unary.dst);
+ mapReg(m, &i->Pin.Unary.src);
return;
case Pin_MulL:
mapReg(m, &i->Pin.MulL.dst);
case Pin_Call:
return;
case Pin_Goto:
- mapRegs_PPC32RI(m, i->Pin.Goto.dst);
+ mapRegs_PPCRI(m, i->Pin.Goto.dst);
return;
case Pin_CMov:
- mapRegs_PPC32RI(m, i->Pin.CMov.src);
+ mapRegs_PPCRI(m, i->Pin.CMov.src);
mapReg(m, &i->Pin.CMov.dst);
return;
case Pin_Load:
- mapRegs_PPC32AMode(m, i->Pin.Load.src);
+ mapRegs_PPCAMode(m, i->Pin.Load.src);
mapReg(m, &i->Pin.Load.dst);
return;
case Pin_Store:
mapReg(m, &i->Pin.Store.src);
- mapRegs_PPC32AMode(m, i->Pin.Store.dst);
+ mapRegs_PPCAMode(m, i->Pin.Store.dst);
return;
- case Pin_Set32:
- mapReg(m, &i->Pin.Set32.dst);
+ case Pin_Set:
+ mapReg(m, &i->Pin.Set.dst);
return;
case Pin_MfCR:
mapReg(m, &i->Pin.MfCR.dst);
return;
case Pin_FpLdSt:
mapReg(m, &i->Pin.FpLdSt.reg);
- mapRegs_PPC32AMode(m, i->Pin.FpLdSt.addr);
+ mapRegs_PPCAMode(m, i->Pin.FpLdSt.addr);
return;
case Pin_FpF64toF32:
mapReg(m, &i->Pin.FpF64toF32.dst);
return;
case Pin_AvLdSt:
mapReg(m, &i->Pin.AvLdSt.reg);
- mapRegs_PPC32AMode(m, i->Pin.AvLdSt.addr);
+ mapRegs_PPCAMode(m, i->Pin.AvLdSt.addr);
return;
case Pin_AvUnary:
mapReg(m, &i->Pin.AvUnary.dst);
return;
case Pin_AvSplat:
mapReg(m, &i->Pin.AvSplat.dst);
- mapRegs_PPC32VI5s(m, i->Pin.AvSplat.src);
+ mapRegs_PPCVI5s(m, i->Pin.AvSplat.src);
return;
case Pin_AvCMov:
mapReg(m, &i->Pin.AvCMov.dst);
return;
default:
- ppPPC32Instr(i, mode64);
- vpanic("mapRegs_PPC32Instr");
+ ppPPCInstr(i, mode64);
+ vpanic("mapRegs_PPCInstr");
}
}
source and destination to *src and *dst. If in doubt say No. Used
by the register allocator to do move coalescing.
*/
-Bool isMove_PPC32Instr ( PPC32Instr* i, HReg* src, HReg* dst )
+Bool isMove_PPCInstr ( PPCInstr* i, HReg* src, HReg* dst )
{
/* Moves between integer regs */
if (i->tag == Pin_Alu) {
/* Generate ppc32 spill/reload instructions under the direction of the
register allocator. Note it's critical these don't write the
condition codes. */
-PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 )
+PPCInstr* genSpill_PPC ( HReg rreg, UShort offsetB, Bool mode64 )
{
- PPC32AMode* am;
+ PPCAMode* am;
vassert(!hregIsVirtual(rreg));
- am = PPC32AMode_IR(offsetB, GuestStatePtr(mode64));
+ am = PPCAMode_IR( offsetB, GuestStatePtr(mode64) );
switch (hregClass(rreg)) {
case HRcInt64:
vassert(mode64);
- return PPC32Instr_Store( 8, am, rreg, mode64 );
+ return PPCInstr_Store( 8, am, rreg, mode64 );
case HRcInt32:
vassert(!mode64);
- return PPC32Instr_Store( 4, am, rreg, mode64 );
+ return PPCInstr_Store( 4, am, rreg, mode64 );
case HRcFlt64:
- return PPC32Instr_FpLdSt ( False/*store*/, 8, rreg, am );
+ return PPCInstr_FpLdSt ( False/*store*/, 8, rreg, am );
case HRcVec128:
// XXX: GPR30 used as spill register to kludge AltiVec AMode_IR
- return PPC32Instr_AvLdSt ( False/*store*/, 16, rreg, am );
+ return PPCInstr_AvLdSt ( False/*store*/, 16, rreg, am );
default:
ppHRegClass(hregClass(rreg));
- vpanic("genSpill_PPC32: unimplemented regclass");
+ vpanic("genSpill_PPC: unimplemented regclass");
}
}
-PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 )
+PPCInstr* genReload_PPC ( HReg rreg, UShort offsetB, Bool mode64 )
{
- PPC32AMode* am;
+ PPCAMode* am;
vassert(!hregIsVirtual(rreg));
- am = PPC32AMode_IR(offsetB, GuestStatePtr(mode64));
+ am = PPCAMode_IR( offsetB, GuestStatePtr(mode64) );
switch (hregClass(rreg)) {
case HRcInt64:
vassert(mode64);
- return PPC32Instr_Load( 8, False, rreg, am, mode64 );
+ return PPCInstr_Load( 8, False, rreg, am, mode64 );
case HRcInt32:
vassert(!mode64);
- return PPC32Instr_Load( 4, False, rreg, am, mode64 );
+ return PPCInstr_Load( 4, False, rreg, am, mode64 );
case HRcFlt64:
- return PPC32Instr_FpLdSt ( True/*load*/, 8, rreg, am );
+ return PPCInstr_FpLdSt ( True/*load*/, 8, rreg, am );
case HRcVec128:
// XXX: GPR30 used as spill register to kludge AltiVec AMode_IR
- return PPC32Instr_AvLdSt ( True/*load*/, 16, rreg, am );
+ return PPCInstr_AvLdSt ( True/*load*/, 16, rreg, am );
default:
ppHRegClass(hregClass(rreg));
- vpanic("genReload_PPC32: unimplemented regclass");
+ vpanic("genReload_PPC: unimplemented regclass");
}
}
as per PPC32 p576
*/
-static UChar* mkFormD ( UChar* p, UInt opc1, UInt r1, UInt r2, UInt imm )
+static UChar* mkFormD ( UChar* p, UInt opc1,
+ UInt r1, UInt r2, UInt imm )
{
UInt theInstr;
vassert(opc1 < 0x40);
vassert(opc2 < 0x08);
imm2 = ((imm2 & 0x1F) << 1) | (imm2 >> 5);
theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
- ((imm1 & 0x1F)<<11) | (imm2<<5) | (opc2<<2) | ((imm1 >> 5)<<1));
+ ((imm1 & 0x1F)<<11) | (imm2<<5) |
+ (opc2<<2) | ((imm1 >> 5)<<1));
return emit32(p, theInstr);
}
vassert(r3 < 0x20);
vassert(opc2 < 0x400);
vassert(b0 < 0x2);
- theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (opc2<<1) | (b0));
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+ (r3<<11) | (opc2<<1) | (b0));
return emit32(p, theInstr);
}
vassert(f3 < 0x20);
vassert(opc2 < 0x400);
vassert(b0 < 0x2);
- theInstr = ((opc1<<26) | (f1<<21) | (f2<<16) | (f3<<11) | (opc2<<1) | (b0));
+ theInstr = ((opc1<<26) | (f1<<21) | (f2<<16) |
+ (f3<<11) | (opc2<<1) | (b0));
return emit32(p, theInstr);
}
case 371: // mftb
case 467: // mtspr
vassert(f2 < 0x400);
- f2 = ((f2>>5) & 0x1F) | ((f2 & 0x1F)<<5); // re-arrange split field
+ // re-arrange split field
+ f2 = ((f2>>5) & 0x1F) | ((f2 & 0x1F)<<5);
break;
- default: vpanic("mkFormXFX(PPC32)");
+ default: vpanic("mkFormXFX(ppch)");
}
theInstr = ((31<<26) | (r1<<21) | (f2<<11) | (opc2<<1));
return emit32(p, theInstr);
vassert(BD < 0x4000);
vassert(AA < 0x2);
vassert(LK < 0x2);
- theInstr = ((16<<26) | (BO<<21) | (BI<<16) | (BD<<2) | (AA<<1) | (LK));
+ theInstr = ((16<<26) | (BO<<21) | (BI<<16) |
+ (BD<<2) | (AA<<1) | (LK));
return emit32(p, theInstr);
}
}
static UChar* doAMode_IR ( UChar* p, UInt opc1, UInt rSD,
- PPC32AMode* am, Bool mode64 )
+ PPCAMode* am, Bool mode64 )
{
UInt rA, idx;
vassert(am->tag == Pam_IR);
}
static UChar* doAMode_RR ( UChar* p, UInt opc1, UInt opc2,
- UInt rSD, PPC32AMode* am, Bool mode64 )
+ UInt rSD, PPCAMode* am, Bool mode64 )
{
UInt rA, rB;
vassert(am->tag == Pam_RR);
{
vassert(r_dst < 0x20);
-// if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
- if (imm == (ULong)(Long)(Int)(Short)(UShort)imm) {
+ if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
// sign-extendable from 16 bits
// addi r_dst,0,imm => li r_dst,imm
p = mkFormD(p, 14, r_dst, 0, imm & 0xFFFF);
} else {
-// if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
- if (imm == (ULong)(Long)(Int)(UInt)imm) {
+ if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
// sign-extendable from 32 bits
// addis r_dst,r0,(imm>>16) => lis r_dst, (imm>>16)
vassert(r3 < 0x20);
vassert(Rc < 0x2);
vassert(opc2 < 0x400);
- theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (Rc<<10) | opc2);
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+ (r3<<11) | (Rc<<10) | opc2);
return emit32(p, theInstr);
}
vassert(r3 < 0x20);
vassert(r4 < 0x20);
vassert(opc2 < 0x40);
- theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (r4<<6) | opc2);
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+ (r3<<11) | (r4<<6) | opc2);
return emit32(p, theInstr);
}
use a call-return scheme to get from the dispatcher to generated
code and back.
*/
-Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i,
- Bool mode64, void* dispatch )
+Int emit_PPCInstr ( UChar* buf, Int nbuf, PPCInstr* i,
+ Bool mode64, void* dispatch )
{
UChar* p = &buf[0];
UChar* ptmp = p;
vassert(nbuf >= 32);
-// vex_printf("asm ");ppPPC32Instr(i, mode64); vex_printf("\n");
+// vex_printf("asm ");ppPPCInstr(i, mode64); vex_printf("\n");
switch (i->tag) {
goto done;
case Pin_Alu: {
- PPC32RH* srcR = i->Pin.Alu.srcR;
- Bool immR = toBool(srcR->tag == Prh_Imm);
- UInt r_dst = iregNo(i->Pin.Alu.dst, mode64);
- UInt r_srcL = iregNo(i->Pin.Alu.srcL, mode64);
- UInt r_srcR = immR ? (-1)/*bogus*/ :
- iregNo(srcR->Prh.Reg.reg, mode64);
+ PPCRH* srcR = i->Pin.Alu.srcR;
+ Bool immR = toBool(srcR->tag == Prh_Imm);
+ UInt r_dst = iregNo(i->Pin.Alu.dst, mode64);
+ UInt r_srcL = iregNo(i->Pin.Alu.srcL, mode64);
+ UInt r_srcR = immR ? (-1)/*bogus*/ :
+ iregNo(srcR->Prh.Reg.reg, mode64);
switch (i->Pin.Alu.op) {
case Palu_ADD:
}
case Pin_Shft: {
- PPC32RH* srcR = i->Pin.Shft.srcR;
- Bool sz32 = i->Pin.Shft.sz32;
- Bool immR = toBool(srcR->tag == Prh_Imm);
- UInt r_dst = iregNo(i->Pin.Shft.dst, mode64);
- UInt r_srcL = iregNo(i->Pin.Shft.srcL, mode64);
- UInt r_srcR = immR ? (-1)/*bogus*/ :
- iregNo(srcR->Prh.Reg.reg, mode64);
+ PPCRH* srcR = i->Pin.Shft.srcR;
+ Bool sz32 = i->Pin.Shft.sz32;
+ Bool immR = toBool(srcR->tag == Prh_Imm);
+ UInt r_dst = iregNo(i->Pin.Shft.dst, mode64);
+ UInt r_srcL = iregNo(i->Pin.Shft.srcL, mode64);
+ UInt r_srcR = immR ? (-1)/*bogus*/ :
+ iregNo(srcR->Prh.Reg.reg, mode64);
if (!mode64)
vassert(sz32);
goto done;
}
- case Pin_AddSubC32: {
- Bool isAdd = i->Pin.AddSubC32.isAdd;
- Bool setC = i->Pin.AddSubC32.setC;
- UInt r_srcL = iregNo(i->Pin.AddSubC32.srcL, mode64);
- UInt r_srcR = iregNo(i->Pin.AddSubC32.srcR, mode64);
- UInt r_dst = iregNo(i->Pin.AddSubC32.dst, mode64);
+ case Pin_AddSubC: {
+ Bool isAdd = i->Pin.AddSubC.isAdd;
+ Bool setC = i->Pin.AddSubC.setC;
+ UInt r_srcL = iregNo(i->Pin.AddSubC.srcL, mode64);
+ UInt r_srcR = iregNo(i->Pin.AddSubC.srcR, mode64);
+ UInt r_dst = iregNo(i->Pin.AddSubC.dst, mode64);
if (isAdd) {
if (setC) /* addc (PPC32 p348) */
UInt fld1 = i->Pin.Cmp.crfD << 2;
UInt r_srcL = iregNo(i->Pin.Cmp.srcL, mode64);
UInt r_srcR, imm_srcR;
- PPC32RH* srcR = i->Pin.Cmp.srcR;
+ PPCRH* srcR = i->Pin.Cmp.srcR;
if (!mode64) // cmp double word invalid for mode32
vassert(sz32);
}
case Pin_Unary: {
- UInt r_dst = iregNo(i->Pin.Unary32.dst, mode64);
- UInt r_src = iregNo(i->Pin.Unary32.src, mode64);
+ UInt r_dst = iregNo(i->Pin.Unary.dst, mode64);
+ UInt r_src = iregNo(i->Pin.Unary.src, mode64);
- switch (i->Pin.Unary32.op) {
+ switch (i->Pin.Unary.op) {
case Pun_NOT: // nor r_dst,r_src,r_src
p = mkFormX(p, 31, r_src, r_dst, r_src, 124, 0);
break;
}
case Pin_Call: {
- PPC32CondCode cond = i->Pin.Call.cond;
- UInt r_dst = 10;
+ PPCCondCode cond = i->Pin.Call.cond;
+ UInt r_dst = 10;
/* As per detailed comment for Pin_Call in
- getRegUsage_PPC32Instr above, %r10 is used as an address temp */
+ getRegUsage_PPCInstr above, %r10 is used as an address temp */
/* jump over the following insns if condition does not hold */
if (cond.test != Pct_ALWAYS) {
/* don't know how many bytes to jump over yet...
make space for a jump instruction and fill in later. */
ptmp = p; /* fill in this bit later */
- p += 4; // p += 4
+ p += 4; // p += 4
}
- /* load target to r_dst */
- p = mkLoadImm(p, r_dst, i->Pin.Call.target, mode64); // p += 4|8|20
+ /* load target to r_dst */ // p += 4|8|20
+ p = mkLoadImm(p, r_dst, i->Pin.Call.target, mode64);
/* mtspr 9,r_dst => move r_dst to count register */
- p = mkFormXFX(p, r_dst, 9, 467); // p += 4
+ p = mkFormXFX(p, r_dst, 9, 467); // p += 4
/* bctrl => branch to count register (and save to lr) */
- p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 1); // p += 4
+ p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 1); // p += 4
/* Fix up the conditional jump, if there was one. */
if (cond.test != Pct_ALWAYS) {
Int delta = p - ptmp;
vassert(delta >= 16 && delta <= 32);
/* bc !ct,cf,delta */
- mkFormB(ptmp, invertCondTest(cond.test), cond.flag, (delta>>2), 0, 0);
+ mkFormB(ptmp, invertCondTest(cond.test),
+ cond.flag, (delta>>2), 0, 0);
}
goto done;
}
case Pin_Goto: {
- UInt trc = 0;
- UChar r_return = 3; /* Put target addr into %r3 */
- PPC32CondCode cond = i->Pin.Goto.cond;
+ UInt trc = 0;
+ UChar r_ret = 3; /* Put target addr into %r3 */
+ PPCCondCode cond = i->Pin.Goto.cond;
UInt r_dst;
ULong imm_dst;
break;
default:
ppIRJumpKind(i->Pin.Goto.jk);
- vpanic("emit_PPC32Instr.Pin_Goto: unknown jump kind");
+ vpanic("emit_PPCInstr.Pin_Goto: unknown jump kind");
}
if (trc !=0) {
vassert(trc < 0x10000);
p = mkFormD(p, 14, 31, 0, trc); // p += 4
}
- /* Get the destination address into %r_return */
+ /* Get the destination address into %r_ret */
if (i->Pin.Goto.dst->tag == Pri_Imm) {
imm_dst = i->Pin.Goto.dst->Pri.Imm;
- p = mkLoadImm(p, r_return, imm_dst, mode64); // p += 4|8|20
+ p = mkLoadImm(p, r_ret, imm_dst, mode64); // p += 4|8|20
} else {
vassert(i->Pin.Goto.dst->tag == Pri_Reg);
r_dst = iregNo(i->Pin.Goto.dst->Pri.Reg, mode64);
- p = mkMoveReg(p, r_return, r_dst); // p += 4
+ p = mkMoveReg(p, r_ret, r_dst); // p += 4
}
/* blr */
Int delta = p - ptmp;
vassert(delta >= 12 && delta <= 32);
/* bc !ct,cf,delta */
- mkFormB(ptmp, invertCondTest(cond.test), cond.flag, delta>>2, 0, 0);
+ mkFormB(ptmp, invertCondTest(cond.test),
+ cond.flag, delta>>2, 0, 0);
}
goto done;
}
case Pin_CMov: {
- UInt r_dst, r_src;
+ UInt r_dst, r_src;
ULong imm_src;
- PPC32CondCode cond;
+ PPCCondCode cond;
vassert(i->Pin.CMov.cond.test != Pct_ALWAYS);
r_dst = iregNo(i->Pin.CMov.dst, mode64);
Int delta = p - ptmp;
vassert(delta >= 8 && delta <= 24);
/* bc !ct,cf,delta */
- mkFormB(ptmp, invertCondTest(cond.test), cond.flag, (delta>>2), 0, 0);
+ mkFormB(ptmp, invertCondTest(cond.test),
+ cond.flag, (delta>>2), 0, 0);
}
goto done;
}
case Pin_Load: {
- PPC32AMode* am_addr = i->Pin.Load.src;
+ PPCAMode* am_addr = i->Pin.Load.src;
UInt r_dst = iregNo(i->Pin.Load.dst, mode64);
Bool syned = i->Pin.Load.syned;
UInt opc1, opc2, sz = i->Pin.Load.sz;
}
}
- case Pin_Set32: {
+ case Pin_Set: {
/* Make the destination register be 1 or 0, depending on whether
the relevant condition holds. */
- UInt r_dst = iregNo(i->Pin.Set32.dst, mode64);
- PPC32CondCode cond = i->Pin.Set32.cond;
- UInt rot_imm;
- UInt r_tmp;
+ UInt r_dst = iregNo(i->Pin.Set.dst, mode64);
+ PPCCondCode cond = i->Pin.Set.cond;
+ UInt rot_imm, r_tmp;
if (cond.test == Pct_ALWAYS) {
// Just load 1 to dst => li dst,1
p = mkFormD(p, 14, r_dst, 0, 1);
} else {
rot_imm = 1 + cond.flag;
- r_tmp = 0; // Not within scope of regalloc, so no need to declare.
+ r_tmp = 0; // Not set in getAllocable, so no need to declare.
// r_tmp = CR => mfcr r_tmp
p = mkFormX(p, 31, r_tmp, 0, 0, 19, 0);
}
case Pin_Store: {
- PPC32AMode* am_addr = i->Pin.Store.dst;
+ PPCAMode* am_addr = i->Pin.Store.dst;
UInt r_src = iregNo(i->Pin.Store.src, mode64);
UInt opc1, opc2, sz = i->Pin.Store.sz;
switch (i->Pin.Store.dst->tag) {
}
case Pin_FpLdSt: {
- PPC32AMode* am_addr = i->Pin.FpLdSt.addr;
+ PPCAMode* am_addr = i->Pin.FpLdSt.addr;
UInt f_reg = fregNo(i->Pin.FpLdSt.reg);
Bool idxd = toBool(i->Pin.FpLdSt.addr->tag == Pam_RR);
UChar sz = i->Pin.FpLdSt.sz;
UInt r_dst = iregNo(i->Pin.FpF64toI32.dst, mode64);
UInt fr_src = fregNo(i->Pin.FpF64toI32.src);
UChar fr_tmp = 7; // Temp freg
- PPC32AMode* am_addr;
+ PPCAMode* am_addr;
// fctiw (conv f64 to i32), PPC32 p404
p = mkFormX(p, 63, fr_tmp, 0, fr_src, 14, 0);
- // No RI form of stfiwx, so need PPC32AMode_RR:
- am_addr = PPC32AMode_RR( StackFramePtr(mode64),
- hregPPC_GPR0(mode64) );
+ // No RI form of stfiwx, so need PPCAMode_RR:
+ am_addr = PPCAMode_RR( StackFramePtr(mode64),
+ hregPPC_GPR0(mode64) );
// stfiwx (store fp64[lo32] as int32), PPC32 p517
p = doAMode_RR(p, 31, 983, fr_tmp, am_addr, mode64);
UInt r_dst = iregNo(i->Pin.FpF64toI64.dst, mode64);
UInt fr_src = fregNo(i->Pin.FpF64toI64.src);
UChar fr_tmp = 7; // Temp freg
- PPC32AMode* am_addr;
+ PPCAMode* am_addr;
// fctid (conv f64 to i64), PPC64 p437
p = mkFormX(p, 63, fr_tmp, 0, fr_src, 814, 0);
- am_addr = PPC32AMode_RR( StackFramePtr(mode64),
- hregPPC_GPR0(mode64) );
+ am_addr = PPCAMode_RR( StackFramePtr(mode64),
+ hregPPC_GPR0(mode64) );
// stfdx (store fp64), PPC64 p589
p = doAMode_RR(p, 31, 727, fr_tmp, am_addr, mode64);
UInt r_src = iregNo(i->Pin.FpI64toF64.src, mode64);
UInt fr_dst = fregNo(i->Pin.FpI64toF64.dst);
UChar fr_tmp = 7; // Temp freg
- PPC32AMode* am_addr = PPC32AMode_RR( StackFramePtr(mode64),
- hregPPC_GPR0(mode64) );
+ PPCAMode* am_addr = PPCAMode_RR( StackFramePtr(mode64),
+ hregPPC_GPR0(mode64) );
// stdx r_src,r0,r1
p = doAMode_RR(p, 31, 149, r_src, am_addr, mode64);
}
case Pin_FpCMov: {
- UInt fr_dst = fregNo(i->Pin.FpCMov.dst);
- UInt fr_src = fregNo(i->Pin.FpCMov.src);
- PPC32CondCode cc = i->Pin.FpCMov.cond;
+ UInt fr_dst = fregNo(i->Pin.FpCMov.dst);
+ UInt fr_src = fregNo(i->Pin.FpCMov.src);
+ PPCCondCode cc = i->Pin.FpCMov.cond;
if (fr_dst == fr_src) goto done;
// Only have AltiVec AMode_RR: kludge AMode_IR
if (!idxd) {
r_idx = 30; // XXX: Using r30 as temp
- p = mkLoadImm(p, r_idx, i->Pin.AvLdSt.addr->Pam.IR.index, mode64);
+ p = mkLoadImm(p, r_idx,
+ i->Pin.AvLdSt.addr->Pam.IR.index, mode64);
} else {
r_idx = iregNo(i->Pin.AvLdSt.addr->Pam.RR.index, mode64);
}
if (i->Pin.FpLdSt.isLoad) { // Load from memory (1,2,4,16)
- opc2 = (sz == 1) ? 7 : (sz == 2) ? 39 : (sz == 4) ? 71 : 103;
+ opc2 = (sz==1) ? 7 : (sz==2) ? 39 : (sz==4) ? 71 : 103;
p = mkFormX(p, 31, v_reg, r_idx, r_base, opc2, 0);
} else { // Store to memory (1,2,4,16)
- opc2 = (sz == 1) ? 135 : (sz == 2) ? 167 : (sz == 4) ? 199 : 231;
+ opc2 = (sz==1) ? 135 : (sz==2) ? 167 : (sz==4) ? 199 : 231;
p = mkFormX(p, 31, v_reg, r_idx, r_base, opc2, 0);
}
goto done;
*/
UInt vB = 29; // XXX: Using v29 for temp do not change
// without also changing
- // getRegUsage_PPC32Instr
+ // getRegUsage_PPCInstr
UInt konst = 0x1F;
// Better way to load -0.0 (0x80000000) ?
// vspltisw vB,0x1F (0x1F => each word of vB)
p = mkFormVX( p, 4, vB, konst, 0, 908 );
- // vslw vB,vB,vB (each word of vB = (0x1F << 0x1F) = 0x80000000
+ // vslw vB,vB,vB (each word of vB = (0x1F << 0x1F) = 0x80000000
p = mkFormVX( p, 4, vB, vB, vB, 388 );
// Finally, do the multiply:
p = mkFormVA( p, 4, v_dst, v_srcL, vB, v_srcR, 46 );
break;
}
- case Pavfp_CMPEQF:
- p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 198 ); // vcmpeqfp
+ case Pavfp_CMPEQF: // vcmpeqfp
+ p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 198 );
break;
- case Pavfp_CMPGTF:
- p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 710 ); // vcmpgtfp
+ case Pavfp_CMPGTF: // vcmpgtfp
+ p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 710 );
break;
- case Pavfp_CMPGEF:
- p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 454 ); // vcmpgefp
+ case Pavfp_CMPGEF: // vcmpgefp
+ p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 454 );
break;
default:
}
case Pin_AvCMov: {
- UInt v_dst = vregNo(i->Pin.AvCMov.dst);
- UInt v_src = vregNo(i->Pin.AvCMov.src);
- PPC32CondCode cc = i->Pin.AvCMov.cond;
+ UInt v_dst = vregNo(i->Pin.AvCMov.dst);
+ UInt v_src = vregNo(i->Pin.AvCMov.src);
+ PPCCondCode cc = i->Pin.AvCMov.cond;
if (v_dst == v_src) goto done;
bad:
vex_printf("\n=> ");
- ppPPC32Instr(i, mode64);
- vpanic("emit_PPC32Instr");
+ ppPPCInstr(i, mode64);
+ vpanic("emit_PPCInstr");
/*NOTREACHED*/
done:
without prior written permission.
*/
-#ifndef __LIBVEX_HOST_PPC32_HDEFS_H
-#define __LIBVEX_HOST_PPC32_HDEFS_H
+#ifndef __LIBVEX_HOST_PPC_HDEFS_H
+#define __LIBVEX_HOST_PPC_HDEFS_H
/* Num registers used for function calls */
#define PPC_N_REGPARMS 8
32 real float regs, and 32 real vector regs.
*/
-extern void ppHRegPPC32 ( HReg );
+extern void ppHRegPPC ( HReg );
extern HReg hregPPC_GPR0 ( Bool mode64 ); // scratch reg / zero reg
extern HReg hregPPC_GPR1 ( Bool mode64 ); // Stack Frame Pointer
extern HReg hregPPC_GPR8 ( Bool mode64 );
extern HReg hregPPC_GPR9 ( Bool mode64 );
extern HReg hregPPC_GPR10 ( Bool mode64 );
-extern HReg hregPPC_GPR11 ( Bool mode64 ); // not used: calls by ptr / env ptr for some langs
-extern HReg hregPPC_GPR12 ( Bool mode64 ); // not used: exception handling and global linkage code
-extern HReg hregPPC_GPR13 ( Bool mode64 ); // not used: thread specific pointer
+extern HReg hregPPC_GPR11 ( Bool mode64 );
+extern HReg hregPPC_GPR12 ( Bool mode64 );
+extern HReg hregPPC_GPR13 ( Bool mode64 );
extern HReg hregPPC_GPR14 ( Bool mode64 );
extern HReg hregPPC_GPR15 ( Bool mode64 );
extern HReg hregPPC_GPR16 ( Bool mode64 );
extern HReg hregPPC_GPR27 ( Bool mode64 );
extern HReg hregPPC_GPR28 ( Bool mode64 );
extern HReg hregPPC_GPR29 ( Bool mode64 ); // reserved for dispatcher
-extern HReg hregPPC_GPR30 ( Bool mode64 ); // we use as VMX spill temporary
+extern HReg hregPPC_GPR30 ( Bool mode64 ); // used as VMX spill temp
extern HReg hregPPC_GPR31 ( Bool mode64 ); // GuestStatePtr (callee-saved)
-extern HReg hregPPC32_FPR0 ( void );
-extern HReg hregPPC32_FPR1 ( void );
-extern HReg hregPPC32_FPR2 ( void );
-extern HReg hregPPC32_FPR3 ( void );
-extern HReg hregPPC32_FPR4 ( void );
-extern HReg hregPPC32_FPR5 ( void );
-extern HReg hregPPC32_FPR6 ( void );
-extern HReg hregPPC32_FPR7 ( void );
-extern HReg hregPPC32_FPR8 ( void );
-extern HReg hregPPC32_FPR9 ( void );
-extern HReg hregPPC32_FPR10 ( void );
-extern HReg hregPPC32_FPR11 ( void );
-extern HReg hregPPC32_FPR12 ( void );
-extern HReg hregPPC32_FPR13 ( void );
-extern HReg hregPPC32_FPR14 ( void );
-extern HReg hregPPC32_FPR15 ( void );
-extern HReg hregPPC32_FPR16 ( void );
-extern HReg hregPPC32_FPR17 ( void );
-extern HReg hregPPC32_FPR18 ( void );
-extern HReg hregPPC32_FPR19 ( void );
-extern HReg hregPPC32_FPR20 ( void );
-extern HReg hregPPC32_FPR21 ( void );
-extern HReg hregPPC32_FPR22 ( void );
-extern HReg hregPPC32_FPR23 ( void );
-extern HReg hregPPC32_FPR24 ( void );
-extern HReg hregPPC32_FPR25 ( void );
-extern HReg hregPPC32_FPR26 ( void );
-extern HReg hregPPC32_FPR27 ( void );
-extern HReg hregPPC32_FPR28 ( void );
-extern HReg hregPPC32_FPR29 ( void );
-extern HReg hregPPC32_FPR30 ( void );
-extern HReg hregPPC32_FPR31 ( void );
-
-extern HReg hregPPC32_VR0 ( void );
-extern HReg hregPPC32_VR1 ( void );
-extern HReg hregPPC32_VR2 ( void );
-extern HReg hregPPC32_VR3 ( void );
-extern HReg hregPPC32_VR4 ( void );
-extern HReg hregPPC32_VR5 ( void );
-extern HReg hregPPC32_VR6 ( void );
-extern HReg hregPPC32_VR7 ( void );
-extern HReg hregPPC32_VR8 ( void );
-extern HReg hregPPC32_VR9 ( void );
-extern HReg hregPPC32_VR10 ( void );
-extern HReg hregPPC32_VR11 ( void );
-extern HReg hregPPC32_VR12 ( void );
-extern HReg hregPPC32_VR13 ( void );
-extern HReg hregPPC32_VR14 ( void );
-extern HReg hregPPC32_VR15 ( void );
-extern HReg hregPPC32_VR16 ( void );
-extern HReg hregPPC32_VR17 ( void );
-extern HReg hregPPC32_VR18 ( void );
-extern HReg hregPPC32_VR19 ( void );
-extern HReg hregPPC32_VR20 ( void );
-extern HReg hregPPC32_VR21 ( void );
-extern HReg hregPPC32_VR22 ( void );
-extern HReg hregPPC32_VR23 ( void );
-extern HReg hregPPC32_VR24 ( void );
-extern HReg hregPPC32_VR25 ( void );
-extern HReg hregPPC32_VR26 ( void );
-extern HReg hregPPC32_VR27 ( void );
-extern HReg hregPPC32_VR28 ( void );
-extern HReg hregPPC32_VR29 ( void );
-extern HReg hregPPC32_VR30 ( void );
-extern HReg hregPPC32_VR31 ( void );
+extern HReg hregPPC_FPR0 ( void );
+extern HReg hregPPC_FPR1 ( void );
+extern HReg hregPPC_FPR2 ( void );
+extern HReg hregPPC_FPR3 ( void );
+extern HReg hregPPC_FPR4 ( void );
+extern HReg hregPPC_FPR5 ( void );
+extern HReg hregPPC_FPR6 ( void );
+extern HReg hregPPC_FPR7 ( void );
+extern HReg hregPPC_FPR8 ( void );
+extern HReg hregPPC_FPR9 ( void );
+extern HReg hregPPC_FPR10 ( void );
+extern HReg hregPPC_FPR11 ( void );
+extern HReg hregPPC_FPR12 ( void );
+extern HReg hregPPC_FPR13 ( void );
+extern HReg hregPPC_FPR14 ( void );
+extern HReg hregPPC_FPR15 ( void );
+extern HReg hregPPC_FPR16 ( void );
+extern HReg hregPPC_FPR17 ( void );
+extern HReg hregPPC_FPR18 ( void );
+extern HReg hregPPC_FPR19 ( void );
+extern HReg hregPPC_FPR20 ( void );
+extern HReg hregPPC_FPR21 ( void );
+extern HReg hregPPC_FPR22 ( void );
+extern HReg hregPPC_FPR23 ( void );
+extern HReg hregPPC_FPR24 ( void );
+extern HReg hregPPC_FPR25 ( void );
+extern HReg hregPPC_FPR26 ( void );
+extern HReg hregPPC_FPR27 ( void );
+extern HReg hregPPC_FPR28 ( void );
+extern HReg hregPPC_FPR29 ( void );
+extern HReg hregPPC_FPR30 ( void );
+extern HReg hregPPC_FPR31 ( void );
+
+extern HReg hregPPC_VR0 ( void );
+extern HReg hregPPC_VR1 ( void );
+extern HReg hregPPC_VR2 ( void );
+extern HReg hregPPC_VR3 ( void );
+extern HReg hregPPC_VR4 ( void );
+extern HReg hregPPC_VR5 ( void );
+extern HReg hregPPC_VR6 ( void );
+extern HReg hregPPC_VR7 ( void );
+extern HReg hregPPC_VR8 ( void );
+extern HReg hregPPC_VR9 ( void );
+extern HReg hregPPC_VR10 ( void );
+extern HReg hregPPC_VR11 ( void );
+extern HReg hregPPC_VR12 ( void );
+extern HReg hregPPC_VR13 ( void );
+extern HReg hregPPC_VR14 ( void );
+extern HReg hregPPC_VR15 ( void );
+extern HReg hregPPC_VR16 ( void );
+extern HReg hregPPC_VR17 ( void );
+extern HReg hregPPC_VR18 ( void );
+extern HReg hregPPC_VR19 ( void );
+extern HReg hregPPC_VR20 ( void );
+extern HReg hregPPC_VR21 ( void );
+extern HReg hregPPC_VR22 ( void );
+extern HReg hregPPC_VR23 ( void );
+extern HReg hregPPC_VR24 ( void );
+extern HReg hregPPC_VR25 ( void );
+extern HReg hregPPC_VR26 ( void );
+extern HReg hregPPC_VR27 ( void );
+extern HReg hregPPC_VR28 ( void );
+extern HReg hregPPC_VR29 ( void );
+extern HReg hregPPC_VR30 ( void );
+extern HReg hregPPC_VR31 ( void );
#define StackFramePtr(_mode64) hregPPC_GPR1(_mode64)
#define GuestStatePtr(_mode64) hregPPC_GPR31(_mode64)
Pcf_7EQ = 30, /* zero | equal */
Pcf_7SO = 31 /* summary overflow */
}
- PPC32CondFlag;
+ PPCCondFlag;
typedef
enum { /* Maps bc bitfield BO */
Pct_TRUE = 0xC,
Pct_ALWAYS = 0x14
}
- PPC32CondTest;
+ PPCCondTest;
typedef
struct {
- PPC32CondFlag flag;
- PPC32CondTest test;
+ PPCCondFlag flag;
+ PPCCondTest test;
}
- PPC32CondCode;
+ PPCCondCode;
-extern HChar* showPPC32CondCode ( PPC32CondCode );
+extern HChar* showPPCCondCode ( PPCCondCode );
/* constructor */
-extern PPC32CondCode mk_PPCCondCode ( PPC32CondTest, PPC32CondFlag );
+extern PPCCondCode mk_PPCCondCode ( PPCCondTest, PPCCondFlag );
/* false->true, true->false */
-extern PPC32CondTest invertCondTest ( PPC32CondTest );
+extern PPCCondTest invertCondTest ( PPCCondTest );
Pam_IR, /* Immediate (signed 16-bit) + Reg */
Pam_RR /* Reg1 + Reg2 */
}
- PPC32AModeTag;
+ PPCAModeTag;
typedef
struct {
- PPC32AModeTag tag;
+ PPCAModeTag tag;
union {
struct {
HReg base;
} RR;
} Pam;
}
- PPC32AMode;
+ PPCAMode;
-extern PPC32AMode* PPC32AMode_IR ( Int, HReg );
-extern PPC32AMode* PPC32AMode_RR ( HReg, HReg );
+extern PPCAMode* PPCAMode_IR ( Int, HReg );
+extern PPCAMode* PPCAMode_RR ( HReg, HReg );
-extern PPC32AMode* dopyPPC32AMode ( PPC32AMode* );
+extern PPCAMode* dopyPPCAMode ( PPCAMode* );
-extern void ppPPC32AMode ( PPC32AMode* );
+extern void ppPPCAMode ( PPCAMode* );
/* --------- Operand, which can be a reg or a u16/s16. --------- */
Prh_Imm=1,
Prh_Reg=2
}
- PPC32RHTag;
+ PPCRHTag;
typedef
struct {
- PPC32RHTag tag;
+ PPCRHTag tag;
union {
struct {
Bool syned;
}
Prh;
}
- PPC32RH;
+ PPCRH;
-extern PPC32RH* PPC32RH_Imm ( Bool, UShort );
-extern PPC32RH* PPC32RH_Reg ( HReg );
+extern PPCRH* PPCRH_Imm ( Bool, UShort );
+extern PPCRH* PPCRH_Reg ( HReg );
-extern void ppPPC32RH ( PPC32RH* );
+extern void ppPPCRH ( PPCRH* );
/* --------- Operand, which can be a reg or a u32/64. --------- */
Pri_Imm=3,
Pri_Reg=4
}
- PPC32RITag;
+ PPCRITag;
typedef
struct {
- PPC32RITag tag;
+ PPCRITag tag;
union {
ULong Imm;
HReg Reg;
}
Pri;
}
- PPC32RI;
+ PPCRI;
-extern PPC32RI* PPC32RI_Imm ( ULong );
-extern PPC32RI* PPC32RI_Reg ( HReg );
+extern PPCRI* PPCRI_Imm ( ULong );
+extern PPCRI* PPCRI_Reg ( HReg );
-extern void ppPPC32RI ( PPC32RI* );
+extern void ppPPCRI ( PPCRI* );
/* --------- Operand, which can be a vector reg or a s6. --------- */
Pvi_Imm=5,
Pvi_Reg=6
}
- PPC32VI5sTag;
+ PPCVI5sTag;
typedef
struct {
- PPC32VI5sTag tag;
+ PPCVI5sTag tag;
union {
Char Imm5s;
HReg Reg;
}
Pvi;
}
- PPC32VI5s;
+ PPCVI5s;
-extern PPC32VI5s* PPC32VI5s_Imm ( Char );
-extern PPC32VI5s* PPC32VI5s_Reg ( HReg );
+extern PPCVI5s* PPCVI5s_Imm ( Char );
+extern PPCVI5s* PPCVI5s_Reg ( HReg );
-extern void ppPPC32VI5s ( PPC32VI5s* );
+extern void ppPPCVI5s ( PPCVI5s* );
/* --------- Instructions. --------- */
Pun_CLZ32,
Pun_CLZ64
}
- PPC32UnaryOp;
+ PPCUnaryOp;
-extern HChar* showPPC32UnaryOp ( PPC32UnaryOp );
+extern HChar* showPPCUnaryOp ( PPCUnaryOp );
/* --------- */
Palu_ADD, Palu_SUB,
Palu_AND, Palu_OR, Palu_XOR,
}
- PPC32AluOp;
+ PPCAluOp;
extern
-HChar* showPPC32AluOp ( PPC32AluOp,
- Bool /* is the 2nd operand an immediate? */);
+HChar* showPPCAluOp ( PPCAluOp,
+ Bool /* is the 2nd operand an immediate? */);
/* --------- */
Pshft_INVALID,
Pshft_SHL, Pshft_SHR, Pshft_SAR,
}
- PPC32ShftOp;
+ PPCShftOp;
extern
-HChar* showPPC32ShftOp ( PPC32ShftOp,
- Bool /* is the 2nd operand an immediate? */,
- Bool /* is this a 32bit or 64bit op? */ );
+HChar* showPPCShftOp ( PPCShftOp,
+ Bool /* is the 2nd operand an immediate? */,
+ Bool /* is this a 32bit or 64bit op? */ );
/* --------- */
/* Unary */
Pfp_SQRT, Pfp_ABS, Pfp_NEG, Pfp_MOV
}
- PPC32FpOp;
+ PPCFpOp;
-extern HChar* showPPC32FpOp ( PPC32FpOp );
+extern HChar* showPPCFpOp ( PPCFpOp );
/* --------- */
/* Merge */
Pav_MRGHI, Pav_MRGLO,
}
- PPC32AvOp;
+ PPCAvOp;
-extern HChar* showPPC32AvOp ( PPC32AvOp );
+extern HChar* showPPCAvOp ( PPCAvOp );
/* --------- */
Pavfp_CVTU2F, Pavfp_CVTS2F, Pavfp_QCVTF2U, Pavfp_QCVTF2S,
Pavfp_ROUNDM, Pavfp_ROUNDP, Pavfp_ROUNDN, Pavfp_ROUNDZ,
}
- PPC32AvFpOp;
+ PPCAvFpOp;
-extern HChar* showPPC32AvFpOp ( PPC32AvFpOp );
+extern HChar* showPPCAvFpOp ( PPCAvFpOp );
/* --------- */
Pin_LI, /* load word (32/64-bit) immediate (fake insn) */
Pin_Alu, /* word add/sub/and/or/xor */
Pin_Shft, /* word shl/shr/sar */
- Pin_AddSubC32, /* 32-bit add/sub with read/write carry */
+ Pin_AddSubC, /* add/sub with read/write carry */
Pin_Cmp, /* word compare */
Pin_Unary, /* not, neg, clz */
Pin_MulL, /* widening multiply */
Pin_Call, /* call to address in register */
Pin_Goto, /* conditional/unconditional jmp to dst */
Pin_CMov, /* conditional move */
- Pin_Load, /* load a 8|16|32 bit value from mem */
- Pin_Store, /* store a 8|16|32 bit value to mem */
- Pin_Set32, /* convert condition code to 32-bit value */
+ Pin_Load, /* load an 8|16|32|64 bit value from mem */
+ Pin_Store, /* store an 8|16|32|64 bit value to mem */
+ Pin_Set, /* convert condition code to value 0 or 1 */
Pin_MfCR, /* move from condition register to GPR */
Pin_MFence, /* mem fence */
Pin_FpLdSt, /* FP load/store */
Pin_FpF64toF32, /* FP round IEEE754 double to IEEE754 single */
Pin_FpF64toI32, /* FP round IEEE754 double to 32-bit integer */
- Pin_FpF64toI64, /* FP round IEEE754 double to 32-bit integer */
+ Pin_FpF64toI64, /* FP round IEEE754 double to 64-bit integer */
Pin_FpI64toF64, /* FP round IEEE754 64-bit integer to double */
Pin_FpCMov, /* FP floating point conditional move */
Pin_FpLdFPSCR, /* mtfsf */
Pin_AvLdVSCR, /* mtvscr */
Pin_AvCMov /* AV conditional move */
}
- PPC32InstrTag;
+ PPCInstrTag;
/* Destinations are on the LEFT (first operand) */
typedef
struct {
- PPC32InstrTag tag;
+ PPCInstrTag tag;
union {
- /* Get a 32-bit literal into a register. May turn into one or
- two real insns. */
+ /* Get a 32/64-bit literal into a register.
+ May turn into a number of real insns. */
struct {
HReg dst;
ULong imm64;
is an unsigned 16.
*/
struct {
- PPC32AluOp op;
- HReg dst;
- HReg srcL;
- PPC32RH* srcR;
+ PPCAluOp op;
+ HReg dst;
+ HReg srcL;
+ PPCRH* srcR;
} Alu;
/* Integer shl/shr/sar.
Limitations: the immediate, if it exists,
is a signed 5-bit value between 1 and 31 inclusive.
*/
struct {
- PPC32ShftOp op;
- Bool sz32; /* mode64 has both 32 and 64bit shft */
- HReg dst;
- HReg srcL;
- PPC32RH* srcR;
+ PPCShftOp op;
+ Bool sz32; /* mode64 has both 32 and 64bit shft */
+ HReg dst;
+ HReg srcL;
+ PPCRH* srcR;
} Shft;
/* */
struct {
HReg dst;
HReg srcL;
HReg srcR;
- } AddSubC32;
+ } AddSubC;
/* If signed, the immediate, if it exists, is a signed 16,
else it is an unsigned 16. */
struct {
- Bool syned;
- Bool sz32; /* mode64 has both 32 and 64bit cmp */
- UInt crfD;
- HReg srcL;
- PPC32RH* srcR;
+ Bool syned;
+ Bool sz32; /* mode64 has both 32 and 64bit cmp */
+ UInt crfD;
+ HReg srcL;
+ PPCRH* srcR;
} Cmp;
/* Not and Neg */
struct {
- PPC32UnaryOp op;
- HReg dst;
- HReg src;
- } Unary32;
+ PPCUnaryOp op;
+ HReg dst;
+ HReg src;
+ } Unary;
struct {
Bool syned; /* meaningless if hi32==False */
Bool hi; /* False=>low, True=>high */
using a bit mask (1<<N is set if rN holds an arg, for N in
3 .. 10 inclusive). */
struct {
- PPC32CondCode cond;
- Addr64 target;
- UInt argiregs;
+ PPCCondCode cond;
+ Addr64 target;
+ UInt argiregs;
} Call;
/* Pseudo-insn. Goto dst, on given condition (which could be
Pct_ALWAYS). */
struct {
- IRJumpKind jk;
- PPC32CondCode cond;
- PPC32RI* dst;
+ IRJumpKind jk;
+ PPCCondCode cond;
+ PPCRI* dst;
} Goto;
/* Mov src to dst on the given condition, which may not
be the bogus Pct_ALWAYS. */
struct {
- PPC32CondCode cond;
- HReg dst;
- PPC32RI* src;
+ PPCCondCode cond;
+ HReg dst;
+ PPCRI* src;
} CMov;
- /* Sign/Zero extending loads. Dst size is always 32 bits. */
+ /* Sign/Zero extending loads. Dst size is host word size. */
struct {
- UChar sz; /* 1|2|4|8 */
- Bool syned;
- HReg dst;
- PPC32AMode* src;
+ UChar sz; /* 1|2|4|8 */
+ Bool syned;
+ HReg dst;
+ PPCAMode* src;
} Load;
- /* 32/16/8 bit stores */
+ /* 64/32/16/8 bit stores */
struct {
- UChar sz; /* 1|2|4|8 */
- PPC32AMode* dst;
- HReg src;
+ UChar sz; /* 1|2|4|8 */
+ PPCAMode* dst;
+ HReg src;
} Store;
- /* Convert a ppc32 condition code to a 32-bit value (0 or 1). */
+ /* Convert a ppc condition code to value 0 or 1. */
struct {
- PPC32CondCode cond;
- HReg dst;
- } Set32;
+ PPCCondCode cond;
+ HReg dst;
+ } Set;
/* Move the entire CR to a GPR */
struct {
HReg dst;
} MfCR;
/* Mem fence. In short, an insn which flushes all preceding
loads and stores as much as possible before continuing.
- On PPC32 we emit a "sync". */
+ On PPC we emit a "sync". */
struct {
} MFence;
- /* PPC32 Floating point */
+ /* PPC Floating point */
struct {
- PPC32FpOp op;
- HReg dst;
- HReg src;
+ PPCFpOp op;
+ HReg dst;
+ HReg src;
} FpUnary;
struct {
- PPC32FpOp op;
- HReg dst;
- HReg srcL;
- HReg srcR;
+ PPCFpOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
} FpBinary;
struct {
- Bool isLoad;
- UChar sz; /* only 4 (IEEE single) or 8 (IEEE double) */
- HReg reg;
- PPC32AMode* addr;
+ Bool isLoad;
+ UChar sz; /* only 4 (IEEE single) or 8 (IEEE double) */
+ HReg reg;
+ PPCAMode* addr;
} FpLdSt;
- /* By observing the current FPU rounding mode, round src into dst,
+ /* By observing the current FPU rounding mode, round src->dst,
re-interpreting dst to an IEEE754 32-bit (float) type. */
struct {
HReg src;
HReg dst;
} FpF64toF32;
- /* By observing the current FPU rounding mode, round src into dst,
+ /* By observing the current FPU rounding mode, round src->dst,
re-interpreting dst to an 32-bit integer type. */
struct {
HReg src;
HReg src;
HReg dst;
} FpF64toI64;
- /* By observing the current FPU rounding mode, reinterpret src from
- a 64-bit integer to double type, and round into dst. */
+ /* By observing the current FPU rounding mode, reinterpret src
+ from a 64bit integer to double type, and round into dst. */
struct {
HReg src;
HReg dst;
/* Mov src to dst on the given condition, which may not
be the bogus Xcc_ALWAYS. */
struct {
- PPC32CondCode cond;
- HReg dst;
- HReg src;
+ PPCCondCode cond;
+ HReg dst;
+ HReg src;
} FpCMov;
/* Load FP Status & Control Register */
struct {
/* Simplistic AltiVec */
struct {
- Bool isLoad;
- UChar sz; /* 8|16|32|128 */
- HReg reg;
- PPC32AMode* addr;
+ Bool isLoad;
+ UChar sz; /* 8|16|32|128 */
+ HReg reg;
+ PPCAMode* addr;
} AvLdSt;
struct {
- PPC32AvOp op;
- HReg dst;
- HReg src;
+ PPCAvOp op;
+ HReg dst;
+ HReg src;
} AvUnary;
struct {
- PPC32AvOp op;
- HReg dst;
- HReg srcL;
- HReg srcR;
+ PPCAvOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
} AvBinary;
struct {
- PPC32AvOp op;
- HReg dst;
- HReg srcL;
- HReg srcR;
+ PPCAvOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
} AvBin8x16;
struct {
- PPC32AvOp op;
- HReg dst;
- HReg srcL;
- HReg srcR;
+ PPCAvOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
} AvBin16x8;
struct {
- PPC32AvOp op;
- HReg dst;
- HReg srcL;
- HReg srcR;
+ PPCAvOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
} AvBin32x4;
struct {
- PPC32AvFpOp op;
+ PPCAvFpOp op;
HReg dst;
HReg srcL;
HReg srcR;
} AvBin32Fx4;
struct {
- PPC32AvFpOp op;
+ PPCAvFpOp op;
HReg dst;
HReg src;
} AvUn32Fx4;
struct {
UChar sz; /* 8,16,32 */
HReg dst;
- PPC32VI5s* src;
+ PPCVI5s* src;
} AvSplat;
/* Mov src to dst on the given condition, which may not
be the bogus Xcc_ALWAYS. */
struct {
- PPC32CondCode cond;
- HReg dst;
- HReg src;
+ PPCCondCode cond;
+ HReg dst;
+ HReg src;
} AvCMov;
/* Load AltiVec Status & Control Register */
struct {
} AvLdVSCR;
} Pin;
}
- PPC32Instr;
-
-
-extern PPC32Instr* PPC32Instr_LI ( HReg, ULong, Bool );
-extern PPC32Instr* PPC32Instr_Alu ( PPC32AluOp, HReg, HReg, PPC32RH* );
-extern PPC32Instr* PPC32Instr_Shft ( PPC32ShftOp, Bool sz32, HReg, HReg, PPC32RH* );
-extern PPC32Instr* PPC32Instr_AddSubC32 ( Bool, Bool, HReg, HReg, HReg );
-extern PPC32Instr* PPC32Instr_Cmp ( Bool, Bool, UInt, HReg, PPC32RH* );
-extern PPC32Instr* PPC32Instr_Unary ( PPC32UnaryOp op, HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi32, Bool sz32, HReg, HReg, HReg );
-extern PPC32Instr* PPC32Instr_Div ( Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_Call ( PPC32CondCode, Addr64, UInt );
-extern PPC32Instr* PPC32Instr_Goto ( IRJumpKind, PPC32CondCode cond, PPC32RI* dst );
-extern PPC32Instr* PPC32Instr_CMov ( PPC32CondCode, HReg dst, PPC32RI* src );
-extern PPC32Instr* PPC32Instr_Load ( UChar sz, Bool syned,
- HReg dst, PPC32AMode* src, Bool mode64 );
-extern PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst,
- HReg src, Bool mode64 );
-extern PPC32Instr* PPC32Instr_Set32 ( PPC32CondCode cond, HReg dst );
-extern PPC32Instr* PPC32Instr_MfCR ( HReg dst );
-extern PPC32Instr* PPC32Instr_MFence ( void );
-
-extern PPC32Instr* PPC32Instr_FpUnary ( PPC32FpOp op, HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_FpBinary ( PPC32FpOp op, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_FpLdSt ( Bool isLoad, UChar sz, HReg, PPC32AMode* );
-extern PPC32Instr* PPC32Instr_FpF64toF32 ( HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_FpF64toI32 ( HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_FpF64toI64 ( HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_FpI64toF64 ( HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_FpCMov ( PPC32CondCode, HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_FpLdFPSCR ( HReg src );
-extern PPC32Instr* PPC32Instr_FpCmp ( HReg dst, HReg srcL, HReg srcR );
-
-extern PPC32Instr* PPC32Instr_RdWrLR ( Bool wrLR, HReg gpr );
-
-extern PPC32Instr* PPC32Instr_AvLdSt ( Bool isLoad, UChar sz, HReg, PPC32AMode* );
-extern PPC32Instr* PPC32Instr_AvUnary ( PPC32AvOp op, HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_AvBinary ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_AvBin8x16 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_AvBin16x8 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_AvBin32x4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_AvBin32Fx4 ( PPC32AvOp op, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_AvUn32Fx4 ( PPC32AvOp op, HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl );
-extern PPC32Instr* PPC32Instr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_AvShlDbl ( UChar shift, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_AvSplat ( UChar sz, HReg dst, PPC32VI5s* src );
-extern PPC32Instr* PPC32Instr_AvCMov ( PPC32CondCode, HReg dst, HReg src );
-extern PPC32Instr* PPC32Instr_AvLdVSCR ( HReg src );
-
-extern void ppPPC32Instr ( PPC32Instr*, Bool mode64 );
+ PPCInstr;
+
+
+extern PPCInstr* PPCInstr_LI ( HReg, ULong, Bool );
+extern PPCInstr* PPCInstr_Alu ( PPCAluOp, HReg, HReg, PPCRH* );
+extern PPCInstr* PPCInstr_Shft ( PPCShftOp, Bool sz32, HReg, HReg, PPCRH* );
+extern PPCInstr* PPCInstr_AddSubC ( Bool, Bool, HReg, HReg, HReg );
+extern PPCInstr* PPCInstr_Cmp ( Bool, Bool, UInt, HReg, PPCRH* );
+extern PPCInstr* PPCInstr_Unary ( PPCUnaryOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_MulL ( Bool syned, Bool hi32, Bool sz32, HReg, HReg, HReg );
+extern PPCInstr* PPCInstr_Div ( Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_Call ( PPCCondCode, Addr64, UInt );
+extern PPCInstr* PPCInstr_Goto ( IRJumpKind, PPCCondCode cond, PPCRI* dst );
+extern PPCInstr* PPCInstr_CMov ( PPCCondCode, HReg dst, PPCRI* src );
+extern PPCInstr* PPCInstr_Load ( UChar sz, Bool syned,
+ HReg dst, PPCAMode* src, Bool mode64 );
+extern PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst,
+ HReg src, Bool mode64 );
+extern PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst );
+extern PPCInstr* PPCInstr_MfCR ( HReg dst );
+extern PPCInstr* PPCInstr_MFence ( void );
+
+extern PPCInstr* PPCInstr_FpUnary ( PPCFpOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpBinary ( PPCFpOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_FpLdSt ( Bool isLoad, UChar sz, HReg, PPCAMode* );
+extern PPCInstr* PPCInstr_FpF64toF32 ( HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpF64toI32 ( HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpF64toI64 ( HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpI64toF64 ( HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpCMov ( PPCCondCode, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpLdFPSCR ( HReg src );
+extern PPCInstr* PPCInstr_FpCmp ( HReg dst, HReg srcL, HReg srcR );
+
+extern PPCInstr* PPCInstr_RdWrLR ( Bool wrLR, HReg gpr );
+
+extern PPCInstr* PPCInstr_AvLdSt ( Bool isLoad, UChar sz, HReg, PPCAMode* );
+extern PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvUn32Fx4 ( PPCAvOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl );
+extern PPCInstr* PPCInstr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvShlDbl ( UChar shift, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvSplat ( UChar sz, HReg dst, PPCVI5s* src );
+extern PPCInstr* PPCInstr_AvCMov ( PPCCondCode, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_AvLdVSCR ( HReg src );
+
+extern void ppPPCInstr ( PPCInstr*, Bool mode64 );
/* Some functions that insulate the register allocator from details
of the underlying instruction set. */
-extern void getRegUsage_PPC32Instr ( HRegUsage*, PPC32Instr*, Bool mode64 );
-extern void mapRegs_PPC32Instr ( HRegRemap*, PPC32Instr* , Bool mode64);
-extern Bool isMove_PPC32Instr ( PPC32Instr*, HReg*, HReg* );
-extern Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr*,
- Bool mode64, void* dispatch );
-extern PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 );
-extern PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 );
-extern void getAllocableRegs_PPC32 ( Int*, HReg**, Bool mode64 );
-extern HInstrArray* iselBB_PPC32 ( IRBB*, VexArchInfo* );
-
-#endif /* ndef __LIBVEX_HOST_PPC32_HDEFS_H */
+extern void getRegUsage_PPCInstr ( HRegUsage*, PPCInstr*, Bool mode64 );
+extern void mapRegs_PPCInstr ( HRegRemap*, PPCInstr* , Bool mode64);
+extern Bool isMove_PPCInstr ( PPCInstr*, HReg*, HReg* );
+extern Int emit_PPCInstr ( UChar* buf, Int nbuf, PPCInstr*,
+ Bool mode64, void* dispatch );
+extern PPCInstr* genSpill_PPC ( HReg rreg, UShort offsetB, Bool mode64 );
+extern PPCInstr* genReload_PPC ( HReg rreg, UShort offsetB, Bool mode64 );
+extern void getAllocableRegs_PPC ( Int*, HReg**, Bool mode64 );
+extern HInstrArray* iselBB_PPC ( IRBB*, VexArchInfo* );
+
+#endif /* ndef __LIBVEX_HOST_PPC_HDEFS_H */
/*---------------------------------------------------------------*/
/*--- end host-ppc32/hdefs.h ---*/
#include "host-ppc32/hdefs.h"
/* Is our guest binary 32 or 64bit? Set at each call to
- iselBB_PPC32 below. */
+ iselBB_PPC below. */
static Bool mode64 = False;
#define HRcIntWRDSZ (mode64 ? HRcInt64 : HRcInt32)
/*---------------------------------------------------------*/
-/*--- PPC32 FP Status & Control Register Conventions ---*/
+/*--- PPC FP Status & Control Register Conventions ---*/
/*---------------------------------------------------------*/
/*
Vex-generated code expects to run with the FPU set as follows: all
same set of IRTemps as the type mapping does.
- vregmap holds the primary register for the IRTemp.
- - vregmapHI is only used for 64-bit integer-typed
- IRTemps. It holds the identity of a second
- 32-bit virtual HReg, which holds the high half
- of the value.
+ - vregmapHI is only used in 32bit mode, for 64-bit integer-
+ typed IRTemps. It holds the identity of a second 32-bit
+ virtual HReg, which holds the high half of the value.
- A copy of the link reg, so helper functions don't kill it.
return env->vregmap[tmp];
}
-static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
+static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO,
+ ISelEnv* env, IRTemp tmp )
{
vassert(!mode64);
vassert(tmp >= 0);
*vrHI = env->vregmapHI[tmp];
}
-static void lookupIRTemp128 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
+static void lookupIRTemp128 ( HReg* vrHI, HReg* vrLO,
+ ISelEnv* env, IRTemp tmp )
{
vassert(mode64);
vassert(tmp >= 0);
*vrHI = env->vregmapHI[tmp];
}
-static void addInstr ( ISelEnv* env, PPC32Instr* instr )
+static void addInstr ( ISelEnv* env, PPCInstr* instr )
{
addHInstr(env->code, instr);
if (vex_traceflags & VEX_TRACE_VCODE) {
- ppPPC32Instr(instr, mode64);
+ ppPPCInstr(instr, mode64);
vex_printf("\n");
}
}
signed or not. If yes, this will never return -32768 as an
immediate; this guaranteed that all signed immediates that are
return can have their sign inverted if need be. */
-static PPC32RH* iselIntExpr_RH_wrk ( ISelEnv* env,
+static PPCRH* iselIntExpr_RH_wrk ( ISelEnv* env,
Bool syned, IRExpr* e );
-static PPC32RH* iselIntExpr_RH ( ISelEnv* env,
+static PPCRH* iselIntExpr_RH ( ISelEnv* env,
Bool syned, IRExpr* e );
/* Compute an I32 into a RI (reg or 32-bit immediate). */
-static PPC32RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e );
-static PPC32RI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e );
+static PPCRI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e );
+static PPCRI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e );
/* Compute an I8 into a reg-or-5-bit-unsigned-immediate, the latter
being an immediate in the range 1 .. 31 inclusive. Used for doing
shift amounts. */
-static PPC32RH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e );
-static PPC32RH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e );
+static PPCRH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e );
+static PPCRH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e );
/* Compute an I8 into a reg-or-6-bit-unsigned-immediate, the latter
being an immediate in the range 1 .. 63 inclusive. Used for doing
shift amounts. */
-static PPC32RH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e );
-static PPC32RH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e );
+static PPCRH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e );
+static PPCRH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e );
/* Compute an I32 into an AMode. */
-static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e );
-static PPC32AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e );
+static PPCAMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e );
+static PPCAMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e );
/* Compute an I64 into a GPR pair. */
static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo,
static void iselInt128Expr ( HReg* rHi, HReg* rLo,
ISelEnv* env, IRExpr* e );
-static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e );
-static PPC32CondCode iselCondCode ( ISelEnv* env, IRExpr* e );
+static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e );
+static PPCCondCode iselCondCode ( ISelEnv* env, IRExpr* e );
static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e );
static HReg iselDblExpr ( ISelEnv* env, IRExpr* e );
/* Make an int reg-reg move. */
-static PPC32Instr* mk_iMOVds_RR ( HReg r_dst, HReg r_src )
+static PPCInstr* mk_iMOVds_RR ( HReg r_dst, HReg r_src )
{
vassert(hregClass(r_dst) == HRcIntWRDSZ);
vassert(hregClass(r_src) == HRcIntWRDSZ);
- return PPC32Instr_Alu(Palu_OR, r_dst, r_src, PPC32RH_Reg(r_src));
+ return PPCInstr_Alu(Palu_OR, r_dst, r_src, PPCRH_Reg(r_src));
}
//.. /* Make a vector reg-reg move. */
{
HReg sp = StackFramePtr(mode64);
vassert(n < 256 && (n%16) == 0);
- addInstr(env, PPC32Instr_Alu(
- Palu_ADD, sp, sp, PPC32RH_Imm(True,toUShort(n))));
+ addInstr(env, PPCInstr_Alu( Palu_ADD, sp, sp,
+ PPCRH_Imm(True,toUShort(n)) ));
}
static void sub_from_sp ( ISelEnv* env, UInt n )
{
HReg sp = StackFramePtr(mode64);
vassert(n < 256 && (n%16) == 0);
- addInstr(env, PPC32Instr_Alu(
- Palu_SUB, sp, sp, PPC32RH_Imm(True,toUShort(n))));
+ addInstr(env, PPCInstr_Alu( Palu_SUB, sp, sp,
+ PPCRH_Imm(True,toUShort(n)) ));
}
/*
HReg align16 = newVRegI(env);
addInstr(env, mk_iMOVds_RR(r, StackFramePtr(mode64)));
// add 16
- addInstr(env, PPC32Instr_Alu(
- Palu_ADD, r, r, PPC32RH_Imm(True,toUShort(16))));
+ addInstr(env, PPCInstr_Alu( Palu_ADD, r, r,
+ PPCRH_Imm(True,toUShort(16)) ));
// mask to quadword
- addInstr(env, PPC32Instr_LI(align16, 0xFFFFFFFFFFFFFFF0ULL, mode64));
- addInstr(env, PPC32Instr_Alu(Palu_AND, r,r, PPC32RH_Reg(align16)));
+ addInstr(env, PPCInstr_LI(align16, 0xFFFFFFFFFFFFFFF0ULL, mode64));
+ addInstr(env, PPCInstr_Alu(Palu_AND, r,r, PPCRH_Reg(align16)));
return r;
}
/* Load 2*I32 regs to fp reg */
-static HReg mk_LoadRR32toFPR ( ISelEnv* env, HReg r_srcHi, HReg r_srcLo )
+static HReg mk_LoadRR32toFPR ( ISelEnv* env,
+ HReg r_srcHi, HReg r_srcLo )
{
HReg fr_dst = newVRegF(env);
- PPC32AMode *am_addr0, *am_addr1;
+ PPCAMode *am_addr0, *am_addr1;
vassert(!mode64);
vassert(hregClass(r_srcHi) == HRcInt32);
vassert(hregClass(r_srcLo) == HRcInt32);
sub_from_sp( env, 16 ); // Move SP down 16 bytes
- am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64));
- am_addr1 = PPC32AMode_IR(4, StackFramePtr(mode64));
+ am_addr0 = PPCAMode_IR( 0, StackFramePtr(mode64) );
+ am_addr1 = PPCAMode_IR( 4, StackFramePtr(mode64) );
// store hi,lo as Ity_I32's
- addInstr(env, PPC32Instr_Store( 4, am_addr0, r_srcHi, mode64 ));
- addInstr(env, PPC32Instr_Store( 4, am_addr1, r_srcLo, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_addr0, r_srcHi, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_addr1, r_srcLo, mode64 ));
// load as float
- addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0));
+ addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0));
add_to_sp( env, 16 ); // Reset SP
return fr_dst;
static HReg mk_LoadR64toFPR ( ISelEnv* env, HReg r_src )
{
HReg fr_dst = newVRegF(env);
- PPC32AMode *am_addr0;
+ PPCAMode *am_addr0;
vassert(mode64);
vassert(hregClass(r_src) == HRcInt64);
sub_from_sp( env, 16 ); // Move SP down 16 bytes
- am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64));
+ am_addr0 = PPCAMode_IR( 0, StackFramePtr(mode64) );
// store as Ity_I64
- addInstr(env, PPC32Instr_Store( 8, am_addr0, r_src, mode64 ));
+ addInstr(env, PPCInstr_Store( 8, am_addr0, r_src, mode64 ));
// load as float
- addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0));
+ addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0));
add_to_sp( env, 16 ); // Reset SP
return fr_dst;
/* Given an amode, return one which references 4 bytes further
along. */
-static PPC32AMode* advance4 ( ISelEnv* env, PPC32AMode* am )
+static PPCAMode* advance4 ( ISelEnv* env, PPCAMode* am )
{
- PPC32AMode* am4 = dopyPPC32AMode(am);
+ PPCAMode* am4 = dopyPPCAMode( am );
if (am4->tag == Pam_IR
&& am4->Pam.IR.index + 4 <= 32767) {
am4->Pam.IR.index += 4;
} else {
- vpanic("advance4(ppc32,host)");
+ vpanic("advance4(ppc,host)");
}
return am4;
}
Bool passBBP,
IRExpr* guard, IRCallee* cee, IRExpr** args )
{
- PPC32CondCode cc;
- HReg argregs[PPC_N_REGPARMS];
- HReg tmpregs[PPC_N_REGPARMS];
- Bool go_fast;
- Int n_args, i, argreg;
- UInt argiregs;
- ULong target;
+ PPCCondCode cc;
+ HReg argregs[PPC_N_REGPARMS];
+ HReg tmpregs[PPC_N_REGPARMS];
+ Bool go_fast;
+ Int n_args, i, argreg;
+ UInt argiregs;
+ ULong target;
/* Marshal args for a call and do the call.
vassert(argreg < PPC_N_REGPARMS);
vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 ||
typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
-
if (!mode64) {
if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
argiregs |= (1 << (argreg+3));
- addInstr(env, mk_iMOVds_RR( argregs[argreg],
- iselIntExpr_R(env, args[i]) ));
+ addInstr(env,
+ mk_iMOVds_RR( argregs[argreg],
+ iselIntExpr_R(env, args[i]) ));
} else { // Ity_I64
HReg rHi, rLo;
- if (argreg%2 == 1) // ppc32 abi spec for passing a LONG_LONG
+ if (argreg%2 == 1) // ppc32 abi spec for passing LONG_LONG
argreg++; // XXX: odd argreg => even rN
vassert(argreg < PPC_N_REGPARMS-1);
iselInt64Expr(&rHi,&rLo, env, args[i]);
/* This is pretty stupid; better to move directly to r3
after the rest of the args are done. */
tmpregs[argreg] = newVRegI(env);
- addInstr(env, mk_iMOVds_RR( tmpregs[argreg], GuestStatePtr(mode64) ));
+ addInstr(env, mk_iMOVds_RR( tmpregs[argreg],
+ GuestStatePtr(mode64) ));
argreg++;
}
tmpregs[argreg] = iselIntExpr_R(env, args[i]);
} else { // Ity_I64
HReg rHi, rLo;
- if (argreg%2 == 1) // ppc32 abi spec for passing a LONG_LONG
+ if (argreg%2 == 1) // ppc32 abi spec for passing LONG_LONG
argreg++; // XXX: odd argreg => even rN
vassert(argreg < PPC_N_REGPARMS-1);
iselInt64Expr(&rHi,&rLo, env, args[i]);
toUInt(Ptr_to_ULong(cee->addr));
/* Finally, the call itself. */
- addInstr(env, PPC32Instr_Call( cc, (Addr64)target, argiregs ));
+ addInstr(env, PPCInstr_Call( cc, (Addr64)target, argiregs ));
}
- so we can set the whole register at once (faster)
note: upper 32 bits ignored by FpLdFPSCR
*/
- addInstr(env, PPC32Instr_LI(r_src, 0x0, mode64));
+ addInstr(env, PPCInstr_LI(r_src, 0x0, mode64));
if (mode64) {
fr_src = mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64
} else {
fr_src = mk_LoadRR32toFPR( env, r_src, r_src ); // 2*I32 -> F64
}
- addInstr(env, PPC32Instr_FpLdFPSCR( fr_src ));
+ addInstr(env, PPCInstr_FpLdFPSCR( fr_src ));
}
-/* Convert IR rounding mode to PPC32 encoding */
-static HReg roundModeIRtoPPC32 ( ISelEnv* env, HReg r_rmIR )
+/* Convert IR rounding mode to PPC encoding */
+static HReg roundModeIRtoPPC ( ISelEnv* env, HReg r_rmIR )
{
/*
rounding mode | PPC | IR
to +infinity | 10 | 10
to -infinity | 11 | 01
*/
- HReg r_rmPPC32 = newVRegI(env);
- HReg r_tmp = newVRegI(env);
+ HReg r_rmPPC = newVRegI(env);
+ HReg r_tmp = newVRegI(env);
vassert(hregClass(r_rmIR) == HRcIntWRDSZ);
// AND r_rmIR,3 -- shouldn't be needed; paranoia
- addInstr(env,
- PPC32Instr_Alu(Palu_AND, r_rmIR, r_rmIR, PPC32RH_Imm(False,3)));
-
- // r_rmPPC32 = XOR( r_rmIR, (r_rmIR << 1) & 2)
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
- r_tmp, r_rmIR, PPC32RH_Imm(False,1)));
- addInstr(env,
- PPC32Instr_Alu(Palu_AND, r_tmp, r_tmp, PPC32RH_Imm(False,2)));
- addInstr(env,
- PPC32Instr_Alu(Palu_XOR, r_rmPPC32, r_rmIR, PPC32RH_Reg(r_tmp)));
- return r_rmPPC32;
+ addInstr(env, PPCInstr_Alu( Palu_AND, r_rmIR, r_rmIR,
+ PPCRH_Imm(False,3) ));
+
+ // r_rmPPC = XOR( r_rmIR, (r_rmIR << 1) & 2)
+ addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_tmp, r_rmIR, PPCRH_Imm(False,1)));
+ addInstr(env, PPCInstr_Alu( Palu_AND, r_tmp, r_tmp,
+ PPCRH_Imm(False,2) ));
+ addInstr(env, PPCInstr_Alu( Palu_XOR, r_rmPPC, r_rmIR,
+ PPCRH_Reg(r_tmp) ));
+ return r_rmPPC;
}
/* Mess with the FPU's rounding mode: 'mode' is an I32-typed
expression denoting a value in the range 0 .. 3, indicating a round
- mode encoded as per type IRRoundingMode. Set the PPC32 FPSCR to have
+ mode encoded as per type IRRoundingMode. Set the PPC FPSCR to have
the same rounding.
For speed & simplicity, we're setting the *entire* FPSCR here.
*/
- so we can set the whole register at once (faster)
*/
- // Resolve rounding mode and convert to PPC32 representation
- r_src = roundModeIRtoPPC32( env, iselIntExpr_R(env, mode) );
+ // Resolve rounding mode and convert to PPC representation
+ r_src = roundModeIRtoPPC( env, iselIntExpr_R(env, mode) );
// gpr -> fpr
if (mode64) {
fr_src = mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64
}
// Move to FPSCR
- addInstr(env, PPC32Instr_FpLdFPSCR( fr_src ));
+ addInstr(env, PPCInstr_FpLdFPSCR( fr_src ));
}
*/
static HReg mk_AvDuplicateRI( ISelEnv* env, IRExpr* e )
{
- HReg r_src;
- HReg dst = newVRegV(env);
- PPC32RI* ri = iselIntExpr_RI(env, e);
- IRType ty = typeOfIRExpr(env->type_env,e);
- UInt sz = (ty == Ity_I8) ? 8 : (ty == Ity_I16) ? 16 : 32;
+ HReg r_src;
+ HReg dst = newVRegV(env);
+ PPCRI* ri = iselIntExpr_RI(env, e);
+ IRType ty = typeOfIRExpr(env->type_env,e);
+ UInt sz = (ty == Ity_I8) ? 8 : (ty == Ity_I16) ? 16 : 32;
vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
/* special case: immediate */
if (simm6 > 15) { /* 16:31 inclusive */
HReg v1 = newVRegV(env);
HReg v2 = newVRegV(env);
- addInstr(env, PPC32Instr_AvSplat(sz, v1, PPC32VI5s_Imm(-16)));
- addInstr(env, PPC32Instr_AvSplat(sz, v2, PPC32VI5s_Imm(simm6-16)));
+ addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16)));
+ addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6-16)));
addInstr(env,
- (sz== 8) ? PPC32Instr_AvBin8x16(Pav_SUBU, dst, v2, v1) :
- (sz==16) ? PPC32Instr_AvBin16x8(Pav_SUBU, dst, v2, v1)
- : PPC32Instr_AvBin32x4(Pav_SUBU, dst, v2, v1) );
+ (sz== 8) ? PPCInstr_AvBin8x16(Pav_SUBU, dst, v2, v1) :
+ (sz==16) ? PPCInstr_AvBin16x8(Pav_SUBU, dst, v2, v1)
+ : PPCInstr_AvBin32x4(Pav_SUBU, dst, v2, v1) );
return dst;
}
if (simm6 < -16) { /* -32:-17 inclusive */
HReg v1 = newVRegV(env);
HReg v2 = newVRegV(env);
- addInstr(env, PPC32Instr_AvSplat(sz, v1, PPC32VI5s_Imm(-16)));
- addInstr(env, PPC32Instr_AvSplat(sz, v2, PPC32VI5s_Imm(simm6+16)));
+ addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16)));
+ addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6+16)));
addInstr(env,
- (sz== 8) ? PPC32Instr_AvBin8x16(Pav_ADDU, dst, v2, v1) :
- (sz==16) ? PPC32Instr_AvBin16x8(Pav_ADDU, dst, v2, v1)
- : PPC32Instr_AvBin32x4(Pav_ADDU, dst, v2, v1) );
+ (sz== 8) ? PPCInstr_AvBin8x16(Pav_ADDU, dst, v2, v1) :
+ (sz==16) ? PPCInstr_AvBin16x8(Pav_ADDU, dst, v2, v1)
+ : PPCInstr_AvBin32x4(Pav_ADDU, dst, v2, v1) );
return dst;
}
/* simplest form: -16:15 inclusive */
- addInstr(env, PPC32Instr_AvSplat(sz, dst, PPC32VI5s_Imm(simm6)));
+ addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Imm(simm6)));
return dst;
}
/* no luck; use the Slow way. */
r_src = newVRegI(env);
- addInstr(env, PPC32Instr_LI(r_src, (Long)simm32, mode64));
+ addInstr(env, PPCInstr_LI(r_src, (Long)simm32, mode64));
}
else {
r_src = ri->Pri.Reg;
/* default case: store r_src in lowest lane of 16-aligned mem,
load vector, splat lowest lane to dst */
{
- /* CAB: Perhaps faster to store r_src multiple times (sz dependent),
+ /* CAB: Maybe faster to store r_src multiple times (sz dependent),
and simply load the vector? */
HReg r_aligned16;
HReg v_src = newVRegV(env);
- PPC32AMode *am_off12;
+ PPCAMode *am_off12;
sub_from_sp( env, 32 ); // Move SP down
/* Get a 16-aligned address within our stack space */
r_aligned16 = get_sp_aligned16( env );
- am_off12 = PPC32AMode_IR( 12, r_aligned16);
+ am_off12 = PPCAMode_IR( 12, r_aligned16 );
/* Store r_src in low word of 16-aligned mem */
- addInstr(env, PPC32Instr_Store( 4, am_off12, r_src, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_off12, r_src, mode64 ));
/* Load src to vector[low lane] */
- addInstr(env, PPC32Instr_AvLdSt( True/*load*/, 4, v_src, am_off12 ));
+ addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 4, v_src, am_off12 ) );
add_to_sp( env, 32 ); // Reset SP
/* Finally, splat v_src[low_lane] to dst */
- addInstr(env, PPC32Instr_AvSplat(sz, dst, PPC32VI5s_Reg(v_src)));
+ addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Reg(v_src)));
return dst;
}
}
/* 32bit float => sign(1) | expontent(8) | mantissa(23)
nan => exponent all ones, mantissa > 0 */
- addInstr(env, PPC32Instr_AvBinary(Pav_AND, expt, vSrc, msk_exp));
- addInstr(env, PPC32Instr_AvBin32x4(Pav_CMPEQU, expt, expt, msk_exp));
- addInstr(env, PPC32Instr_AvBinary(Pav_AND, mnts, vSrc, msk_mnt));
- addInstr(env, PPC32Instr_AvBin32x4(Pav_CMPGTU, mnts, mnts, zeros));
- addInstr(env, PPC32Instr_AvBinary(Pav_AND, vIsNan, expt, mnts));
+ addInstr(env, PPCInstr_AvBinary(Pav_AND, expt, vSrc, msk_exp));
+ addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, expt, expt, msk_exp));
+ addInstr(env, PPCInstr_AvBinary(Pav_AND, mnts, vSrc, msk_mnt));
+ addInstr(env, PPCInstr_AvBin32x4(Pav_CMPGTU, mnts, mnts, zeros));
+ addInstr(env, PPCInstr_AvBinary(Pav_AND, vIsNan, expt, mnts));
return vIsNan;
}
/*---------------------------------------------------------*/
-/*--- ISEL: Integer expressions (32/16/8 bit) ---*/
+/*--- ISEL: Integer expressions (64/32/16/8 bit) ---*/
/*---------------------------------------------------------*/
/* Select insns for an integer-typed expression, and add them to the
vregs to the same real register, so the copies will often disappear
later in the game.
- This should handle expressions of 32, 16 and 8-bit type. All
- results are returned in a 32-bit register. For 16- and 8-bit
- expressions, the upper 16/24 bits are arbitrary, so you should mask
- or sign extend partial values if necessary.
+ This should handle expressions of 64, 32, 16 and 8-bit type.
+ All results are returned in a (mode64 ? 64bit : 32bit) register.
+ For 16- and 8-bit expressions, the upper (32/48/56 : 16/24) bits
+ are arbitrary, so you should mask or sign extend partial values
+ if necessary.
*/
static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
/* --------- LOAD --------- */
case Iex_Load: {
HReg r_dst = newVRegI(env);
- PPC32AMode* am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr);
+ PPCAMode* am_addr = iselIntExpr_AMode( env, e->Iex.Load.addr );
if (e->Iex.Load.end != Iend_BE)
goto irreducible;
- addInstr(env, PPC32Instr_Load( toUChar(sizeofIRType(ty)),
- False, r_dst, am_addr, mode64 ));
+ addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
+ False, r_dst, am_addr, mode64 ));
return r_dst;
break;
}
/* --------- BINARY OP --------- */
case Iex_Binop: {
- PPC32AluOp aluOp;
- PPC32ShftOp shftOp;
+ PPCAluOp aluOp;
+ PPCShftOp shftOp;
//.. /* Pattern: Sub32(0,x) */
//.. if (e->Iex.Binop.op == Iop_Sub32 && isZero32(e->Iex.Binop.arg1)) {
//.. HReg dst = newVRegI(env);
//.. HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg2);
//.. addInstr(env, mk_iMOVsd_RR(reg,dst));
-//.. addInstr(env, PPC32Instr_Unary(Xun_NEG,PPC32RM_Reg(dst)));
+//.. addInstr(env, PPCInstr_Unary(Xun_NEG,PPCRM_Reg(dst)));
//.. return dst;
//.. }
/* For commutative ops we assume any literal
values are on the second operand. */
if (aluOp != Palu_INVALID) {
- HReg r_dst = newVRegI(env);
- HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
- PPC32RH* ri_srcR = NULL;
+ HReg r_dst = newVRegI(env);
+ HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPCRH* ri_srcR = NULL;
/* get right arg into an RH, in the appropriate way */
switch (aluOp) {
case Palu_ADD: case Palu_SUB:
default:
vpanic("iselIntExpr_R_wrk-aluOp-arg2");
}
- addInstr(env, PPC32Instr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
+ addInstr(env, PPCInstr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
return r_dst;
}
}
/* we assume any literal values are on the second operand. */
if (shftOp != Pshft_INVALID) {
- HReg r_dst = newVRegI(env);
- HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
- PPC32RH* ri_srcR = NULL;
+ HReg r_dst = newVRegI(env);
+ HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPCRH* ri_srcR = NULL;
/* get right arg into an RH, in the appropriate way */
switch (shftOp) {
case Pshft_SHL: case Pshft_SHR: case Pshft_SAR:
/* widen the left arg if needed */
if (shftOp == Pshft_SHR || shftOp == Pshft_SAR) {
if (ty == Ity_I8 || ty == Ity_I16) {
- PPC32RH* amt = PPC32RH_Imm(False, toUShort(ty == Ity_I8 ? 24 : 16));
- HReg tmp = newVRegI(env);
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
- tmp, r_srcL, amt));
- addInstr(env, PPC32Instr_Shft(shftOp, True/*32bit shift*/,
- tmp, tmp, amt));
+ PPCRH* amt = PPCRH_Imm(False,
+ toUShort(ty == Ity_I8 ? 24 : 16));
+ HReg tmp = newVRegI(env);
+ addInstr(env, PPCInstr_Shft(Pshft_SHL,
+ True/*32bit shift*/,
+ tmp, r_srcL, amt));
+ addInstr(env, PPCInstr_Shft(shftOp,
+ True/*32bit shift*/,
+ tmp, tmp, amt));
r_srcL = tmp;
vassert(0); /* AWAITING TEST CASE */
}
32bit shifts are fine for all others */
if (ty == Ity_I64) {
vassert(mode64);
- addInstr(env, PPC32Instr_Shft(shftOp, False/*64bit shift*/,
- r_dst, r_srcL, ri_srcR));
+ addInstr(env, PPCInstr_Shft(shftOp, False/*64bit shift*/,
+ r_dst, r_srcL, ri_srcR));
} else {
- addInstr(env, PPC32Instr_Shft(shftOp, True/*32bit shift*/,
- r_dst, r_srcL, ri_srcR));
+ addInstr(env, PPCInstr_Shft(shftOp, True/*32bit shift*/,
+ r_dst, r_srcL, ri_srcR));
}
return r_dst;
}
HReg r_dst = newVRegI(env);
HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Div(syned, True/*32bit div*/,
- r_dst, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_Div(syned, True/*32bit div*/,
+ r_dst, r_srcL, r_srcR));
return r_dst;
}
if (e->Iex.Binop.op == Iop_DivS64 ||
HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
vassert(mode64);
- addInstr(env, PPC32Instr_Div(syned, False/*64bit div*/,
- r_dst, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_Div(syned, False/*64bit div*/,
+ r_dst, r_srcL, r_srcR));
return r_dst;
}
HReg r_dst = newVRegI(env);
HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_MulL(syned, False/*lo32*/, sz32,
- r_dst, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_MulL(syned, False/*lo32*/, sz32,
+ r_dst, r_srcL, r_srcR));
return r_dst;
}
HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
vassert(mode64);
- addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/,
- False/*lo32*/, True/*32bit mul*/,
- tLo, r_srcL, r_srcR));
- addInstr(env, PPC32Instr_MulL(syned,
- True/*hi32*/, True/*32bit mul*/,
- tHi, r_srcL, r_srcR));
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/,
- r_dst, tHi, PPC32RH_Imm(False,32)));
- addInstr(env, PPC32Instr_Alu(Palu_OR, r_dst, r_dst, PPC32RH_Reg(tLo)));
+ addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/,
+ False/*lo32*/, True/*32bit mul*/,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_MulL(syned,
+ True/*hi32*/, True/*32bit mul*/,
+ tHi, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/,
+ r_dst, tHi, PPCRH_Imm(False,32)));
+ addInstr(env, PPCInstr_Alu(Palu_OR,
+ r_dst, r_dst, PPCRH_Reg(tLo)));
return r_dst;
}
/* El-mutanto 3-way compare? */
if (e->Iex.Binop.op == Iop_CmpORD32S ||
e->Iex.Binop.op == Iop_CmpORD32U) {
- Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S);
- HReg dst = newVRegI(env);
- HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
- PPC32RH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Cmp(syned, True/*32bit cmp*/,
- 7/*cr*/, srcL, srcR));
- addInstr(env, PPC32Instr_MfCR(dst));
- addInstr(env, PPC32Instr_Alu(Palu_AND, dst, dst,
- PPC32RH_Imm(False,7<<1)));
+ Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S);
+ HReg dst = newVRegI(env);
+ HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPCRH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
+ addInstr(env, PPCInstr_Cmp(syned, True/*32bit cmp*/,
+ 7/*cr*/, srcL, srcR));
+ addInstr(env, PPCInstr_MfCR(dst));
+ addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst,
+ PPCRH_Imm(False,7<<1)));
return dst;
}
if (e->Iex.Binop.op == Iop_CmpORD64S ||
e->Iex.Binop.op == Iop_CmpORD64U) {
- Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD64S);
- HReg dst = newVRegI(env);
- HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
- PPC32RH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
+ Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD64S);
+ HReg dst = newVRegI(env);
+ HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPCRH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
vassert(mode64);
- addInstr(env, PPC32Instr_Cmp(syned, False/*64bit cmp*/,
- 7/*cr*/, srcL, srcR));
- addInstr(env, PPC32Instr_MfCR(dst));
- addInstr(env, PPC32Instr_Alu(Palu_AND, dst, dst,
- PPC32RH_Imm(False,7<<1)));
+ addInstr(env, PPCInstr_Cmp(syned, False/*64bit cmp*/,
+ 7/*cr*/, srcL, srcR));
+ addInstr(env, PPCInstr_MfCR(dst));
+ addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst,
+ PPCRH_Imm(False,7<<1)));
return dst;
}
//zz HReg hi8s = iselIntExpr_R(env, e->Iex.Binop.arg1);
//zz HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2);
//zz addInstr(env,
-//zz PPC32Instr_Alu(Palu_SHL, hi8, hi8s, PPC32RH_Imm(False,8)));
+//zz PPCInstr_Alu(Palu_SHL, hi8, hi8s, PPCRH_Imm(False,8)));
//zz addInstr(env,
-//zz PPC32Instr_Alu(Palu_AND, lo8, lo8s, PPC32RH_Imm(False,0xFF)));
+//zz PPCInstr_Alu(Palu_AND, lo8, lo8s, PPCRH_Imm(False,0xFF)));
//zz addInstr(env,
-//zz PPC32Instr_Alu(Palu_OR, hi8, hi8, PPC32RI_Reg(lo8)));
+//zz PPCInstr_Alu(Palu_OR, hi8, hi8, PPCRI_Reg(lo8)));
//zz return hi8;
//zz }
//zz
//zz HReg lo16 = newVRegI32(env);
//zz HReg hi16s = iselIntExpr_R(env, e->Iex.Binop.arg1);
//zz HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2);
-//zz addInstr(env, mk_sh32(env, Psh_SHL, hi16, hi16s, PPC32RI_Imm(16)));
-//zz addInstr(env, PPC32Instr_Alu(Palu_AND, lo16, lo16s, PPC32RI_Imm(0xFFFF)));
-//zz addInstr(env, PPC32Instr_Alu(Palu_OR, hi16, hi16, PPC32RI_Reg(lo16)));
+//zz addInstr(env, mk_sh32(env, Psh_SHL, hi16, hi16s, PPCRI_Imm(16)));
+//zz addInstr(env, PPCInstr_Alu(Palu_AND, lo16, lo16s, PPCRI_Imm(0xFFFF)));
+//zz addInstr(env, PPCInstr_Alu(Palu_OR, hi16, hi16, PPCRI_Reg(lo16)));
//zz return hi16;
//zz }
HReg fr_srcL = iselDblExpr(env, e->Iex.Binop.arg1);
HReg fr_srcR = iselDblExpr(env, e->Iex.Binop.arg2);
- HReg r_ccPPC32 = newVRegI(env);
+ HReg r_ccPPC = newVRegI(env);
HReg r_ccIR = newVRegI(env);
HReg r_ccIR_b0 = newVRegI(env);
HReg r_ccIR_b2 = newVRegI(env);
HReg r_ccIR_b6 = newVRegI(env);
- addInstr(env, PPC32Instr_FpCmp(r_ccPPC32, fr_srcL, fr_srcR));
+ addInstr(env, PPCInstr_FpCmp(r_ccPPC, fr_srcL, fr_srcR));
- /* Map compare result from PPC32 to IR,
+ /* Map compare result from PPC to IR,
conforming to CmpF64 definition. */
/*
FP cmp result | PPC | IR
LT | 0x8 | 0x01
*/
- // r_ccIR_b0 = r_ccPPC32[0] | r_ccPPC32[3]
- addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/,
- r_ccIR_b0, r_ccPPC32, PPC32RH_Imm(False,0x3)));
- addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR_b0, r_ccPPC32, PPC32RH_Reg(r_ccIR_b0)));
- addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b0, r_ccIR_b0, PPC32RH_Imm(False,0x1)));
+ // r_ccIR_b0 = r_ccPPC[0] | r_ccPPC[3]
+ addInstr(env, PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/,
+ r_ccIR_b0, r_ccPPC,
+ PPCRH_Imm(False,0x3)));
+ addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR_b0,
+ r_ccPPC, PPCRH_Reg(r_ccIR_b0)));
+ addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b0,
+ r_ccIR_b0, PPCRH_Imm(False,0x1)));
- // r_ccIR_b2 = r_ccPPC32[0]
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
- r_ccIR_b2, r_ccPPC32, PPC32RH_Imm(False,0x2)));
- addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b2, r_ccIR_b2, PPC32RH_Imm(False,0x4)));
-
- // r_ccIR_b6 = r_ccPPC32[0] | r_ccPPC32[1]
- addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/,
- r_ccIR_b6, r_ccPPC32, PPC32RH_Imm(False,0x1)));
- addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR_b6, r_ccPPC32, PPC32RH_Reg(r_ccIR_b6)));
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
- r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm(False,0x6)));
- addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm(False,0x40)));
+ // r_ccIR_b2 = r_ccPPC[0]
+ addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_ccIR_b2, r_ccPPC,
+ PPCRH_Imm(False,0x2)));
+ addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b2,
+ r_ccIR_b2, PPCRH_Imm(False,0x4)));
+
+ // r_ccIR_b6 = r_ccPPC[0] | r_ccPPC[1]
+ addInstr(env, PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/,
+ r_ccIR_b6, r_ccPPC,
+ PPCRH_Imm(False,0x1)));
+ addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR_b6,
+ r_ccPPC, PPCRH_Reg(r_ccIR_b6)));
+ addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_ccIR_b6, r_ccIR_b6,
+ PPCRH_Imm(False,0x6)));
+ addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b6,
+ r_ccIR_b6, PPCRH_Imm(False,0x40)));
// r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6
- addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR, r_ccIR_b0, PPC32RH_Reg(r_ccIR_b2)));
- addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR, r_ccIR, PPC32RH_Reg(r_ccIR_b6)));
+ addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR,
+ r_ccIR_b0, PPCRH_Reg(r_ccIR_b2)));
+ addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR,
+ r_ccIR, PPCRH_Reg(r_ccIR_b6)));
return r_ccIR;
}
set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
sub_from_sp( env, 16 );
- addInstr(env, PPC32Instr_FpF64toI32(r_dst, fr_src));
+ addInstr(env, PPCInstr_FpF64toI32(r_dst, fr_src));
add_to_sp( env, 16 );
/* Restore default FPU rounding. */
set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
sub_from_sp( env, 16 );
- addInstr(env, PPC32Instr_FpF64toI64(r_dst, fr_src));
+ addInstr(env, PPCInstr_FpF64toI64(r_dst, fr_src));
add_to_sp( env, 16 );
/* Restore default FPU rounding. */
/* --------- UNARY OP --------- */
case Iex_Unop: {
+ IROp op_unop = e->Iex.Unop.op;
+
/* 1Uto8(32to1(expr32)) */
DEFINE_PATTERN(p_32to1_then_1Uto8,
unop(Iop_1Uto8,unop(Iop_32to1,bind(0))));
IRExpr* expr32 = mi.bindee[0];
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, expr32);
- addInstr(env, PPC32Instr_Alu(Palu_AND, r_dst, r_src, PPC32RH_Imm(False,1)));
+ addInstr(env, PPCInstr_Alu(Palu_AND, r_dst,
+ r_src, PPCRH_Imm(False,1)));
return r_dst;
}
IRExpr_Load(Iend_BE,Ity_I16,bind(0))) );
if (matchIRExpr(&mi,p_LDbe16_then_16Uto32,e)) {
HReg r_dst = newVRegI(env);
- PPC32AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
- addInstr(env, PPC32Instr_Load(2,False,r_dst,amode, mode64));
+ PPCAMode* amode = iselIntExpr_AMode( env, mi.bindee[0] );
+ addInstr(env, PPCInstr_Load(2,False,r_dst,amode, mode64));
return r_dst;
}
}
- switch (e->Iex.Unop.op) {
+ switch (op_unop) {
case Iop_8Uto16:
case Iop_8Uto32:
case Iop_8Uto64:
case Iop_16Uto64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- UShort mask = toUShort(e->Iex.Unop.op==Iop_16Uto64 ? 0xFFFF :
- e->Iex.Unop.op==Iop_16Uto32 ? 0xFFFF : 0xFF);
- addInstr(env, PPC32Instr_Alu(Palu_AND,r_dst,r_src,
- PPC32RH_Imm(False,mask)));
+ UShort mask = toUShort(op_unop==Iop_16Uto64 ? 0xFFFF :
+ op_unop==Iop_16Uto32 ? 0xFFFF : 0xFF);
+ addInstr(env, PPCInstr_Alu(Palu_AND,r_dst,r_src,
+ PPCRH_Imm(False,mask)));
return r_dst;
}
case Iop_32Uto64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
vassert(mode64);
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/,
- r_dst, r_src, PPC32RH_Imm(False,32)));
- addInstr(env, PPC32Instr_Shft(Pshft_SHR, False/*64bit shift*/,
- r_dst, r_dst, PPC32RH_Imm(False,32)));
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/,
+ r_dst, r_src, PPCRH_Imm(False,32)));
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SHR, False/*64bit shift*/,
+ r_dst, r_dst, PPCRH_Imm(False,32)));
return r_dst;
}
case Iop_8Sto16:
case Iop_16Sto32: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- UShort amt = toUShort(e->Iex.Unop.op==Iop_16Sto32 ? 16 : 24);
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
- r_dst, r_src, PPC32RH_Imm(False,amt)));
- addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/,
- r_dst, r_dst, PPC32RH_Imm(False,amt)));
+ UShort amt = toUShort(op_unop==Iop_16Sto32 ? 16 : 24);
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_dst, r_src, PPCRH_Imm(False,amt)));
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+ r_dst, r_dst, PPCRH_Imm(False,amt)));
return r_dst;
}
case Iop_8Sto64:
case Iop_32Sto64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- UShort amt = toUShort(e->Iex.Unop.op==Iop_8Sto64 ? 56 :
- e->Iex.Unop.op==Iop_16Sto64 ? 48 : 32);
+ UShort amt = toUShort(op_unop==Iop_8Sto64 ? 56 :
+ op_unop==Iop_16Sto64 ? 48 : 32);
vassert(mode64);
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, False/*64bit shift*/,
- r_dst, r_src, PPC32RH_Imm(False,amt)));
- addInstr(env, PPC32Instr_Shft(Pshft_SAR, False/*64bit shift*/,
- r_dst, r_dst, PPC32RH_Imm(False,amt)));
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/,
+ r_dst, r_src, PPCRH_Imm(False,amt)));
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SAR, False/*64bit shift*/,
+ r_dst, r_dst, PPCRH_Imm(False,amt)));
return r_dst;
}
case Iop_Not8:
case Iop_Not64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Unary(Pun_NOT,r_dst,r_src));
+ addInstr(env, PPCInstr_Unary(Pun_NOT,r_dst,r_src));
return r_dst;
}
case Iop_64HIto32: {
} else {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Shft(Pshft_SHR, False/*64bit shift*/,
- r_dst, r_src, PPC32RH_Imm(False,32)));
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SHR, False/*64bit shift*/,
+ r_dst, r_src, PPCRH_Imm(False,32)));
return r_dst;
}
}
//:: if (matchIRExpr(&mi,p_MullS32_then_64to32,e)) {
//:: HReg r_dst = newVRegI32(env);
//:: HReg r_srcL = iselIntExpr_R( env, mi.bindee[0] );
-//:: PPC32RI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] ));
-//:: addInstr(env, PPC32Instr_MulL(True, 0, r_dst, r_srcL, ri_srcR));
+//:: PPCRI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] ));
+//:: addInstr(env, PPCInstr_MulL(True, 0, r_dst, r_srcL, ri_srcR));
//:: return r_dst;
//:: }
//:: }
//:: if (matchIRExpr(&mi,p_MullU32_then_64to32,e)) {
//:: HReg r_dst = newVRegI32(env);
//:: HReg r_srcL = iselIntExpr_R( env, mi.bindee[0] );
-//:: PPC32RI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] ));
-//:: addInstr(env, PPC32Instr_MulL(False, 0, r_dst, r_srcL, ri_srcR));
+//:: PPCRI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] ));
+//:: addInstr(env, PPCInstr_MulL(False, 0, r_dst, r_srcL, ri_srcR));
//:: return r_dst;
//:: }
//:: }
case Iop_32HIto16: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- UShort shift = toUShort(e->Iex.Unop.op == Iop_16HIto8 ? 8 : 16);
- addInstr(env, PPC32Instr_Shft(Pshft_SHR, True/*32bit shift*/,
- r_dst, r_src, PPC32RH_Imm(False,shift)));
+ UShort shift = toUShort(op_unop == Iop_16HIto8 ? 8 : 16);
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/,
+ r_dst, r_src, PPCRH_Imm(False,shift)));
return r_dst;
}
case Iop_128HIto64: {
}
case Iop_1Uto32:
case Iop_1Uto8: {
- HReg r_dst = newVRegI(env);
- PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Set32(cond,r_dst));
+ HReg r_dst = newVRegI(env);
+ PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+ addInstr(env, PPCInstr_Set(cond,r_dst));
return r_dst;
}
case Iop_1Sto8:
case Iop_1Sto16:
case Iop_1Sto32: {
/* could do better than this, but for now ... */
- HReg r_dst = newVRegI(env);
- PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Set32(cond,r_dst));
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
- r_dst, r_dst, PPC32RH_Imm(False,31)));
- addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/,
- r_dst, r_dst, PPC32RH_Imm(False,31)));
+ HReg r_dst = newVRegI(env);
+ PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+ addInstr(env, PPCInstr_Set(cond,r_dst));
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+ r_dst, r_dst, PPCRH_Imm(False,31)));
+ addInstr(env,
+ PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+ r_dst, r_dst, PPCRH_Imm(False,31)));
return r_dst;
}
case Iop_Clz32:
case Iop_Clz64: {
- PPC32UnaryOp op_clz =
- (e->Iex.Unop.op == Iop_Clz32) ? Pun_CLZ32 : Pun_CLZ64;
+ PPCUnaryOp op_clz = (op_unop == Iop_Clz32) ? Pun_CLZ32 :
+ Pun_CLZ64;
/* Count leading zeroes. */
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Unary(op_clz,r_dst,r_src));
+ addInstr(env, PPCInstr_Unary(op_clz,r_dst,r_src));
return r_dst;
}
case Iop_Neg8:
case Iop_Neg64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Unary(Pun_NEG,r_dst,r_src));
+ addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src));
return r_dst;
}
HReg r_aligned16;
HReg dst = newVRegI(env);
HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
- PPC32AMode *am_off0, *am_off12;
+ PPCAMode *am_off0, *am_off12;
sub_from_sp( env, 32 ); // Move SP down 32 bytes
// get a quadword aligned address within our stack space
r_aligned16 = get_sp_aligned16( env );
- am_off0 = PPC32AMode_IR( 0, r_aligned16 );
- am_off12 = PPC32AMode_IR( 12,r_aligned16 );
+ am_off0 = PPCAMode_IR( 0, r_aligned16 );
+ am_off12 = PPCAMode_IR( 12,r_aligned16 );
// store vec, load low word to dst
- addInstr(env, PPC32Instr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
- addInstr(env, PPC32Instr_Load( 4, False, dst, am_off12, mode64 ));
+ addInstr(env,
+ PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
+ addInstr(env,
+ PPCInstr_Load( 4, False, dst, am_off12, mode64 ));
add_to_sp( env, 32 ); // Reset SP
return dst;
/* Given an IEEE754 double, produce an I64 with the same bit
pattern. */
case Iop_ReinterpF64asI64: {
- PPC32AMode *am_addr;
+ PPCAMode *am_addr;
HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
HReg r_dst = newVRegI(env);
vassert(mode64);
sub_from_sp( env, 16 ); // Move SP down 16 bytes
- am_addr = PPC32AMode_IR(0, StackFramePtr(mode64));
+ am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
// store as F64
- addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr ));
+ addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
+ fr_src, am_addr ));
// load as Ity_I64
- addInstr(env, PPC32Instr_Load( 8, False, r_dst, am_addr, mode64 ));
+ addInstr(env, PPCInstr_Load( 8, False,
+ r_dst, am_addr, mode64 ));
add_to_sp( env, 16 ); // Reset SP
return r_dst;
}
-
+
default:
break;
}
if (ty == Ity_I8 || ty == Ity_I16 ||
ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
HReg r_dst = newVRegI(env);
- PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset,
- GuestStatePtr(mode64) );
- addInstr(env, PPC32Instr_Load( toUChar(sizeofIRType(ty)),
- False, r_dst, am_addr, mode64 ));
+ PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+ GuestStatePtr(mode64) );
+ addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
+ False, r_dst, am_addr, mode64 ));
return r_dst;
}
break;
goto irreducible;
/* Marshal args, do the call, clear stack. */
- doHelperCall( env, False, NULL, e->Iex.CCall.cee, e->Iex.CCall.args );
+ doHelperCall( env, False, NULL,
+ e->Iex.CCall.cee, e->Iex.CCall.args );
/* GPR3 now holds the destination address from Pin_Goto */
addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
case Iex_Const: {
Long l;
HReg r_dst = newVRegI(env);
- switch (e->Iex.Const.con->tag) {
+ IRConst* con = e->Iex.Const.con;
+ switch (con->tag) {
case Ico_U64: vassert(mode64);
- l = (Long) e->Iex.Const.con->Ico.U64; break;
- case Ico_U32: l = (Long)(Int) e->Iex.Const.con->Ico.U32; break;
- case Ico_U16: l = (Long)(Int)(Short)e->Iex.Const.con->Ico.U16; break;
- case Ico_U8: l = (Long)(Int)(Char )e->Iex.Const.con->Ico.U8; break;
- default: vpanic("iselIntExpr_R.const(ppc32)");
+ l = (Long) con->Ico.U64; break;
+ case Ico_U32: l = (Long)(Int) con->Ico.U32; break;
+ case Ico_U16: l = (Long)(Int)(Short)con->Ico.U16; break;
+ case Ico_U8: l = (Long)(Int)(Char )con->Ico.U8; break;
+ default: vpanic("iselIntExpr_R.const(ppc)");
}
- addInstr(env, PPC32Instr_LI(r_dst, (ULong)l, mode64));
+ addInstr(env, PPCInstr_LI(r_dst, (ULong)l, mode64));
return r_dst;
}
if ((ty == Ity_I8 || ty == Ity_I16 ||
ty == Ity_I32 || ((ty == Ity_I64) && mode64)) &&
typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
- PPC32CondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
- HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond);
- HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
- PPC32RI* r0 = iselIntExpr_RI(env, e->Iex.Mux0X.expr0);
- HReg r_dst = newVRegI(env);
- HReg r_tmp = newVRegI(env);
+ PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
+ HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond);
+ HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
+ PPCRI* r0 = iselIntExpr_RI(env, e->Iex.Mux0X.expr0);
+ HReg r_dst = newVRegI(env);
+ HReg r_tmp = newVRegI(env);
addInstr(env, mk_iMOVds_RR(r_dst,rX));
- addInstr(env, PPC32Instr_Alu(Palu_AND, r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
- addInstr(env, PPC32Instr_CMov(cc,r_dst,r0));
+ addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp,
+ r_cond, PPCRH_Imm(False,0xFF)));
+ addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r_tmp, PPCRH_Imm(False,0)));
+ addInstr(env, PPCInstr_CMov(cc,r_dst,r0));
return r_dst;
}
break;
/* We get here if no pattern matched. */
irreducible:
ppIRExpr(e);
- vpanic("iselIntExpr_R(ppc32): cannot reduce tree");
+ vpanic("iselIntExpr_R(ppc): cannot reduce tree");
}
return toBool(u == (UInt)i);
}
-static Bool sane_AMode ( PPC32AMode* am )
+static Bool sane_AMode ( PPCAMode* am )
{
switch (am->tag) {
case Pam_IR:
hregClass(am->Pam.RR.index) == HRcIntWRDSZ &&
hregIsVirtual(am->Pam.IR.index) );
default:
- vpanic("sane_AMode: unknown ppc32 amode tag");
+ vpanic("sane_AMode: unknown ppc amode tag");
}
}
-static PPC32AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e )
+static PPCAMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e )
{
- PPC32AMode* am = iselIntExpr_AMode_wrk(env, e);
+ PPCAMode* am = iselIntExpr_AMode_wrk(env, e);
vassert(sane_AMode(am));
return am;
}
/* DO NOT CALL THIS DIRECTLY ! */
-static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e )
+static PPCAMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e )
{
IRType ty = typeOfIRExpr(env->type_env,e);
vassert(ty == (mode64 ? Ity_I64 : Ity_I32));
&& e->Iex.Binop.arg2->tag == Iex_Const
&& e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U32
&& fits16bits(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)) {
- return PPC32AMode_IR(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32,
- iselIntExpr_R(env, e->Iex.Binop.arg1));
+ return PPCAMode_IR( e->Iex.Binop.arg2->Iex.Const.con->Ico.U32,
+ iselIntExpr_R(env, e->Iex.Binop.arg1) );
}
/* Add32(expr,expr) */
&& e->Iex.Binop.op == Iop_Add32) {
HReg r_base = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_idx = iselIntExpr_R(env, e->Iex.Binop.arg2);
- return PPC32AMode_RR(r_idx, r_base);
+ return PPCAMode_RR( r_idx, r_base );
}
/* Doesn't match anything in particular. Generate it into
a register and use that. */
{
HReg r1 = iselIntExpr_R(env, e);
- return PPC32AMode_IR(0, r1);
+ return PPCAMode_IR( 0, r1 );
}
}
immediate; this guaranteed that all signed immediates that are
return can have their sign inverted if need be. */
-static PPC32RH* iselIntExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e )
+static PPCRH* iselIntExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e )
{
- PPC32RH* ri = iselIntExpr_RH_wrk(env, syned, e);
+ PPCRH* ri = iselIntExpr_RH_wrk(env, syned, e);
/* sanity checks ... */
switch (ri->tag) {
case Prh_Imm:
vassert(hregIsVirtual(ri->Prh.Reg.reg));
return ri;
default:
- vpanic("iselIntExpr_RH: unknown ppc32 RH tag");
+ vpanic("iselIntExpr_RH: unknown ppc RH tag");
}
}
/* DO NOT CALL THIS DIRECTLY ! */
-static PPC32RH* iselIntExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e )
+static PPCRH* iselIntExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e )
{
ULong u;
Long l;
/* special case: immediate */
if (e->tag == Iex_Const) {
+ IRConst* con = e->Iex.Const.con;
/* What value are we aiming to generate? */
- switch (e->Iex.Const.con->tag) {
+ switch (con->tag) {
/* Note: Not sign-extending - we carry 'syned' around */
case Ico_U64: vassert(mode64);
- u = e->Iex.Const.con->Ico.U64; break;
- case Ico_U32: u = 0xFFFFFFFF & e->Iex.Const.con->Ico.U32; break;
- case Ico_U16: u = 0x0000FFFF & e->Iex.Const.con->Ico.U16; break;
- case Ico_U8: u = 0x000000FF & e->Iex.Const.con->Ico.U8; break;
- default: vpanic("iselIntExpr_RH.Iex_Const(ppc32h)");
+ u = con->Ico.U64; break;
+ case Ico_U32: u = 0xFFFFFFFF & con->Ico.U32; break;
+ case Ico_U16: u = 0x0000FFFF & con->Ico.U16; break;
+ case Ico_U8: u = 0x000000FF & con->Ico.U8; break;
+ default: vpanic("iselIntExpr_RH.Iex_Const(ppch)");
}
l = (Long)u;
/* Now figure out if it's representable. */
if (!syned && u <= 65535) {
- return PPC32RH_Imm(False/*unsigned*/, toUShort(u & 0xFFFF));
+ return PPCRH_Imm(False/*unsigned*/, toUShort(u & 0xFFFF));
}
if (syned && l >= -32767 && l <= 32767) {
- return PPC32RH_Imm(True/*signed*/, toUShort(u & 0xFFFF));
+ return PPCRH_Imm(True/*signed*/, toUShort(u & 0xFFFF));
}
/* no luck; use the Slow Way. */
}
/* default case: calculate into a register and return that */
{
HReg r = iselIntExpr_R ( env, e );
- return PPC32RH_Reg(r);
+ return PPCRH_Reg(r);
}
}
/* --------------------- RIs --------------------- */
-/* Calculate an expression into an PPC32RI operand. As with
+/* Calculate an expression into a PPCRI operand. As with
iselIntExpr_R, the expression can have type 32, 16 or 8 bits. */
-static PPC32RI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e )
+static PPCRI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e )
{
- PPC32RI* ri = iselIntExpr_RI_wrk(env, e);
+ PPCRI* ri = iselIntExpr_RI_wrk(env, e);
/* sanity checks ... */
switch (ri->tag) {
case Pri_Imm:
vassert(hregIsVirtual(ri->Pri.Reg));
return ri;
default:
- vpanic("iselIntExpr_RI: unknown ppc32 RI tag");
+ vpanic("iselIntExpr_RI: unknown ppc RI tag");
}
}
/* DO NOT CALL THIS DIRECTLY ! */
-static PPC32RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e )
+static PPCRI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e )
{
Long l;
IRType ty = typeOfIRExpr(env->type_env,e);
/* special case: immediate */
if (e->tag == Iex_Const) {
- switch (e->Iex.Const.con->tag) {
+ IRConst* con = e->Iex.Const.con;
+ switch (con->tag) {
case Ico_U64: vassert(mode64);
- l = (Long) e->Iex.Const.con->Ico.U64; break;
- case Ico_U32: l = (Long)(Int) e->Iex.Const.con->Ico.U32; break;
- case Ico_U16: l = (Long)(Int)(Short)e->Iex.Const.con->Ico.U16; break;
- case Ico_U8: l = (Long)(Int)(Char )e->Iex.Const.con->Ico.U8; break;
- default: vpanic("iselIntExpr_RI.Iex_Const(ppc32h)");
+ l = (Long) con->Ico.U64; break;
+ case Ico_U32: l = (Long)(Int) con->Ico.U32; break;
+ case Ico_U16: l = (Long)(Int)(Short)con->Ico.U16; break;
+ case Ico_U8: l = (Long)(Int)(Char )con->Ico.U8; break;
+ default: vpanic("iselIntExpr_RI.Iex_Const(ppch)");
}
- return PPC32RI_Imm((ULong)l);
+ return PPCRI_Imm((ULong)l);
}
/* default case: calculate into a register and return that */
{
HReg r = iselIntExpr_R ( env, e );
- return PPC32RI_Reg(r);
+ return PPCRI_Reg(r);
}
}
being an immediate in the range 1 .. 31 inclusive. Used for doing
shift amounts. */
-static PPC32RH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e )
+static PPCRH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e )
{
- PPC32RH* ri = iselIntExpr_RH5u_wrk(env, e);
+ PPCRH* ri = iselIntExpr_RH5u_wrk(env, e);
/* sanity checks ... */
switch (ri->tag) {
case Prh_Imm:
vassert(hregIsVirtual(ri->Prh.Reg.reg));
return ri;
default:
- vpanic("iselIntExpr_RH5u: unknown ppc32 RI tag");
+ vpanic("iselIntExpr_RH5u: unknown ppc RI tag");
}
}
/* DO NOT CALL THIS DIRECTLY ! */
-static PPC32RH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e )
+static PPCRH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e )
{
IRType ty = typeOfIRExpr(env->type_env,e);
vassert(ty == Ity_I8);
&& e->Iex.Const.con->tag == Ico_U8
&& e->Iex.Const.con->Ico.U8 >= 1
&& e->Iex.Const.con->Ico.U8 <= 31) {
- return PPC32RH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8);
+ return PPCRH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8);
}
/* default case: calculate into a register and return that */
{
HReg r = iselIntExpr_R ( env, e );
- return PPC32RH_Reg(r);
+ return PPCRH_Reg(r);
}
}
being an immediate in the range 1 .. 63 inclusive. Used for doing
shift amounts. */
-static PPC32RH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e )
+static PPCRH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e )
{
- PPC32RH* ri = iselIntExpr_RH6u_wrk(env, e);
+ PPCRH* ri = iselIntExpr_RH6u_wrk(env, e);
/* sanity checks ... */
switch (ri->tag) {
case Prh_Imm:
}
/* DO NOT CALL THIS DIRECTLY ! */
-static PPC32RH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e )
+static PPCRH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e )
{
IRType ty = typeOfIRExpr(env->type_env,e);
vassert(ty == Ity_I8);
&& e->Iex.Const.con->tag == Ico_U8
&& e->Iex.Const.con->Ico.U8 >= 1
&& e->Iex.Const.con->Ico.U8 <= 63) {
- return PPC32RH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8);
+ return PPCRH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8);
}
/* default case: calculate into a register and return that */
{
HReg r = iselIntExpr_R ( env, e );
- return PPC32RH_Reg(r);
+ return PPCRH_Reg(r);
}
}
condition code which would correspond when the expression would
notionally have returned 1. */
-static PPC32CondCode iselCondCode ( ISelEnv* env, IRExpr* e )
+static PPCCondCode iselCondCode ( ISelEnv* env, IRExpr* e )
{
/* Uh, there's nothing we can sanity check here, unfortunately. */
return iselCondCode_wrk(env,e);
}
/* DO NOT CALL THIS DIRECTLY ! */
-static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
+static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
{
// MatchInfo mi;
// DECLARE_PATTERN(p_32to1);
if (e->tag == Iex_Const && e->Iex.Const.con->Ico.U1 == True) {
// Make a compare that will always be true:
HReg r_zero = newVRegI(env);
- addInstr(env, PPC32Instr_LI(r_zero, 0, mode64));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, r_zero, PPC32RH_Reg(r_zero)));
+ addInstr(env, PPCInstr_LI(r_zero, 0, mode64));
+ addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r_zero, PPCRH_Reg(r_zero)));
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
}
/* Not1(...) */
if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
/* Generate code for the arg, and negate the test condition */
- PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+ PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
cond.test = invertCondTest(cond.test);
return cond;
}
HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
HReg tmp = newVRegI(env);
/* could do better, probably -- andi. */
- addInstr(env, PPC32Instr_Alu(Palu_AND, tmp, src, PPC32RH_Imm(False,1)));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, tmp, PPC32RH_Imm(False,1)));
+ addInstr(env, PPCInstr_Alu(Palu_AND, tmp,
+ src, PPCRH_Imm(False,1)));
+ addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, tmp, PPCRH_Imm(False,1)));
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
}
&& e->Iex.Unop.op == Iop_CmpNEZ8) {
HReg r_32 = iselIntExpr_R(env, e->Iex.Unop.arg);
HReg r_l = newVRegI(env);
- addInstr(env, PPC32Instr_Alu(Palu_AND, r_l, r_32,
- PPC32RH_Imm(False,0xFF)));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, r_l, PPC32RH_Imm(False,0)));
+ addInstr(env, PPCInstr_Alu(Palu_AND, r_l, r_32,
+ PPCRH_Imm(False,0xFF)));
+ addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r_l, PPCRH_Imm(False,0)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
}
if (e->tag == Iex_Unop
&& e->Iex.Unop.op == Iop_CmpNEZ32) {
HReg r1 = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, r1, PPC32RH_Imm(False,0)));
+ addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r1, PPCRH_Imm(False,0)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
}
|| e->Iex.Binop.op == Iop_CmpLE32U)) {
Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S ||
e->Iex.Binop.op == Iop_CmpLE32S);
- HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
- PPC32RH* ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Cmp(syned, True/*32bit cmp*/,
- 7/*cr*/, r1, ri2));
+ HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPCRH* ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
+ addInstr(env, PPCInstr_Cmp(syned, True/*32bit cmp*/,
+ 7/*cr*/, r1, ri2));
switch (e->Iex.Binop.op) {
case Iop_CmpEQ32: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
case Iop_CmpNE32: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
-// case Iop_CmpLT32S: return mk_PPCCondCode( Pct_TRUE, Pcf_LT );
+// case Iop_CmpLT32S: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
case Iop_CmpLT32U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
-// case Iop_CmpLE32S: return mk_PPCCondCode( Pct_FALSE, Pcf_GT );
+// case Iop_CmpLE32S: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
case Iop_CmpLE32U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
- default: vpanic("iselCondCode(ppc32): CmpXX32");
+ default: vpanic("iselCondCode(ppc): CmpXX32");
}
}
|| e->Iex.Binop.op == Iop_CmpLE64U)) {
Bool syned = (e->Iex.Binop.op == Iop_CmpLT64S ||
e->Iex.Binop.op == Iop_CmpLE64S);
- HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
- PPC32RH* ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
+ HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ PPCRH* ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
vassert(mode64);
- addInstr(env, PPC32Instr_Cmp(syned, False/*64bit cmp*/,
- 7/*cr*/, r1, ri2));
+ addInstr(env, PPCInstr_Cmp(syned, False/*64bit cmp*/,
+ 7/*cr*/, r1, ri2));
switch (e->Iex.Binop.op) {
case Iop_CmpEQ64: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
case Iop_CmpNE64: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
-// case Iop_CmpLT64S: return mk_PPCCondCode( Pct_TRUE, Pcf_LT );
+// case Iop_CmpLT64S: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
case Iop_CmpLT64U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
-// case Iop_CmpLE64S: return mk_PPCCondCode( Pct_FALSE, Pcf_GT );
+// case Iop_CmpLE64S: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
case Iop_CmpLE64U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
- default: vpanic("iselCondCode(ppc32): CmpXX64");
+ default: vpanic("iselCondCode(ppc): CmpXX64");
}
}
HReg tmp = newVRegI(env);
iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg );
addInstr(env, mk_iMOVds_RR(tmp, lo));
- addInstr(env, PPC32Instr_Alu(Palu_OR, tmp, tmp, PPC32RH_Reg(hi)));
- addInstr(env, PPC32Instr_Cmp(False/*sign*/, True/*32bit cmp*/,
- 7/*cr*/, tmp,PPC32RH_Imm(False,0)));
+ addInstr(env, PPCInstr_Alu(Palu_OR, tmp, tmp, PPCRH_Reg(hi)));
+ addInstr(env, PPCInstr_Cmp(False/*sign*/, True/*32bit cmp*/,
+ 7/*cr*/, tmp,PPCRH_Imm(False,0)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
} else { // mode64
HReg r_src = iselIntExpr_R(env, e->Iex.Binop.arg1);
- addInstr(env, PPC32Instr_Cmp(False/*sign*/, False/*64bit cmp*/,
- 7/*cr*/, r_src,PPC32RH_Imm(False,0)));
+ addInstr(env, PPCInstr_Cmp(False/*sign*/, False/*64bit cmp*/,
+ 7/*cr*/, r_src,PPCRH_Imm(False,0)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
}
}
if (e->tag == Iex_Tmp) {
HReg r_src = lookupIRTemp(env, e->Iex.Tmp.tmp);
HReg src_masked = newVRegI(env);
- addInstr(env, PPC32Instr_Alu(Palu_AND, src_masked, r_src, PPC32RH_Imm(False,1)));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, src_masked, PPC32RH_Imm(False,1)));
+ addInstr(env,
+ PPCInstr_Alu(Palu_AND, src_masked,
+ r_src, PPCRH_Imm(False,1)));
+ addInstr(env,
+ PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, src_masked, PPCRH_Imm(False,1)));
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
}
either real or virtual regs; in any case they must not be changed
by subsequent code emitted by the caller. */
-static void iselInt128Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+static void iselInt128Expr ( HReg* rHi, HReg* rLo,
+ ISelEnv* env, IRExpr* e )
{
vassert(mode64);
iselInt128Expr_wrk(rHi, rLo, env, e);
}
/* DO NOT CALL THIS DIRECTLY ! */
-static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo,
+ ISelEnv* env, IRExpr* e )
{
vassert(e);
vassert(typeOfIRExpr(env->type_env,e) == Ity_I128);
Bool syned = toBool(e->Iex.Binop.op == Iop_MullS64);
HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/,
- False/*lo64*/, False/*64bit mul*/,
- tLo, r_srcL, r_srcR));
- addInstr(env, PPC32Instr_MulL(syned,
- True/*hi64*/, False/*64bit mul*/,
- tHi, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/,
+ False/*lo64*/, False/*64bit mul*/,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_MulL(syned,
+ True/*hi64*/, False/*64bit mul*/,
+ tHi, r_srcL, r_srcR));
*rHi = tHi;
*rLo = tLo;
return;
either real or virtual regs; in any case they must not be changed
by subsequent code emitted by the caller. */
-static void iselInt64Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+static void iselInt64Expr ( HReg* rHi, HReg* rLo,
+ ISelEnv* env, IRExpr* e )
{
vassert(!mode64);
iselInt64Expr_wrk(rHi, rLo, env, e);
}
/* DO NOT CALL THIS DIRECTLY ! */
-static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo,
+ ISelEnv* env, IRExpr* e )
{
// HWord fn = 0; /* helper fn for most SIMD64 stuff */
vassert(e);
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
vassert(e->Iex.Const.con->tag == Ico_U64);
- addInstr(env, PPC32Instr_LI(tHi, wHi, mode64));
- addInstr(env, PPC32Instr_LI(tLo, wLo, mode64));
+ addInstr(env, PPCInstr_LI(tHi, wHi, mode64));
+ addInstr(env, PPCInstr_LI(tLo, wLo, mode64));
*rHi = tHi;
*rLo = tLo;
return;
/* 64-bit GET */
if (e->tag == Iex_Get) {
- PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset,
- GuestStatePtr(mode64) );
- PPC32AMode* am_addr4 = advance4(env, am_addr);
+ PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+ GuestStatePtr(mode64) );
+ PPCAMode* am_addr4 = advance4(env, am_addr);
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
- addInstr(env, PPC32Instr_Load( 4, False, tHi, am_addr, mode64 ));
- addInstr(env, PPC32Instr_Load( 4, False, tLo, am_addr4, mode64 ));
+ addInstr(env, PPCInstr_Load( 4, False, tHi, am_addr, mode64 ));
+ addInstr(env, PPCInstr_Load( 4, False, tLo, am_addr4, mode64 ));
*rHi = tHi;
*rLo = tLo;
return;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
- PPC32CondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
+ PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond);
HReg r_tmp = newVRegI(env);
addInstr(env, mk_iMOVds_RR(tHi,eXHi));
addInstr(env, mk_iMOVds_RR(tLo,eXLo));
- addInstr(env, PPC32Instr_Alu(Palu_AND,
- r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
+ addInstr(env, PPCInstr_Alu(Palu_AND,
+ r_tmp, r_cond, PPCRH_Imm(False,0xFF)));
+ addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r_tmp, PPCRH_Imm(False,0)));
- addInstr(env, PPC32Instr_CMov(cc,tHi,PPC32RI_Reg(e0Hi)));
- addInstr(env, PPC32Instr_CMov(cc,tLo,PPC32RI_Reg(e0Lo)));
+ addInstr(env, PPCInstr_CMov(cc,tHi,PPCRI_Reg(e0Hi)));
+ addInstr(env, PPCInstr_CMov(cc,tLo,PPCRI_Reg(e0Lo)));
*rHi = tHi;
*rLo = tLo;
return;
/* --------- BINARY ops --------- */
if (e->tag == Iex_Binop) {
- switch (e->Iex.Binop.op) {
+ IROp op_binop = e->Iex.Binop.op;
+ switch (op_binop) {
/* 32 x 32 -> 64 multiply */
case Iop_MullU32:
case Iop_MullS32: {
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
- Bool syned = toBool(e->Iex.Binop.op == Iop_MullS32);
+ Bool syned = toBool(op_binop == Iop_MullS32);
HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/,
- False/*lo32*/, True/*32bit mul*/,
- tLo, r_srcL, r_srcR));
- addInstr(env, PPC32Instr_MulL(syned,
- True/*hi32*/, True/*32bit mul*/,
- tHi, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/,
+ False/*lo32*/, True/*32bit mul*/,
+ tLo, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_MulL(syned,
+ True/*hi32*/, True/*32bit mul*/,
+ tHi, r_srcL, r_srcR));
*rHi = tHi;
*rLo = tLo;
return;
//.. HReg sHi, sLo;
//.. HReg tLo = newVRegI32(env);
//.. HReg tHi = newVRegI32(env);
-//.. Bool syned = e->Iex.Binop.op == Iop_DivModS64to32;
+//.. Bool syned = op_binop == Iop_DivModS64to32;
//.. X86RM* rmRight = iselIntExpr_RM(env, e->Iex.Binop.arg2);
//.. iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
//.. addInstr(env, mk_iMOVsd_RR(sHi, hregX86_EDX()));
HReg xLo, xHi, yLo, yHi;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
- PPC32AluOp op = e->Iex.Binop.op==Iop_Or64 ? Palu_OR
- : e->Iex.Binop.op==Iop_And64 ? Palu_AND
- : Palu_XOR;
+ PPCAluOp op = (op_binop == Iop_Or64) ? Palu_OR :
+ (op_binop == Iop_And64) ? Palu_AND : Palu_XOR;
iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Alu(op, tHi, xHi, PPC32RH_Reg(yHi)));
- addInstr(env, PPC32Instr_Alu(op, tLo, xLo, PPC32RH_Reg(yLo)));
+ addInstr(env, PPCInstr_Alu(op, tHi, xHi, PPCRH_Reg(yHi)));
+ addInstr(env, PPCInstr_Alu(op, tLo, xLo, PPCRH_Reg(yLo)));
*rHi = tHi;
*rLo = tLo;
return;
HReg tHi = newVRegI(env);
iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
-//.. if (e->Iex.Binop.op==Iop_Add64) {
- addInstr(env, PPC32Instr_AddSubC32( True/*add*/, True /*set carry*/,
- tLo, xLo, yLo));
- addInstr(env, PPC32Instr_AddSubC32( True/*add*/, False/*read carry*/,
- tHi, xHi, yHi));
+//.. if (op_binop==Iop_Add64) {
+ addInstr(env, PPCInstr_AddSubC( True/*add*/, True /*set carry*/,
+ tLo, xLo, yLo));
+ addInstr(env, PPCInstr_AddSubC( True/*add*/, False/*read carry*/,
+ tHi, xHi, yHi));
//.. } else { // Sub64
//.. }
*rHi = tHi;
case Iop_32Sto64: {
HReg tHi = newVRegI(env);
HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/,
- tHi, src, PPC32RH_Imm(False,31)));
+ addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+ tHi, src, PPCRH_Imm(False,31)));
*rHi = tHi;
*rLo = src;
return;
case Iop_32Uto64: {
HReg tHi = newVRegI(env);
HReg tLo = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_LI(tHi, 0, mode64));
+ addInstr(env, PPCInstr_LI(tHi, 0, mode64));
*rHi = tHi;
*rLo = tLo;
return;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
- PPC32AMode *am_off0, *am_offLO, *am_offHI;
+ PPCAMode *am_off0, *am_offLO, *am_offHI;
sub_from_sp( env, 32 ); // Move SP down 32 bytes
// get a quadword aligned address within our stack space
r_aligned16 = get_sp_aligned16( env );
- am_off0 = PPC32AMode_IR( 0, r_aligned16 );
- am_offHI = PPC32AMode_IR( off, r_aligned16 );
- am_offLO = PPC32AMode_IR( off+4, r_aligned16 );
+ am_off0 = PPCAMode_IR( 0, r_aligned16 );
+ am_offHI = PPCAMode_IR( off, r_aligned16 );
+ am_offLO = PPCAMode_IR( off+4, r_aligned16 );
// store as Vec128
- addInstr(env, PPC32Instr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
+ addInstr(env,
+ PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
// load hi,lo words (of hi/lo half of vec) as Ity_I32's
- addInstr(env, PPC32Instr_Load( 4, False, tHi, am_offHI, mode64 ));
- addInstr(env, PPC32Instr_Load( 4, False, tLo, am_offLO, mode64 ));
+ addInstr(env,
+ PPCInstr_Load( 4, False, tHi, am_offHI, mode64 ));
+ addInstr(env,
+ PPCInstr_Load( 4, False, tLo, am_offLO, mode64 ));
add_to_sp( env, 32 ); // Reset SP
*rHi = tHi;
case Iop_1Sto64: {
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
- PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Set32(cond,tLo));
- addInstr(env, PPC32Instr_Shft(Pshft_SHL, True/*32bit shift*/,
- tLo, tLo, PPC32RH_Imm(False,31)));
- addInstr(env, PPC32Instr_Shft(Pshft_SAR, True/*32bit shift*/,
- tLo, tLo, PPC32RH_Imm(False,31)));
+ PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+ addInstr(env, PPCInstr_Set(cond,tLo));
+ addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+ tLo, tLo, PPCRH_Imm(False,31)));
+ addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+ tLo, tLo, PPCRH_Imm(False,31)));
addInstr(env, mk_iMOVds_RR(tHi, tLo));
*rHi = tHi;
*rLo = tLo;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
iselInt64Expr(&yHi, &yLo, env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_LI(zero, 0, mode64));
- addInstr(env, PPC32Instr_AddSubC32( False/*sub*/, True /*set carry*/,
- tLo, zero, yLo));
- addInstr(env, PPC32Instr_AddSubC32( False/*sub*/, False/*read carry*/,
- tHi, zero, yHi));
+ addInstr(env, PPCInstr_LI(zero, 0, mode64));
+ addInstr(env, PPCInstr_AddSubC( False/*sub*/, True/*set carry*/,
+ tLo, zero, yLo));
+ addInstr(env, PPCInstr_AddSubC( False/*sub*/, False/*read carry*/,
+ tHi, zero, yHi));
*rHi = tHi;
*rLo = tLo;
return;
/* Given an IEEE754 double, produce an I64 with the same bit
pattern. */
case Iop_ReinterpF64asI64: {
- PPC32AMode *am_addr0, *am_addr1;
+ PPCAMode *am_addr0, *am_addr1;
HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
HReg r_dstLo = newVRegI(env);
HReg r_dstHi = newVRegI(env);
sub_from_sp( env, 16 ); // Move SP down 16 bytes
- am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64));
- am_addr1 = PPC32AMode_IR(4, StackFramePtr(mode64));
+ am_addr0 = PPCAMode_IR( 0, StackFramePtr(mode64) );
+ am_addr1 = PPCAMode_IR( 4, StackFramePtr(mode64) );
// store as F64
- addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr0 ));
+ addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
+ fr_src, am_addr0 ));
// load hi,lo as Ity_I32's
- addInstr(env, PPC32Instr_Load( 4, False, r_dstHi, am_addr0, mode64 ));
- addInstr(env, PPC32Instr_Load( 4, False, r_dstLo, am_addr1, mode64 ));
+ addInstr(env, PPCInstr_Load( 4, False, r_dstHi,
+ am_addr0, mode64 ));
+ addInstr(env, PPCInstr_Load( 4, False, r_dstLo,
+ am_addr1, mode64 ));
*rHi = r_dstHi;
*rLo = r_dstLo;
//.. return;
//.. }
- vex_printf("iselInt64Expr(ppc32): No such tag(%u)\n", e->tag);
+ vex_printf("iselInt64Expr(ppc): No such tag(%u)\n", e->tag);
ppIRExpr(e);
- vpanic("iselInt64Expr(ppc32)");
+ vpanic("iselInt64Expr(ppc)");
}
}
if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
- PPC32AMode* am_addr;
+ PPCAMode* am_addr;
HReg r_dst = newVRegF(env);
vassert(e->Iex.Load.ty == Ity_F32);
am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr);
- addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 4, r_dst, am_addr));
+ addInstr(env, PPCInstr_FpLdSt(True/*load*/, 4, r_dst, am_addr));
return r_dst;
}
HReg r_dst = newVRegF(env);
HReg r_src = iselDblExpr(env, e->Iex.Binop.arg2);
set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
- addInstr(env, PPC32Instr_FpF64toF32(r_dst, r_src));
+ addInstr(env, PPCInstr_FpF64toF32(r_dst, r_src));
set_FPU_rounding_default( env );
return r_dst;
}
if (e->tag == Iex_Get) {
HReg r_dst = newVRegF(env);
- PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset,
- GuestStatePtr(mode64) );
- addInstr(env, PPC32Instr_FpLdSt( True/*load*/, 4, r_dst, am_addr ));
+ PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+ GuestStatePtr(mode64) );
+ addInstr(env, PPCInstr_FpLdSt( True/*load*/, 4, r_dst, am_addr ));
return r_dst;
}
//.. return dst;
//.. }
- vex_printf("iselFltExpr(ppc32): No such tag(%u)\n", e->tag);
+ vex_printf("iselFltExpr(ppc): No such tag(%u)\n", e->tag);
ppIRExpr(e);
- vpanic("iselFltExpr_wrk(ppc32)");
+ vpanic("iselFltExpr_wrk(ppc)");
}
u.u64 = e->Iex.Const.con->Ico.F64i;
}
else
- vpanic("iselDblExpr(ppc32): const");
+ vpanic("iselDblExpr(ppc): const");
if (!mode64) {
HReg r_srcHi = newVRegI(env);
HReg r_srcLo = newVRegI(env);
- addInstr(env, PPC32Instr_LI(r_srcHi, u.u32x2[1], mode64));
- addInstr(env, PPC32Instr_LI(r_srcLo, u.u32x2[0], mode64));
+ addInstr(env, PPCInstr_LI(r_srcHi, u.u32x2[1], mode64));
+ addInstr(env, PPCInstr_LI(r_srcLo, u.u32x2[0], mode64));
return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
} else { // mode64
HReg r_src = newVRegI(env);
- addInstr(env, PPC32Instr_LI(r_src, u.u64, mode64));
+ addInstr(env, PPCInstr_LI(r_src, u.u64, mode64));
return mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64
}
}
if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
HReg r_dst = newVRegF(env);
- PPC32AMode* am_addr;
+ PPCAMode* am_addr;
vassert(e->Iex.Load.ty == Ity_F64);
am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr);
- addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 8, r_dst, am_addr));
+ addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_dst, am_addr));
return r_dst;
}
if (e->tag == Iex_Get) {
HReg r_dst = newVRegF(env);
- PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset,
- GuestStatePtr(mode64) );
- addInstr(env, PPC32Instr_FpLdSt( True/*load*/, 8, r_dst, am_addr ));
+ PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+ GuestStatePtr(mode64) );
+ addInstr(env, PPCInstr_FpLdSt( True/*load*/, 8, r_dst, am_addr ));
return r_dst;
}
//.. }
if (e->tag == Iex_Binop) {
- PPC32FpOp fpop = Pfp_INVALID;
+ PPCFpOp fpop = Pfp_INVALID;
switch (e->Iex.Binop.op) {
case Iop_AddF64: fpop = Pfp_ADD; break;
case Iop_SubF64: fpop = Pfp_SUB; break;
HReg r_dst = newVRegF(env);
HReg r_srcL = iselDblExpr(env, e->Iex.Binop.arg1);
HReg r_srcR = iselDblExpr(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_FpBinary(fpop, r_dst, r_srcL, r_srcR));
+ addInstr(env, PPCInstr_FpBinary(fpop, r_dst, r_srcL, r_srcR));
return r_dst;
}
set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
sub_from_sp( env, 16 );
- addInstr(env, PPC32Instr_FpI64toF64(fr_dst, r_src));
+ addInstr(env, PPCInstr_FpI64toF64(fr_dst, r_src));
add_to_sp( env, 16 );
/* Restore default FPU rounding. */
//.. HReg fr_dst = newVRegF(env);
//.. HReg rHi,rLo;
//.. iselInt64Expr( &rHi, &rLo, env, e->Iex.Binop.arg2);
-//.. addInstr(env, PPC32Instr_Push(PPC32RMI_Reg(rHi)));
-//.. addInstr(env, PPC32Instr_Push(PPC32RMI_Reg(rLo)));
+//.. addInstr(env, PPCInstr_Push(PPCRMI_Reg(rHi)));
+//.. addInstr(env, PPCInstr_Push(PPCRMI_Reg(rLo)));
//..
//.. /* Set host rounding mode */
//.. set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
//..
-//.. PPC32AMode* am_addr = ...
-//.. addInstr(env, PPC32Instr_FpLdSt( True/*load*/, 8, r_dst,
-//.. PPC32AMode_IR(0, GuestStatePtr ) ));
+//.. PPCAMode* am_addr = ...
+//.. addInstr(env, PPCInstr_FpLdSt( True/*load*/, 8, r_dst,
+//.. PPCAMode_IR(0, GuestStatePtr ) ));
//..
//..
-//.. addInstr(env, PPC32Instr_FpLdStI(
+//.. addInstr(env, PPCInstr_FpLdStI(
//.. True/*load*/, 8, fr_dst,
-//.. PPC32AMode_IR(0, hregPPC32_ESP())));
+//.. PPCAMode_IR(0, hregPPC_ESP())));
//..
//.. /* Restore default FPU rounding. */
//.. set_FPU_rounding_default( env );
//.. }
if (e->tag == Iex_Unop) {
- PPC32FpOp fpop = Pfp_INVALID;
+ PPCFpOp fpop = Pfp_INVALID;
switch (e->Iex.Unop.op) {
case Iop_NegF64: fpop = Pfp_NEG; break;
case Iop_AbsF64: fpop = Pfp_ABS; break;
if (fpop != Pfp_INVALID) {
HReg fr_dst = newVRegF(env);
HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_FpUnary(fpop, fr_dst, fr_src));
-//.. if (fpop != Pfp_SQRT && fpop != Xfp_NEG && fpop != Xfp_ABS)
-//.. roundToF64(env, fr_dst);
+ addInstr(env, PPCInstr_FpUnary(fpop, fr_dst, fr_src));
+//.. if (fpop != Pfp_SQRT && fpop != Xfp_NEG && fpop != Xfp_ABS)
+//.. roundToF64(env, fr_dst);
return fr_dst;
}
}
if (e->tag == Iex_Mux0X) {
if (ty == Ity_F64
&& typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
- PPC32CondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
+ PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond);
HReg frX = iselDblExpr(env, e->Iex.Mux0X.exprX);
HReg fr0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
HReg fr_dst = newVRegF(env);
HReg r_tmp = newVRegI(env);
- addInstr(env, PPC32Instr_Alu(Palu_AND, r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
- addInstr(env, PPC32Instr_FpUnary( Pfp_MOV, fr_dst, frX ));
- addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
- addInstr(env, PPC32Instr_FpCMov( cc, fr_dst, fr0 ));
+ addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp,
+ r_cond, PPCRH_Imm(False,0xFF)));
+ addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, frX ));
+ addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, r_tmp, PPCRH_Imm(False,0)));
+ addInstr(env, PPCInstr_FpCMov( cc, fr_dst, fr0 ));
return fr_dst;
}
}
- vex_printf("iselDblExpr(ppc32): No such tag(%u)\n", e->tag);
+ vex_printf("iselDblExpr(ppc): No such tag(%u)\n", e->tag);
ppIRExpr(e);
- vpanic("iselDblExpr_wrk(ppc32)");
+ vpanic("iselDblExpr_wrk(ppc)");
}
static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e )
{
//.. Bool arg1isEReg = False;
- PPC32AvOp op = Pav_INVALID;
- IRType ty = typeOfIRExpr(env->type_env,e);
+ PPCAvOp op = Pav_INVALID;
+ IRType ty = typeOfIRExpr(env->type_env,e);
vassert(e);
vassert(ty == Ity_V128);
}
if (e->tag == Iex_Get) {
- /* Guest state vectors are 16byte aligned, so don't need to worry here */
+ /* Guest state vectors are 16byte aligned,
+ so don't need to worry here */
HReg dst = newVRegV(env);
addInstr(env,
- PPC32Instr_AvLdSt( True/*load*/, 16, dst,
- PPC32AMode_IR(e->Iex.Get.offset,
- GuestStatePtr(mode64))));
+ PPCInstr_AvLdSt( True/*load*/, 16, dst,
+ PPCAMode_IR( e->Iex.Get.offset,
+ GuestStatePtr(mode64) )));
return dst;
}
if (e->tag == Iex_Load) {
- PPC32AMode* am_addr;
+ PPCAMode* am_addr;
HReg v_dst = newVRegV(env);
vassert(e->Iex.Load.ty == Ity_V128);
am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr);
- addInstr(env, PPC32Instr_AvLdSt( True/*load*/, 16, v_dst, am_addr));
+ addInstr(env, PPCInstr_AvLdSt( True/*load*/, 16, v_dst, am_addr));
return v_dst;
}
case Iop_NotV128: {
HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, arg));
+ addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, arg));
return dst;
}
HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
HReg zero = newVRegV(env);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvBinary(Pav_XOR, zero, zero, zero));
- addInstr(env, PPC32Instr_AvBin8x16(Pav_CMPEQU, dst, arg, zero));
- addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, dst));
+ addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
+ addInstr(env, PPCInstr_AvBin8x16(Pav_CMPEQU, dst, arg, zero));
+ addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
return dst;
}
HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
HReg zero = newVRegV(env);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvBinary(Pav_XOR, zero, zero, zero));
- addInstr(env, PPC32Instr_AvBin16x8(Pav_CMPEQU, dst, arg, zero));
- addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, dst));
+ addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
+ addInstr(env, PPCInstr_AvBin16x8(Pav_CMPEQU, dst, arg, zero));
+ addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
return dst;
}
HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
HReg zero = newVRegV(env);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvBinary(Pav_XOR, zero, zero, zero));
- addInstr(env, PPC32Instr_AvBin32x4(Pav_CMPEQU, dst, arg, zero));
- addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, dst));
+ addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
+ addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, dst, arg, zero));
+ addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
return dst;
}
{
HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvUn32Fx4(op, dst, arg));
+ addInstr(env, PPCInstr_AvUn32Fx4(op, dst, arg));
return dst;
}
HReg r_aligned16, r_zeros;
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
HReg dst = newVRegV(env);
- PPC32AMode *am_off0, *am_off4, *am_off8, *am_off12;
+ PPCAMode *am_off0, *am_off4, *am_off8, *am_off12;
sub_from_sp( env, 32 ); // Move SP down
/* Get a quadword aligned address within our stack space */
r_aligned16 = get_sp_aligned16( env );
- am_off0 = PPC32AMode_IR( 0, r_aligned16);
- am_off4 = PPC32AMode_IR( 4, r_aligned16);
- am_off8 = PPC32AMode_IR( 8, r_aligned16);
- am_off12 = PPC32AMode_IR( 12, r_aligned16);
+ am_off0 = PPCAMode_IR( 0, r_aligned16 );
+ am_off4 = PPCAMode_IR( 4, r_aligned16 );
+ am_off8 = PPCAMode_IR( 8, r_aligned16 );
+ am_off12 = PPCAMode_IR( 12, r_aligned16 );
/* Store zeros */
r_zeros = newVRegI(env);
- addInstr(env, PPC32Instr_LI(r_zeros, 0x0, mode64));
- addInstr(env, PPC32Instr_Store( 4, am_off0, r_zeros, mode64 ));
- addInstr(env, PPC32Instr_Store( 4, am_off4, r_zeros, mode64 ));
- addInstr(env, PPC32Instr_Store( 4, am_off8, r_zeros, mode64 ));
+ addInstr(env, PPCInstr_LI(r_zeros, 0x0, mode64));
+ addInstr(env, PPCInstr_Store( 4, am_off0, r_zeros, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_off4, r_zeros, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_off8, r_zeros, mode64 ));
/* Store r_src in low word of quadword-aligned mem */
- addInstr(env, PPC32Instr_Store( 4, am_off12, r_src, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_off12, r_src, mode64 ));
/* Load word into low word of quadword vector reg */
- addInstr(env, PPC32Instr_AvLdSt( True/*load*/, 4, dst, am_off12 ));
+ addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 4, dst, am_off12 ));
add_to_sp( env, 32 ); // Reset SP
return dst;
case Iop_64HLtoV128: {
if (!mode64) {
HReg r3, r2, r1, r0, r_aligned16;
- PPC32AMode *am_off0, *am_off4, *am_off8, *am_off12;
+ PPCAMode *am_off0, *am_off4, *am_off8, *am_off12;
HReg dst = newVRegV(env);
/* do this via the stack (easy, convenient, etc) */
sub_from_sp( env, 32 ); // Move SP down
// get a quadword aligned address within our stack space
r_aligned16 = get_sp_aligned16( env );
- am_off0 = PPC32AMode_IR( 0, r_aligned16);
- am_off4 = PPC32AMode_IR( 4, r_aligned16);
- am_off8 = PPC32AMode_IR( 8, r_aligned16);
- am_off12 = PPC32AMode_IR( 12, r_aligned16);
+ am_off0 = PPCAMode_IR( 0, r_aligned16 );
+ am_off4 = PPCAMode_IR( 4, r_aligned16 );
+ am_off8 = PPCAMode_IR( 8, r_aligned16 );
+ am_off12 = PPCAMode_IR( 12, r_aligned16 );
/* Do the less significant 64 bits */
iselInt64Expr(&r1, &r0, env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Store( 4, am_off12, r0, mode64 ));
- addInstr(env, PPC32Instr_Store( 4, am_off8, r1, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_off12, r0, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_off8, r1, mode64 ));
/* Do the more significant 64 bits */
iselInt64Expr(&r3, &r2, env, e->Iex.Binop.arg1);
- addInstr(env, PPC32Instr_Store( 4, am_off4, r2, mode64 ));
- addInstr(env, PPC32Instr_Store( 4, am_off0, r3, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_off4, r2, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_off0, r3, mode64 ));
/* Fetch result back from stack. */
- addInstr(env, PPC32Instr_AvLdSt(True/*load*/, 16, dst, am_off0));
+ addInstr(env, PPCInstr_AvLdSt(True/*ld*/, 16, dst, am_off0));
add_to_sp( env, 32 ); // Reset SP
return dst;
HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvBin32Fx4(op, dst, argL, argR));
+ addInstr(env, PPCInstr_AvBin32Fx4(op, dst, argL, argR));
return dst;
}
HReg isNanLR = newVRegV(env);
HReg isNanL = isNan(env, argL);
HReg isNanR = isNan(env, argR);
- addInstr(env, PPC32Instr_AvBinary(Pav_OR, isNanLR, isNanL, isNanR));
+ addInstr(env, PPCInstr_AvBinary(Pav_OR, isNanLR,
+ isNanL, isNanR));
- addInstr(env, PPC32Instr_AvBin32Fx4(Pavfp_CMPGTF, dst, argL, argR));
- addInstr(env, PPC32Instr_AvBinary(Pav_OR, dst, dst, isNanLR));
- addInstr(env, PPC32Instr_AvUnary(Pav_NOT, dst, dst));
+ addInstr(env, PPCInstr_AvBin32Fx4(Pavfp_CMPGTF, dst,
+ argL, argR));
+ addInstr(env, PPCInstr_AvBinary(Pav_OR, dst, dst, isNanLR));
+ addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
return dst;
}
HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvBinary(op, dst, arg1, arg2));
+ addInstr(env, PPCInstr_AvBinary(op, dst, arg1, arg2));
return dst;
}
HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvBin8x16(op, dst, arg1, arg2));
+ addInstr(env, PPCInstr_AvBin8x16(op, dst, arg1, arg2));
return dst;
}
HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvBin16x8(op, dst, arg1, arg2));
+ addInstr(env, PPCInstr_AvBin16x8(op, dst, arg1, arg2));
return dst;
}
HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
HReg dst = newVRegV(env);
- addInstr(env, PPC32Instr_AvBin32x4(op, dst, arg1, arg2));
+ addInstr(env, PPCInstr_AvBin32x4(op, dst, arg1, arg2));
return dst;
}
HReg r_src = iselVecExpr(env, e->Iex.Binop.arg1);
HReg dst = newVRegV(env);
HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_AvBin8x16(op, dst, r_src, v_shft));
+ addInstr(env, PPCInstr_AvBin8x16(op, dst, r_src, v_shft));
return dst;
}
HReg r_src = iselVecExpr(env, e->Iex.Binop.arg1);
HReg dst = newVRegV(env);
HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_AvBin16x8(op, dst, r_src, v_shft));
+ addInstr(env, PPCInstr_AvBin16x8(op, dst, r_src, v_shft));
return dst;
}
HReg r_src = iselVecExpr(env, e->Iex.Binop.arg1);
HReg dst = newVRegV(env);
HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_AvBin32x4(op, dst, r_src, v_shft));
+ addInstr(env, PPCInstr_AvBin32x4(op, dst, r_src, v_shft));
return dst;
}
HReg r_src = iselVecExpr(env, e->Iex.Binop.arg1);
HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
/* Note: shift value gets masked by 127 */
- addInstr(env, PPC32Instr_AvBinary(op, dst, r_src, v_shft));
+ addInstr(env, PPCInstr_AvBinary(op, dst, r_src, v_shft));
return dst;
}
HReg dst = newVRegV(env);
HReg v_src = iselVecExpr(env, e->Iex.Binop.arg1);
HReg v_ctl = iselVecExpr(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_AvPerm(dst, v_src, v_src, v_ctl));
+ addInstr(env, PPCInstr_AvPerm(dst, v_src, v_src, v_ctl));
return dst;
}
//.. }
// unused: vec_fail:
- vex_printf("iselVecExpr(ppc32) (subarch = %s): can't reduce\n",
+ vex_printf("iselVecExpr(ppc) (subarch = %s): can't reduce\n",
LibVEX_ppVexSubArch(env->subarch));
ppIRExpr(e);
- vpanic("iselVecExpr_wrk(ppc32)");
+ vpanic("iselVecExpr_wrk(ppc)");
}
/* --------- STORE --------- */
case Ist_Store: {
- PPC32AMode* am_addr;
+ PPCAMode* am_addr;
IRType tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
(mode64 && (tyd == Ity_I64))) {
HReg r_src = iselIntExpr_R(env, stmt->Ist.Store.data);
- addInstr(env, PPC32Instr_Store( toUChar(sizeofIRType(tyd)),
+ addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(tyd)),
am_addr, r_src, mode64 ));
return;
}
if (tyd == Ity_F64) {
HReg fr_src = iselDblExpr(env, stmt->Ist.Store.data);
- addInstr(env, PPC32Instr_FpLdSt(False/*store*/, 8, fr_src, am_addr));
+ addInstr(env,
+ PPCInstr_FpLdSt(False/*store*/, 8, fr_src, am_addr));
return;
}
if (tyd == Ity_F32) {
HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data);
- addInstr(env, PPC32Instr_FpLdSt(False/*store*/, 4, fr_src, am_addr));
+ addInstr(env,
+ PPCInstr_FpLdSt(False/*store*/, 4, fr_src, am_addr));
return;
}
//.. if (tyd == Ity_I64) {
//.. }
if (tyd == Ity_V128) {
HReg v_src = iselVecExpr(env, stmt->Ist.Store.data);
- addInstr(env, PPC32Instr_AvLdSt(False/*store*/, 16, v_src, am_addr));
+ addInstr(env,
+ PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr));
return;
}
break;
if (ty == Ity_I8 || ty == Ity_I16 ||
ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
HReg r_src = iselIntExpr_R(env, stmt->Ist.Put.data);
- PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset,
- GuestStatePtr(mode64));
- addInstr(env, PPC32Instr_Store( toUChar(sizeofIRType(ty)),
- am_addr, r_src, mode64 ));
+ PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
+ GuestStatePtr(mode64) );
+ addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(ty)),
+ am_addr, r_src, mode64 ));
return;
}
if (!mode64 && ty == Ity_I64) {
HReg rHi, rLo;
- PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset,
- GuestStatePtr(mode64));
- PPC32AMode* am_addr4 = advance4(env, am_addr);
+ PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
+ GuestStatePtr(mode64) );
+ PPCAMode* am_addr4 = advance4(env, am_addr);
iselInt64Expr(&rHi,&rLo, env, stmt->Ist.Put.data);
- addInstr(env, PPC32Instr_Store( 4, am_addr, rHi, mode64 ));
- addInstr(env, PPC32Instr_Store( 4, am_addr4, rLo, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_addr, rHi, mode64 ));
+ addInstr(env, PPCInstr_Store( 4, am_addr4, rLo, mode64 ));
return;
}
if (ty == Ity_V128) {
- /* Guest state vectors are 16byte aligned, so don't need to worry here */
+ /* Guest state vectors are 16byte aligned,
+ so don't need to worry here */
HReg v_src = iselVecExpr(env, stmt->Ist.Put.data);
- PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset,
- GuestStatePtr(mode64));
- addInstr(env, PPC32Instr_AvLdSt(False/*store*/, 16, v_src, am_addr));
+ PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
+ GuestStatePtr(mode64) );
+ addInstr(env,
+ PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr));
return;
}
//.. if (ty == Ity_F32) {
//.. }
if (ty == Ity_F64) {
HReg fr_src = iselDblExpr(env, stmt->Ist.Put.data);
- PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset,
- GuestStatePtr(mode64));
- addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr ));
+ PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
+ GuestStatePtr(mode64) );
+ addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
+ fr_src, am_addr ));
return;
}
break;
return;
}
if (ty == Ity_I1) {
- PPC32CondCode cond = iselCondCode(env, stmt->Ist.Tmp.data);
+ PPCCondCode cond = iselCondCode(env, stmt->Ist.Tmp.data);
HReg r_dst = lookupIRTemp(env, tmp);
- addInstr(env, PPC32Instr_Set32(cond, r_dst));
+ addInstr(env, PPCInstr_Set(cond, r_dst));
return;
}
if (ty == Ity_F64) {
HReg fr_dst = lookupIRTemp(env, tmp);
HReg fr_src = iselDblExpr(env, stmt->Ist.Tmp.data);
- addInstr(env, PPC32Instr_FpUnary(Pfp_MOV, fr_dst, fr_src));
+ addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src));
return;
}
if (ty == Ity_F32) {
HReg fr_dst = lookupIRTemp(env, tmp);
HReg fr_src = iselFltExpr(env, stmt->Ist.Tmp.data);
- addInstr(env, PPC32Instr_FpUnary(Pfp_MOV, fr_dst, fr_src));
+ addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src));
return;
}
if (ty == Ity_V128) {
HReg v_dst = lookupIRTemp(env, tmp);
HReg v_src = iselVecExpr(env, stmt->Ist.Tmp.data);
- addInstr(env, PPC32Instr_AvUnary(Pav_MOV, v_dst, v_src));
+ addInstr(env, PPCInstr_AvUnary(Pav_MOV, v_dst, v_src));
return;
}
break;
/* --------- MEM FENCE --------- */
case Ist_MFence:
- addInstr(env, PPC32Instr_MFence());
+ addInstr(env, PPCInstr_MFence());
return;
/* --------- INSTR MARK --------- */
/* --------- EXIT --------- */
case Ist_Exit: {
- PPC32RI* ri_dst;
- PPC32CondCode cc;
+ PPCRI* ri_dst;
+ PPCCondCode cc;
IRConstTag tag = stmt->Ist.Exit.dst->tag;
if (!mode64 && (tag != Ico_U32))
- vpanic("iselStmt(ppc32): Ist_Exit: dst is not a 32-bit value");
+ vpanic("iselStmt(ppc): Ist_Exit: dst is not a 32-bit value");
if (mode64 && (tag != Ico_U64))
vpanic("iselStmt(ppc64): Ist_Exit: dst is not a 64-bit value");
ri_dst = iselIntExpr_RI(env, IRExpr_Const(stmt->Ist.Exit.dst));
cc = iselCondCode(env,stmt->Ist.Exit.guard);
- addInstr(env, PPC32Instr_RdWrLR(True, env->savedLR));
- addInstr(env, PPC32Instr_Goto(stmt->Ist.Exit.jk, cc, ri_dst));
+ addInstr(env, PPCInstr_RdWrLR(True, env->savedLR));
+ addInstr(env, PPCInstr_Goto(stmt->Ist.Exit.jk, cc, ri_dst));
return;
}
}
stmt_fail:
ppIRStmt(stmt);
- vpanic("iselStmt(ppc32)");
+ vpanic("iselStmt(ppc)");
}
static void iselNext ( ISelEnv* env, IRExpr* next, IRJumpKind jk )
{
- PPC32CondCode cond;
- PPC32RI* ri;
+ PPCCondCode cond;
+ PPCRI* ri;
if (vex_traceflags & VEX_TRACE_VCODE) {
vex_printf("\n-- goto {");
ppIRJumpKind(jk);
}
cond = mk_PPCCondCode( Pct_ALWAYS, Pcf_7EQ );
ri = iselIntExpr_RI(env, next);
- addInstr(env, PPC32Instr_RdWrLR(True, env->savedLR));
- addInstr(env, PPC32Instr_Goto(jk, cond, ri));
+ addInstr(env, PPCInstr_RdWrLR(True, env->savedLR));
+ addInstr(env, PPCInstr_Goto(jk, cond, ri));
}
/*--- Insn selector top-level ---*/
/*---------------------------------------------------------*/
-/* Translate an entire BB to ppc32 code. */
+/* Translate an entire BB to ppc code. */
-HInstrArray* iselBB_PPC32 ( IRBB* bb, VexArchInfo* archinfo_host )
+HInstrArray* iselBB_PPC ( IRBB* bb, VexArchInfo* archinfo_host )
{
Int i, j;
HReg hreg, hregHI;
mode64 = True;
break;
default:
- vpanic("iselBB_PPC32: illegal subarch");
+ vpanic("iselBB_PPC: illegal subarch");
}
/* Make up an initial environment to use. */
case Ity_V128: hreg = mkHReg(j++, HRcVec128, True); break;
default:
ppIRType(bb->tyenv->types[i]);
- if (mode64)
- vpanic("iselBB(ppc64): IRTemp type");
- else
- vpanic("iselBB(ppc32): IRTemp type");
+ vpanic("iselBB(ppc): IRTemp type");
}
env->vregmap[i] = hreg;
env->vregmapHI[i] = hregHI;
/* Keep a copy of the link reg, so helper functions don't kill it. */
env->savedLR = newVRegI(env);
- addInstr(env, PPC32Instr_RdWrLR(False, env->savedLR));
+ addInstr(env, PPCInstr_RdWrLR(False, env->savedLR));
/* Ok, finally we can iterate over the statements. */
for (i = 0; i < bb->stmts_used; i++)
case VexArchPPC32:
mode64 = False;
- getAllocableRegs_PPC32 ( &n_available_real_regs,
- &available_real_regs, mode64 );
- isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPC32Instr;
- getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPC32Instr;
- mapRegs = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPC32Instr;
- genSpill = (HInstr*(*)(HReg,Int,Bool)) genSpill_PPC32;
- genReload = (HInstr*(*)(HReg,Int,Bool)) genReload_PPC32;
- ppInstr = (void(*)(HInstr*,Bool)) ppPPC32Instr;
- ppReg = (void(*)(HReg)) ppHRegPPC32;
- iselBB = iselBB_PPC32;
- emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPC32Instr;
+ getAllocableRegs_PPC ( &n_available_real_regs,
+ &available_real_regs, mode64 );
+ isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
+ getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPCInstr;
+ mapRegs = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPCInstr;
+ genSpill = (HInstr*(*)(HReg,Int,Bool)) genSpill_PPC;
+ genReload = (HInstr*(*)(HReg,Int,Bool)) genReload_PPC;
+ ppInstr = (void(*)(HInstr*,Bool)) ppPPCInstr;
+ ppReg = (void(*)(HReg)) ppHRegPPC;
+ iselBB = iselBB_PPC;
+ emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr;
host_is_bigendian = True;
host_word_type = Ity_I32;
vassert(vta->archinfo_guest.subarch == VexSubArchPPC32_I
case VexArchPPC64:
mode64 = True;
- getAllocableRegs_PPC32 ( &n_available_real_regs,
- &available_real_regs, mode64 );
- isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPC32Instr;
- getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPC32Instr;
- mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPC32Instr;
- genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_PPC32;
- genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_PPC32;
- ppInstr = (void(*)(HInstr*, Bool)) ppPPC32Instr;
- ppReg = (void(*)(HReg)) ppHRegPPC32;
- iselBB = iselBB_PPC32;
- emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPC32Instr;
+ getAllocableRegs_PPC ( &n_available_real_regs,
+ &available_real_regs, mode64 );
+ isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
+ getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPCInstr;
+ mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPCInstr;
+ genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_PPC;
+ genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_PPC;
+ ppInstr = (void(*)(HInstr*, Bool)) ppPPCInstr;
+ ppReg = (void(*)(HReg)) ppHRegPPC;
+ iselBB = iselBB_PPC;
+ emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr;
host_is_bigendian = True;
host_word_type = Ity_I64;
vassert(vta->archinfo_guest.subarch == VexSubArchPPC64_FI
case VexArchPPC32:
preciseMemExnsFn = guest_ppc32_state_requires_precise_mem_exns;
- disInstrFn = disInstr_PPC32;
+ disInstrFn = disInstr_PPC;
specHelper = guest_ppc32_spechelper;
guest_sizeB = sizeof(VexGuestPPC32State);
guest_word_type = Ity_I32;
case VexArchPPC64:
preciseMemExnsFn = guest_ppc64_state_requires_precise_mem_exns;
- disInstrFn = disInstr_PPC32;
+ disInstrFn = disInstr_PPC;
specHelper = guest_ppc64_spechelper;
guest_sizeB = sizeof(VexGuestPPC64State);
guest_word_type = Ity_I64;
/* Write default settings info *vai. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
- vai->subarch = VexSubArch_INVALID;
- vai->ppc32_cache_line_szB = 0;
+ vai->subarch = VexSubArch_INVALID;
+ vai->ppc_cache_line_szB = 0;
}
/* This is the only mandatory field. */
VexSubArch subarch;
/* PPC32/PPC64 only: size of cache line */
- Int ppc32_cache_line_szB;
+ Int ppc_cache_line_szB;
}
VexArchInfo;
ppc64
~~~~~
- Probably the same as ppc32.
+ Same as ppc32.
ALL GUEST ARCHITECTURES
~~~~~~~~~~~~~~~~~~~~~~~
LibVEX_default_VexArchInfo(&vex_archinfo);
vex_archinfo.subarch = VexSubArch;
- vex_archinfo.ppc32_cache_line_szB = CacheLineSize;
+ vex_archinfo.ppc_cache_line_szB = CacheLineSize;
/* */
vta.arch_guest = VexArch;
LibVEX_default_VexArchInfo(&vai_ppc32);
vai_ppc32.subarch = VexSubArchPPC32_VFI;
- vai_ppc32.ppc32_cache_line_szB = 128;
+ vai_ppc32.ppc_cache_line_szB = 128;
/* ----- Set up args for LibVEX_Translate ----- */
#if 1 /* ppc32 -> ppc32 */