printf("\n");
// ppc64
- printf("#define OFFSET_ppc64_GPR0 %3d\n",
+ printf("#define OFFSET_ppc64_GPR0 %4d\n",
offsetof(VexGuestPPC64State,guest_GPR0));
- printf("#define OFFSET_ppc64_GPR3 %3d\n",
+ printf("#define OFFSET_ppc64_GPR3 %4d\n",
offsetof(VexGuestPPC64State,guest_GPR3));
- printf("#define OFFSET_ppc64_GPR4 %3d\n",
+ printf("#define OFFSET_ppc64_GPR4 %4d\n",
offsetof(VexGuestPPC64State,guest_GPR4));
- printf("#define OFFSET_ppc64_GPR5 %3d\n",
+ printf("#define OFFSET_ppc64_GPR5 %4d\n",
offsetof(VexGuestPPC64State,guest_GPR5));
- printf("#define OFFSET_ppc64_GPR6 %3d\n",
+ printf("#define OFFSET_ppc64_GPR6 %4d\n",
offsetof(VexGuestPPC64State,guest_GPR6));
- printf("#define OFFSET_ppc64_GPR7 %3d\n",
+ printf("#define OFFSET_ppc64_GPR7 %4d\n",
offsetof(VexGuestPPC64State,guest_GPR7));
- printf("#define OFFSET_ppc64_GPR8 %3d\n",
+ printf("#define OFFSET_ppc64_GPR8 %4d\n",
offsetof(VexGuestPPC64State,guest_GPR8));
- printf("#define OFFSET_ppc64_CIA %3d\n",
+ printf("#define OFFSET_ppc64_CIA %4d\n",
offsetof(VexGuestPPC64State,guest_CIA));
- printf("#define OFFSET_ppc64_CR0_0 %3d\n",
+ printf("#define OFFSET_ppc64_CR0_0 %4d\n",
offsetof(VexGuestPPC64State,guest_CR0_0));
printf("\n");
IRExpr* guest_ppc32_spechelper ( HChar* function_name,
IRExpr** args );
+extern
+IRExpr* guest_ppc64_spechelper ( HChar* function_name,
+ IRExpr** args );
+
/* Describes to the optimser which part of the guest state require
precise memory exceptions. This is logically part of the guest
state description. */
extern
Bool guest_ppc32_state_requires_precise_mem_exns ( Int, Int );
+extern
+Bool guest_ppc64_state_requires_precise_mem_exns ( Int, Int );
+
extern
VexGuestLayout ppc32Guest_layout;
+extern
+VexGuestLayout ppc64Guest_layout;
+
/* FP Rounding mode - different encoding to IR */
typedef
/* 9 */ PPC32G_FLAG_OP_SUBFI, // subfic
/* 10 */ PPC32G_FLAG_OP_SRAW, // sraw
/* 11 */ PPC32G_FLAG_OP_SRAWI, // srawi
+ /* 12 */ PPC32G_FLAG_OP_SRAD, // srad
+ /* 13 */ PPC32G_FLAG_OP_SRADI, // sradi
PPC32G_FLAG_OP_NUMBER
};
return NULL;
}
+/* ppc64 counterpart of guest_ppc32_spechelper.  No expression
+   specialisations are implemented yet, so always return NULL
+   (meaning "no replacement available"). */
+IRExpr* guest_ppc64_spechelper ( HChar* function_name,
+                                 IRExpr** args )
+{
+   return NULL;
+}
+
/*----------------------------------------------*/
/*--- The exported fns .. ---*/
return False;
}
+/* Say whether any part of the guest state in [minoff .. maxoff]
+   requires precise memory exceptions.  We enforce precision for the
+   link register, the stack pointer (GPR1) and the current insn
+   address (CIA).  On ppc64 these registers are 8 bytes wide, so the
+   covered ranges are base + 8 - 1 (not + 4 - 1 as on ppc32). */
+Bool guest_ppc64_state_requires_precise_mem_exns ( Int minoff, 
+                                                   Int maxoff )
+{
+   Int lr_min  = offsetof(VexGuestPPC64State, guest_LR);
+   Int lr_max  = lr_min + 8 - 1;
+   Int r1_min  = offsetof(VexGuestPPC64State, guest_GPR1);
+   Int r1_max  = r1_min + 8 - 1;
+   Int cia_min = offsetof(VexGuestPPC64State, guest_CIA);
+   Int cia_max = cia_min + 8 - 1;
+
+   if (maxoff < lr_min || minoff > lr_max) {
+      /* no overlap with LR */
+   } else {
+      return True;
+   }
+
+   if (maxoff < r1_min || minoff > r1_max) {
+      /* no overlap with R1 */
+   } else {
+      return True;
+   }
+
+   if (maxoff < cia_min || minoff > cia_max) {
+      /* no overlap with CIA */
+   } else {
+      return True;
+   }
+
+   return False;
+}
+
-#define ALWAYSDEFD(field) \
+#define ALWAYSDEFD32(field) \
{ offsetof(VexGuestPPC32State, field), \
(sizeof ((VexGuestPPC32State*)0)->field) }
.n_alwaysDefd = 7,
.alwaysDefd
- = { /* 0 */ ALWAYSDEFD(guest_CIA),
- /* 1 */ ALWAYSDEFD(guest_EMWARN),
- /* 2 */ ALWAYSDEFD(guest_TISTART),
- /* 3 */ ALWAYSDEFD(guest_TILEN),
- /* 4 */ ALWAYSDEFD(guest_VSCR),
- /* 5 */ ALWAYSDEFD(guest_FPROUND),
- /* 6 */ ALWAYSDEFD(guest_RESVN)
+ = { /* 0 */ ALWAYSDEFD32(guest_CIA),
+ /* 1 */ ALWAYSDEFD32(guest_EMWARN),
+ /* 2 */ ALWAYSDEFD32(guest_TISTART),
+ /* 3 */ ALWAYSDEFD32(guest_TILEN),
+ /* 4 */ ALWAYSDEFD32(guest_VSCR),
+ /* 5 */ ALWAYSDEFD32(guest_FPROUND),
+ /* 6 */ ALWAYSDEFD32(guest_RESVN)
}
};
+/* Like ALWAYSDEFD32, but for fields of the ppc64 guest state. */
+#define ALWAYSDEFD64(field)                            \
+    { offsetof(VexGuestPPC64State, field),             \
+      (sizeof ((VexGuestPPC64State*)0)->field) }
+
+VexGuestLayout
+   ppc64Guest_layout 
+   = { 
+        /* Total size of the guest state, in bytes. */
+        .total_sizeB = sizeof(VexGuestPPC64State),
+
+        /* Describe the stack pointer.  GPR1 is 8 bytes wide on
+           ppc64, so sizeof_SP must be 8, not 4. */
+        .offset_SP = offsetof(VexGuestPPC64State,guest_GPR1),
+        .sizeof_SP = 8,
+
+        /* Describe the instruction pointer.  CIA is likewise an
+           8-byte field. */
+        .offset_IP = offsetof(VexGuestPPC64State,guest_CIA),
+        .sizeof_IP = 8,
+
+        /* Describe any sections to be regarded by Memcheck as
+           'always-defined'. */
+        .n_alwaysDefd = 7,
+
+        .alwaysDefd 
+	 = { /*  0 */ ALWAYSDEFD64(guest_CIA),
+	     /*  1 */ ALWAYSDEFD64(guest_EMWARN),
+	     /*  2 */ ALWAYSDEFD64(guest_TISTART),
+	     /*  3 */ ALWAYSDEFD64(guest_TILEN),
+	     /*  4 */ ALWAYSDEFD64(guest_VSCR),
+	     /*  5 */ ALWAYSDEFD64(guest_FPROUND),
+	     /*  6 */ ALWAYSDEFD64(guest_RESVN)
+	   }
+   };
/*---------------------------------------------------------------*/
/*--- end guest-ppc32/ghelpers.c ---*/
vassert(len > 0 && len <= 64);
for (i=len; i>0; i--) {
- vex_printf("%d", ((x & (1<<(len-1))) != 0) );
+ vex_printf("%d", ((x & (((ULong)1)<<(len-1))) != 0) );
x = x << 1;
if (((i-1)%4)==0 && (i > 1) && spaces) {
vex_printf(" ");
/*--- Misc Helpers ---*/
/*------------------------------------------------------------*/
-/* Generate mask with 1's from 'begin' through 'end' (0=lsb),
- wrapping if begin > end. */
+/* Generate mask with 1's from 'begin' through 'end',
+ wrapping if begin > end.
+ begin->end works from right to left, 0=lsb
+*/
static UInt MASK32( UInt begin, UInt end )
{
vassert(begin < 32);
return mask;
}
-static Addr64 nextInsnAddr() { return guest_CIA_curr_instr + 4; }
+/* Address of the insn following the one currently being translated:
+   all ppc instructions are 4 bytes long, in both 32- and 64-bit
+   modes. */
+static Addr64 nextInsnAddr( void )
+{
+   return guest_CIA_curr_instr + 4;
+}
/*------------------------------------------------------------*/
IRExpr* rot_amt )
{
IRExpr *mask, *rot;
- vassert(typeOfIRExpr(irbb->tyenv,rot_amt) == Ity_I32);
+ vassert(typeOfIRExpr(irbb->tyenv,rot_amt) == Ity_I8);
- if (mode64) {
- vassert(typeOfIRExpr(irbb->tyenv,src) == Ity_I64);
+ if (typeOfIRExpr(irbb->tyenv,src) == Ity_I64) {
// rot = (src << rot_amt) | (src >> (64-rot_amt))
mask = binop(Iop_And8, rot_amt, mkU8(63));
rot = binop(Iop_Or64,
binop(Iop_Shl64, src, mask),
binop(Iop_Shr64, src, binop(Iop_Sub8, mkU8(64), mask)));
} else {
- vassert(typeOfIRExpr(irbb->tyenv,src) == Ity_I32);
// rot = (src << rot_amt) | (src >> (32-rot_amt))
- mask = unop(Iop_32to8, binop(Iop_And32, rot_amt, mkU32(31)));
+ mask = binop(Iop_And8, rot_amt, mkU8(31));
rot = binop(Iop_Or32,
binop(Iop_Shl32, src, mask),
binop(Iop_Shr32, src, binop(Iop_Sub8, mkU8(32), mask)));
}
/* Note: the MuxOX is not merely an optimisation; it's needed
- because otherwise the Shr32 is a shift by the word size when
+ because otherwise the Shr is a shift by the word size when
mask denotes zero. For rotates by immediates, a lot of
this junk gets folded out. */
return IRExpr_Mux0X( mask, /* zero rotate */ src,
case /* 0 */ PPC32G_FLAG_OP_ADD:
/* res <u argL */
xer_ca
- = binop(Iop_CmpLT64U, res, argL);
+ = unop(Iop_1Uto32, binop(Iop_CmpLT64U, res, argL));
break;
case /* 1 */ PPC32G_FLAG_OP_ADDE:
= mkOR1(
binop(Iop_CmpLT64U, res, argL),
mkAND1(
- binop(Iop_CmpEQ64, oldca, mkU32(1)),
+ binop(Iop_CmpEQ64, oldca, mkU64(1)),
binop(Iop_CmpEQ64, res, argL)
)
);
+ xer_ca
+ = unop(Iop_1Uto32, xer_ca);
break;
case /* 8 */ PPC32G_FLAG_OP_SUBFE:
binop(Iop_CmpEQ64, res, argR)
)
);
+ xer_ca
+ = unop(Iop_1Uto32, xer_ca);
break;
case /* 7 */ PPC32G_FLAG_OP_SUBFC:
case /* 9 */ PPC32G_FLAG_OP_SUBFI:
/* res <=u argR */
xer_ca
- = binop(Iop_CmpLE64U, res, argR);
+ = unop(Iop_1Uto32, binop(Iop_CmpLE64U, res, argR));
break;
-#if 0 // CAB: TODO
-
      case /* 10 */ PPC32G_FLAG_OP_SRAW:
-         /* The shift amount is guaranteed to be in 0 .. 63 inclusive.
+         /* The shift amount is guaranteed to be in 0 .. 63 inclusive.
            If it is <= 31, behave like SRAWI; else XER.CA is the sign
            bit of argL. */
-         /* This term valid for shift amount < 32 only */
+         /* This term valid for shift amount < 32 only */
+
         xer_ca
            = binop(
-                 Iop_And32,
-                 binop(Iop_Sar32, argL, mkU8(31)),
-                 binop( Iop_And32,
+                 Iop_And64,
+                 binop(Iop_Sar64, argL, mkU8(31)),
+                 binop( Iop_And64,
                        argL,
-                       binop( Iop_Sub32,
-                              binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,argR)),
-                              mkU32(1) )
+                       binop( Iop_Sub64,
+                              binop(Iop_Shl64, mkU64(1), unop(Iop_64to8,argR)),
+                              mkU64(1) )
                 )
              );
         xer_ca
            = IRExpr_Mux0X(
                 /* shift amt > 31 ? */
-                unop(Iop_1Uto8, binop(Iop_CmpLT32U, mkU32(31), argR)),
+                unop(Iop_1Uto8, binop(Iop_CmpLT64U, mkU64(31), argR)),
                 /* no -- be like srawi */
-                binop(Iop_CmpNE32, xer_ca, mkU32(0)),
+                unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))),
                 /* yes -- get sign bit of argL */
-                unop(Iop_32to1, binop(Iop_Shr32, argL, mkU8(31)))
+                unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63)))
              );
         break;
case /* 11 */ PPC32G_FLAG_OP_SRAWI:
- /* xer_ca is 1 iff src was negative and bits_shifted_out !=
- 0. Since the shift amount is known to be in the range
- 0 .. 31 inclusive the following seems viable:
+ /* xer_ca is 1 iff src was negative and bits_shifted_out != 0.
+ Since the shift amount is known to be in the range 0 .. 31
+ inclusive the following seems viable:
xer.ca == 1 iff the following is nonzero:
(argL >>s 31) -- either all 0s or all 1s
& (argL & (1<<argR)-1) -- the stuff shifted out */
+
xer_ca
= binop(
- Iop_And32,
- binop(Iop_Sar32, argL, mkU8(31)),
- binop( Iop_And32,
+ Iop_And64,
+ binop(Iop_Sar64, argL, mkU8(31)),
+ binop( Iop_And64,
argL,
- binop( Iop_Sub32,
- binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,argR)),
- mkU32(1) )
+ binop( Iop_Sub64,
+ binop(Iop_Shl64, mkU64(1), unop(Iop_64to8,argR)),
+ mkU64(1) )
)
);
xer_ca
- = binop(Iop_CmpNE32, xer_ca, mkU32(0));
+ = unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)));
break;
-#endif
+
+      case /* 12 */ PPC32G_FLAG_OP_SRAD:
+         vassert(0);  // AWAITING TEST CASE
+
+         /* The shift amount is guaranteed to be in 0 .. 63 inclusive.
+            If it is <= 63, behave like SRADI; else XER.CA is the sign
+            bit of argL. */
+         /* This term valid for shift amount < 63 only */
+
+         xer_ca
+            = binop(
+                 Iop_And64,
+                 binop(Iop_Sar64, argL, mkU8(63)),
+                 binop( Iop_And64,
+                        argL,
+                        binop( Iop_Sub64,
+                               binop(Iop_Shl64, mkU64(1), unop(Iop_64to8,argR)),
+                               mkU64(1) )
+                 )
+              );
+         xer_ca 
+            = IRExpr_Mux0X(
+                 /* shift amt > 63 ?  */
+                 unop(Iop_1Uto8, binop(Iop_CmpLT64U, mkU64(63), argR)),
+                 /* no -- be like sradi.  xer_ca is a 64-bit value
+                    here, so compare with CmpNE64/mkU64, not the
+                    32-bit ops (which were a copy-paste slip). */
+                 unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))),
+                 /* yes -- get sign bit of argL */
+                 unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63)))
+              );
+         break;
+
+
+ case /* 13 */ PPC32G_FLAG_OP_SRADI:
+ /* xer_ca is 1 iff src was negative and bits_shifted_out != 0.
+ Since the shift amount is known to be in the range 0 .. 63
+ inclusive, the following seems viable:
+ xer.ca == 1 iff the following is nonzero:
+ (argL >>s 63) -- either all 0s or all 1s
+ & (argL & (1<<argR)-1) -- the stuff shifted out */
+
+ xer_ca
+ = binop(
+ Iop_And64,
+ binop(Iop_Sar64, argL, mkU8(63)),
+ binop( Iop_And64,
+ argL,
+ binop( Iop_Sub64,
+ binop(Iop_Shl64, mkU64(1), unop(Iop_64to8,argR)),
+ mkU64(1) )
+ )
+ );
+ xer_ca
+ = unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)));
+ break;
+
default:
vex_printf("set_XER_CA: op = %u\n", op);
- vpanic("set_XER_CA(ppc32)");
+ vpanic("set_XER_CA(ppc64)");
}
/* xer_ca MUST denote either 0 or 1, no other value allowed */
- putXER_CA( unop(Iop_1Uto8, xer_ca) );
+ putXER_CA( unop(Iop_32to8, xer_ca) );
}
static void set_XER_CA ( IRType ty, UInt op, IRExpr* res,
/*--- Read/write to guest-state --- */
/*------------------------------------------------------------*/
-static IRExpr* /* :: Ity_I32 */ getGST ( PPC_GST reg )
+static IRExpr* /* :: Ity_I32/64 */ getGST ( PPC_GST reg )
{
IRType ty = mode64 ? Ity_I64 : Ity_I32;
switch (reg) {
}
default:
+ vex_printf("getGST_masked(ppc32): %u", reg);
vpanic("getGST_masked(ppc32)");
}
flag_OE ? "o" : "", flag_rC ? "." : "",
rD_addr, rA_addr, rB_addr);
if (mode64) {
- DIP(" => mode64 not implemented\n");
- return False;
/* Note:
XER settings are mode independent, and reflect the
overflow of the low-order 32bit result
CR0[LT|GT|EQ] are undefined if flag_rC && mode64
*/
-#if 0
IRExpr* dividend = unop(Iop_32Sto64,
unop(Iop_64to32, mkexpr(rA)));
IRExpr* divisor = unop(Iop_32Sto64,
unop(Iop_64to32,
binop(Iop_DivS64, dividend, divisor))) );
if (flag_OE) {
- // CAB: what do we need to compare here?
- // rA|rB, or dividend|divisor with rD
set_XER_OV( ty, PPC32G_FLAG_OP_DIVW,
- mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+ mkexpr(rD), dividend, divisor );
}
-#endif
} else {
assign( rD, binop(Iop_DivS32, mkexpr(rA), mkexpr(rB)) );
if (flag_OE) {
flag_OE ? "o" : "", flag_rC ? "." : "",
rD_addr, rA_addr, rB_addr);
if (mode64) {
- DIP(" => mode64 not implemented\n");
- return False;
/* Note:
XER settings are mode independent, and reflect the
overflow of the low-order 32bit result
CR0[LT|GT|EQ] are undefined if flag_rC && mode64
*/
-#if 0
IRExpr* dividend = unop(Iop_32Uto64,
unop(Iop_64to32, mkexpr(rA)));
IRExpr* divisor = unop(Iop_32Uto64,
unop(Iop_64to32,
binop(Iop_DivU64, dividend, divisor))) );
if (flag_OE) {
- // CAB: swap rA for dividend, rB for divisor?
- // zero top half of rD ?
set_XER_OV( ty, PPC32G_FLAG_OP_DIVWU,
- mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+ mkexpr(rD), dividend, divisor );
}
-#endif
} else {
assign( rD, binop(Iop_DivU32, mkexpr(rA), mkexpr(rB)) );
if (flag_OE) {
break;
}
+
+ /* 64bit Arithmetic */
+ case 0x49: // mulhd (Multiply High Double Word, PPC64 p539)
+ if (flag_OE != 0) {
+ vex_printf("dis_int_arith(PPC32)(mulhd,flagOE)\n");
+ return False;
+ }
+ DIP("mulhd%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ rD_addr, rA_addr, rB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( rD, unop(Iop_128HIto64,
+ binop(Iop_MullU64,
+ mkexpr(rA), mkexpr(rB))) );
+ */
+
+ case 0x9: // mulhdu (Multiply High Double Word Unsigned, PPC64 p540)
+ if (flag_OE != 0) {
+ vex_printf("dis_int_arith(PPC32)(mulhdu,flagOE)\n");
+ return False;
+ }
+ DIP("mulhdu%s r%u,r%u,r%u\n", flag_rC ? "." : "",
+ rD_addr, rA_addr, rB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( rD, unop(Iop_128HIto64,
+ binop(Iop_MullU64,
+ mkexpr(rA), mkexpr(rB))) );
+ */
+
+ case 0xE9: // mulld (Multiply Low Double Word, PPC64 p543)
+ DIP("mulld%s%s r%u,r%u,r%u\n",
+ flag_OE ? "o" : "", flag_rC ? "." : "",
+ rD_addr, rA_addr, rB_addr);
+ assign( rD, binop(Iop_Mul64, mkexpr(rA), mkexpr(rB)) );
+ if (flag_OE) {
+ set_XER_OV( ty, PPC32G_FLAG_OP_MULLW,
+ mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+ }
+ break;
+
+ case 0x1E9: // divd (Divide Double Word, PPC64 p419)
+ DIP("divd%s%s r%u,r%u,r%u\n",
+ flag_OE ? "o" : "", flag_rC ? "." : "",
+ rD_addr, rA_addr, rB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( rD, binop(Iop_DivS64, mkexpr(rA), mkexpr(rB)) );
+ if (flag_OE) {
+ set_XER_OV( ty, PPC32G_FLAG_OP_DIVW,
+ mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+ }
+
+ if invalid divide (rA==0x8000_0000_0000_0000 && rB==-1, OR rB==0)
+ rD undefined, CR[LT,GT,EQ] undefined
+ flag_OE ? XER: set OV
+ */
+ /* Note:
+ if (0x8000_0000 / -1) or (x / 0)
+ => rD=undef, if(flag_rC) CR7=undef, if(flag_OE) XER_OV=1
+ => But _no_ exception raised. */
+
+ case 0x1C9: // divdu (Divide Double Word Unsigned, PPC64 p420)
+ DIP("divdu%s%s r%u,r%u,r%u\n",
+ flag_OE ? "o" : "", flag_rC ? "." : "",
+ rD_addr, rA_addr, rB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( rD, binop(Iop_DivU64, mkexpr(rA), mkexpr(rB)) );
+ if (flag_OE) {
+ set_XER_OV( PPC32G_FLAG_OP_DIVWU,
+ mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+ }
+
+ if invalid divide (rB==0)
+ rD undefined, CR[LT,GT,EQ] undefined
+ flag_OE ? XER: set OV
+ */
+ /* Note: ditto comment divd, for (x / 0) */
+
default:
vex_printf("dis_int_arith(PPC32)(opc2)\n");
return False;
}
break;
+
default:
vex_printf("dis_int_arith(PPC32)(opc1)\n");
return False;
mkexpr(rS), mkexpr(rB)) );
break;
+
+ /* 64bit Integer Logical Instructions */
+ case 0x3DA: // extsw (Extend Sign Word, PPC64 p430)
+ if (rB_addr!=0) {
+ vex_printf("dis_int_logic(PPC32)(extsw,rB_addr)\n");
+ return False;
+ }
+ DIP("extsw%s r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr);
+ assign(rA, unop(Iop_32Sto64, unop(Iop_64to32, mkexpr(rS))));
+ break;
+
+ case 0x03A: // cntlzd (Count Leading Zeros DW, PPC64 p401)
+ if (rB_addr!=0) {
+ vex_printf("dis_int_logic(PPC32)(cntlzd,rB_addr)\n");
+ return False;
+ }
+ DIP("cntlzd%s r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ // Iop_Clz64 undefined for arg==0, so deal with that case:
+ irx = binop(Iop_CmpNE64, mkexpr(rS), mkU64(0));
+ assign(rA, IRExpr_Mux0X( unop(Iop_1Uto8, irx),
+ mkU64(64),
+ unop(Iop_Clz64, mkexpr(rS)) ));
+ */
+
default:
vex_printf("dis_int_logic(PPC32)(opc2)\n");
return False;
*/
static Bool dis_int_rot ( UInt theInstr )
{
- /* M-Form */
+ /* M-Form, MDS-Form */
UChar opc1 = ifieldOPC(theInstr);
UChar rS_addr = ifieldRegDS(theInstr);
UChar rA_addr = ifieldRegA(theInstr);
UChar sh_imm = rB_addr;
UChar MaskBeg = toUChar( IFIELD( theInstr, 6, 5 ) );
UChar MaskEnd = toUChar( IFIELD( theInstr, 1, 5 ) );
+ UChar msk_imm = toUChar( IFIELD( theInstr, 5, 6 ) );
+ UChar opc2 = toUChar( IFIELD( theInstr, 2, 3 ) );
+ UChar b1 = ifieldBIT1(theInstr);
UChar flag_rC = ifieldBIT0(theInstr);
IRType ty = mode64 ? Ity_I64 : Ity_I32;
IRTemp rA = newTemp(ty);
IRTemp rB = newTemp(ty);
IRExpr *r;
+ UInt mask32;
+ ULong mask64;
assign( rS, getIReg(rS_addr) );
assign( rB, getIReg(rB_addr) );
if (mode64) {
// tmp32 = (ROTL(rS_Lo32, Imm)
// rA = ((tmp32 || tmp32) & mask64) | (rA & ~mask64)
- ULong mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
+ mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
r = unop(Iop_32Uto64, r);
r = binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32)));
assign( rA,
binop(Iop_Or64,
- binop(Iop_And64, r, mkU32(mask64)),
- binop(Iop_And64, getIReg(rA_addr), mkU32(~mask64))) );
+ binop(Iop_And64, r, mkU64(mask64)),
+ binop(Iop_And64, getIReg(rA_addr), mkU64(~mask64))) );
}
else {
// rA = (ROTL(rS, Imm) & mask) | (rA & ~mask);
- UInt mask = MASK32(31-MaskEnd, 31-MaskBeg);
- r = ROTL(mkexpr(rS), mkU32(sh_imm));
+ mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
+ r = ROTL(mkexpr(rS), mkU8(sh_imm));
assign( rA,
binop(Iop_Or32,
- binop(Iop_And32, mkU32(mask), r),
- binop(Iop_And32, getIReg(rA_addr), mkU32(~mask))) );
+ binop(Iop_And32, mkU32(mask32), r),
+ binop(Iop_And32, getIReg(rA_addr), mkU32(~mask32))) );
}
break;
}
vassert(sh_imm < 32);
if (mode64) {
- ULong mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
+ mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? "." : "",
rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
// tmp32 = (ROTL(rS_Lo32, Imm)
r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
r = unop(Iop_32Uto64, r);
r = binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32)));
- assign( rA, binop(Iop_And64, r, mkU32(mask64)) );
+ assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
}
else {
if (MaskBeg == 0 && sh_imm+MaskEnd == 31) {
}
else {
/* General case. */
- UInt mask = MASK32(31-MaskEnd, 31-MaskBeg);
+ mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? "." : "",
rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
// rA = ROTL(rS, Imm) & mask
- assign( rA, binop(Iop_And32, ROTL(mkexpr(rS), mkU32(sh_imm)),
- mkU32(mask)) );
+ assign( rA, binop(Iop_And32, ROTL(mkexpr(rS), mkU8(sh_imm)),
+ mkU32(mask32)) );
}
}
break;
DIP("rlwnm%s r%u,r%u,r%u,%d,%d\n", flag_rC ? "." : "",
rA_addr, rS_addr, rB_addr, MaskBeg, MaskEnd);
if (mode64) {
- ULong mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
+ mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
// tmp32 = (ROTL(rS_Lo32, rB[0-4])
// rA = ((tmp32 || tmp32) & mask64)
// note, ROTL does the masking, so we don't do it here
unop(Iop_32to8, mkexpr(rB)) );
r = unop(Iop_32Uto64, r);
r = binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32)));
- assign( rA, binop(Iop_And64, r, mkU32(mask64)) );
+ assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
} else {
- UInt mask = MASK32(31-MaskEnd, 31-MaskBeg);
+ mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
// rA = ROTL(rS, rB[0-4]) & mask
// note, ROTL does the masking, so we don't do it here
assign( rA, binop(Iop_And32,
- ROTL(mkexpr(rS), mkexpr(rB)),
- mkU32(mask)) );
+ ROTL(mkexpr(rS), unop(Iop_32to8, mkexpr(rB))),
+ mkU32(mask32)) );
}
break;
}
+
+ /* 64bit Integer Rotates */
+ case 0x1E: {
+ msk_imm = ((msk_imm & 1) << 5) | (msk_imm >> 1);
+ sh_imm |= b1 << 5;
+
+ vassert( msk_imm < 64 );
+ vassert( sh_imm < 64 );
+
+ switch (opc2) {
+         case 0x4:
+            /*
+              n = lowest 6bits of rB
+              r = ROTL64(rS,n)
+              Note: the rotate amount comes from register rB, not from
+              the sh_imm field (sh_imm here is just the rB register
+              number).  ROTL masks the amount to 6 bits itself.
+            */
+            if (b1 == 0) { // rldcl (Rotate Left DW then Clear Left, PPC64 p555)
+               DIP("rldcl%s r%u,r%u,r%u,%u\n", flag_rC ? "." : "",
+                   rA_addr, rS_addr, rB_addr, msk_imm);
+               r = ROTL(mkexpr(rS), unop(Iop_64to8, mkexpr(rB)));
+               mask64 = MASK64(0, 63-msk_imm);
+               assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+               break;
+            } else {       // rldcr (Rotate Left DW then Clear Right, PPC64 p556)
+               DIP("rldcr%s r%u,r%u,r%u,%u\n", flag_rC ? "." : "",
+                   rA_addr, rS_addr, rB_addr, msk_imm);
+               r = ROTL(mkexpr(rS), unop(Iop_64to8, mkexpr(rB)));
+               mask64 = MASK64(63-msk_imm, 63);
+               assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+               break;
+            }
+            break;
+
+ case 0x2: // rldic (Rotate Left DW Imm then Clear, PPC64 p557)
+ DIP("rldic%s r%u,r%u,%u,%u\n", flag_rC ? "." : "",
+ rA_addr, rS_addr, sh_imm, msk_imm);
+ r = ROTL(mkexpr(rS), mkU8(sh_imm));
+ mask64 = MASK64(sh_imm, 63-msk_imm);
+ assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+ break;
+ // later: deal with special case: (msk_imm==0) => SHL(sh_imm)
+ /*
+ Hmm... looks like this'll do the job more simply:
+ r = SHL(rS, sh_imm)
+ m = ~(1 << (63-msk_imm))
+ assign(rA, r & m);
+ */
+
+ case 0x0: // rldicl (Rotate Left DW Imm then Clear Left, PPC64 p558)
+ DIP("rldicl%s r%u,r%u,%u,%u\n", flag_rC ? "." : "",
+ rA_addr, rS_addr, sh_imm, msk_imm);
+ r = ROTL(mkexpr(rS), mkU8(sh_imm));
+ mask64 = MASK64(0, 63-msk_imm);
+ assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+ break;
+ // later: deal with special case: (msk_imm + sh_imm == 63) => SHR(63 - sh_imm)
+
+ case 0x1: // rldicr (Rotate Left DW Imm then Clear Right, PPC64 p559)
+ DIP("rldicr%s r%u,r%u,%u,%u\n", flag_rC ? "." : "",
+ rA_addr, rS_addr, sh_imm, msk_imm);
+ r = ROTL(mkexpr(rS), mkU8(sh_imm));
+ mask64 = MASK64(63-msk_imm, 63);
+ assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+ break;
+ // later: deal with special case: (msk_imm == sh_imm) => SHL(sh_imm)
+
+ case 0x3: // rldimi (Rotate Left DW Imm then Mask Insert, PPC64 p560)
+ DIP("rldimi%s r%u,r%u,%u,%u\n", flag_rC ? "." : "",
+ rA_addr, rS_addr, sh_imm, msk_imm);
+ r = ROTL(mkexpr(rS), mkU8(sh_imm));
+ mask64 = MASK64(sh_imm, 63-msk_imm);
+ assign( rA, binop(Iop_Or64,
+ binop(Iop_And64, mkU64(mask64), r),
+ binop(Iop_And64, mkU64(~mask64), mkexpr(rA))) );
+ break;
+
+ default:
+ vex_printf("dis_int_rot(PPC32)(opc2)\n");
+ return False;
+ }
+ break;
+ }
+
default:
vex_printf("dis_int_rot(PPC32)(opc1)\n");
return False;
*/
static Bool dis_int_load ( UInt theInstr )
{
- /* D-Form, X-Form */
+ /* D-Form, X-Form, DS-Form */
UChar opc1 = ifieldOPC(theInstr);
UChar rD_addr = ifieldRegDS(theInstr);
UChar rA_addr = ifieldRegA(theInstr);
UInt uimm16 = ifieldUIMM16(theInstr);
UChar rB_addr = ifieldRegB(theInstr);
UInt opc2 = ifieldOPClo10(theInstr);
+ UChar b1 = ifieldBIT1(theInstr);
UChar b0 = ifieldBIT0(theInstr);
Int simm16 = extend_s_16to32(uimm16);
IRType ty = mode64 ? Ity_I64 : Ity_I32;
- // IRTemp rA = newTemp(ty);
- // IRTemp rB = newTemp(ty);
IRTemp EA = newTemp(ty);
IRExpr* val;
- // assign( rA, getIReg(rA_addr) );
- // assign( rB, getIReg(rB_addr) );
-
- if (opc1 != 0x1F) {
- assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
- } else {
+ switch (opc1) {
+ case 0x1F: // register offset
assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+ break;
+ case 0x3A: // immediate offset: 64bit
+ simm16 = simm16 & 0xFFFFFFFC;
+ default: // immediate offset
+ assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
+ break;
}
switch (opc1) {
putIReg( rD_addr, mkSzWiden32(ty, val, False) );
break;
+
+ /* 64bit Loads */
+ case 0x035: // ldux (Load DW with Update Indexed, PPC64 p475)
+ if (rA_addr == 0 || rA_addr == rD_addr) {
+ vex_printf("dis_int_load(PPC32)(ldux,rA_addr|rD_addr)\n");
+ return False;
+ }
+ DIP("ldux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+ putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) );
+ putIReg( rA_addr, mkexpr(EA) );
+ break;
+
+ case 0x015: // ldx (Load DW Indexed, PPC64 p476)
+ DIP("ldx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+ putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) );
+ break;
+
+ case 0x175: // lwaux (Load W Algebraic with Update Indexed, PPC64 p501)
+ if (rA_addr == 0 || rA_addr == rD_addr) {
+ vex_printf("dis_int_load(PPC32)(lwaux,rA_addr|rD_addr)\n");
+ return False;
+ }
+ DIP("lwaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+ putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
+ putIReg( rA_addr, mkexpr(EA) );
+ break;
+
+ case 0x155: // lwax (Load W Algebraic Indexed, PPC64 p502)
+ DIP("lwax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+ putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
+ break;
+
default:
vex_printf("dis_int_load(PPC32)(opc2)\n");
return False;
}
break;
+
+      /* DS Form - 64bit Loads.  simm16 has already been masked to a
+         multiple of 4 (and EA computed from it) in the opc1 switch
+         above, so no per-case re-masking is needed here. */
+      case 0x3A:
+         switch (b1<<1 | b0) {
+         case 0x0: // ld (Load Double Word, PPC64 p472)
+            DIP("ld r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
+            putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) );
+            break;
+
+         case 0x1: // ldu (Load Double Word with Update, PPC64 p474)
+            if (rA_addr == 0 || rA_addr == rD_addr) {
+               vex_printf("dis_int_load(PPC32)(ldu,rA_addr|rD_addr)\n");
+               return False;
+            }
+            DIP("ldu r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
+            putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) );
+            putIReg( rA_addr, mkexpr(EA) );
+            break;
+
+         case 0x2: // lwa (Load Word Algebraic, PPC64 p499)
+            DIP("lwa r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
+            putIReg( rD_addr, unop(Iop_32Sto64, loadBE(Ity_I32, mkexpr(EA))) );
+            break;
+
+         default:
+            vex_printf("dis_int_load(PPC32)(0x3A, opc2)\n");
+            return False;
+         }
+         break;
+
default:
vex_printf("dis_int_load(PPC32)(opc1)\n");
return False;
*/
static Bool dis_int_store ( UInt theInstr )
{
- /* D-Form, X-Form */
+ /* D-Form, X-Form, DS-Form */
UChar opc1 = ifieldOPC(theInstr);
UInt rS_addr = ifieldRegDS(theInstr);
UInt rA_addr = ifieldRegA(theInstr);
UInt uimm16 = ifieldUIMM16(theInstr);
UInt rB_addr = ifieldRegB(theInstr);
UInt opc2 = ifieldOPClo10(theInstr);
+ UChar b1 = ifieldBIT1(theInstr);
UChar b0 = ifieldBIT0(theInstr);
Int simm16 = extend_s_16to32(uimm16);
assign( rB, getIReg(rB_addr) );
assign( rS, getIReg(rS_addr) );
- if (opc1 != 0x1F) {
- assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
- } else {
+ switch (opc1) {
+ case 0x1F: // register offset
assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+ break;
+ case 0x3E: // immediate offset: 64bit
+ simm16 = simm16 & 0xFFFFFFFC;
+ default: // immediate offset
+ assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
+ break;
}
switch (opc1) {
storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) );
break;
- /* X Form */
+ /* X Form : all these use EA_indexed */
case 0x1F:
if (b0 != 0) {
vex_printf("dis_int_store(PPC32)(0x1F,b0)\n");
storeBE( mkexpr(EA), mkSzNarrow32(ty, mkexpr(rS)) );
break;
+
+ /* 64bit Stores */
+ case 0x0B5: // stdux (Store DW with Update Indexed, PPC64 p584)
+ if (rA_addr == 0) {
+ vex_printf("dis_int_store(PPC32)(stdux,rA_addr)\n");
+ return False;
+ }
+ DIP("stdux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+ putIReg( rA_addr, mkexpr(EA) );
+ storeBE( mkexpr(EA), mkexpr(rS) );
+ break;
+
+ case 0x095: // stdx (Store DW Indexed, PPC64 p585)
+ DIP("stdx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+ storeBE( mkexpr(EA), mkexpr(rS) );
+ break;
+
default:
vex_printf("dis_int_store(PPC32)(opc2)\n");
return False;
}
break;
+
+      /* DS Form - 64bit Stores */
+      case 0x3E:
+         switch (b1<<1 | b0) {
+         case 0x0: // std (Store Double Word, PPC64 p580)
+            DIP("std r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+            storeBE( mkexpr(EA), mkexpr(rS) );
+            break;
+
+         case 0x1: // stdu (Store Double Word with Update, PPC64 p583)
+            /* rA == 0 is an invalid form, as for stdux above. */
+            if (rA_addr == 0) {
+               vex_printf("dis_int_store(PPC32)(stdu,rA_addr)\n");
+               return False;
+            }
+            DIP("stdu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+            putIReg( rA_addr, mkexpr(EA) );
+            storeBE( mkexpr(EA), mkexpr(rS) );
+            break;
+
+         default:
+            /* this is dis_int_store under opc1 0x3E, not dis_int_load
+               under 0x3A -- the message previously said the latter */
+            vex_printf("dis_int_store(PPC32)(0x3E, opc2)\n");
+            return False;
+         }
+         break;
+
default:
vex_printf("dis_int_store(PPC32)(opc1)\n");
return False;
{
IRType ty = mode64 ? Ity_I64 : Ity_I32;
IRTemp ok = newTemp(Ity_I32);
+ IRExpr* ctr_lo32;
- if ((BO >> 2) & 1) {
+ if ((BO >> 2) & 1) { // independent of ctr
assign( ok, mkU32(0xFFFFFFFF) );
} else {
- if ((BO >> 1) & 1) {
+ ctr_lo32 = mkSzNarrow32(ty, getGST( PPC_GST_CTR ));
+ if ((BO >> 1) & 1) { // ctr == 0 ?
assign( ok, unop( Iop_1Sto32,
- binop( mkSzOp(ty, Iop_CmpEQ8),
- getGST( PPC_GST_CTR ),
- mkSzImm(ty, 0))) );
- } else {
+ binop( Iop_CmpEQ32, ctr_lo32, mkU32(0))) );
+ } else { // ctr != 0 ?
assign( ok, unop( Iop_1Sto32,
- binop( mkSzOp(ty, Iop_CmpNE8),
- getGST( PPC_GST_CTR ),
- mkSzImm(ty, 0))) );
+ binop( Iop_CmpNE32, ctr_lo32, mkU32(0))) );
}
}
return mkexpr(ok);
UInt opc2 = ifieldOPClo10(theInstr);
UChar b0 = ifieldBIT0(theInstr);
- IRType ty = mode64 ? Ity_I64 : Ity_I32;
- IRTemp crbD = newTemp(ty);
- IRTemp crbA = newTemp(ty);
- IRTemp crbB = newTemp(ty);
+ IRTemp crbD = newTemp(Ity_I32);
+ IRTemp crbA = newTemp(Ity_I32);
+ IRTemp crbB = newTemp(Ity_I32);
if (opc1 != 19 || b0 != 0) {
vex_printf("dis_cond_logic(PPC32)(opc1)\n");
case 0x096: {
// stwcx. (Store Word Conditional Indexed, PPC32 p532)
- IRTemp resaddr = newTemp(Ity_I32);
+ IRTemp resaddr = newTemp(ty);
if (b0 != 1) {
vex_printf("dis_memsync(PPC32)(stwcx.,b0)\n");
return False;
are carried through to the generated code. */
stmt( IRStmt_MFence() );
break;
+
+
+ /* 64bit Memsync */
+ case 0x054: // ldarx (Load DW and Reserve Indexed, PPC64 p473)
+ if (b0 != 0) {
+ vex_printf("dis_memsync(PPC32)(ldarx,b0)\n");
+ return False;
+ }
+ DIP("ldarx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( EA, ea_standard(rA_addr, rB_addr) );
+ putIReg( rD_addr, loadBE(Ity_I64, mkexpr(EA)) );
+ // Take a reservation
+ stmt( IRStmt_Put( OFFB_RESVN, mkexpr(EA) ));
+ */
+
+ case 0x0D6: // stdcx. (Store DW Condition Indexed, PPC64 p581)
+ if (b0 != 1) {
+ vex_printf("dis_memsync(PPC32)(stdcx.,b0)\n");
+ return False;
+ }
+ DIP("stdcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( rS, getIReg(rS_addr) );
+ assign( EA, ea_standard(rA_addr, rB_addr) );
+
+ // First set up as if the reservation failed
+ // Set CR0[LT GT EQ S0] = 0b000 || XER[SO]
+ putCR321(0, mkU8(0<<1));
+ putCR0(0, getXER_SO());
+
+ // Get the reservation address into a temporary, then clear it.
+ assign( resaddr, IRExpr_Get(OFFB_RESVN, Ity_I64) );
+ stmt( IRStmt_Put( OFFB_RESVN, mkU64(0) ));
+
+ // Skip the rest if the reservation really did fail.
+ stmt( IRStmt_Exit(
+ binop(Iop_CmpNE64, mkexpr(resaddr),
+ mkexpr(EA)),
+ Ijk_Boring,
+ IRConst_U32(guest_CIA_curr_instr + 4)) );
+
+ // Success? Do the store
+ storeBE( mkexpr(EA), mkexpr(rS) );
+ // Set CR0[LT GT EQ S0] = 0b001 || XER[SO]
+ putCR321(0, mkU8(1<<1));
+ */
+
default:
vex_printf("dis_memsync(PPC32)(opc2)\n");
return False;
*/
static Bool dis_int_shift ( UInt theInstr )
{
- /* X-Form */
+ /* X-Form, XS-Form */
UChar opc1 = ifieldOPC(theInstr);
UChar rS_addr = ifieldRegDS(theInstr);
UChar rA_addr = ifieldRegA(theInstr);
UChar rB_addr = ifieldRegB(theInstr);
UChar sh_imm = rB_addr;
UInt opc2 = ifieldOPClo10(theInstr);
+ UChar b1 = ifieldBIT1(theInstr);
UChar flag_rC = ifieldBIT0(theInstr);
IRType ty = mode64 ? Ity_I64 : Ity_I32;
IRTemp rA = newTemp(ty);
IRTemp outofrange = newTemp(Ity_I8);
- IRTemp sh_amt = newTemp(Ity_I8);
+// IRTemp sh_amt = newTemp(Ity_I8);
IRTemp sh_amt32 = newTemp(Ity_I32);
IRTemp rS_lo32 = newTemp(Ity_I32);
IRTemp rB_lo32 = newTemp(Ity_I32);
assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */True) );
set_XER_CA( ty, PPC32G_FLAG_OP_SRAW,
- mkexpr(rA), mkexpr(rS_lo32), mkexpr(sh_amt32),
- getXER_CA32() );
+ mkexpr(rA),
+ mkSzWiden32(ty, mkexpr(rS_lo32), True),
+ mkSzWiden32(ty, mkexpr(sh_amt32), True ),
+ mkSzWiden32(ty, getXER_CA32(), True) );
break;
case 0x338: // srawi (Shift Right Algebraic Word Immediate, PPC32 p507)
DIP("srawi%s r%u,r%u,%d\n", flag_rC ? "." : "",
rA_addr, rS_addr, sh_imm);
vassert(sh_imm < 32);
- assign( sh_amt, mkU8(sh_imm) );
- e_tmp = binop(Iop_Sar32, mkexpr(rS_lo32), mkexpr(sh_amt));
- assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */True) );
+ if (mode64) {
+ assign( rA, binop(Iop_Sar64,
+ binop(Iop_Shl64, getIReg(rS_addr), mkU8(32)),
+ mkU8(32 + sh_imm)) );
+ } else {
+ assign( rA, binop(Iop_Sar32, mkexpr(rS_lo32), mkU8(sh_imm)) );
+ }
set_XER_CA( ty, PPC32G_FLAG_OP_SRAWI,
- mkexpr(rA), mkexpr(rS_lo32), mkU32(sh_imm),
- getXER_CA32() );
+ mkexpr(rA),
+ mkSzWiden32(ty, mkexpr(rS_lo32), /* Signed */True),
+ mkSzImm(ty, sh_imm),
+ mkSzWiden32(ty, getXER_CA32(), /* Signed */False) );
break;
case 0x218: // srw (Shift Right Word, PPC32 p508)
rA_addr, rS_addr, rB_addr);
/* rA = rS >>u rB */
/* ppc32 semantics are:
- slw(x,y) = (x >>u (y & 31)) -- primary result
+ srw(x,y) = (x >>u (y & 31)) -- primary result
& ~((y << 26) >>s 31) -- make result 0
for y in 32 .. 63
*/
mkU8(31))));
assign( rA, mkSzWiden32(ty, e_tmp, /* Signed */False) );
break;
-
+
+
+ /* 64bit Shifts */
+ case 0x01B: // sld (Shift Left DW, PPC64 p568)
+ DIP("sld%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ /* rA = rS << rB */
+ /* ppc64 semantics are:
+ sld(x,y) = (x << (y & 63)) -- primary result
+ & ~((y << 57) >>s 63) -- make result 0
+ for y in 64 .. 127
+ */
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( rA,
+ binop(
+ Iop_And64,
+ binop( Iop_Shl64,
+ mkexpr(rS),
+ unop( Iop_64to8,
+ binop(Iop_And64, mkexpr(rB), mkU64(63)))),
+ unop( Iop_Not64,
+ binop( Iop_Sar64,
+ binop(Iop_Shl64, mkexpr(rB), mkU8(57)),
+ mkU8(63)))) );
+ */
+
+ case 0x31A: // srad (Shift Right Algebraic DW, PPC64 p570)
+ DIP("srad%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ /* amt = rB & 127
+ rA = Sar64( rS, amt > 63 ? 63 : amt )
+ XER.CA = amt > 63 ? sign-of-rS : (computation as per srawi)
+ */
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( sh_amt64, binop(Iop_And64, mkU64(0x7F), mkexpr(rB)) );
+ assign( outofrange,
+ unop( Iop_1Uto8,
+ binop(Iop_CmpLT64U, mkU64(63), mkexpr(sh_amt64)) ));
+ assign( rA,
+ binop( Iop_Sar64,
+ mkexpr(rS),
+ unop( Iop_64to8,
+ IRExpr_Mux0X( mkexpr(outofrange),
+ mkexpr(sh_amt64),
+ mkU64(63)) ))
+ );
+ set_XER_CA( ty, PPC32G_FLAG_OP_SRAD,
+ mkexpr(rA), mkexpr(rS), mkexpr(sh_amt64),
+ getXER_CA32() );
+ */
+
+ case 0x33A: case 0x33B: // sradi (Shift Right Algebraic DW Imm, PPC64 p571)
+ sh_imm |= b1<<5;
+ vassert(sh_imm < 64);
+ DIP("sradi%s r%u,r%u,%u\n", flag_rC ? "." : "", rA_addr, rS_addr, sh_imm);
+ assign( rA, binop(Iop_Sar64, getIReg(rS_addr), mkU8(sh_imm)) );
+
+ set_XER_CA( ty, PPC32G_FLAG_OP_SRADI,
+ mkexpr(rA),
+ getIReg(rS_addr),
+ mkU64(sh_imm),
+ mkSzWiden32(ty, getXER_CA32(), /* Signed */False) );
+ break;
+
+ case 0x21B: // srd (Shift Right DW, PPC64 p574)
+ DIP("srd%s r%u,r%u,r%u\n", flag_rC ? "." : "", rA_addr, rS_addr, rB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /* rA = rS >>u rB */
+ /* ppc64 semantics are:
+ srd(x,y) = (x >>u (y & 63)) -- primary result
+ & ~((y << 57) >>s 63) -- make result 0
+ for y in 64 .. 127
+ */
+ /*
+ assign( rA,
+ binop(
+ Iop_And64,
+ binop( Iop_Shr64,
+ mkexpr(rS),
+ unop( Iop_64to8,
+ binop(Iop_And64, mkexpr(rB), mkU64(63)))),
+ unop( Iop_Not64,
+ binop( Iop_Sar64,
+ binop(Iop_Shl64, mkexpr(rB), mkU8(57)),
+ mkU8(63)))) );
+ */
+
default:
vex_printf("dis_int_shift(PPC32)(opc2)\n");
return False;
UChar b0 = ifieldBIT0(theInstr);
IRType ty = mode64 ? Ity_I64 : Ity_I32;
- IRTemp rS = newTemp(Ity_I32);
+ IRTemp rS = newTemp(ty);
assign( rS, getIReg(rS_addr) );
/* Reorder SPR field as per PPC32 p470 */
assign( frB, getFReg(frB_addr));
switch (opc2) {
- case 0x00C: // frsp (Floating Round to Single, PPC32 p423)
+ case 0x00C: // frsp (Float Round to Single, PPC32 p423)
DIP("frsp%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
assign( frD, roundToSgl( mkexpr(frB) ));
break;
- case 0x00E: // fctiw (Floating Conv to Int, PPC32 p404)
+ case 0x00E: // fctiw (Float Conv to Int, PPC32 p404)
DIP("fctiw%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
assign( r_tmp, binop(Iop_F64toI32, get_roundingmode(), mkexpr(frB)) );
assign( frD, unop( Iop_ReinterpI64asF64,
unop( Iop_32Uto64, mkexpr(r_tmp))));
break;
- case 0x00F: // fctiwz (Floating Conv to Int, Round to Zero, PPC32 p405)
+ case 0x00F: // fctiwz (Float Conv to Int, Round to Zero, PPC32 p405)
DIP("fctiwz%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
assign( r_tmp, binop(Iop_F64toI32, mkU32(0x3), mkexpr(frB)) );
assign( frD, unop( Iop_ReinterpI64asF64,
unop( Iop_32Uto64, mkexpr(r_tmp))));
break;
-
+
+
+ /* 64bit FP conversions */
+ case 0x32E: // fctid (Float Conv to Int DW, PPC64 p437)
+ DIP("fctid%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( r_tmp, binop(Iop_F64toI64, get_roundingmode(), mkexpr(frB)) );
+ assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp)) );
+ */
+
+ case 0x32F: // fctidz (Float Conv to Int DW, Round to Zero, PPC64 p437)
+ DIP("fctidz%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ assign( r_tmp, binop(Iop_F64toI64, mkU32(0x3), mkexpr(frB)) );
+ assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp)) );
+ */
+
+ case 0x34E: // fcfid (Float Conv from Int DW, PPC64 p434)
+ DIP("fcfid%s fr%u,fr%u\n", flag_rC ? "." : "", frD_addr, frB_addr);
+ DIP(" => not implemented\n");
+ return False;
+ /*
+ ?
+ assign( r_tmp, unop( Iop_ReinterpF64asI64, mkexpr(rD)) );
+ assign( frD, binop(Iop_I64toF64, get_roundingmode(), mkexpr(frB)) );
+ */
+
default:
vex_printf("dis_fp_round(PPC32)(opc2)\n");
return False;
IRType ty = mode64 ? Ity_I64 : Ity_I32;
/* What insn variants are we supporting today? */
- Bool allow_FP = archinfo->subarch == VexSubArchPPC32_FI
- || archinfo->subarch == VexSubArchPPC32_VFI;
+ Bool allow_FP = archinfo->subarch == VexSubArchPPC32_FI ||
+ archinfo->subarch == VexSubArchPPC32_VFI ||
+ archinfo->subarch == VexSubArchPPC64_FI ||
+ archinfo->subarch == VexSubArchPPC64_VFI;
- Bool allow_VMX = archinfo->subarch == VexSubArchPPC32_VFI;
+ Bool allow_VMX = archinfo->subarch == VexSubArchPPC32_VFI ||
+ archinfo->subarch == VexSubArchPPC64_VFI;
/* The running delta */
Long delta = (Long)mkSzAddr(ty, (ULong)delta64);
and have done. */
theInstr = getUIntBigendianly( (UChar*)(&guest_code[delta]) );
+#if 0
+ vex_printf("disInstr(ppc32): instr: ");
+ vex_printf_binary( theInstr, 32, True );
+ vex_printf("\n");
+#endif
+
+// vex_printf("insn: 0x%x\n", theInstr);
+
if (mode64) {
DIP("\t0x%llx: ", guest_CIA_curr_instr);
} else {
if (dis_int_rot( theInstr )) goto decode_success;
goto decode_failure;
+ /* 64bit Integer Rotate Instructions */
+ case 0x1E: // rldcl, rldcr, rldic, rldicl, rldicr, rldimi
+ if (dis_int_rot( theInstr )) goto decode_success;
+ goto decode_failure;
+
/* Integer Load Instructions */
case 0x22: case 0x23: case 0x2A: // lbz, lbzu, lha
case 0x2B: case 0x28: case 0x29: // lhau, lhz, lhzu
goto decode_failure;
//zz /* Trap Instructions */
+//zz case 0x02: // tdi
+//zz DIP("trap op (tdi) => not implemented\n");
+//zz goto decode_failure;
//zz case 0x03: // twi
//zz DIP("trap op (twi) => not implemented\n");
//zz goto decode_failure;
if (dis_fp_store( theInstr )) goto decode_success;
goto decode_failure;
+ /* 64bit Integer Loads */
+ case 0x3A: // ld, ldu, lwa
+ if (!mode64) goto decode_failure;
+ if (dis_int_load( theInstr )) goto decode_success;
+ goto decode_failure;
+
case 0x3B:
if (!allow_FP) goto decode_failure;
}
break;
+ /* 64bit Integer Stores */
+ case 0x3E: // std, stdu
+ if (!mode64) goto decode_failure;
+ if (dis_int_store( theInstr )) goto decode_success;
+ goto decode_failure;
+
case 0x3F:
if (!allow_FP) goto decode_failure;
/* Instrs using opc[1:5] never overlap with instrs using opc[1:10],
case 0x2C7: // mtfsf
if (dis_fp_scr( theInstr )) goto decode_success;
goto decode_failure;
+
+ /* 64bit FP conversions */
+ case 0x32E: // fctid
+ case 0x32F: // fctidz
+ case 0x34E: // fcfid
+ if (!mode64) goto decode_failure;
+ if (dis_fp_round(theInstr)) goto decode_success;
+ goto decode_failure;
+
default:
goto decode_failure;
}
case 0x0C8: // subfze
if (dis_int_arith( theInstr )) goto decode_success;
goto decode_failure;
+
+ /* 64bit Integer Arithmetic */
+ case 0x009: case 0x049: case 0x0E9: // mulhdu, mulhd, mulld
+ case 0x1C9: case 0x1E9: // divdu, divd
+ if (!mode64) goto decode_failure;
+ if (dis_int_arith( theInstr )) goto decode_success;
+ goto decode_failure;
+
default:
break; // Fall through...
}
if (dis_int_logic( theInstr )) goto decode_success;
goto decode_failure;
+ /* 64bit Integer Logical Instructions */
+ case 0x3DA: case 0x03A: // extsw, cntlzw
+ if (!mode64) goto decode_failure;
+ if (dis_int_logic( theInstr )) goto decode_success;
+ goto decode_failure;
+
/* Integer Shift Instructions */
case 0x018: case 0x318: case 0x338: // slw, sraw, srawi
case 0x218: // srw
if (dis_int_shift( theInstr )) goto decode_success;
goto decode_failure;
+ /* 64bit Integer Shift Instructions */
+ case 0x01B: case 0x31A: // sld, srad
+ case 0x33A: case 0x33B: // sradi_a, sradi_b
+ case 0x21B: // srd
+ if (!mode64) goto decode_failure;
+ if (dis_int_shift( theInstr )) goto decode_success;
+ goto decode_failure;
+
/* Integer Load Instructions */
case 0x057: case 0x077: case 0x157: // lbzx, lbzux, lhax
case 0x177: case 0x117: case 0x137: // lhaux, lhzx, lhzux
if (dis_int_load( theInstr )) goto decode_success;
goto decode_failure;
+ /* 64bit Integer Load Instructions */
+ case 0x035: case 0x015: // ldux, ldx
+ case 0x175: case 0x155: // lwaux, lwax
+ if (!mode64) goto decode_failure;
+ if (dis_int_load( theInstr )) goto decode_success;
+ goto decode_failure;
+
/* Integer Store Instructions */
case 0x0F7: case 0x0D7: case 0x1B7: // stbux, stbx, sthux
case 0x197: case 0x0B7: case 0x097: // sthx, stwux, stwx
if (dis_int_store( theInstr )) goto decode_success;
goto decode_failure;
+ /* 64bit Integer Store Instructions */
+ case 0x0B5: case 0x095: // stdux, stdx
+ if (!mode64) goto decode_failure;
+ if (dis_int_store( theInstr )) goto decode_success;
+ goto decode_failure;
+
/* Integer Load and Store with Byte Reverse Instructions */
case 0x316: case 0x216: case 0x396: // lhbrx, lwbrx, sthbrx
case 0x296: // stwbrx
if (dis_memsync( theInstr )) goto decode_success;
goto decode_failure;
+ /* 64bit Memory Synchronization Instructions */
+ case 0x054: case 0x0D6: // ldarx, stdcx.
+ if (!mode64) goto decode_failure;
+ if (dis_memsync( theInstr )) goto decode_success;
+ goto decode_failure;
+
/* Processor Control Instructions */
case 0x200: case 0x013: case 0x153: // mcrxr, mfcr, mfspr
case 0x173: case 0x090: case 0x1D3: // mftb, mtcrf, mtspr
//zz /* Trap Instructions */
//zz case 0x004: // tw
//zz DIP("trap op (tw) => not implemented\n");
+//zz goto decode_failure;
+//zz case 0x044: // td
+//zz DIP("trap op (td) => not implemented\n");
//zz goto decode_failure;
/* Floating Point Load Instructions */
}
/* But specific for real regs. */
switch (hregClass(reg)) {
+ case HRcInt64:
+ r = hregNumber(reg);
+ vassert(r >= 0 && r < 32);
+ vex_printf("%s", ireg32_names[r]);
+ return;
case HRcInt32:
r = hregNumber(reg);
vassert(r >= 0 && r < 32);
}
}
-HReg hregPPC32_GPR0 ( void ) { return mkHReg( 0, HRcInt32, False); }
-HReg hregPPC32_GPR1 ( void ) { return mkHReg( 1, HRcInt32, False); }
-HReg hregPPC32_GPR2 ( void ) { return mkHReg( 2, HRcInt32, False); }
-HReg hregPPC32_GPR3 ( void ) { return mkHReg( 3, HRcInt32, False); }
-HReg hregPPC32_GPR4 ( void ) { return mkHReg( 4, HRcInt32, False); }
-HReg hregPPC32_GPR5 ( void ) { return mkHReg( 5, HRcInt32, False); }
-HReg hregPPC32_GPR6 ( void ) { return mkHReg( 6, HRcInt32, False); }
-HReg hregPPC32_GPR7 ( void ) { return mkHReg( 7, HRcInt32, False); }
-HReg hregPPC32_GPR8 ( void ) { return mkHReg( 8, HRcInt32, False); }
-HReg hregPPC32_GPR9 ( void ) { return mkHReg( 9, HRcInt32, False); }
-HReg hregPPC32_GPR10 ( void ) { return mkHReg(10, HRcInt32, False); }
-HReg hregPPC32_GPR11 ( void ) { return mkHReg(11, HRcInt32, False); }
-HReg hregPPC32_GPR12 ( void ) { return mkHReg(12, HRcInt32, False); }
-HReg hregPPC32_GPR13 ( void ) { return mkHReg(13, HRcInt32, False); }
-HReg hregPPC32_GPR14 ( void ) { return mkHReg(14, HRcInt32, False); }
-HReg hregPPC32_GPR15 ( void ) { return mkHReg(15, HRcInt32, False); }
-HReg hregPPC32_GPR16 ( void ) { return mkHReg(16, HRcInt32, False); }
-HReg hregPPC32_GPR17 ( void ) { return mkHReg(17, HRcInt32, False); }
-HReg hregPPC32_GPR18 ( void ) { return mkHReg(18, HRcInt32, False); }
-HReg hregPPC32_GPR19 ( void ) { return mkHReg(19, HRcInt32, False); }
-HReg hregPPC32_GPR20 ( void ) { return mkHReg(20, HRcInt32, False); }
-HReg hregPPC32_GPR21 ( void ) { return mkHReg(21, HRcInt32, False); }
-HReg hregPPC32_GPR22 ( void ) { return mkHReg(22, HRcInt32, False); }
-HReg hregPPC32_GPR23 ( void ) { return mkHReg(23, HRcInt32, False); }
-HReg hregPPC32_GPR24 ( void ) { return mkHReg(24, HRcInt32, False); }
-HReg hregPPC32_GPR25 ( void ) { return mkHReg(25, HRcInt32, False); }
-HReg hregPPC32_GPR26 ( void ) { return mkHReg(26, HRcInt32, False); }
-HReg hregPPC32_GPR27 ( void ) { return mkHReg(27, HRcInt32, False); }
-HReg hregPPC32_GPR28 ( void ) { return mkHReg(28, HRcInt32, False); }
-HReg hregPPC32_GPR29 ( void ) { return mkHReg(29, HRcInt32, False); }
-HReg hregPPC32_GPR30 ( void ) { return mkHReg(30, HRcInt32, False); }
-HReg hregPPC32_GPR31 ( void ) { return mkHReg(31, HRcInt32, False); }
+
+#define MkHRegGPR(_n, _mode64) \
+ mkHReg(_n, _mode64 ? HRcInt64 : HRcInt32, False)
+
+HReg hregPPC_GPR0 ( Bool mode64 ) { return MkHRegGPR( 0, mode64); }
+HReg hregPPC_GPR1 ( Bool mode64 ) { return MkHRegGPR( 1, mode64); }
+HReg hregPPC_GPR2 ( Bool mode64 ) { return MkHRegGPR( 2, mode64); }
+HReg hregPPC_GPR3 ( Bool mode64 ) { return MkHRegGPR( 3, mode64); }
+HReg hregPPC_GPR4 ( Bool mode64 ) { return MkHRegGPR( 4, mode64); }
+HReg hregPPC_GPR5 ( Bool mode64 ) { return MkHRegGPR( 5, mode64); }
+HReg hregPPC_GPR6 ( Bool mode64 ) { return MkHRegGPR( 6, mode64); }
+HReg hregPPC_GPR7 ( Bool mode64 ) { return MkHRegGPR( 7, mode64); }
+HReg hregPPC_GPR8 ( Bool mode64 ) { return MkHRegGPR( 8, mode64); }
+HReg hregPPC_GPR9 ( Bool mode64 ) { return MkHRegGPR( 9, mode64); }
+HReg hregPPC_GPR10 ( Bool mode64 ) { return MkHRegGPR(10, mode64); }
+HReg hregPPC_GPR11 ( Bool mode64 ) { return MkHRegGPR(11, mode64); }
+HReg hregPPC_GPR12 ( Bool mode64 ) { return MkHRegGPR(12, mode64); }
+HReg hregPPC_GPR13 ( Bool mode64 ) { return MkHRegGPR(13, mode64); }
+HReg hregPPC_GPR14 ( Bool mode64 ) { return MkHRegGPR(14, mode64); }
+HReg hregPPC_GPR15 ( Bool mode64 ) { return MkHRegGPR(15, mode64); }
+HReg hregPPC_GPR16 ( Bool mode64 ) { return MkHRegGPR(16, mode64); }
+HReg hregPPC_GPR17 ( Bool mode64 ) { return MkHRegGPR(17, mode64); }
+HReg hregPPC_GPR18 ( Bool mode64 ) { return MkHRegGPR(18, mode64); }
+HReg hregPPC_GPR19 ( Bool mode64 ) { return MkHRegGPR(19, mode64); }
+HReg hregPPC_GPR20 ( Bool mode64 ) { return MkHRegGPR(20, mode64); }
+HReg hregPPC_GPR21 ( Bool mode64 ) { return MkHRegGPR(21, mode64); }
+HReg hregPPC_GPR22 ( Bool mode64 ) { return MkHRegGPR(22, mode64); }
+HReg hregPPC_GPR23 ( Bool mode64 ) { return MkHRegGPR(23, mode64); }
+HReg hregPPC_GPR24 ( Bool mode64 ) { return MkHRegGPR(24, mode64); }
+HReg hregPPC_GPR25 ( Bool mode64 ) { return MkHRegGPR(25, mode64); }
+HReg hregPPC_GPR26 ( Bool mode64 ) { return MkHRegGPR(26, mode64); }
+HReg hregPPC_GPR27 ( Bool mode64 ) { return MkHRegGPR(27, mode64); }
+HReg hregPPC_GPR28 ( Bool mode64 ) { return MkHRegGPR(28, mode64); }
+HReg hregPPC_GPR29 ( Bool mode64 ) { return MkHRegGPR(29, mode64); }
+HReg hregPPC_GPR30 ( Bool mode64 ) { return MkHRegGPR(30, mode64); }
+HReg hregPPC_GPR31 ( Bool mode64 ) { return MkHRegGPR(31, mode64); }
+
+#undef MkHRegGPR
HReg hregPPC32_FPR0 ( void ) { return mkHReg( 0, HRcFlt64, False); }
HReg hregPPC32_FPR1 ( void ) { return mkHReg( 1, HRcFlt64, False); }
void getAllocableRegs_PPC32 ( Int* nregs, HReg** arr, Bool mode64 )
{
UInt i=0;
- *nregs = 90 - 24 - 24;
+ if (mode64)
+ *nregs = (32-8) + (32-24) + (32-24);
+ else
+ *nregs = (32-6) + (32-24) + (32-24);
*arr = LibVEX_Alloc(*nregs * sizeof(HReg));
// GPR0 = scratch reg where possible - some ops interpret as value zero
// GPR1 = stack pointer
// GPR2 = TOC pointer
- (*arr)[i++] = hregPPC32_GPR3();
- (*arr)[i++] = hregPPC32_GPR4();
- (*arr)[i++] = hregPPC32_GPR5();
- (*arr)[i++] = hregPPC32_GPR6();
- (*arr)[i++] = hregPPC32_GPR7();
- (*arr)[i++] = hregPPC32_GPR8();
- (*arr)[i++] = hregPPC32_GPR9();
- (*arr)[i++] = hregPPC32_GPR10();
- (*arr)[i++] = hregPPC32_GPR11();
- (*arr)[i++] = hregPPC32_GPR12();
+ (*arr)[i++] = hregPPC_GPR3(mode64);
+ (*arr)[i++] = hregPPC_GPR4(mode64);
+ (*arr)[i++] = hregPPC_GPR5(mode64);
+ (*arr)[i++] = hregPPC_GPR6(mode64);
+ (*arr)[i++] = hregPPC_GPR7(mode64);
+ (*arr)[i++] = hregPPC_GPR8(mode64);
+ (*arr)[i++] = hregPPC_GPR9(mode64);
+ (*arr)[i++] = hregPPC_GPR10(mode64);
+ if (!mode64) {
+ /* in mode64:
+ r11 used for calls by ptr / env ptr for some langs
+ r12 used for exception handling and global linkage code */
+ (*arr)[i++] = hregPPC_GPR11(mode64);
+ (*arr)[i++] = hregPPC_GPR12(mode64);
+ }
// GPR13 = thread specific pointer
- (*arr)[i++] = hregPPC32_GPR14();
- (*arr)[i++] = hregPPC32_GPR15();
- (*arr)[i++] = hregPPC32_GPR16();
- (*arr)[i++] = hregPPC32_GPR17();
- (*arr)[i++] = hregPPC32_GPR18();
- (*arr)[i++] = hregPPC32_GPR19();
- (*arr)[i++] = hregPPC32_GPR20();
- (*arr)[i++] = hregPPC32_GPR21();
- (*arr)[i++] = hregPPC32_GPR22();
- (*arr)[i++] = hregPPC32_GPR23();
- (*arr)[i++] = hregPPC32_GPR24();
- (*arr)[i++] = hregPPC32_GPR25();
- (*arr)[i++] = hregPPC32_GPR26();
- (*arr)[i++] = hregPPC32_GPR27();
- (*arr)[i++] = hregPPC32_GPR28();
- (*arr)[i++] = hregPPC32_GPR29();
+ (*arr)[i++] = hregPPC_GPR14(mode64);
+ (*arr)[i++] = hregPPC_GPR15(mode64);
+ (*arr)[i++] = hregPPC_GPR16(mode64);
+ (*arr)[i++] = hregPPC_GPR17(mode64);
+ (*arr)[i++] = hregPPC_GPR18(mode64);
+ (*arr)[i++] = hregPPC_GPR19(mode64);
+ (*arr)[i++] = hregPPC_GPR20(mode64);
+ (*arr)[i++] = hregPPC_GPR21(mode64);
+ (*arr)[i++] = hregPPC_GPR22(mode64);
+ (*arr)[i++] = hregPPC_GPR23(mode64);
+ (*arr)[i++] = hregPPC_GPR24(mode64);
+ (*arr)[i++] = hregPPC_GPR25(mode64);
+ (*arr)[i++] = hregPPC_GPR26(mode64);
+ (*arr)[i++] = hregPPC_GPR27(mode64);
+ (*arr)[i++] = hregPPC_GPR28(mode64);
+ (*arr)[i++] = hregPPC_GPR29(mode64);
// GPR30 AltiVec spill reg temporary
// GPR31 = GuestStatePtr
}
-/* --------- Operand, which can be a reg or a u32. --------- */
+/* --------- Operand, which can be a reg or a u32/64. --------- */
-PPC32RI* PPC32RI_Imm ( UInt imm32 ) {
+PPC32RI* PPC32RI_Imm ( ULong imm64 ) {
PPC32RI* op = LibVEX_Alloc(sizeof(PPC32RI));
- op->tag = Pri_Imm;
- op->Pri.Imm = imm32;
+ op->tag = Pri_Imm;
+ op->Pri.Imm = imm64;
return op;
}
PPC32RI* PPC32RI_Reg ( HReg reg ) {
PPC32RI* op = LibVEX_Alloc(sizeof(PPC32RI));
- op->tag = Pri_Reg;
- op->Pri.Reg = reg;
+ op->tag = Pri_Reg;
+ op->Pri.Reg = reg;
return op;
}
void ppPPC32RI ( PPC32RI* dst ) {
switch (dst->tag) {
case Pri_Imm:
- vex_printf("0x%x", dst->Pri.Imm);
+ vex_printf("0x%llx", dst->Pri.Imm);
break;
case Pri_Reg:
ppHRegPPC32(dst->Pri.Reg);
}
}
-HChar* showPPC32AluOp ( PPC32AluOp op, Bool immR ) {
+HChar* showPPC32AluOp ( PPC32AluOp op, Bool immR, Bool is32Bit ) {
switch (op) {
case Palu_ADD: return immR ? "addi" : "add";
case Palu_SUB: return immR ? "subi" : "sub";
case Palu_AND: return immR ? "andi." : "and";
case Palu_OR: return immR ? "ori" : "or";
case Palu_XOR: return immR ? "xori" : "xor";
- case Palu_SHL: return immR ? "slwi" : "slw";
- case Palu_SHR: return immR ? "srwi" : "srw";
- case Palu_SAR: return immR ? "srawi" : "sraw";
+ case Palu_SHL: return is32Bit ? (immR ? "slwi" : "slw") :
+ (immR ? "sldi" : "sld");
+ case Palu_SHR: return is32Bit ? (immR ? "srwi" : "srw") :
+ (immR ? "srdi" : "srd");
+ case Palu_SAR: return is32Bit ? (immR ? "srawi" : "sraw") :
+ (immR ? "sradi" : "srad");
default: vpanic("showPPC32AluOp");
}
}
}
}
-PPC32Instr* PPC32Instr_LI32 ( HReg dst, UInt imm32 )
+PPC32Instr* PPC32Instr_LI ( HReg dst, ULong imm64, Bool mode64 )
{
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_LI32;
- i->Pin.LI32.dst = dst;
- i->Pin.LI32.imm32 = imm32;
+ PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag = Pin_LI;
+ i->Pin.LI.dst = dst;
+ i->Pin.LI.imm64 = imm64;
+ if (!mode64)
+ vassert( (Long)imm64 == (Long)(Int)(UInt)imm64 );
return i;
}
-PPC32Instr* PPC32Instr_Alu32 ( PPC32AluOp op, HReg dst,
- HReg srcL, PPC32RH* srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_Alu32;
- i->Pin.Alu32.op = op;
- i->Pin.Alu32.dst = dst;
- i->Pin.Alu32.srcL = srcL;
- i->Pin.Alu32.srcR = srcR;
+PPC32Instr* PPC32Instr_Alu ( PPC32AluOp op, HReg dst,
+ HReg srcL, PPC32RH* srcR ) {
+ PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag = Pin_Alu;
+ i->Pin.Alu.op = op;
+ i->Pin.Alu.dst = dst;
+ i->Pin.Alu.srcL = srcL;
+ i->Pin.Alu.srcR = srcR;
return i;
}
PPC32Instr* PPC32Instr_AddSubC32 ( Bool isAdd, Bool setC,
i->Pin.AddSubC32.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_Cmp32 ( Bool syned, UInt crfD,
- HReg srcL, PPC32RH* srcR ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_Cmp32;
- i->Pin.Cmp32.syned = syned;
- i->Pin.Cmp32.crfD = crfD;
- i->Pin.Cmp32.srcL = srcL;
- i->Pin.Cmp32.srcR = srcR;
+PPC32Instr* PPC32Instr_Cmp ( Bool syned, UInt crfD,
+ HReg srcL, PPC32RH* srcR ) {
+ PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag = Pin_Cmp;
+ i->Pin.Cmp.syned = syned;
+ i->Pin.Cmp.crfD = crfD;
+ i->Pin.Cmp.srcL = srcL;
+ i->Pin.Cmp.srcR = srcR;
return i;
}
-PPC32Instr* PPC32Instr_Unary32 ( PPC32UnaryOp op, HReg dst, HReg src ) {
+PPC32Instr* PPC32Instr_Unary ( PPC32UnaryOp op, HReg dst, HReg src ) {
PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_Unary32;
+ i->tag = Pin_Unary;
i->Pin.Unary32.op = op;
i->Pin.Unary32.dst = dst;
i->Pin.Unary32.src = src;
return i;
}
-PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi32,
+PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi,
HReg dst, HReg srcL, HReg srcR ) {
PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
i->tag = Pin_MulL;
i->Pin.MulL.syned = syned;
- i->Pin.MulL.hi32 = hi32;
+ i->Pin.MulL.hi = hi;
i->Pin.MulL.dst = dst;
i->Pin.MulL.srcL = srcL;
i->Pin.MulL.srcR = srcR;
- /* if doing the low 32, the signedness is irrelevant, but tie it
+ /* if doing the low word, the signedness is irrelevant, but tie it
down anyway. */
- if (!hi32) vassert(!syned);
+ if (!hi) vassert(!syned);
return i;
}
PPC32Instr* PPC32Instr_Div ( Bool syned, HReg dst, HReg srcL, HReg srcR ) {
return i;
}
PPC32Instr* PPC32Instr_Call ( PPC32CondCode cond,
- Addr32 target, UInt argiregs ) {
+ Addr64 target, UInt argiregs ) {
UInt mask;
PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
i->tag = Pin_Call;
i->Pin.Goto.jk = jk;
return i;
}
-PPC32Instr* PPC32Instr_CMov32 ( PPC32CondCode cond,
- HReg dst, PPC32RI* src ) {
- PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
- i->tag = Pin_CMov32;
- i->Pin.CMov32.cond = cond;
- i->Pin.CMov32.src = src;
- i->Pin.CMov32.dst = dst;
+PPC32Instr* PPC32Instr_CMov ( PPC32CondCode cond,
+ HReg dst, PPC32RI* src ) {
+ PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag = Pin_CMov;
+ i->Pin.CMov.cond = cond;
+ i->Pin.CMov.src = src;
+ i->Pin.CMov.dst = dst;
vassert(cond.test != Pct_ALWAYS);
return i;
}
PPC32Instr* PPC32Instr_Load ( UChar sz, Bool syned,
- HReg dst, PPC32AMode* src ) {
+ HReg dst, PPC32AMode* src, Bool mode64 ) {
PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
i->tag = Pin_Load;
i->Pin.Load.sz = sz;
i->Pin.Load.syned = syned;
i->Pin.Load.src = src;
i->Pin.Load.dst = dst;
- vassert(sz == 1 || sz == 2 || sz == 4);
+ vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+ if (sz == 8) vassert(mode64);
return i;
}
-PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst, HReg src ) {
+PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst, HReg src,
+ Bool mode64 ) {
PPC32Instr* i = LibVEX_Alloc(sizeof(PPC32Instr));
i->tag = Pin_Store;
i->Pin.Store.sz = sz;
i->Pin.Store.src = src;
i->Pin.Store.dst = dst;
- vassert(sz == 1 || sz == 2 || sz == 4);
+ vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+ if (sz == 8) vassert(mode64);
return i;
}
PPC32Instr* PPC32Instr_Set32 ( PPC32CondCode cond, HReg dst ) {
/* Pretty Print instructions */
-static void ppLoadImm ( HReg dst, UInt imm ) {
- if (imm < 0x10000) {
+static void ppLoadImm ( HReg dst, ULong imm, Bool mode64 ) {
+#if 1
+ vex_printf("li_word ");
+ ppHRegPPC32(dst);
+ if (!mode64) {
+ vassert(imm == (ULong)(Long)(Int)(UInt)imm);
+ vex_printf(",0x%08x", (UInt)imm);
+ } else {
+ vex_printf(",0x%016llx", imm);
+ }
+#else
+// if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
+ if (imm == (ULong)(Long)(Int)(Short)(UShort)imm) {
+ // sign-extendable from 16 bits
vex_printf("li ");
ppHRegPPC32(dst);
- vex_printf(",0x%x", imm);
+ vex_printf(",0x%x", (UInt)imm);
} else {
- vex_printf("lis ");
- ppHRegPPC32(dst);
- vex_printf(",0x%x ; ", imm >> 16);
- vex_printf("ori ");
- ppHRegPPC32(dst);
- vex_printf(",");
- ppHRegPPC32(dst);
- vex_printf(",0x%x", imm & 0xFFFF);
+// if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
+ if (imm == (ULong)(Long)(Int)(UInt)imm) {
+ // sign-extendable from 32 bits
+ vex_printf("lis ");
+ ppHRegPPC32(dst);
+ vex_printf(",0x%x ; ", (UInt)(imm >> 16));
+ vex_printf("ori ");
+ ppHRegPPC32(dst);
+ vex_printf(",");
+ ppHRegPPC32(dst);
+ vex_printf(",0x%x", (UInt)(imm & 0xFFFF));
+ } else {
+ // full 64bit immediate load: 5 (five!) insns.
+ vassert(mode64);
+
+ // load high word
+ vex_printf("lis ");
+ ppHRegPPC32(dst);
+ vex_printf(",0x%x ; ", (UInt)(imm >> 48) & 0xFFFF);
+ vex_printf("ori ");
+ ppHRegPPC32(dst);
+ vex_printf(",");
+ ppHRegPPC32(dst);
+ vex_printf(",0x%x ; ", (UInt)(imm >> 32) & 0xFFFF);
+
+ // shift r_dst low word to high word => rldicr
+ vex_printf("rldicr ");
+ ppHRegPPC32(dst);
+ vex_printf(",");
+ ppHRegPPC32(dst);
+ vex_printf(",32,31 ; ");
+
+ // load low word
+ vex_printf("oris ");
+ ppHRegPPC32(dst);
+ vex_printf(",");
+ ppHRegPPC32(dst);
+ vex_printf(",0x%x ; ", (UInt)(imm >> 16) & 0xFFFF);
+ vex_printf("ori ");
+ ppHRegPPC32(dst);
+ vex_printf(",");
+ ppHRegPPC32(dst);
+ vex_printf(",0x%x", (UInt)(imm >> 0) & 0xFFFF);
+ }
}
+#endif
}
static void ppMovReg ( HReg dst, HReg src ) {
void ppPPC32Instr ( PPC32Instr* i, Bool mode64 )
{
switch (i->tag) {
- case Pin_LI32:
- vex_printf("li32 ");
- ppHRegPPC32(i->Pin.LI32.dst);
- vex_printf(",0x%x", i->Pin.LI32.imm32);
+ case Pin_LI:
+ ppLoadImm(i->Pin.LI.dst, i->Pin.LI.imm64, mode64);
break;
- case Pin_Alu32:
+ case Pin_Alu: {
+ HReg r_srcL = i->Pin.Alu.srcL;
+ PPC32RH* rh_srcR = i->Pin.Alu.srcR;
/* special-case "mr" */
- if (i->Pin.Alu32.op == Palu_OR && // or Rd,Rs,Rs == mr Rd,Rs
- i->Pin.Alu32.srcR->tag == Prh_Reg &&
- i->Pin.Alu32.srcR->Prh.Reg.reg == i->Pin.Alu32.srcL) {
+ if (i->Pin.Alu.op == Palu_OR && // or Rd,Rs,Rs == mr Rd,Rs
+ rh_srcR->tag == Prh_Reg &&
+ rh_srcR->Prh.Reg.reg == r_srcL) {
vex_printf("mr ");
- ppHRegPPC32(i->Pin.Alu32.dst);
+ ppHRegPPC32(i->Pin.Alu.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.Alu32.srcL);
+ ppHRegPPC32(r_srcL);
} else {
/* generic */
- vex_printf("%s ",
- showPPC32AluOp(i->Pin.Alu32.op,
- toBool(i->Pin.Alu32.srcR->tag == Prh_Imm)));
- ppHRegPPC32(i->Pin.Alu32.dst);
+ vex_printf("%s ", showPPC32AluOp(i->Pin.Alu.op,
+ toBool(rh_srcR->tag == Prh_Imm),
+ toBool(hregClass(r_srcL) == HRcInt32)));
+ ppHRegPPC32(i->Pin.Alu.dst);
vex_printf(",");
- ppHRegPPC32(i->Pin.Alu32.srcL);
+ ppHRegPPC32(r_srcL);
vex_printf(",");
- ppPPC32RH(i->Pin.Alu32.srcR);
+ ppPPC32RH(rh_srcR);
}
return;
+ }
case Pin_AddSubC32:
vex_printf("%s%s ",
i->Pin.AddSubC32.isAdd ? "add" : "sub",
vex_printf(",");
ppHRegPPC32(i->Pin.AddSubC32.srcR);
return;
- case Pin_Cmp32:
+ case Pin_Cmp:
vex_printf("%s%s %%cr%u,",
- i->Pin.Cmp32.syned ? "cmp" : "cmpl",
- i->Pin.Cmp32.srcR->tag == Prh_Imm ? "i" : "",
- i->Pin.Cmp32.crfD);
- ppHRegPPC32(i->Pin.Cmp32.srcL);
+ i->Pin.Cmp.syned ? "cmp" : "cmpl",
+ i->Pin.Cmp.srcR->tag == Prh_Imm ? "i" : "",
+ i->Pin.Cmp.crfD);
+ ppHRegPPC32(i->Pin.Cmp.srcL);
vex_printf(",");
- ppPPC32RH(i->Pin.Cmp32.srcR);
+ ppPPC32RH(i->Pin.Cmp.srcR);
return;
- case Pin_Unary32:
+ case Pin_Unary:
vex_printf("%s ", showPPC32UnaryOp(i->Pin.Unary32.op));
ppHRegPPC32(i->Pin.Unary32.dst);
vex_printf(",");
return;
case Pin_MulL:
vex_printf("mul%s%s ",
- i->Pin.MulL.hi32 ? "hw" : "lw",
- i->Pin.MulL.hi32 ? (i->Pin.MulL.syned ? "s" : "u") : "");
+ i->Pin.MulL.hi ? "hw" : "lw",
+ i->Pin.MulL.hi ? (i->Pin.MulL.syned ? "s" : "u") : "");
ppHRegPPC32(i->Pin.MulL.dst);
vex_printf(",");
ppHRegPPC32(i->Pin.MulL.srcL);
vex_printf("if (%s) ", showPPC32CondCode(i->Pin.Call.cond));
}
vex_printf("{ ");
- ppLoadImm(hregPPC32_GPR12(), i->Pin.Call.target);
- vex_printf(" ; mtctr r12 ; bctrl [");
+ ppLoadImm(hregPPC_GPR10(mode64), i->Pin.Call.target, mode64);
+ vex_printf(" ; mtctr r10 ; bctrl [");
for (n = 0; n < 32; n++) {
if (i->Pin.Call.argiregs & (1<<n)) {
vex_printf("r%d", n);
vex_printf(" ; ");
}
if (i->Pin.Goto.dst->tag == Pri_Imm) {
- ppLoadImm(hregPPC32_GPR3(), i->Pin.Goto.dst->Pri.Imm);
+ ppLoadImm(hregPPC_GPR3(mode64), i->Pin.Goto.dst->Pri.Imm, mode64);
} else {
- ppMovReg(hregPPC32_GPR3(), i->Pin.Goto.dst->Pri.Reg);
+ ppMovReg(hregPPC_GPR3(mode64), i->Pin.Goto.dst->Pri.Reg);
}
vex_printf(" ; blr }");
return;
- case Pin_CMov32:
- vex_printf("cmov32 (%s) ", showPPC32CondCode(i->Pin.CMov32.cond));
- ppHRegPPC32(i->Pin.CMov32.dst);
+ case Pin_CMov:
+ vex_printf("cmov (%s) ", showPPC32CondCode(i->Pin.CMov.cond));
+ ppHRegPPC32(i->Pin.CMov.dst);
vex_printf(",");
- ppPPC32RI(i->Pin.CMov32.src);
+ ppPPC32RI(i->Pin.CMov.src);
vex_printf(": ");
- if (i->Pin.CMov32.cond.test != Pct_ALWAYS) {
- vex_printf("if (%s) ", showPPC32CondCode(i->Pin.CMov32.cond));
+ if (i->Pin.CMov.cond.test != Pct_ALWAYS) {
+ vex_printf("if (%s) ", showPPC32CondCode(i->Pin.CMov.cond));
}
vex_printf("{ ");
- if (i->Pin.CMov32.src->tag == Pri_Imm) {
- ppLoadImm(i->Pin.CMov32.dst, i->Pin.CMov32.src->Pri.Imm);
+ if (i->Pin.CMov.src->tag == Pri_Imm) {
+ ppLoadImm(i->Pin.CMov.dst, i->Pin.CMov.src->Pri.Imm, mode64);
} else {
- ppMovReg(i->Pin.CMov32.dst, i->Pin.CMov32.src->Pri.Reg);
+ ppMovReg(i->Pin.CMov.dst, i->Pin.CMov.src->Pri.Reg);
}
vex_printf(" }");
return;
case Pin_Load: {
- UChar sz = i->Pin.Load.sz;
- Bool syned = i->Pin.Load.syned;
Bool idxd = toBool(i->Pin.Load.src->tag == Pam_RR);
- vex_printf("l%c%c%s ",
- (sz==1) ? 'b' : (sz==2 ? 'h' : 'w'),
- syned ? 'a' : 'z',
- idxd ? "x" : "" );
+ UChar sz = i->Pin.Load.sz;
+ UChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : 'd';
+ HChar* s_syned = i->Pin.Load.syned ? "a" : sz==8 ? "" : "z";
+ vex_printf("l%c%s%s ", c_sz, s_syned, idxd ? "x" : "" );
ppHRegPPC32(i->Pin.Load.dst);
vex_printf(",");
ppPPC32AMode(i->Pin.Load.src);
case Pin_Store: {
UChar sz = i->Pin.Store.sz;
Bool idxd = toBool(i->Pin.Store.dst->tag == Pam_RR);
- vex_printf("st%c%s ",
- (sz==1) ? 'b' : (sz==2 ? 'h' : 'w'),
- idxd ? "x" : "" );
+ UChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : /*8*/ 'd';
+ vex_printf("st%c%s ", c_sz, idxd ? "x" : "" );
ppHRegPPC32(i->Pin.Store.src);
vex_printf(",");
ppPPC32AMode(i->Pin.Store.dst);
UChar sz = i->Pin.AvLdSt.sz;
HChar* str_size;
if (i->Pin.AvLdSt.addr->tag == Pam_IR) {
- ppLoadImm(hregPPC32_GPR30(), i->Pin.AvLdSt.addr->Pam.RR.index);
+ ppLoadImm(hregPPC_GPR30(mode64),
+ i->Pin.AvLdSt.addr->Pam.RR.index, mode64);
vex_printf(" ; ");
}
str_size = sz==1 ? "eb" : sz==2 ? "eh" : sz==4 ? "ew" : "";
{
initHRegUsage(u);
switch (i->tag) {
- case Pin_LI32:
- addHRegUse(u, HRmWrite, i->Pin.LI32.dst);
+ case Pin_LI:
+ addHRegUse(u, HRmWrite, i->Pin.LI.dst);
break;
- case Pin_Alu32:
- addHRegUse(u, HRmRead, i->Pin.Alu32.srcL);
- addRegUsage_PPC32RH(u, i->Pin.Alu32.srcR);
- addHRegUse(u, HRmWrite, i->Pin.Alu32.dst);
+ case Pin_Alu:
+ addHRegUse(u, HRmRead, i->Pin.Alu.srcL);
+ addRegUsage_PPC32RH(u, i->Pin.Alu.srcR);
+ addHRegUse(u, HRmWrite, i->Pin.Alu.dst);
return;
case Pin_AddSubC32:
addHRegUse(u, HRmWrite, i->Pin.AddSubC32.dst);
addHRegUse(u, HRmRead, i->Pin.AddSubC32.srcL);
addHRegUse(u, HRmRead, i->Pin.AddSubC32.srcR);
return;
- case Pin_Cmp32:
- addHRegUse(u, HRmRead, i->Pin.Cmp32.srcL);
- addRegUsage_PPC32RH(u, i->Pin.Cmp32.srcR);
+ case Pin_Cmp:
+ addHRegUse(u, HRmRead, i->Pin.Cmp.srcL);
+ addRegUsage_PPC32RH(u, i->Pin.Cmp.srcR);
return;
- case Pin_Unary32:
+ case Pin_Unary:
addHRegUse(u, HRmWrite, i->Pin.Unary32.dst);
addHRegUse(u, HRmRead, i->Pin.Unary32.src);
return;
addHRegUse(u, HRmRead, i->Pin.Div.srcL);
addHRegUse(u, HRmRead, i->Pin.Div.srcR);
return;
- case Pin_Call:
+ case Pin_Call: {
+ UInt argir;
/* This is a bit subtle. */
/* First off, claim it trashes all the caller-saved regs
which fall within the register allocator's jurisdiction.
- These I believe to be: r3 to r12.
+ These I believe to be:
+ mode32: r3 to r12
+ mode64: r3 to r10
*/
- addHRegUse(u, HRmWrite, hregPPC32_GPR3());
- addHRegUse(u, HRmWrite, hregPPC32_GPR4());
- addHRegUse(u, HRmWrite, hregPPC32_GPR5());
- addHRegUse(u, HRmWrite, hregPPC32_GPR6());
- addHRegUse(u, HRmWrite, hregPPC32_GPR7());
- addHRegUse(u, HRmWrite, hregPPC32_GPR8());
- addHRegUse(u, HRmWrite, hregPPC32_GPR9());
- addHRegUse(u, HRmWrite, hregPPC32_GPR10());
- addHRegUse(u, HRmWrite, hregPPC32_GPR11());
- addHRegUse(u, HRmWrite, hregPPC32_GPR12());
-
+ addHRegUse(u, HRmWrite, hregPPC_GPR3(mode64));
+ addHRegUse(u, HRmWrite, hregPPC_GPR4(mode64));
+ addHRegUse(u, HRmWrite, hregPPC_GPR5(mode64));
+ addHRegUse(u, HRmWrite, hregPPC_GPR6(mode64));
+ addHRegUse(u, HRmWrite, hregPPC_GPR7(mode64));
+ addHRegUse(u, HRmWrite, hregPPC_GPR8(mode64));
+ addHRegUse(u, HRmWrite, hregPPC_GPR9(mode64));
+ addHRegUse(u, HRmWrite, hregPPC_GPR10(mode64));
+ if (!mode64) {
+ addHRegUse(u, HRmWrite, hregPPC_GPR11(mode64));
+ addHRegUse(u, HRmWrite, hregPPC_GPR12(mode64));
+ }
+
/* Now we have to state any parameter-carrying registers
which might be read. This depends on the argiregs field. */
- if (i->Pin.Call.argiregs & (1<<10)) addHRegUse(u, HRmRead, hregPPC32_GPR10());
- if (i->Pin.Call.argiregs & (1<<9)) addHRegUse(u, HRmRead, hregPPC32_GPR9());
- if (i->Pin.Call.argiregs & (1<<8)) addHRegUse(u, HRmRead, hregPPC32_GPR8());
- if (i->Pin.Call.argiregs & (1<<7)) addHRegUse(u, HRmRead, hregPPC32_GPR7());
- if (i->Pin.Call.argiregs & (1<<6)) addHRegUse(u, HRmRead, hregPPC32_GPR6());
- if (i->Pin.Call.argiregs & (1<<5)) addHRegUse(u, HRmRead, hregPPC32_GPR5());
- if (i->Pin.Call.argiregs & (1<<4)) addHRegUse(u, HRmRead, hregPPC32_GPR4());
- if (i->Pin.Call.argiregs & (1<<3)) addHRegUse(u, HRmRead, hregPPC32_GPR3());
-
- vassert(0 == (i->Pin.Call.argiregs
- & ~((1<<3)|(1<<4)|(1<<5)|(1<<6)
- |(1<<7)|(1<<8)|(1<<9)|(1<<10))));
+ argir = i->Pin.Call.argiregs;
+ if (argir &(1<<10)) addHRegUse(u, HRmRead, hregPPC_GPR10(mode64));
+ if (argir & (1<<9)) addHRegUse(u, HRmRead, hregPPC_GPR9(mode64));
+ if (argir & (1<<8)) addHRegUse(u, HRmRead, hregPPC_GPR8(mode64));
+ if (argir & (1<<7)) addHRegUse(u, HRmRead, hregPPC_GPR7(mode64));
+ if (argir & (1<<6)) addHRegUse(u, HRmRead, hregPPC_GPR6(mode64));
+ if (argir & (1<<5)) addHRegUse(u, HRmRead, hregPPC_GPR5(mode64));
+ if (argir & (1<<4)) addHRegUse(u, HRmRead, hregPPC_GPR4(mode64));
+ if (argir & (1<<3)) addHRegUse(u, HRmRead, hregPPC_GPR3(mode64));
+
+ vassert(0 == (argir & ~((1<<3)|(1<<4)|(1<<5)|(1<<6)
+ |(1<<7)|(1<<8)|(1<<9)|(1<<10))));
/* Finally, there is the issue that the insn trashes a
register because the literal target address has to be
- loaded into a register. %r12 seems a suitable victim.
+ loaded into a register. %r10 seems a suitable victim.
(Can't use %r0, as use ops that interpret it as value zero). */
- addHRegUse(u, HRmWrite, hregPPC32_GPR12());
- /* Upshot of this is that the assembler really must use %r12,
+ addHRegUse(u, HRmWrite, hregPPC_GPR10(mode64));
+ /* Upshot of this is that the assembler really must use %r10,
and no other, as a destination temporary. */
return;
+ }
case Pin_Goto:
addRegUsage_PPC32RI(u, i->Pin.Goto.dst);
/* GPR3 holds destination address from Pin_Goto */
- addHRegUse(u, HRmWrite, hregPPC32_GPR3());
+ addHRegUse(u, HRmWrite, hregPPC_GPR3(mode64));
if (i->Pin.Goto.jk != Ijk_Boring)
- addHRegUse(u, HRmWrite, GuestStatePtr);
+ addHRegUse(u, HRmWrite, GuestStatePtr(mode64));
return;
- case Pin_CMov32:
- addRegUsage_PPC32RI(u, i->Pin.CMov32.src);
- addHRegUse(u, HRmWrite, i->Pin.CMov32.dst);
+ case Pin_CMov:
+ addRegUsage_PPC32RI(u, i->Pin.CMov.src);
+ addHRegUse(u, HRmWrite, i->Pin.CMov.dst);
return;
case Pin_Load:
addRegUsage_PPC32AMode(u, i->Pin.Load.src);
addHRegUse(u, (i->Pin.AvLdSt.isLoad ? HRmWrite : HRmRead),
i->Pin.AvLdSt.reg);
if (i->Pin.AvLdSt.addr->tag == Pam_IR)
- addHRegUse(u, HRmWrite, hregPPC32_GPR30());
+ addHRegUse(u, HRmWrite, hregPPC_GPR30(mode64));
addRegUsage_PPC32AMode(u, i->Pin.AvLdSt.addr);
return;
case Pin_AvUnary:
addHRegUse(u, HRmRead, i->Pin.AvBin32Fx4.srcL);
addHRegUse(u, HRmRead, i->Pin.AvBin32Fx4.srcR);
if (i->Pin.AvBin32Fx4.op == Pavfp_MULF)
- addHRegUse(u, HRmWrite, hregPPC32_GPR29());
+ addHRegUse(u, HRmWrite, hregPPC_GPR29(mode64));
return;
case Pin_AvUn32Fx4:
addHRegUse(u, HRmWrite, i->Pin.AvUn32Fx4.dst);
void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 )
{
switch (i->tag) {
- case Pin_LI32:
- mapReg(m, &i->Pin.LI32.dst);
+ case Pin_LI:
+ mapReg(m, &i->Pin.LI.dst);
return;
- case Pin_Alu32:
- mapReg(m, &i->Pin.Alu32.dst);
- mapReg(m, &i->Pin.Alu32.srcL);
- mapRegs_PPC32RH(m, i->Pin.Alu32.srcR);
+ case Pin_Alu:
+ mapReg(m, &i->Pin.Alu.dst);
+ mapReg(m, &i->Pin.Alu.srcL);
+ mapRegs_PPC32RH(m, i->Pin.Alu.srcR);
return;
case Pin_AddSubC32:
mapReg(m, &i->Pin.AddSubC32.dst);
mapReg(m, &i->Pin.AddSubC32.srcL);
mapReg(m, &i->Pin.AddSubC32.srcR);
return;
- case Pin_Cmp32:
- mapReg(m, &i->Pin.Cmp32.srcL);
- mapRegs_PPC32RH(m, i->Pin.Cmp32.srcR);
+ case Pin_Cmp:
+ mapReg(m, &i->Pin.Cmp.srcL);
+ mapRegs_PPC32RH(m, i->Pin.Cmp.srcR);
return;
- case Pin_Unary32:
+ case Pin_Unary:
mapReg(m, &i->Pin.Unary32.dst);
mapReg(m, &i->Pin.Unary32.src);
return;
case Pin_Goto:
mapRegs_PPC32RI(m, i->Pin.Goto.dst);
return;
- case Pin_CMov32:
- mapRegs_PPC32RI(m, i->Pin.CMov32.src);
- mapReg(m, &i->Pin.CMov32.dst);
+ case Pin_CMov:
+ mapRegs_PPC32RI(m, i->Pin.CMov.src);
+ mapReg(m, &i->Pin.CMov.dst);
return;
case Pin_Load:
mapRegs_PPC32AMode(m, i->Pin.Load.src);
Bool isMove_PPC32Instr ( PPC32Instr* i, HReg* src, HReg* dst )
{
/* Moves between integer regs */
- if (i->tag == Pin_Alu32) {
+ if (i->tag == Pin_Alu) {
// or Rd,Rs,Rs == mr Rd,Rs
- if (i->Pin.Alu32.op != Palu_OR)
+ if (i->Pin.Alu.op != Palu_OR)
return False;
- if (i->Pin.Alu32.srcR->tag != Prh_Reg)
+ if (i->Pin.Alu.srcR->tag != Prh_Reg)
return False;
- if (i->Pin.Alu32.srcR->Prh.Reg.reg != i->Pin.Alu32.srcL)
+ if (i->Pin.Alu.srcR->Prh.Reg.reg != i->Pin.Alu.srcL)
return False;
- *src = i->Pin.Alu32.srcL;
- *dst = i->Pin.Alu32.dst;
+ *src = i->Pin.Alu.srcL;
+ *dst = i->Pin.Alu.dst;
return True;
}
/* Moves between FP regs */
{
PPC32AMode* am;
vassert(!hregIsVirtual(rreg));
- am = PPC32AMode_IR(offsetB, GuestStatePtr);
+ am = PPC32AMode_IR(offsetB, GuestStatePtr(mode64));
switch (hregClass(rreg)) {
+ case HRcInt64:
+ vassert(mode64);
+ return PPC32Instr_Store( 8, am, rreg, mode64 );
case HRcInt32:
- return PPC32Instr_Store( 4, am, rreg);
+ vassert(!mode64);
+ return PPC32Instr_Store( 4, am, rreg, mode64 );
case HRcFlt64:
return PPC32Instr_FpLdSt ( False/*store*/, 8, rreg, am );
case HRcVec128:
{
PPC32AMode* am;
vassert(!hregIsVirtual(rreg));
- am = PPC32AMode_IR(offsetB, GuestStatePtr);
+ am = PPC32AMode_IR(offsetB, GuestStatePtr(mode64));
switch (hregClass(rreg)) {
+ case HRcInt64:
+ vassert(mode64);
+ return PPC32Instr_Load( 8, False, rreg, am, mode64 );
case HRcInt32:
- return PPC32Instr_Load( 4, False, rreg, am );
+ vassert(!mode64);
+ return PPC32Instr_Load( 4, False, rreg, am, mode64 );
case HRcFlt64:
return PPC32Instr_FpLdSt ( True/*load*/, 8, rreg, am );
case HRcVec128:
/* --------- The ppc32 assembler (bleh.) --------- */
-static UInt iregNo ( HReg r )
+static UInt iregNo ( HReg r, Bool mode64 )
{
UInt n;
- vassert(hregClass(r) == HRcInt32);
+ vassert(hregClass(r) == (mode64 ? HRcInt64 : HRcInt32));
vassert(!hregIsVirtual(r));
n = hregNumber(r);
vassert(n <= 32);
{
UInt theInstr;
vassert(opc1 < 0x40);
- vassert(r1 < 0x20);
- vassert(r2 < 0x20);
+ vassert(r1 < 0x20);
+ vassert(r2 < 0x20);
imm = imm & 0xFFFF;
theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (imm));
return emit32(p, theInstr);
}
+static UChar* mkFormMD ( UChar* p, UInt opc1, UInt r1, UInt r2,
+ UInt imm1, UInt imm2, UInt opc2 )
+{
+ UInt theInstr;
+ vassert(opc1 < 0x40);
+ vassert(r1 < 0x20);
+ vassert(r2 < 0x20);
+ vassert(imm1 < 0x40);
+ vassert(imm2 < 0x40);
+ vassert(opc2 < 0x08);
+ imm2 = ((imm2 & 0x1F) << 1) | (imm2 >> 5);
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+ ((imm1 & 0x1F)<<11) | (imm2<<5) | (opc2<<2) | ((imm1 >> 5)<<1));
+ return emit32(p, theInstr);
+}
+
static UChar* mkFormX ( UChar* p, UInt opc1, UInt r1, UInt r2,
UInt r3, UInt opc2, UInt b0 )
{
UInt theInstr;
vassert(opc1 < 0x40);
- vassert(r1 < 0x20);
- vassert(r2 < 0x20);
- vassert(r3 < 0x20);
+ vassert(r1 < 0x20);
+ vassert(r2 < 0x20);
+ vassert(r3 < 0x20);
vassert(opc2 < 0x400);
- vassert(b0 < 0x2);
+ vassert(b0 < 0x2);
theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (opc2<<1) | (b0));
return emit32(p, theInstr);
}
{
UInt theInstr;
vassert(opc1 < 0x40);
- vassert(r1 < 0x20);
- vassert(r2 < 0x20);
- vassert(r3 < 0x20);
- vassert(b10 < 0x2);
+ vassert(r1 < 0x20);
+ vassert(r2 < 0x20);
+ vassert(r3 < 0x20);
+ vassert(b10 < 0x2);
vassert(opc2 < 0x200);
- vassert(b0 < 0x2);
+ vassert(b0 < 0x2);
theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
(r3<<11) | (b10 << 10) | (opc2<<1) | (b0));
return emit32(p, theInstr);
{
UInt theInstr;
vassert(opc1 < 0x40);
- vassert(f1 < 0x20);
- vassert(f2 < 0x20);
- vassert(f3 < 0x20);
+ vassert(f1 < 0x20);
+ vassert(f2 < 0x20);
+ vassert(f3 < 0x20);
vassert(opc2 < 0x400);
- vassert(b0 < 0x2);
+ vassert(b0 < 0x2);
theInstr = ((opc1<<26) | (f1<<21) | (f2<<16) | (f3<<11) | (opc2<<1) | (b0));
return emit32(p, theInstr);
}
static UChar* mkFormXFX ( UChar* p, UInt r1, UInt f2, UInt opc2 )
{
UInt theInstr;
- vassert(r1 < 0x20);
- vassert(f2 < 0x20);
+ vassert(r1 < 0x20);
+ vassert(f2 < 0x20);
vassert(opc2 < 0x400);
switch (opc2) {
case 144: // mtcrf
return emit32(p, theInstr);
}
+static UChar* mkFormXS ( UChar* p, UInt opc1, UInt r1, UInt r2,
+ UInt imm, UInt opc2, UInt b0 )
+{
+ UInt theInstr;
+ vassert(opc1 < 0x40);
+ vassert(r1 < 0x20);
+ vassert(r2 < 0x20);
+ vassert(imm < 0x40);
+ vassert(opc2 < 0x400);
+ vassert(b0 < 0x2);
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+ ((imm & 0x1F)<<11) | (opc2<<2) | ((imm>>5)<<1) | (b0));
+ return emit32(p, theInstr);
+}
+
+
#if 0
// 'b'
static UChar* mkFormI ( UChar* p, UInt LI, UInt AA, UInt LK )
{
UInt theInstr;
vassert(opc1 < 0x40);
- vassert(r1 < 0x20);
- vassert(r2 < 0x20);
- vassert(f3 < 0x20);
- vassert(MB < 0x20);
- vassert(ME < 0x20);
- vassert(Rc < 0x2);
+ vassert(r1 < 0x20);
+ vassert(r2 < 0x20);
+ vassert(f3 < 0x20);
+ vassert(MB < 0x20);
+ vassert(ME < 0x20);
+ vassert(Rc < 0x2);
theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
(f3<<11) | (MB<<6) | (ME<<1) | (Rc));
return emit32(p, theInstr);
return emit32(p, theInstr);
}
-static UChar* doAMode_IR ( UChar* p, UInt opc1, UInt rSD, PPC32AMode* am )
+static UChar* doAMode_IR ( UChar* p, UInt opc1, UInt rSD,
+ PPC32AMode* am, Bool mode64 )
{
UInt rA, idx;
vassert(am->tag == Pam_IR);
vassert(am->Pam.IR.index < 0x10000);
- rA = iregNo(am->Pam.IR.base);
+ rA = iregNo(am->Pam.IR.base, mode64);
idx = am->Pam.IR.index;
+ if (opc1 == 58 || opc1 == 62) { // ld/std: mode64 only
+ vassert(mode64);
+ // DS form: lowest 2 bits must already be 00 (word-aligned offset);
+ vassert(0 == (idx & 3));
+ }
p = mkFormD(p, opc1, rSD, rA, idx);
return p;
}
-
static UChar* doAMode_RR ( UChar* p, UInt opc1, UInt opc2,
- UInt rSD, PPC32AMode* am )
+ UInt rSD, PPC32AMode* am, Bool mode64 )
{
UInt rA, rB;
vassert(am->tag == Pam_RR);
- rA = iregNo(am->Pam.RR.base);
- rB = iregNo(am->Pam.RR.index);
+ rA = iregNo(am->Pam.RR.base, mode64);
+ rB = iregNo(am->Pam.RR.index, mode64);
p = mkFormX(p, opc1, rSD, rA, rB, opc2, 0);
return p;
}
+
/* Load imm to r_dst */
-static UChar* mkLoadImm ( UChar* p, UInt r_dst, UInt imm )
+static UChar* mkLoadImm ( UChar* p, UInt r_dst, ULong imm, Bool mode64 )
{
vassert(r_dst < 0x20);
- if (imm >= 0xFFFF8000 || imm <= 0x7FFF) { // sign-extendable from 16 bits?
+// if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
+ if (imm == (ULong)(Long)(Int)(Short)(UShort)imm) {
+ // sign-extendable from 16 bits
+
// addi r_dst,0,imm => li r_dst,imm
p = mkFormD(p, 14, r_dst, 0, imm & 0xFFFF);
} else {
- // addis r_dst,r0,(imm>>16) => lis r_dst, (imm>>16)
- p = mkFormD(p, 15, r_dst, 0, (imm>>16) & 0xFFFF);
- // ori r_dst, r_dst, (imm & 0xFFFF)
- p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF);
+// if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
+ if (imm == (ULong)(Long)(Int)(UInt)imm) {
+ // sign-extendable from 32 bits
+
+ // addis r_dst,r0,(imm>>16) => lis r_dst, (imm>>16)
+ p = mkFormD(p, 15, r_dst, 0, (imm>>16) & 0xFFFF);
+ // ori r_dst, r_dst, (imm & 0xFFFF)
+ p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF);
+ } else {
+ // full 64bit immediate load: 5 (five!) insns.
+ vassert(mode64);
+
+ // load high word
+ // lis r_dst, (imm>>48) & 0xFFFF
+ p = mkFormD(p, 15, r_dst, 0, (imm>>48) & 0xFFFF);
+ // ori r_dst, r_dst, (imm>>32) & 0xFFFF
+ p = mkFormD(p, 24, r_dst, r_dst, (imm>>32) & 0xFFFF);
+
+ // shift r_dst low word to high word => rldicr
+ p = mkFormMD(p, 30, r_dst, r_dst, 32, 31, 1);
+
+ // load low word
+ // oris r_dst, r_dst, (imm>>16) & 0xFFFF
+ p = mkFormD(p, 25, r_dst, r_dst, (imm>>16) & 0xFFFF);
+ // ori r_dst, r_dst, (imm) & 0xFFFF
+ p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF);
+ }
}
return p;
}
switch (i->tag) {
- case Pin_LI32:
- p = mkLoadImm(p, iregNo(i->Pin.LI32.dst), i->Pin.LI32.imm32);
+ case Pin_LI:
+ p = mkLoadImm(p, iregNo(i->Pin.LI.dst, mode64),
+ i->Pin.LI.imm64, mode64);
goto done;
- case Pin_Alu32: {
- PPC32RH* srcR = i->Pin.Alu32.srcR;
+ case Pin_Alu: {
+ PPC32RH* srcR = i->Pin.Alu.srcR;
Bool immR = toBool(srcR->tag == Prh_Imm);
- UInt r_dst = iregNo(i->Pin.Alu32.dst);
- UInt r_srcL = iregNo(i->Pin.Alu32.srcL);
- UInt r_srcR = immR ? (-1)/*bogus*/ : iregNo(srcR->Prh.Reg.reg);
+ UInt r_dst = iregNo(i->Pin.Alu.dst, mode64);
+ UInt r_srcL = iregNo(i->Pin.Alu.srcL, mode64);
+ UInt r_srcR = immR ? (-1)/*bogus*/ :
+ iregNo(srcR->Prh.Reg.reg, mode64);
+ Bool is32BitOp = toBool(hregClass(i->Pin.Alu.srcL) == HRcInt32);
- switch (i->Pin.Alu32.op) {
+ switch (i->Pin.Alu.op) {
case Palu_ADD:
if (immR) {
break;
case Palu_SHL:
- if (immR) {
- /* rd = rs << n, 1 <= n <= 31
- is
- rlwinm rd,rs,n,0,31-n (PPC32 p501)
- */
- UInt n = srcR->Prh.Imm.imm16;
- vassert(!srcR->Prh.Imm.syned);
- vassert(n > 0 && n < 32);
- p = mkFormM(p, 21, r_srcL, r_dst, n, 0, 31-n, 0);
+ if (is32BitOp) {
+ vassert(!mode64);
+ if (immR) {
+ /* rd = rs << n, 1 <= n <= 31
+ is
+ rlwinm rd,rs,n,0,31-n (PPC32 p501)
+ */
+ UInt n = srcR->Prh.Imm.imm16;
+ vassert(!srcR->Prh.Imm.syned);
+ vassert(n > 0 && n < 32);
+ p = mkFormM(p, 21, r_srcL, r_dst, n, 0, 31-n, 0);
+ } else {
+ /* slw (PPC32 p505) */
+ p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 24, 0);
+ }
} else {
- /* slw (PPC32 p505) */
- p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 24, 0);
+ vassert(mode64);
+ if (immR) {
+ /* rd = rs << n, 1 <= n <= 63
+ is
+ rldicr rd,rs,n,63-n (PPC64 p559)
+ */
+ UInt n = srcR->Prh.Imm.imm16;
+ vassert(!srcR->Prh.Imm.syned);
+ vassert(n > 0 && n < 64);
+ p = mkFormMD(p, 30, r_srcL, r_dst, n, 63-n, 1);
+ } else {
+ /* sld (PPC64 p568) */
+ p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 27, 0);
+ }
}
break;
case Palu_SHR:
- if (immR) {
- /* rd = rs >>u n, 1 <= n <= 31
- is
- rlwinm rd,rs,32-n,n,31 (PPC32 p501)
- */
- UInt n = srcR->Prh.Imm.imm16;
- vassert(!srcR->Prh.Imm.syned);
- vassert(n > 0 && n < 32);
- p = mkFormM(p, 21, r_srcL, r_dst, 32-n, n, 31, 0);
+ if (is32BitOp) {
+ vassert(!mode64);
+ if (immR) {
+ /* rd = rs >>u n, 1 <= n <= 31
+ is
+ rlwinm rd,rs,32-n,n,31 (PPC32 p501)
+ */
+ UInt n = srcR->Prh.Imm.imm16;
+ vassert(!srcR->Prh.Imm.syned);
+ vassert(n > 0 && n < 32);
+ p = mkFormM(p, 21, r_srcL, r_dst, 32-n, n, 31, 0);
+ } else {
+ /* srw (PPC32 p508) */
+ p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 536, 0);
+ }
} else {
- /* srw (PPC32 p508) */
- p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 536, 0);
+ vassert(mode64);
+ if (immR) {
+ /* rd = rs >>u n, 1 <= n <= 63
+ is
+ rldicl rd,rs,64-n,n (PPC64 p558)
+ */
+ UInt n = srcR->Prh.Imm.imm16;
+ vassert(!srcR->Prh.Imm.syned);
+ vassert(n > 0 && n < 64);
+ p = mkFormMD(p, 30, r_srcL, r_dst, 64-n, n, 0);
+ } else {
+ /* srd (PPC64 p574) */
+ p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 539, 0);
+ }
}
break;
case Palu_SAR:
- if (immR) {
- /* srawi (PPC32 p507) */
- UInt n = srcR->Prh.Imm.imm16;
- vassert(!srcR->Prh.Imm.syned);
- vassert(n > 0 && n < 32);
- p = mkFormX(p, 31, r_srcL, r_dst, n, 824, 0);
+ if (is32BitOp) {
+ vassert(!mode64);
+ if (immR) {
+ /* srawi (PPC32 p507) */
+ UInt n = srcR->Prh.Imm.imm16;
+ vassert(!srcR->Prh.Imm.syned);
+ vassert(n > 0 && n < 32);
+ p = mkFormX(p, 31, r_srcL, r_dst, n, 824, 0);
+ } else {
+ /* sraw (PPC32 p506) */
+ p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 792, 0);
+ }
} else {
- /* sraw (PPC32 p506) */
- p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 792, 0);
+ vassert(mode64);
+ if (immR) {
+ /* sradi (PPC64 p571) */
+ UInt n = srcR->Prh.Imm.imm16;
+ vassert(!srcR->Prh.Imm.syned);
+ vassert(n > 0 && n < 64);
+ p = mkFormXS(p, 31, r_srcL, r_dst, n, 413, 0);
+ } else {
+ /* srad (PPC32 p570) */
+ p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 794, 0);
+ }
}
break;
case Pin_AddSubC32: {
Bool isAdd = i->Pin.AddSubC32.isAdd;
Bool setC = i->Pin.AddSubC32.setC;
- UInt r_srcL = iregNo(i->Pin.AddSubC32.srcL);
- UInt r_srcR = iregNo(i->Pin.AddSubC32.srcR);
- UInt r_dst = iregNo(i->Pin.AddSubC32.dst);
+ UInt r_srcL = iregNo(i->Pin.AddSubC32.srcL, mode64);
+ UInt r_srcR = iregNo(i->Pin.AddSubC32.srcR, mode64);
+ UInt r_dst = iregNo(i->Pin.AddSubC32.dst, mode64);
if (isAdd) {
if (setC) /* addc (PPC32 p348) */
goto done;
}
- case Pin_Cmp32: {
- Bool syned = i->Pin.Cmp32.syned;
- UInt fld1 = i->Pin.Cmp32.crfD << 2;
- UInt r_srcL = iregNo(i->Pin.Cmp32.srcL);
+ case Pin_Cmp: {
+ Bool syned = i->Pin.Cmp.syned;
+ UInt fld1 = i->Pin.Cmp.crfD << 2;
+ UInt r_srcL = iregNo(i->Pin.Cmp.srcL, mode64);
UInt r_srcR, imm_srcR;
- PPC32RH* srcR = i->Pin.Cmp32.srcR;
+ PPC32RH* srcR = i->Pin.Cmp.srcR;
switch (srcR->tag) {
case Prh_Imm:
case Prh_Reg:
/* cmpi (signed) (PPC32 p367) or
cmpli (unsigned) (PPC32 p379) */
- r_srcR = iregNo(srcR->Prh.Reg.reg);
+ r_srcR = iregNo(srcR->Prh.Reg.reg, mode64);
p = mkFormX(p, 31, fld1, r_srcL, r_srcR, syned ? 0 : 32, 0);
break;
default:
goto done;
}
- case Pin_Unary32: {
- UInt r_dst = iregNo(i->Pin.Unary32.dst);
- UInt r_src = iregNo(i->Pin.Unary32.src);
+ case Pin_Unary: {
+ UInt r_dst = iregNo(i->Pin.Unary32.dst, mode64);
+ UInt r_src = iregNo(i->Pin.Unary32.src, mode64);
switch (i->Pin.Unary32.op) {
case Pun_NOT: // nor r_dst,r_src,r_src
case Pin_MulL: {
Bool syned = i->Pin.MulL.syned;
- UInt r_dst = iregNo(i->Pin.MulL.dst);
- UInt r_srcL = iregNo(i->Pin.MulL.srcL);
- UInt r_srcR = iregNo(i->Pin.MulL.srcR);
+ UInt r_dst = iregNo(i->Pin.MulL.dst, mode64);
+ UInt r_srcL = iregNo(i->Pin.MulL.srcL, mode64);
+ UInt r_srcR = iregNo(i->Pin.MulL.srcR, mode64);
+ Bool is32BitOp = toBool(hregClass(i->Pin.MulL.dst) == HRcInt32);
- if (i->Pin.MulL.hi32) {
+ if (i->Pin.MulL.hi) {
// mul hi words, must consider sign
if (syned) {
- // mulhw r_dst,r_srcL,r_srcR
- p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 75, 0);
+ if (is32BitOp) // mulhw r_dst,r_srcL,r_srcR
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 75, 0);
+ else // mulhd r_dst,r_srcL,r_srcR
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 73, 0);
} else {
- // mulhwu r_dst,r_srcL,r_srcR
- p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 11, 0);
+ if (is32BitOp) // mulhwu r_dst,r_srcL,r_srcR
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 11, 0);
+ else // mulhdu r_dst,r_srcL,r_srcR
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 9, 0);
}
} else {
// mul low word, sign is irrelevant
vassert(!i->Pin.MulL.syned);
- // mullw r_dst,r_srcL,r_srcR
- p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 235, 0);
+ if (is32BitOp) // mullw r_dst,r_srcL,r_srcR
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 235, 0);
+ else // mulld r_dst,r_srcL,r_srcR
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 233, 0);
}
goto done;
}
case Pin_Div: {
- Bool syned = i->Pin.MulL.syned;
- UInt r_dst = iregNo(i->Pin.Div.dst);
- UInt r_srcL = iregNo(i->Pin.Div.srcL);
- UInt r_srcR = iregNo(i->Pin.Div.srcR);
+ Bool syned = i->Pin.Div.syned;
+ UInt r_dst = iregNo(i->Pin.Div.dst, mode64);
+ UInt r_srcL = iregNo(i->Pin.Div.srcL, mode64);
+ UInt r_srcR = iregNo(i->Pin.Div.srcR, mode64);
+ Bool is32BitOp = toBool(hregClass(i->Pin.Div.dst) == HRcInt32);
if (syned == True) {
- // divw r_dst,r_srcL,r_srcR
- p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 491, 0);
+ if (is32BitOp) // divw r_dst,r_srcL,r_srcR
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 491, 0);
+ else
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 489, 0);
} else {
- // divwu r_dst,r_srcL,r_srcR
- p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 459, 0);
+ if (is32BitOp) // divwu r_dst,r_srcL,r_srcR
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 459, 0);
+ else
+ p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 457, 0);
}
goto done;
}
case Pin_Call: {
PPC32CondCode cond = i->Pin.Call.cond;
- UInt r_dst = 12;
+ UInt r_dst = 10;
/* As per detailed comment for Pin_Call in
- getRegUsage_PPC32Instr above, %r12 is used as an address temp */
+ getRegUsage_PPC32Instr above, %r10 is used as an address temp */
/* jump over the following insns if condition does not hold */
if (cond.test != Pct_ALWAYS) {
}
/* load target to r_dst */
- p = mkLoadImm(p, r_dst, i->Pin.Call.target); // p += 4|8
+ p = mkLoadImm(p, r_dst, i->Pin.Call.target, mode64); // p += 4|8|20
/* mtspr 9,r_dst => move r_dst to count register */
p = mkFormXFX(p, r_dst, 9, 467); // p += 4
/* Fix up the conditional jump, if there was one. */
if (cond.test != Pct_ALWAYS) {
Int delta = p - ptmp;
- vassert(delta >= 16 && delta <= 20);
+ vassert(delta >= 16 && delta <= 32);
/* bc !ct,cf,delta */
mkFormB(ptmp, invertCondTest(cond.test), cond.flag, (delta>>2), 0, 0);
}
UInt trc = 0;
UChar r_return = 3; /* Put target addr into %r3 */
PPC32CondCode cond = i->Pin.Goto.cond;
- UInt r_dst, imm_dst;
+ UInt r_dst;
+ ULong imm_dst;
/* First off, if this is conditional, create a conditional
jump over the rest of it. */
/* Get the destination address into %r_return */
if (i->Pin.Goto.dst->tag == Pri_Imm) {
imm_dst = i->Pin.Goto.dst->Pri.Imm;
- p = mkLoadImm(p, r_return, imm_dst); // p += 4|8
+ p = mkLoadImm(p, r_return, imm_dst, mode64); // p += 4|8|20
} else {
vassert(i->Pin.Goto.dst->tag == Pri_Reg);
- r_dst = iregNo(i->Pin.Goto.dst->Pri.Reg);
+ r_dst = iregNo(i->Pin.Goto.dst->Pri.Reg, mode64);
p = mkMoveReg(p, r_return, r_dst); // p += 4
}
/* Fix up the conditional jump, if there was one. */
if (cond.test != Pct_ALWAYS) {
Int delta = p - ptmp;
- vassert(delta >= 12 && delta <= 20);
+ vassert(delta >= 12 && delta <= 32);
/* bc !ct,cf,delta */
mkFormB(ptmp, invertCondTest(cond.test), cond.flag, delta>>2, 0, 0);
}
goto done;
}
- case Pin_CMov32: {
- UInt r_dst, imm_src, r_src;
+ case Pin_CMov: {
+ UInt r_dst, r_src;
+ ULong imm_src;
PPC32CondCode cond;
- vassert(i->Pin.CMov32.cond.test != Pct_ALWAYS);
+ vassert(i->Pin.CMov.cond.test != Pct_ALWAYS);
- r_dst = iregNo(i->Pin.CMov32.dst);
- cond = i->Pin.CMov32.cond;
+ r_dst = iregNo(i->Pin.CMov.dst, mode64);
+ cond = i->Pin.CMov.cond;
/* branch (if cond fails) over move instrs */
if (cond.test != Pct_ALWAYS) {
}
// cond true: move src => dst
- switch (i->Pin.CMov32.src->tag) {
+ switch (i->Pin.CMov.src->tag) {
case Pri_Imm:
- imm_src = i->Pin.CMov32.src->Pri.Imm;
- p = mkLoadImm(p, r_dst, imm_src);
+ imm_src = i->Pin.CMov.src->Pri.Imm;
+ p = mkLoadImm(p, r_dst, imm_src, mode64); // p += 4|8|20
break;
case Pri_Reg:
- r_src = iregNo(i->Pin.CMov32.src->Pri.Reg);
- p = mkMoveReg(p, r_dst, r_src);
+ r_src = iregNo(i->Pin.CMov.src->Pri.Reg, mode64);
+ p = mkMoveReg(p, r_dst, r_src); // p += 4
break;
default: goto bad;
}
/* Fix up the conditional jump, if there was one. */
if (cond.test != Pct_ALWAYS) {
Int delta = p - ptmp;
- vassert(delta >= 4 && delta <= 12);
+ vassert(delta >= 8 && delta <= 24);
/* bc !ct,cf,delta */
mkFormB(ptmp, invertCondTest(cond.test), cond.flag, (delta>>2), 0, 0);
}
case Pin_Load: {
PPC32AMode* am_addr = i->Pin.Load.src;
- UInt r_dst = iregNo(i->Pin.Load.dst);
+ UInt r_dst = iregNo(i->Pin.Load.dst, mode64);
Bool syned = i->Pin.Load.syned;
UInt opc1, opc2, sz = i->Pin.Load.sz;
- switch (i->Pin.Load.src->tag) {
+ switch (am_addr->tag) {
case Pam_IR:
- if (sz == 2) { // the only signed load
- opc1 = (syned) ? 42: 40;
- } else {
- vassert(syned == False);
- opc1 = (sz == 1) ? 34 : 32; // 1:4
+ switch(sz) {
+ case 1: opc1 = 34; break;
+ case 2: opc1 = (syned) ? 42: 40; break;
+ case 4: opc1 = 32; break;
+ case 8: opc1 = 58; break;
+ default:
+ goto bad;
}
- p = doAMode_IR(p, opc1, r_dst, am_addr);
+ p = doAMode_IR(p, opc1, r_dst, am_addr, mode64);
goto done;
case Pam_RR:
- if (sz == 2) { // the only signed load
- opc2 = (syned) ? 343: 279;
- } else {
- vassert(syned == False);
- opc2 = (sz == 1) ? 87 : 23; // 1:4
+ switch(sz) {
+ case 1: opc2 = 87; break;
+ case 2: opc2 = (syned) ? 343: 279; break;
+ case 4: opc2 = 23; break;
+ case 8: opc2 = 21; break;
+ default:
+ goto bad;
}
- p = doAMode_RR(p, 31, opc2, r_dst, am_addr);
+ p = doAMode_RR(p, 31, opc2, r_dst, am_addr, mode64);
goto done;
default:
goto bad;
case Pin_Set32: {
/* Make the destination register be 1 or 0, depending on whether
the relevant condition holds. */
- UInt r_dst = iregNo(i->Pin.Set32.dst);
+ UInt r_dst = iregNo(i->Pin.Set32.dst, mode64);
PPC32CondCode cond = i->Pin.Set32.cond;
UInt rot_imm;
UInt r_tmp;
case Pin_MfCR:
// mfcr dst
- p = mkFormX(p, 31, iregNo(i->Pin.MfCR.dst), 0, 0, 19, 0);
+ p = mkFormX(p, 31, iregNo(i->Pin.MfCR.dst, mode64), 0, 0, 19, 0);
goto done;
case Pin_MFence: {
case Pin_Store: {
PPC32AMode* am_addr = i->Pin.Store.dst;
- UInt r_src = iregNo(i->Pin.Store.src);
+ UInt r_src = iregNo(i->Pin.Store.src, mode64);
UInt opc1, opc2, sz = i->Pin.Store.sz;
switch (i->Pin.Store.dst->tag) {
case Pam_IR:
- opc1 = (sz == 1) ? 38 : ((sz == 2) ? 44 : 36); // 1:2:4
- p = doAMode_IR(p, opc1, r_src, am_addr);
+ switch(sz) {
+ case 1: opc1 = 38; break;
+ case 2: opc1 = 44; break;
+ case 4: opc1 = 36; break;
+ case 8: vassert(mode64);
+ opc1 = 62; break;
+ default:
+ goto bad;
+ }
+ p = doAMode_IR(p, opc1, r_src, am_addr, mode64);
goto done;
case Pam_RR:
- opc2 = (sz == 1) ? 215 : ((sz == 2) ? 407 : 151); // 1:2:4
- p = doAMode_RR(p, 31, opc2, r_src, am_addr);
+ switch(sz) {
+ case 1: opc2 = 215; break;
+ case 2: opc2 = 407; break;
+ case 4: opc2 = 151; break;
+ case 8: vassert(mode64);
+ opc2 = 149; break;
+ default:
+ goto bad;
+ }
+ p = doAMode_RR(p, 31, opc2, r_src, am_addr, mode64);
goto done;
default:
goto bad;
UInt f_reg = fregNo(i->Pin.FpLdSt.reg);
Bool idxd = toBool(i->Pin.FpLdSt.addr->tag == Pam_RR);
UChar sz = i->Pin.FpLdSt.sz;
+ UInt opc;
vassert(sz == 4 || sz == 8);
if (i->Pin.FpLdSt.isLoad) { // Load from memory
if (idxd) { // lf[s|d]x, PPC32 p444|440
- p = doAMode_RR(p, 31, ((sz == 4) ? 535 : 599), f_reg, am_addr);
+ opc = (sz == 4) ? 535 : 599;
+ p = doAMode_RR(p, 31, opc, f_reg, am_addr, mode64);
} else { // lf[s|d], PPC32 p441|437
- p = doAMode_IR(p, ((sz == 4) ? 48 : 50), f_reg, am_addr);
+ opc = (sz == 4) ? 48 : 50;
+ p = doAMode_IR(p, opc, f_reg, am_addr, mode64);
}
} else { // Store to memory
if (idxd) { // stf[s|d]x, PPC32 p521|516
- p = doAMode_RR(p, 31, ((sz == 4) ? 663 : 727), f_reg, am_addr);
+ opc = (sz == 4) ? 663 : 727;
+ p = doAMode_RR(p, 31, opc, f_reg, am_addr, mode64);
} else { // stf[s|d], PPC32 p518|513
- p = doAMode_IR(p, ((sz == 4) ? 52 : 54), f_reg, am_addr);
+ opc = (sz == 4) ? 52 : 54;
+ p = doAMode_IR(p, opc, f_reg, am_addr, mode64);
}
}
goto done;
}
case Pin_FpF64toI32: {
- UInt r_dst = iregNo(i->Pin.FpF64toI32.dst);
+ UInt r_dst = iregNo(i->Pin.FpF64toI32.dst, mode64);
UInt fr_src = fregNo(i->Pin.FpF64toI32.src);
UChar fr_tmp = 7; // Temp freg
PPC32AMode* am_addr;
p = mkFormX(p, 63, fr_tmp, 0, fr_src, 14, 0);
// No RI form of stfiwx, so need PPC32AMode_RR:
- am_addr = PPC32AMode_RR( StackFramePtr, hregPPC32_GPR0() );
+ am_addr = PPC32AMode_RR( StackFramePtr(mode64),
+ hregPPC_GPR0(mode64) );
// stfiwx (store fp64[lo32] as int32), PPC32 p517
- p = doAMode_RR(p, 31, 983, fr_tmp, am_addr);
+ p = doAMode_RR(p, 31, 983, fr_tmp, am_addr, mode64);
// lwzx (load int32), PPC32 p463
- p = doAMode_RR(p, 31, 23, r_dst, am_addr);
+ p = doAMode_RR(p, 31, 23, r_dst, am_addr, mode64);
goto done;
}
case Pin_FpCmp: {
UChar crfD = 1;
- UInt r_dst = iregNo(i->Pin.FpCmp.dst);
+ UInt r_dst = iregNo(i->Pin.FpCmp.dst, mode64);
UInt fr_srcL = fregNo(i->Pin.FpCmp.srcL);
UInt fr_srcR = fregNo(i->Pin.FpCmp.srcR);
vassert(crfD < 8);
}
case Pin_RdWrLR: {
- UInt reg = iregNo(i->Pin.RdWrLR.gpr);
+ UInt reg = iregNo(i->Pin.RdWrLR.gpr, mode64);
/* wrLR==True ? mtlr r4 : mflr r4 */
p = mkFormXFX(p, reg, 8, (i->Pin.RdWrLR.wrLR==True) ? 467 : 339);
goto done;
vassert(sz == 1 || sz == 2 || sz == 4 || sz == 16);
v_reg = vregNo(i->Pin.AvLdSt.reg);
- r_base = iregNo(i->Pin.AvLdSt.addr->Pam.RR.base);
+ r_base = iregNo(i->Pin.AvLdSt.addr->Pam.RR.base, mode64);
// Only have AltiVec AMode_RR: kludge AMode_IR
if (!idxd) {
r_idx = 30; // XXX: Using r30 as temp
- p = mkLoadImm(p, r_idx, i->Pin.AvLdSt.addr->Pam.IR.index);
+ p = mkLoadImm(p, r_idx, i->Pin.AvLdSt.addr->Pam.IR.index, mode64);
} else {
- r_idx = iregNo(i->Pin.AvLdSt.addr->Pam.RR.index);
+ r_idx = iregNo(i->Pin.AvLdSt.addr->Pam.RR.index, mode64);
}
if (i->Pin.FpLdSt.isLoad) { // Load from memory (1,2,4,16)
#define __LIBVEX_HOST_PPC32_HDEFS_H
/* Num registers used for function calls */
-#define PPC32_N_REGPARMS 8
+#define PPC_N_REGPARMS 8
/* --------- Registers. --------- */
extern void ppHRegPPC32 ( HReg );
-extern HReg hregPPC32_GPR0 ( void ); // scratch reg / zero reg
-extern HReg hregPPC32_GPR1 ( void ); // Stack Frame Pointer
-extern HReg hregPPC32_GPR2 ( void ); // TOC pointer - not used
-extern HReg hregPPC32_GPR3 ( void );
-extern HReg hregPPC32_GPR4 ( void );
-extern HReg hregPPC32_GPR5 ( void );
-extern HReg hregPPC32_GPR6 ( void );
-extern HReg hregPPC32_GPR7 ( void );
-extern HReg hregPPC32_GPR8 ( void );
-extern HReg hregPPC32_GPR9 ( void );
-extern HReg hregPPC32_GPR10 ( void );
-extern HReg hregPPC32_GPR11 ( void );
-extern HReg hregPPC32_GPR12 ( void );
-extern HReg hregPPC32_GPR13 ( void ); // thread specific pointer - not used
-extern HReg hregPPC32_GPR14 ( void );
-extern HReg hregPPC32_GPR15 ( void );
-extern HReg hregPPC32_GPR16 ( void );
-extern HReg hregPPC32_GPR17 ( void );
-extern HReg hregPPC32_GPR18 ( void );
-extern HReg hregPPC32_GPR19 ( void );
-extern HReg hregPPC32_GPR20 ( void );
-extern HReg hregPPC32_GPR21 ( void );
-extern HReg hregPPC32_GPR22 ( void );
-extern HReg hregPPC32_GPR23 ( void );
-extern HReg hregPPC32_GPR24 ( void );
-extern HReg hregPPC32_GPR25 ( void );
-extern HReg hregPPC32_GPR26 ( void );
-extern HReg hregPPC32_GPR27 ( void );
-extern HReg hregPPC32_GPR28 ( void );
-extern HReg hregPPC32_GPR29 ( void );
-extern HReg hregPPC32_GPR30 ( void );
-extern HReg hregPPC32_GPR31 ( void ); // GuestStatePtr
+extern HReg hregPPC_GPR0 ( Bool mode64 ); // scratch reg / zero reg
+extern HReg hregPPC_GPR1 ( Bool mode64 ); // Stack Frame Pointer
+extern HReg hregPPC_GPR2 ( Bool mode64 ); // not used: TOC pointer
+extern HReg hregPPC_GPR3 ( Bool mode64 );
+extern HReg hregPPC_GPR4 ( Bool mode64 );
+extern HReg hregPPC_GPR5 ( Bool mode64 );
+extern HReg hregPPC_GPR6 ( Bool mode64 );
+extern HReg hregPPC_GPR7 ( Bool mode64 );
+extern HReg hregPPC_GPR8 ( Bool mode64 );
+extern HReg hregPPC_GPR9 ( Bool mode64 );
+extern HReg hregPPC_GPR10 ( Bool mode64 );
+extern HReg hregPPC_GPR11 ( Bool mode64 ); // not used: calls by ptr / env ptr for some langs
+extern HReg hregPPC_GPR12 ( Bool mode64 ); // not used: exception handling and global linkage code
+extern HReg hregPPC_GPR13 ( Bool mode64 ); // not used: thread specific pointer
+extern HReg hregPPC_GPR14 ( Bool mode64 );
+extern HReg hregPPC_GPR15 ( Bool mode64 );
+extern HReg hregPPC_GPR16 ( Bool mode64 );
+extern HReg hregPPC_GPR17 ( Bool mode64 );
+extern HReg hregPPC_GPR18 ( Bool mode64 );
+extern HReg hregPPC_GPR19 ( Bool mode64 );
+extern HReg hregPPC_GPR20 ( Bool mode64 );
+extern HReg hregPPC_GPR21 ( Bool mode64 );
+extern HReg hregPPC_GPR22 ( Bool mode64 );
+extern HReg hregPPC_GPR23 ( Bool mode64 );
+extern HReg hregPPC_GPR24 ( Bool mode64 );
+extern HReg hregPPC_GPR25 ( Bool mode64 );
+extern HReg hregPPC_GPR26 ( Bool mode64 );
+extern HReg hregPPC_GPR27 ( Bool mode64 );
+extern HReg hregPPC_GPR28 ( Bool mode64 );
+extern HReg hregPPC_GPR29 ( Bool mode64 );
+extern HReg hregPPC_GPR30 ( Bool mode64 );
+extern HReg hregPPC_GPR31 ( Bool mode64 ); // GuestStatePtr
extern HReg hregPPC32_FPR0 ( void );
extern HReg hregPPC32_FPR1 ( void );
extern HReg hregPPC32_VR30 ( void );
extern HReg hregPPC32_VR31 ( void );
-#define StackFramePtr hregPPC32_GPR1()
-#define GuestStatePtr hregPPC32_GPR31()
+#define StackFramePtr(_mode64) hregPPC_GPR1(_mode64)
+#define GuestStatePtr(_mode64) hregPPC_GPR31(_mode64)
extern void ppPPC32RH ( PPC32RH* );
-/* --------- Operand, which can be a reg or a u32. --------- */
+/* --------- Operand, which can be a reg or a u32/64. --------- */
typedef
enum {
struct {
PPC32RITag tag;
union {
- UInt Imm;
- HReg Reg;
+ ULong Imm;
+ HReg Reg;
}
Pri;
}
PPC32RI;
-extern PPC32RI* PPC32RI_Imm ( UInt );
+extern PPC32RI* PPC32RI_Imm ( ULong );
extern PPC32RI* PPC32RI_Reg ( HReg );
extern void ppPPC32RI ( PPC32RI* );
extern
HChar* showPPC32AluOp ( PPC32AluOp,
- Bool /* is the 2nd operand an immediate? */ );
+ Bool /* is the 2nd operand an immediate? */,
+ Bool /* is this a 32bit or 64bit op? */ );
/* --------- */
/* --------- */
typedef
enum {
- Pin_LI32, /* load 32-bit immediate (fake insn) */
- Pin_Alu32, /* 32-bit add/sub/and/or/xor/shl/shr/sar */
+ Pin_LI, /* load word (32/64-bit) immediate (fake insn) */
+ Pin_Alu, /* word add/sub/and/or/xor/shl/shr/sar */
Pin_AddSubC32, /* 32-bit add/sub with read/write carry */
- Pin_Cmp32, /* 32-bit compare */
- Pin_Unary32, /* 32-bit not, neg, clz */
+ Pin_Cmp, /* word compare */
+ Pin_Unary, /* not, neg, clz */
Pin_MulL, /* widening multiply */
Pin_Div, /* div */
Pin_Call, /* call to address in register */
Pin_Goto, /* conditional/unconditional jmp to dst */
- Pin_CMov32, /* conditional move */
+ Pin_CMov, /* conditional move */
Pin_Load, /* load a 8|16|32 bit value from mem */
Pin_Store, /* store a 8|16|32 bit value to mem */
Pin_Set32, /* convert condition code to 32-bit value */
two real insns. */
struct {
HReg dst;
- UInt imm32;
- } LI32;
+ ULong imm64;
+ } LI;
/* Integer add/sub/and/or/xor/shl/shr/sar. Limitations:
- For add, the immediate, if it exists, is a signed 16.
- For sub, the immediate, if it exists, is a signed 16
HReg dst;
HReg srcL;
PPC32RH* srcR;
- } Alu32;
+ } Alu;
/* */
struct {
Bool isAdd; /* else sub */
UInt crfD;
HReg srcL;
PPC32RH* srcR;
- } Cmp32;
+ } Cmp;
/* Not and Neg */
struct {
PPC32UnaryOp op;
} Unary32;
struct {
Bool syned; /* meaningless if hi32==False */
- Bool hi32; /* False=>low, True=>high */
+ Bool hi; /* False=>low, True=>high */
HReg dst;
HReg srcL;
HReg srcR;
3 .. 10 inclusive). */
struct {
PPC32CondCode cond;
- Addr32 target;
+ Addr64 target;
UInt argiregs;
} Call;
/* Pseudo-insn. Goto dst, on given condition (which could be
PPC32CondCode cond;
HReg dst;
PPC32RI* src;
- } CMov32;
+ } CMov;
/* Sign/Zero extending loads. Dst size is always 32 bits. */
struct {
UChar sz; /* 1|2|4 */
PPC32Instr;
-extern PPC32Instr* PPC32Instr_LI32 ( HReg, UInt );
-extern PPC32Instr* PPC32Instr_Alu32 ( PPC32AluOp, HReg, HReg, PPC32RH* );
+extern PPC32Instr* PPC32Instr_LI ( HReg, ULong, Bool );
+extern PPC32Instr* PPC32Instr_Alu ( PPC32AluOp, HReg, HReg, PPC32RH* );
extern PPC32Instr* PPC32Instr_AddSubC32 ( Bool, Bool, HReg, HReg, HReg );
-extern PPC32Instr* PPC32Instr_Cmp32 ( Bool, UInt, HReg, PPC32RH* );
-extern PPC32Instr* PPC32Instr_Unary32 ( PPC32UnaryOp op, HReg dst, HReg src );
+extern PPC32Instr* PPC32Instr_Cmp ( Bool, UInt, HReg, PPC32RH* );
+extern PPC32Instr* PPC32Instr_Unary ( PPC32UnaryOp op, HReg dst, HReg src );
extern PPC32Instr* PPC32Instr_MulL ( Bool syned, Bool hi32, HReg, HReg, HReg );
extern PPC32Instr* PPC32Instr_Div ( Bool syned, HReg dst, HReg srcL, HReg srcR );
-extern PPC32Instr* PPC32Instr_Call ( PPC32CondCode, Addr32, UInt );
+extern PPC32Instr* PPC32Instr_Call ( PPC32CondCode, Addr64, UInt );
extern PPC32Instr* PPC32Instr_Goto ( IRJumpKind, PPC32CondCode cond, PPC32RI* dst );
-extern PPC32Instr* PPC32Instr_CMov32 ( PPC32CondCode, HReg dst, PPC32RI* src );
+extern PPC32Instr* PPC32Instr_CMov ( PPC32CondCode, HReg dst, PPC32RI* src );
extern PPC32Instr* PPC32Instr_Load ( UChar sz, Bool syned,
- HReg dst, PPC32AMode* src );
-extern PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst, HReg src );
+ HReg dst, PPC32AMode* src, Bool mode64 );
+extern PPC32Instr* PPC32Instr_Store ( UChar sz, PPC32AMode* dst,
+ HReg src, Bool mode64 );
extern PPC32Instr* PPC32Instr_Set32 ( PPC32CondCode cond, HReg dst );
extern PPC32Instr* PPC32Instr_MfCR ( HReg dst );
extern PPC32Instr* PPC32Instr_MFence ( void );
#include "host-generic/h_generic_regs.h"
#include "host-ppc32/hdefs.h"
+/* Is our guest binary 32 or 64bit? Set at each call to
+ iselBB_PPC32 below. */
+static Bool mode64 = False;
+
+#define HRcIntWRDSZ (mode64 ? HRcInt64 : HRcInt32)
+
+
/*---------------------------------------------------------*/
/*--- Register Usage Conventions ---*/
/*---------------------------------------------------------*/
------------
GPR0 Reserved
GPR1 Stack Pointer
- GPR2 TOC pointer - not used
- GPR3:12 Allocateable
- GPR13 Thread-specific pointer - not used
+ GPR2 not used - TOC pointer
+ GPR3:10 Allocateable
+ GPR11 if mode64: not used - calls by ptr / env ptr for some langs
+ GPR12 if mode64: not used - exceptions / global linkage code
+ GPR13 not used - Thread-specific pointer
GPR14:29 Allocateable
GPR30 AltiVec temp spill register
GPR31 GuestStatePointer
Of Allocateable regs:
- GPR3:12 Caller-saved regs
+ if (mode64)
+ GPR3:10 Caller-saved regs
+ else
+ GPR3:12 Caller-saved regs
GPR14:29 Callee-saved regs
GPR3 [Return | Parameter] - carrying reg
-------------------
FPR0:31 Allocateable
- FPR0:13 Caller-saved regs
- FPR14:31 Callee-saved regs
+ FPR0 Caller-saved - scratch reg
+ if (mode64)
+ FPR1:13 Caller-saved - param & return regs
+ else
+ FPR1:8 Caller-saved - param & return regs
+ FPR9:13 Caller-saved regs
+ FPR14:31 Callee-saved regs
+
+
+ Vector Regs (on processors with the VMX feature)
+ -----------
+ VR0-VR1 Volatile scratch registers
+ VR2-VR13 Volatile vector parameters registers
+ VR14-VR19 Volatile scratch registers
+ VR20-VR31 Non-volatile registers
+ VRSAVE Non-volatile 32-bit register
*/
static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
{
+ vassert(!mode64);
+ vassert(tmp >= 0);
+ vassert(tmp < env->n_vregmap);
+ vassert(env->vregmapHI[tmp] != INVALID_HREG);
+ *vrLO = env->vregmap[tmp];
+ *vrHI = env->vregmapHI[tmp];
+}
+
+static void lookupIRTemp128 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
+{
+ vassert(mode64);
vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
vassert(env->vregmapHI[tmp] != INVALID_HREG);
{
addHInstr(env->code, instr);
if (vex_traceflags & VEX_TRACE_VCODE) {
- ppPPC32Instr(instr, False);
+ ppPPC32Instr(instr, mode64);
vex_printf("\n");
}
}
static HReg newVRegI ( ISelEnv* env )
-{
- HReg reg = mkHReg(env->vreg_ctr, HRcInt32, True/*virtual reg*/);
+{
+ HReg reg = mkHReg(env->vreg_ctr, HRcIntWRDSZ, True/*virtual reg*/);
env->vreg_ctr++;
return reg;
}
static PPC32RH* iselIntExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e );
static PPC32RH* iselIntExpr_RH5u ( ISelEnv* env, IRExpr* e );
+/* Compute an I8 into a reg-or-6-bit-unsigned-immediate, the latter
+ being an immediate in the range 1 .. 63 inclusive. Used for doing
+ shift amounts. */
+static PPC32RH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e );
+static PPC32RH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e );
+
/* Compute an I32 into an AMode. */
static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e );
static PPC32AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e );
static void iselInt64Expr ( HReg* rHi, HReg* rLo,
ISelEnv* env, IRExpr* e );
+/* Compute an I128 into a GPR64 pair. */
+static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo,
+ ISelEnv* env, IRExpr* e );
+static void iselInt128Expr ( HReg* rHi, HReg* rLo,
+ ISelEnv* env, IRExpr* e );
+
static PPC32CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e );
static PPC32CondCode iselCondCode ( ISelEnv* env, IRExpr* e );
static PPC32Instr* mk_iMOVds_RR ( HReg r_dst, HReg r_src )
{
- vassert(hregClass(r_dst) == HRcInt32);
- vassert(hregClass(r_src) == HRcInt32);
- return PPC32Instr_Alu32(Palu_OR, r_dst, r_src, PPC32RH_Reg(r_src));
+ vassert(hregClass(r_dst) == HRcIntWRDSZ);
+ vassert(hregClass(r_src) == HRcIntWRDSZ);
+ return PPC32Instr_Alu(Palu_OR, r_dst, r_src, PPC32RH_Reg(r_src));
}
//.. /* Make a vector reg-reg move. */
/* Advance/retreat %sp by n. */
-static void add_to_sp ( ISelEnv* env, Int n )
+static void add_to_sp ( ISelEnv* env, UInt n )
{
- HReg sp = StackFramePtr;
- vassert(n > 0 && n < 256 && (n%16) == 0);
- addInstr(env, PPC32Instr_Alu32(
+ HReg sp = StackFramePtr(mode64);
+ vassert(n < 256 && (n%16) == 0);
+ addInstr(env, PPC32Instr_Alu(
Palu_ADD, sp, sp, PPC32RH_Imm(True,toUShort(n))));
}
-static void sub_from_sp ( ISelEnv* env, Int n )
+static void sub_from_sp ( ISelEnv* env, UInt n )
{
- HReg sp = StackFramePtr;
- vassert(n > 0 && n < 256 && (n%16) == 0);
- addInstr(env, PPC32Instr_Alu32(
+ HReg sp = StackFramePtr(mode64);
+ vassert(n < 256 && (n%16) == 0);
+ addInstr(env, PPC32Instr_Alu(
Palu_SUB, sp, sp, PPC32RH_Imm(True,toUShort(n))));
}
{
HReg r = newVRegI(env);
HReg align16 = newVRegI(env);
- addInstr(env, mk_iMOVds_RR(r, StackFramePtr));
+ addInstr(env, mk_iMOVds_RR(r, StackFramePtr(mode64)));
// add 16
- addInstr(env, PPC32Instr_Alu32(
+ addInstr(env, PPC32Instr_Alu(
Palu_ADD, r, r, PPC32RH_Imm(True,toUShort(16))));
// mask to quadword
- addInstr(env, PPC32Instr_LI32(align16, (UInt)0xFFFFFFF0));
- addInstr(env, PPC32Instr_Alu32(Palu_AND, r,r, PPC32RH_Reg(align16)));
+ addInstr(env, PPC32Instr_LI(align16, 0xFFFFFFFFFFFFFFF0ULL, mode64));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r,r, PPC32RH_Reg(align16)));
return r;
}
/* Load 2*I32 regs to fp reg */
-static HReg mk_LoadRRtoFPR ( ISelEnv* env, HReg r_srcHi, HReg r_srcLo )
+static HReg mk_LoadRR32toFPR ( ISelEnv* env, HReg r_srcHi, HReg r_srcLo )
{
HReg fr_dst = newVRegF(env);
PPC32AMode *am_addr0, *am_addr1;
+ vassert(!mode64);
+ vassert(hregClass(r_srcHi) == HRcInt32);
+ vassert(hregClass(r_srcLo) == HRcInt32);
+
sub_from_sp( env, 16 ); // Move SP down 16 bytes
- am_addr0 = PPC32AMode_IR(0, StackFramePtr);
- am_addr1 = PPC32AMode_IR(4, StackFramePtr);
+ am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64));
+ am_addr1 = PPC32AMode_IR(4, StackFramePtr(mode64));
// store hi,lo as Ity_I32's
- addInstr(env, PPC32Instr_Store( 4, am_addr0, r_srcHi ));
- addInstr(env, PPC32Instr_Store( 4, am_addr1, r_srcLo ));
+ addInstr(env, PPC32Instr_Store( 4, am_addr0, r_srcHi, mode64 ));
+ addInstr(env, PPC32Instr_Store( 4, am_addr1, r_srcLo, mode64 ));
+
+ // load as float
+ addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0));
+
+ add_to_sp( env, 16 ); // Reset SP
+ return fr_dst;
+}
+
+/* Load I64 reg to fp reg */
+static HReg mk_LoadR64toFPR ( ISelEnv* env, HReg r_src )
+{
+ HReg fr_dst = newVRegF(env);
+ PPC32AMode *am_addr0;
+
+ vassert(mode64);
+ vassert(hregClass(r_src) == HRcInt64);
+
+ sub_from_sp( env, 16 ); // Move SP down 16 bytes
+ am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64));
+
+ // store as Ity_I64
+ addInstr(env, PPC32Instr_Store( 8, am_addr0, r_src, mode64 ));
// load as float
addInstr(env, PPC32Instr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0));
IRExpr* guard, IRCallee* cee, IRExpr** args )
{
PPC32CondCode cc;
- HReg argregs[PPC32_N_REGPARMS];
- HReg tmpregs[PPC32_N_REGPARMS];
+ HReg argregs[PPC_N_REGPARMS];
+ HReg tmpregs[PPC_N_REGPARMS];
Bool go_fast;
Int n_args, i, argreg;
UInt argiregs;
+ ULong target;
/* Marshal args for a call and do the call.
This function only deals with a tiny set of possibilities, which
cover all helpers in practice. The restrictions are that only
- arguments in registers are supported, hence only PPC32_N_REGPARMSx32
- integer bits in total can be passed. In fact the only supported
- arg type is I32.
+ arguments in registers are supported, hence only PPC_N_REGPARMS x
+ (mode32:32 | mode64:64) integer bits in total can be passed.
+ In fact the only supported arg type is (mode32:I32 | mode64:I64).
Generating code which is both efficient and correct when
parameters are to be passed in registers is difficult, for the
practice: IRExpr_Tmp IRExpr_Const IRExpr_Get.
*/
- /* Note that the cee->regparms field is meaningless on PPC32 host
+ /* Note that the cee->regparms field is meaningless on PPC32/64 host
(since there is only one calling convention) and so we always
ignore it. */
for (i = 0; args[i]; i++)
n_args++;
- if (PPC32_N_REGPARMS < n_args + (passBBP ? 1 : 0)) {
- vpanic("doHelperCall(PPC32): cannot currently handle > 8 args");
- // PPC32_N_REGPARMS
+ if (PPC_N_REGPARMS < n_args + (passBBP ? 1 : 0)) {
+ vpanic("doHelperCall(PPC): cannot currently handle > 8 args");
+ // PPC_N_REGPARMS
}
- argregs[0] = hregPPC32_GPR3();
- argregs[1] = hregPPC32_GPR4();
- argregs[2] = hregPPC32_GPR5();
- argregs[3] = hregPPC32_GPR6();
- argregs[4] = hregPPC32_GPR7();
- argregs[5] = hregPPC32_GPR8();
- argregs[6] = hregPPC32_GPR9();
- argregs[7] = hregPPC32_GPR10();
+ argregs[0] = hregPPC_GPR3(mode64);
+ argregs[1] = hregPPC_GPR4(mode64);
+ argregs[2] = hregPPC_GPR5(mode64);
+ argregs[3] = hregPPC_GPR6(mode64);
+ argregs[4] = hregPPC_GPR7(mode64);
+ argregs[5] = hregPPC_GPR8(mode64);
+ argregs[6] = hregPPC_GPR9(mode64);
+ argregs[7] = hregPPC_GPR10(mode64);
argiregs = 0;
tmpregs[0] = tmpregs[1] = tmpregs[2] =
argreg = 0;
if (passBBP) {
argiregs |= (1 << (argreg+3));
- addInstr(env, mk_iMOVds_RR( argregs[argreg], GuestStatePtr ));
+ addInstr(env, mk_iMOVds_RR( argregs[argreg],
+ GuestStatePtr(mode64) ));
argreg++;
}
for (i = 0; i < n_args; i++) {
- vassert(argreg < PPC32_N_REGPARMS);
+ vassert(argreg < PPC_N_REGPARMS);
vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 ||
typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
- if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
+
+ if (!mode64) {
+ if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
+ argiregs |= (1 << (argreg+3));
+ addInstr(env, mk_iMOVds_RR( argregs[argreg],
+ iselIntExpr_R(env, args[i]) ));
+ } else { // Ity_I64
+ HReg rHi, rLo;
+ if (argreg%2 == 1) // ppc32 abi spec for passing a LONG_LONG
+ argreg++; // XXX: odd argreg => even rN
+ vassert(argreg < PPC_N_REGPARMS-1);
+ iselInt64Expr(&rHi,&rLo, env, args[i]);
+ argiregs |= (1 << (argreg+3));
+ addInstr(env, mk_iMOVds_RR( argregs[argreg++], rHi ));
+ argiregs |= (1 << (argreg+3));
+ addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
+ }
+ } else { // mode64
argiregs |= (1 << (argreg+3));
addInstr(env, mk_iMOVds_RR( argregs[argreg],
iselIntExpr_R(env, args[i]) ));
- } else { // Ity_I64
- HReg rHi, rLo;
- if (argreg%2 == 1) // ppc32 abi spec for passing a LONG_LONG
- argreg++; // XXX: odd argreg => even rN
- vassert(argreg < PPC32_N_REGPARMS-1);
- iselInt64Expr(&rHi,&rLo, env, args[i]);
- argiregs |= (1 << (argreg+3));
- addInstr(env, mk_iMOVds_RR( argregs[argreg++], rHi ));
- argiregs |= (1 << (argreg+3));
- addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
}
argreg++;
}
/* This is pretty stupid; better to move directly to r3
after the rest of the args are done. */
tmpregs[argreg] = newVRegI(env);
- addInstr(env, mk_iMOVds_RR( tmpregs[argreg], GuestStatePtr ));
+ addInstr(env, mk_iMOVds_RR( tmpregs[argreg], GuestStatePtr(mode64) ));
argreg++;
}
for (i = 0; i < n_args; i++) {
- vassert(argreg < PPC32_N_REGPARMS);
+ vassert(argreg < PPC_N_REGPARMS);
vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 ||
typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
- if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
+ if (!mode64) {
+ if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
+ tmpregs[argreg] = iselIntExpr_R(env, args[i]);
+ } else { // Ity_I64
+ HReg rHi, rLo;
+ if (argreg%2 == 1) // ppc32 abi spec for passing a LONG_LONG
+ argreg++; // XXX: odd argreg => even rN
+ vassert(argreg < PPC_N_REGPARMS-1);
+ iselInt64Expr(&rHi,&rLo, env, args[i]);
+ tmpregs[argreg++] = rHi;
+ tmpregs[argreg] = rLo;
+ }
+ } else { // mode64
tmpregs[argreg] = iselIntExpr_R(env, args[i]);
- } else { // Ity_I64
- HReg rHi, rLo;
- if (argreg%2 == 1) // ppc32 abi spec for passing a LONG_LONG
- argreg++; // XXX: odd argreg => even rN
- vassert(argreg < PPC32_N_REGPARMS-1);
- iselInt64Expr(&rHi,&rLo, env, args[i]);
- tmpregs[argreg++] = rHi;
- tmpregs[argreg] = rLo;
}
argreg++;
}
}
+ target = mode64 ? Ptr_to_ULong(cee->addr) :
+ toUInt(Ptr_to_ULong(cee->addr));
+
/* Finally, the call itself. */
- addInstr(env, PPC32Instr_Call( cc,
- (Addr32)toUInt(Ptr_to_ULong(cee->addr)),
- argiregs ));
+ addInstr(env, PPC32Instr_Call( cc, (Addr64)target, argiregs ));
}
static
void set_FPU_rounding_default ( ISelEnv* env )
{
- HReg fr_src = newVRegF(env);
- HReg r_srcHi = newVRegI(env);
- HReg r_srcLo = newVRegI(env);
+ HReg fr_src = newVRegF(env);
+ HReg r_src = newVRegI(env);
/* Default rounding mode = 0x0
Only supporting the rounding-mode bits - the rest of FPSCR is 0x0
- so we can set the whole register at once (faster)
+ note: upper 32 bits ignored by FpLdFPSCR
*/
- addInstr(env, PPC32Instr_LI32(r_srcLo, 0x0));
- // r_srcHi = 0: upper 32 bits ignored by FpLdFPSCR
- addInstr(env, PPC32Instr_LI32(r_srcHi, 0x0));
-
- fr_src = mk_LoadRRtoFPR( env, r_srcHi, r_srcLo );
+ addInstr(env, PPC32Instr_LI(r_src, 0x0, mode64));
+ if (mode64) {
+ fr_src = mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64
+ } else {
+ fr_src = mk_LoadRR32toFPR( env, r_src, r_src ); // 2*I32 -> F64
+ }
addInstr(env, PPC32Instr_FpLdFPSCR( fr_src ));
}
HReg r_rmPPC32 = newVRegI(env);
HReg r_tmp = newVRegI(env);
- // AND r_rmRI,3 -- shouldn't be needed; paranoia
+ vassert(hregClass(r_rmIR) == HRcIntWRDSZ);
+
+ // AND r_rmIR,3 -- shouldn't be needed; paranoia
addInstr(env,
- PPC32Instr_Alu32(Palu_AND, r_rmIR, r_rmIR, PPC32RH_Imm(False,3)));
+ PPC32Instr_Alu(Palu_AND, r_rmIR, r_rmIR, PPC32RH_Imm(False,3)));
// r_rmPPC32 = XOR( r_rmIR, (r_rmIR << 1) & 2)
addInstr(env,
- PPC32Instr_Alu32(Palu_SHL, r_tmp, r_rmIR, PPC32RH_Imm(False,1)));
+ PPC32Instr_Alu(Palu_SHL, r_tmp, r_rmIR, PPC32RH_Imm(False,1)));
addInstr(env,
- PPC32Instr_Alu32(Palu_AND, r_tmp, r_tmp, PPC32RH_Imm(False,2)));
+ PPC32Instr_Alu(Palu_AND, r_tmp, r_tmp, PPC32RH_Imm(False,2)));
addInstr(env,
- PPC32Instr_Alu32(Palu_XOR, r_rmPPC32, r_rmIR, PPC32RH_Reg(r_tmp)));
+ PPC32Instr_Alu(Palu_XOR, r_rmPPC32, r_rmIR, PPC32RH_Reg(r_tmp)));
return r_rmPPC32;
}
void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode )
{
HReg fr_src = newVRegF(env);
- HReg r_srcHi = newVRegI(env);
+ HReg r_src;
+
+ if (mode64)
+ vassert(typeOfIRExpr(env->type_env,mode) == Ity_I64);
+ else
+ vassert(typeOfIRExpr(env->type_env,mode) == Ity_I32);
/* Only supporting the rounding-mode bits - the rest of FPSCR is 0x0
- so we can set the whole register at once (faster)
*/
// Resolve rounding mode and convert to PPC32 representation
- HReg r_srcLo = roundModeIRtoPPC32( env, iselIntExpr_R(env, mode) );
-
- // srcHi = 0: upper 32 bits ignored by FpLdFPSCR
- addInstr(env, PPC32Instr_LI32(r_srcHi, 0));
-
- // Load 2*I32 regs to fp reg:
- fr_src = mk_LoadRRtoFPR( env, r_srcHi, r_srcLo );
+ r_src = roundModeIRtoPPC32( env, iselIntExpr_R(env, mode) );
+ // gpr -> fpr
+ if (mode64) {
+ fr_src = mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64
+ } else {
+ fr_src = mk_LoadRR32toFPR( env, r_src, r_src ); // 2*I32 -> F64
+ }
// Move to FPSCR
addInstr(env, PPC32Instr_FpLdFPSCR( fr_src ));
/* no luck; use the Slow way. */
r_src = newVRegI(env);
- addInstr(env, PPC32Instr_LI32(r_src, (UInt)simm32));
+ addInstr(env, PPC32Instr_LI(r_src, (Long)simm32, mode64));
}
else {
r_src = ri->Pri.Reg;
am_off12 = PPC32AMode_IR( 12, r_aligned16);
/* Store r_src in low word of 16-aligned mem */
- addInstr(env, PPC32Instr_Store( 4, am_off12, r_src ));
+ addInstr(env, PPC32Instr_Store( 4, am_off12, r_src, mode64 ));
/* Load src to vector[low lane] */
addInstr(env, PPC32Instr_AvLdSt( True/*load*/, 4, v_src, am_off12 ));
# if 0
vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
# endif
- vassert(hregClass(r) == HRcInt32);
+
+ vassert(hregClass(r) == HRcIntWRDSZ);
vassert(hregIsVirtual(r));
return r;
}
DECLARE_PATTERN(p_32to1_then_1Uto8);
IRType ty = typeOfIRExpr(env->type_env,e);
- vassert(ty == Ity_I32 || Ity_I16 || Ity_I8);
+ vassert(ty == Ity_I8 || ty == Ity_I16 ||
+ ty == Ity_I32 || ((ty == Ity_I64) && mode64));
switch (e->tag) {
PPC32AMode* am_addr = iselIntExpr_AMode(env, e->Iex.Load.addr);
if (e->Iex.Load.end != Iend_BE)
goto irreducible;
- if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32) {
- addInstr(env, PPC32Instr_Load( toUChar(sizeofIRType(ty)),
- False, r_dst, am_addr ));
- return r_dst;
- }
+ addInstr(env, PPC32Instr_Load( toUChar(sizeofIRType(ty)),
+ False, r_dst, am_addr, mode64 ));
+ return r_dst;
break;
}
//.. HReg dst = newVRegI(env);
//.. HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg2);
//.. addInstr(env, mk_iMOVsd_RR(reg,dst));
-//.. addInstr(env, PPC32Instr_Unary32(Xun_NEG,PPC32RM_Reg(dst)));
+//.. addInstr(env, PPC32Instr_Unary(Xun_NEG,PPC32RM_Reg(dst)));
//.. return dst;
//.. }
/* Is it an addition or logical style op? */
switch (e->Iex.Binop.op) {
- case Iop_Add8: case Iop_Add16: case Iop_Add32:
- aluOp = Palu_ADD; break;
- case Iop_Sub8: case Iop_Sub16: case Iop_Sub32:
- aluOp = Palu_SUB; break;
- case Iop_And8: case Iop_And16: case Iop_And32:
- aluOp = Palu_AND; break;
- case Iop_Or8: case Iop_Or16: case Iop_Or32:
- aluOp = Palu_OR; break;
- case Iop_Xor8: case Iop_Xor16: case Iop_Xor32:
- aluOp = Palu_XOR; break;
- case Iop_Shl32: case Iop_Shl16: case Iop_Shl8:
- aluOp = Palu_SHL; break;
- case Iop_Shr32: case Iop_Shr16: case Iop_Shr8:
- aluOp = Palu_SHR; break;
- case Iop_Sar32: case Iop_Sar16: case Iop_Sar8:
- aluOp = Palu_SAR; break;
- default:
- aluOp = Palu_INVALID; break;
+ case Iop_Add8: case Iop_Add16: case Iop_Add32: case Iop_Add64:
+ aluOp = Palu_ADD; break;
+ case Iop_Sub8: case Iop_Sub16: case Iop_Sub32: case Iop_Sub64:
+ aluOp = Palu_SUB; break;
+ case Iop_And8: case Iop_And16: case Iop_And32: case Iop_And64:
+ aluOp = Palu_AND; break;
+ case Iop_Or8: case Iop_Or16: case Iop_Or32: case Iop_Or64:
+ aluOp = Palu_OR; break;
+ case Iop_Xor8: case Iop_Xor16: case Iop_Xor32: case Iop_Xor64:
+ aluOp = Palu_XOR; break;
+ case Iop_Shl8: case Iop_Shl16: case Iop_Shl32: case Iop_Shl64:
+ aluOp = Palu_SHL; break;
+ case Iop_Shr8: case Iop_Shr16: case Iop_Shr32: case Iop_Shr64:
+ aluOp = Palu_SHR; break;
+ case Iop_Sar8: case Iop_Sar16: case Iop_Sar32: case Iop_Sar64:
+ aluOp = Palu_SAR; break;
+ default:
+ aluOp = Palu_INVALID; break;
}
/* For commutative ops we assume any literal
values are on the second operand. */
if (aluOp != Palu_INVALID) {
- HReg r_dst, r_srcL;
+ HReg r_dst = newVRegI(env);
+ HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
PPC32RH* ri_srcR = NULL;
- r_dst = newVRegI(env);
- /* get left arg into a reg */
- r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
/* get right arg into an RH, in the appropriate way */
switch (aluOp) {
- case Palu_ADD: case Palu_SUB:
- ri_srcR = iselIntExpr_RH(env, True/*signed*/,
- e->Iex.Binop.arg2);
- break;
- case Palu_AND: case Palu_OR: case Palu_XOR:
- ri_srcR = iselIntExpr_RH(env, False/*signed*/,
- e->Iex.Binop.arg2);
- break;
- case Palu_SHL: case Palu_SHR: case Palu_SAR:
+ case Palu_ADD: case Palu_SUB:
+ ri_srcR = iselIntExpr_RH(env, True/*signed*/,
+ e->Iex.Binop.arg2);
+ break;
+ case Palu_AND: case Palu_OR: case Palu_XOR:
+ ri_srcR = iselIntExpr_RH(env, False/*signed*/,
+ e->Iex.Binop.arg2);
+ break;
+ case Palu_SHL: case Palu_SHR: case Palu_SAR:
+ if (!mode64)
ri_srcR = iselIntExpr_RH5u(env, e->Iex.Binop.arg2);
- break;
- default:
- vpanic("iselIntExpr_R_wrk-aluOp-arg2");
+ else
+ ri_srcR = iselIntExpr_RH6u(env, e->Iex.Binop.arg2);
+ break;
+ default:
+ vpanic("iselIntExpr_R_wrk-aluOp-arg2");
}
- /* widen the left arg if needed */
- if ((aluOp == Palu_SHR || aluOp == Palu_SAR)
- && (ty == Ity_I8 || ty == Ity_I16)) {
- PPC32RH* amt = PPC32RH_Imm(False, toUShort(ty == Ity_I8 ? 24 : 16));
- HReg tmp = newVRegI(env);
- addInstr(env, PPC32Instr_Alu32(Palu_SHL, tmp, r_srcL, amt));
- addInstr(env, PPC32Instr_Alu32(aluOp, tmp, tmp, amt));
- r_srcL = tmp;
- vassert(0); /* AWAITING TEST CASE */
- }
- addInstr(env, PPC32Instr_Alu32(aluOp, r_dst, r_srcL, ri_srcR));
+ /* widen the left arg if needed */
+ if ((aluOp == Palu_SHR || aluOp == Palu_SAR)) {
+ if (!mode64 && (ty == Ity_I8 || ty == Ity_I16)) {
+ PPC32RH* amt = PPC32RH_Imm(False, toUShort(ty == Ity_I8 ? 24 : 16));
+ HReg tmp = newVRegI(env);
+ addInstr(env, PPC32Instr_Alu(Palu_SHL, tmp, r_srcL, amt));
+ addInstr(env, PPC32Instr_Alu(aluOp, tmp, tmp, amt));
+ r_srcL = tmp;
+ vassert(0); /* AWAITING TEST CASE */
+ }
+ if (mode64 && (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32)) {
+ PPC32RH* amt = PPC32RH_Imm(False, toUShort(ty == Ity_I8 ? 56 :
+ ty == Ity_I16 ? 48 : 32));
+ HReg tmp = newVRegI(env);
+ addInstr(env, PPC32Instr_Alu(Palu_SHL, tmp, r_srcL, amt));
+ addInstr(env, PPC32Instr_Alu(aluOp, tmp, tmp, amt));
+ r_srcL = tmp;
+ }
+ }
+ addInstr(env, PPC32Instr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
return r_dst;
}
/* How about a div? */
if (e->Iex.Binop.op == Iop_DivS32 ||
- e->Iex.Binop.op == Iop_DivU32) {
+ e->Iex.Binop.op == Iop_DivU32 ||
+ e->Iex.Binop.op == Iop_DivS64 ||
+ e->Iex.Binop.op == Iop_DivU64) {
HReg r_dst = newVRegI(env);
HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Div(toBool(e->Iex.Binop.op == Iop_DivS32),
- r_dst, r_srcL, r_srcR));
+ Bool syned = toBool(e->Iex.Binop.op == Iop_DivS32 ||
+ e->Iex.Binop.op == Iop_DivS64);
+ addInstr(env, PPC32Instr_Div(syned, r_dst, r_srcL, r_srcR));
return r_dst;
}
/* No? Anyone for a mul? */
if (e->Iex.Binop.op == Iop_Mul16 ||
- e->Iex.Binop.op == Iop_Mul32) {
- Bool syned = True;
+ e->Iex.Binop.op == Iop_Mul32 ||
+ e->Iex.Binop.op == Iop_Mul64) {
+ Bool syned = False;
HReg r_dst = newVRegI(env);
HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
}
/* El-mutanto 3-way compare? */
- if (e->Iex.Binop.op == Iop_CmpORD32S
- || e->Iex.Binop.op == Iop_CmpORD32U) {
- Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S);
+ if (e->Iex.Binop.op == Iop_CmpORD32S ||
+ e->Iex.Binop.op == Iop_CmpORD32U ||
+ e->Iex.Binop.op == Iop_CmpORD64S ||
+ e->Iex.Binop.op == Iop_CmpORD64U) {
+ Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S ||
+ e->Iex.Binop.op == Iop_CmpORD64S);
HReg dst = newVRegI(env);
HReg srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
PPC32RH* srcR = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Cmp32(syned, /*cr*/7, srcL, srcR));
+ addInstr(env, PPC32Instr_Cmp(syned, /*cr*/7, srcL, srcR));
addInstr(env, PPC32Instr_MfCR(dst));
- addInstr(env, PPC32Instr_Alu32(Palu_AND, dst, dst,
- PPC32RH_Imm(False,7<<1)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, dst, dst,
+ PPC32RH_Imm(False,7<<1)));
return dst;
}
//zz /* Handle misc other ops. */
//zz if (e->Iex.Binop.op == Iop_8HLto16) {
-//zz HReg hi8 = newVRegI(env);
-//zz HReg lo8 = newVRegI(env);
+//zz HReg hi8 = newVRegI32(env);
+//zz HReg lo8 = newVRegI32(env);
//zz HReg hi8s = iselIntExpr_R(env, e->Iex.Binop.arg1);
//zz HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2);
//zz addInstr(env,
-//zz PPC32Instr_Alu32(Palu_SHL, hi8, hi8s, PPC32RH_Imm(False,8)));
+//zz PPC32Instr_Alu(Palu_SHL, hi8, hi8s, PPC32RH_Imm(False,8)));
//zz addInstr(env,
-//zz PPC32Instr_Alu32(Palu_AND, lo8, lo8s, PPC32RH_Imm(False,0xFF)));
+//zz PPC32Instr_Alu(Palu_AND, lo8, lo8s, PPC32RH_Imm(False,0xFF)));
//zz addInstr(env,
-//zz PPC32Instr_Alu32(Palu_OR, hi8, hi8, PPC32RI_Reg(lo8)));
+//zz PPC32Instr_Alu(Palu_OR, hi8, hi8, PPC32RI_Reg(lo8)));
//zz return hi8;
//zz }
//zz
//zz if (e->Iex.Binop.op == Iop_16HLto32) {
-//zz HReg hi16 = newVRegI(env);
-//zz HReg lo16 = newVRegI(env);
+//zz HReg hi16 = newVRegI32(env);
+//zz HReg lo16 = newVRegI32(env);
//zz HReg hi16s = iselIntExpr_R(env, e->Iex.Binop.arg1);
//zz HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2);
//zz addInstr(env, mk_sh32(env, Psh_SHL, hi16, hi16s, PPC32RI_Imm(16)));
-//zz addInstr(env, PPC32Instr_Alu32(Palu_AND, lo16, lo16s, PPC32RI_Imm(0xFFFF)));
-//zz addInstr(env, PPC32Instr_Alu32(Palu_OR, hi16, hi16, PPC32RI_Reg(lo16)));
+//zz addInstr(env, PPC32Instr_Alu(Palu_AND, lo16, lo16s, PPC32RI_Imm(0xFFFF)));
+//zz addInstr(env, PPC32Instr_Alu(Palu_OR, hi16, hi16, PPC32RI_Reg(lo16)));
//zz return hi16;
//zz }
//.. if (e->Iex.Binop.op == Iop_MullS16 || e->Iex.Binop.op == Iop_MullS8
//.. || e->Iex.Binop.op == Iop_MullU16 || e->Iex.Binop.op == Iop_MullU8) {
-//.. HReg a16 = newVRegI(env);
-//.. HReg b16 = newVRegI(env);
+//.. HReg a16 = newVRegI32(env);
+//.. HReg b16 = newVRegI32(env);
//.. HReg a16s = iselIntExpr_R(env, e->Iex.Binop.arg1);
//.. HReg b16s = iselIntExpr_R(env, e->Iex.Binop.arg2);
//.. Int shift = (e->Iex.Binop.op == Iop_MullS8
*/
// r_ccIR_b0 = r_ccPPC32[0] | r_ccPPC32[3]
- addInstr(env, PPC32Instr_Alu32(Palu_SHR, r_ccIR_b0, r_ccPPC32, PPC32RH_Imm(False,0x3)));
- addInstr(env, PPC32Instr_Alu32(Palu_OR, r_ccIR_b0, r_ccPPC32, PPC32RH_Reg(r_ccIR_b0)));
- addInstr(env, PPC32Instr_Alu32(Palu_AND, r_ccIR_b0, r_ccIR_b0, PPC32RH_Imm(False,0x1)));
+ addInstr(env, PPC32Instr_Alu(Palu_SHR, r_ccIR_b0, r_ccPPC32, PPC32RH_Imm(False,0x3)));
+ addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR_b0, r_ccPPC32, PPC32RH_Reg(r_ccIR_b0)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b0, r_ccIR_b0, PPC32RH_Imm(False,0x1)));
// r_ccIR_b2 = r_ccPPC32[0]
- addInstr(env, PPC32Instr_Alu32(Palu_SHL, r_ccIR_b2, r_ccPPC32, PPC32RH_Imm(False,0x2)));
- addInstr(env, PPC32Instr_Alu32(Palu_AND, r_ccIR_b2, r_ccIR_b2, PPC32RH_Imm(False,0x4)));
+ addInstr(env, PPC32Instr_Alu(Palu_SHL, r_ccIR_b2, r_ccPPC32, PPC32RH_Imm(False,0x2)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b2, r_ccIR_b2, PPC32RH_Imm(False,0x4)));
// r_ccIR_b6 = r_ccPPC32[0] | r_ccPPC32[1]
- addInstr(env, PPC32Instr_Alu32(Palu_SHR, r_ccIR_b6, r_ccPPC32, PPC32RH_Imm(False,0x1)));
- addInstr(env, PPC32Instr_Alu32(Palu_OR, r_ccIR_b6, r_ccPPC32, PPC32RH_Reg(r_ccIR_b6)));
- addInstr(env, PPC32Instr_Alu32(Palu_SHL, r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm(False,0x6)));
- addInstr(env, PPC32Instr_Alu32(Palu_AND, r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm(False,0x40)));
+ addInstr(env, PPC32Instr_Alu(Palu_SHR, r_ccIR_b6, r_ccPPC32, PPC32RH_Imm(False,0x1)));
+ addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR_b6, r_ccPPC32, PPC32RH_Reg(r_ccIR_b6)));
+ addInstr(env, PPC32Instr_Alu(Palu_SHL, r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm(False,0x6)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r_ccIR_b6, r_ccIR_b6, PPC32RH_Imm(False,0x40)));
// r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6
- addInstr(env, PPC32Instr_Alu32(Palu_OR, r_ccIR, r_ccIR_b0, PPC32RH_Reg(r_ccIR_b2)));
- addInstr(env, PPC32Instr_Alu32(Palu_OR, r_ccIR, r_ccIR, PPC32RH_Reg(r_ccIR_b6)));
+ addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR, r_ccIR_b0, PPC32RH_Reg(r_ccIR_b2)));
+ addInstr(env, PPC32Instr_Alu(Palu_OR, r_ccIR, r_ccIR, PPC32RH_Reg(r_ccIR_b6)));
return r_ccIR;
}
//.. if (e->Iex.Binop.op == Iop_PRemC3210F64
//.. || e->Iex.Binop.op == Iop_PRem1C3210F64) {
//.. HReg junk = newVRegF(env);
-//.. HReg dst = newVRegI(env);
+//.. HReg dst = newVRegI32(env);
//.. HReg srcL = iselDblExpr(env, e->Iex.Binop.arg1);
//.. HReg srcR = iselDblExpr(env, e->Iex.Binop.arg2);
//.. addInstr(env, X86Instr_FpBinary(
IRExpr* expr32 = mi.bindee[0];
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, expr32);
- addInstr(env, PPC32Instr_Alu32(Palu_AND, r_dst, r_src, PPC32RH_Imm(False,1)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r_dst, r_src, PPC32RH_Imm(False,1)));
return r_dst;
}
if (matchIRExpr(&mi,p_LDbe16_then_16Uto32,e)) {
HReg r_dst = newVRegI(env);
PPC32AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
- addInstr(env, PPC32Instr_Load(2,False,r_dst,amode));
+ addInstr(env, PPC32Instr_Load(2,False,r_dst,amode, mode64));
return r_dst;
}
}
switch (e->Iex.Unop.op) {
case Iop_8Uto16:
case Iop_8Uto32:
- case Iop_16Uto32: {
+ case Iop_8Uto64:
+ case Iop_16Uto32:
+ case Iop_16Uto64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- UShort mask = toUShort(e->Iex.Unop.op==Iop_16Uto32 ? 0xFFFF : 0xFF);
- addInstr(env, PPC32Instr_Alu32(Palu_AND,r_dst,r_src,
+ UShort mask = toUShort(e->Iex.Unop.op==Iop_16Uto64 ? 0xFFFF :
+ e->Iex.Unop.op==Iop_16Uto32 ? 0xFFFF : 0xFF);
+ addInstr(env, PPC32Instr_Alu(Palu_AND,r_dst,r_src,
PPC32RH_Imm(False,mask)));
return r_dst;
}
+ case Iop_32Uto64: {
+ HReg r_dst = newVRegI(env);
+ HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
+ vassert(mode64);
+ addInstr(env, PPC32Instr_Alu(Palu_SHL, r_dst, r_src,
+ PPC32RH_Imm(False,32)));
+ addInstr(env, PPC32Instr_Alu(Palu_SHR, r_dst, r_dst,
+ PPC32RH_Imm(False,32)));
+ return r_dst;
+ }
case Iop_8Sto16:
case Iop_8Sto32:
- case Iop_16Sto32: {
+ case Iop_16Sto32:
+ case Iop_16Sto64:
+ case Iop_32Sto64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- UShort amt = toUShort(e->Iex.Unop.op==Iop_16Sto32 ? 16 : 24);
- addInstr(env, PPC32Instr_Alu32(Palu_SHL, r_dst, r_src,
+ UShort amt = toUShort(e->Iex.Unop.op==Iop_16Sto64 ? 48 :
+ e->Iex.Unop.op==Iop_32Sto64 ? 32 :
+ e->Iex.Unop.op==Iop_16Sto32 ? 16 : 24);
+ vassert(amt<32 || mode64);
+ addInstr(env, PPC32Instr_Alu(Palu_SHL, r_dst, r_src,
PPC32RH_Imm(False,amt)));
- addInstr(env, PPC32Instr_Alu32(Palu_SAR, r_dst, r_dst,
+ addInstr(env, PPC32Instr_Alu(Palu_SAR, r_dst, r_dst,
PPC32RH_Imm(False,amt)));
return r_dst;
}
case Iop_Not8:
case Iop_Not16:
- case Iop_Not32: {
+ case Iop_Not32:
+ case Iop_Not64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Unary32(Pun_NOT,r_dst,r_src));
+ addInstr(env, PPC32Instr_Unary(Pun_NOT,r_dst,r_src));
return r_dst;
}
case Iop_64HIto32: {
- HReg rHi, rLo;
- iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
- return rHi; /* and abandon rLo .. poor wee thing :-) */
+ if (!mode64) {
+ HReg rHi, rLo;
+ iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+ return rHi; /* and abandon rLo .. poor wee thing :-) */
+ } else {
+ HReg r_dst = newVRegI(env);
+ HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
+ addInstr(env, PPC32Instr_Alu(Palu_SHR, r_dst, r_src,
+ PPC32RH_Imm(False,32)));
+ return r_dst;
+ }
}
case Iop_64to32: {
//:: unop(Iop_64to32,
//:: binop(Iop_MullS32, bind(0), bind(1))));
//:: if (matchIRExpr(&mi,p_MullS32_then_64to32,e)) {
-//:: HReg r_dst = newVRegI(env);
+//:: HReg r_dst = newVRegI32(env);
//:: HReg r_srcL = iselIntExpr_R( env, mi.bindee[0] );
//:: PPC32RI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] ));
//:: addInstr(env, PPC32Instr_MulL(True, 0, r_dst, r_srcL, ri_srcR));
//:: unop(Iop_64to32,
//:: binop(Iop_MullU32, bind(0), bind(1))));
//:: if (matchIRExpr(&mi,p_MullU32_then_64to32,e)) {
-//:: HReg r_dst = newVRegI(env);
+//:: HReg r_dst = newVRegI32(env);
//:: HReg r_srcL = iselIntExpr_R( env, mi.bindee[0] );
//:: PPC32RI* ri_srcR = mk_FitRI16_S(env, iselIntExpr_RI( env, mi.bindee[1] ));
//:: addInstr(env, PPC32Instr_MulL(False, 0, r_dst, r_srcL, ri_srcR));
//:: // CAB: Also: 64HIto32(MullU32(expr,expr))
//:: // CAB: Also: 64HIto32(MullS32(expr,expr))
- HReg rHi, rLo;
- iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
- return rLo; /* similar stupid comment to the above ... */
+ if (!mode64) {
+ HReg rHi, rLo;
+ iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+ return rLo; /* similar stupid comment to the above ... */
+ } else {
+ /* This is a no-op. */
+ return iselIntExpr_R(env, e->Iex.Unop.arg);
+ }
+ }
+      case Iop_64to16: {
+         if (mode64) /* This is a no-op. */
+            return iselIntExpr_R(env, e->Iex.Unop.arg);
+         break; /* !mode64: unhandled; falling through would miscompile */
+      }
case Iop_16HIto8:
case Iop_32HIto16: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
UShort shift = toUShort(e->Iex.Unop.op == Iop_16HIto8 ? 8 : 16);
- addInstr(env, PPC32Instr_Alu32(Palu_SHR, r_dst, r_src,
- PPC32RH_Imm(False,shift)));
+ addInstr(env, PPC32Instr_Alu(Palu_SHR, r_dst, r_src,
+ PPC32RH_Imm(False,shift)));
return r_dst;
}
+      case Iop_128to64: {
+         HReg rHi, rLo; /* declarations before statements (C89) */
+         vassert(mode64);
+         iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+         return rLo; /* similar stupid comment to the above ... */
+      }
case Iop_1Uto32:
case Iop_1Uto8: {
HReg r_dst = newVRegI(env);
HReg r_dst = newVRegI(env);
PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
addInstr(env, PPC32Instr_Set32(cond,r_dst));
- addInstr(env, PPC32Instr_Alu32(Palu_SHL, r_dst, r_dst,
+ addInstr(env, PPC32Instr_Alu(Palu_SHL, r_dst, r_dst,
PPC32RH_Imm(False,31)));
- addInstr(env, PPC32Instr_Alu32(Palu_SAR, r_dst, r_dst,
+ addInstr(env, PPC32Instr_Alu(Palu_SAR, r_dst, r_dst,
PPC32RH_Imm(False,31)));
return r_dst;
}
//.. case Iop_Ctz32: {
//.. /* Count trailing zeroes, implemented by x86 'bsfl' */
-//.. HReg dst = newVRegI(env);
+//.. HReg dst = newVRegI32(env);
//.. HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
//.. addInstr(env, X86Instr_Bsfr32(True,src,dst));
//.. return dst;
/* Count leading zeroes. */
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Unary32(Pun_CLZ,r_dst,r_src));
+ addInstr(env, PPC32Instr_Unary(Pun_CLZ,r_dst,r_src));
return r_dst;
}
case Iop_Neg8:
case Iop_Neg16:
- case Iop_Neg32: {
+ case Iop_Neg32:
+ case Iop_Neg64: {
HReg r_dst = newVRegI(env);
HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Unary32(Pun_NEG,r_dst,r_src));
+ addInstr(env, PPC32Instr_Unary(Pun_NEG,r_dst,r_src));
return r_dst;
}
// store vec, load low word to dst
addInstr(env, PPC32Instr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
- addInstr(env, PPC32Instr_Load( 4, False, dst, am_off12 ));
+ addInstr(env, PPC32Instr_Load( 4, False, dst, am_off12, mode64 ));
add_to_sp( env, 32 ); // Reset SP
return dst;
case Iop_16to8:
case Iop_32to8:
case Iop_32to16:
+ case Iop_64to8:
/* These are no-ops. */
return iselIntExpr_R(env, e->Iex.Unop.arg);
/* --------- GET --------- */
case Iex_Get: {
- if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32) {
+ if (ty == Ity_I8 || ty == Ity_I16 ||
+ ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
HReg r_dst = newVRegI(env);
- PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset, GuestStatePtr );
+ PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset,
+ GuestStatePtr(mode64) );
addInstr(env, PPC32Instr_Load( toUChar(sizeofIRType(ty)),
- False, r_dst, am_addr ));
+ False, r_dst, am_addr, mode64 ));
return r_dst;
}
break;
//.. = genGuestArrayOffset(
//.. env, e->Iex.GetI.descr,
//.. e->Iex.GetI.ix, e->Iex.GetI.bias );
-//.. HReg dst = newVRegI(env);
+//.. HReg dst = newVRegI32(env);
//.. if (ty == Ity_I8) {
//.. addInstr(env, X86Instr_Load( 1, False, am, dst ));
//.. return dst;
doHelperCall( env, False, NULL, e->Iex.CCall.cee, e->Iex.CCall.args );
/* GPR3 now holds the destination address from Pin_Goto */
- addInstr(env, mk_iMOVds_RR(r_dst, hregPPC32_GPR3()));
+ addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
return r_dst;
}
/* --------- LITERAL --------- */
/* 32/16/8-bit literals */
case Iex_Const: {
- Int i;
+ Long l;
HReg r_dst = newVRegI(env);
switch (e->Iex.Const.con->tag) {
- case Ico_U32: i = (Int)e->Iex.Const.con->Ico.U32; break;
- case Ico_U16: i = (Int)(Short)e->Iex.Const.con->Ico.U16; break;
- case Ico_U8: i = (Int)(Char)e->Iex.Const.con->Ico.U8; break;
- default: vpanic("iselIntExpr_R.const(ppc32)");
+ case Ico_U64: vassert(mode64);
+ l = (Long) e->Iex.Const.con->Ico.U64; break;
+ case Ico_U32: l = (Long)(Int) e->Iex.Const.con->Ico.U32; break;
+ case Ico_U16: l = (Long)(Int)(Short)e->Iex.Const.con->Ico.U16; break;
+ case Ico_U8: l = (Long)(Int)(Char )e->Iex.Const.con->Ico.U8; break;
+ default: vpanic("iselIntExpr_R.const(ppc32)");
}
- addInstr(env, PPC32Instr_LI32(r_dst, (UInt)i));
+ addInstr(env, PPC32Instr_LI(r_dst, (ULong)l, mode64));
return r_dst;
}
/* --------- MULTIPLEX --------- */
case Iex_Mux0X: {
- if ((ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
+ if ((ty == Ity_I8 || ty == Ity_I16 ||
+ ty == Ity_I32 || ((ty == Ity_I64) && mode64)) &&
+ typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
PPC32CondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond);
HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
HReg r_dst = newVRegI(env);
HReg r_tmp = newVRegI(env);
addInstr(env, mk_iMOVds_RR(r_dst,rX));
- addInstr(env, PPC32Instr_Alu32(Palu_AND, r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
- addInstr(env, PPC32Instr_Cmp32(False/*unsigned*/, 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
- addInstr(env, PPC32Instr_CMov32(cc,r_dst,r0));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
+ addInstr(env, PPC32Instr_CMov(cc,r_dst,r0));
return r_dst;
}
break;
static Bool sane_AMode ( PPC32AMode* am )
{
switch (am->tag) {
- case Pam_IR:
- return toBool(
- hregClass(am->Pam.IR.base) == HRcInt32
- && hregIsVirtual(am->Pam.IR.base)
- && fits16bits(am->Pam.IR.index)
- );
- case Pam_RR:
- return toBool(
- hregClass(am->Pam.RR.base) == HRcInt32
- && hregIsVirtual(am->Pam.IR.base)
- && hregClass(am->Pam.RR.base) == HRcInt32
- && hregIsVirtual(am->Pam.IR.base)
- );
- default:
- vpanic("sane_AMode: unknown ppc32 amode tag");
+ case Pam_IR:
+ return toBool( hregClass(am->Pam.IR.base) == HRcIntWRDSZ &&
+ hregIsVirtual(am->Pam.IR.base) &&
+ fits16bits(am->Pam.IR.index) );
+ case Pam_RR:
+      return toBool( hregClass(am->Pam.RR.base) == HRcIntWRDSZ &&
+                     hregIsVirtual(am->Pam.RR.base) &&
+                     hregClass(am->Pam.RR.index) == HRcIntWRDSZ &&
+                     hregIsVirtual(am->Pam.RR.index) );
+ default:
+ vpanic("sane_AMode: unknown ppc32 amode tag");
}
}
static PPC32AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e )
{
IRType ty = typeOfIRExpr(env->type_env,e);
- vassert(ty == Ity_I32);
+ vassert(ty == (mode64 ? Ity_I64 : Ity_I32));
/* Add32(expr,i), where i == sign-extend of (i & 0xFFFF) */
if (e->tag == Iex_Binop
PPC32RH* ri = iselIntExpr_RH_wrk(env, syned, e);
/* sanity checks ... */
switch (ri->tag) {
- case Prh_Imm:
- vassert(ri->Prh.Imm.syned == syned);
- if (syned)
- vassert(ri->Prh.Imm.imm16 != 0x8000);
- return ri;
- case Prh_Reg:
- vassert(hregClass(ri->Prh.Reg.reg) == HRcInt32);
- vassert(hregIsVirtual(ri->Prh.Reg.reg));
- return ri;
- default:
- vpanic("iselIntExpr_RH: unknown ppc32 RH tag");
+ case Prh_Imm:
+ vassert(ri->Prh.Imm.syned == syned);
+ if (syned)
+ vassert(ri->Prh.Imm.imm16 != 0x8000);
+ return ri;
+ case Prh_Reg:
+ vassert(hregClass(ri->Prh.Reg.reg) == HRcIntWRDSZ);
+ vassert(hregIsVirtual(ri->Prh.Reg.reg));
+ return ri;
+ default:
+ vpanic("iselIntExpr_RH: unknown ppc32 RH tag");
}
}
/* DO NOT CALL THIS DIRECTLY ! */
static PPC32RH* iselIntExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e )
{
- UInt u;
- Int i;
+ ULong u;
+ Long l;
IRType ty = typeOfIRExpr(env->type_env,e);
- vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
+ vassert(ty == Ity_I8 || ty == Ity_I16 ||
+ ty == Ity_I32 || ((ty == Ity_I64) && mode64));
/* special case: immediate */
if (e->tag == Iex_Const) {
/* What value are we aiming to generate? */
switch (e->Iex.Const.con->tag) {
- case Ico_U32: u = e->Iex.Const.con->Ico.U32; break;
- case Ico_U16: u = 0xFFFF & e->Iex.Const.con->Ico.U16; break;
- case Ico_U8: u = 0xFF & e->Iex.Const.con->Ico.U8; break;
- default: vpanic("iselIntExpr_RH.Iex_Const(ppc32h)");
+ /* Note: Not sign-extending - we carry 'syned' around */
+ case Ico_U64: vassert(mode64);
+ u = e->Iex.Const.con->Ico.U64; break;
+ case Ico_U32: u = 0xFFFFFFFF & e->Iex.Const.con->Ico.U32; break;
+ case Ico_U16: u = 0x0000FFFF & e->Iex.Const.con->Ico.U16; break;
+ case Ico_U8: u = 0x000000FF & e->Iex.Const.con->Ico.U8; break;
+ default: vpanic("iselIntExpr_RH.Iex_Const(ppc32h)");
}
- i = (Int)u;
+ l = (Long)u;
/* Now figure out if it's representable. */
if (!syned && u <= 65535) {
return PPC32RH_Imm(False/*unsigned*/, toUShort(u & 0xFFFF));
}
- if (syned && i >= -32767 && i <= 32767) {
+ if (syned && l >= -32767 && l <= 32767) {
return PPC32RH_Imm(True/*signed*/, toUShort(u & 0xFFFF));
}
/* no luck; use the Slow Way. */
PPC32RI* ri = iselIntExpr_RI_wrk(env, e);
/* sanity checks ... */
switch (ri->tag) {
- case Pri_Imm:
- return ri;
- case Pri_Reg:
- vassert(hregClass(ri->Pri.Reg) == HRcInt32);
- vassert(hregIsVirtual(ri->Pri.Reg));
- return ri;
- default:
- vpanic("iselIntExpr_RI: unknown ppc32 RI tag");
+ case Pri_Imm:
+ return ri;
+ case Pri_Reg:
+ vassert(hregClass(ri->Pri.Reg) == HRcIntWRDSZ);
+ vassert(hregIsVirtual(ri->Pri.Reg));
+ return ri;
+ default:
+ vpanic("iselIntExpr_RI: unknown ppc32 RI tag");
}
}
/* DO NOT CALL THIS DIRECTLY ! */
static PPC32RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e )
{
+ Long l;
IRType ty = typeOfIRExpr(env->type_env,e);
- vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
+ vassert(ty == Ity_I8 || ty == Ity_I16 ||
+ ty == Ity_I32 || ((ty == Ity_I64) && mode64));
/* special case: immediate */
if (e->tag == Iex_Const) {
- UInt u;
switch (e->Iex.Const.con->tag) {
- case Ico_U32: u = e->Iex.Const.con->Ico.U32; break;
- case Ico_U16: u = 0xFFFF & e->Iex.Const.con->Ico.U16; break;
- case Ico_U8: u = 0xFF & e->Iex.Const.con->Ico.U8; break;
- default: vpanic("iselIntExpr_RI.Iex_Const(ppc32h)");
+ case Ico_U64: vassert(mode64);
+ l = (Long) e->Iex.Const.con->Ico.U64; break;
+ case Ico_U32: l = (Long)(Int) e->Iex.Const.con->Ico.U32; break;
+ case Ico_U16: l = (Long)(Int)(Short)e->Iex.Const.con->Ico.U16; break;
+ case Ico_U8: l = (Long)(Int)(Char )e->Iex.Const.con->Ico.U8; break;
+ default: vpanic("iselIntExpr_RI.Iex_Const(ppc32h)");
}
- return PPC32RI_Imm(u);
+ return PPC32RI_Imm((ULong)l);
}
/* default case: calculate into a register and return that */
PPC32RH* ri = iselIntExpr_RH5u_wrk(env, e);
/* sanity checks ... */
switch (ri->tag) {
- case Prh_Imm:
- vassert(ri->Prh.Imm.imm16 >= 1 && ri->Prh.Imm.imm16 <= 31);
- vassert(!ri->Prh.Imm.syned);
- return ri;
- case Prh_Reg:
- vassert(hregClass(ri->Prh.Reg.reg) == HRcInt32);
- vassert(hregIsVirtual(ri->Prh.Reg.reg));
- return ri;
- default:
- vpanic("iselIntExpr_RH5u: unknown ppc32 RI tag");
+ case Prh_Imm:
+ vassert(ri->Prh.Imm.imm16 >= 1 && ri->Prh.Imm.imm16 <= 31);
+ vassert(!ri->Prh.Imm.syned);
+ return ri;
+ case Prh_Reg:
+ vassert(hregClass(ri->Prh.Reg.reg) == HRcIntWRDSZ);
+ vassert(hregIsVirtual(ri->Prh.Reg.reg));
+ return ri;
+ default:
+ vpanic("iselIntExpr_RH5u: unknown ppc32 RI tag");
}
}
}
+/* --------------------- RH6u --------------------- */
+
+/* Compute an I8 into a reg-or-6-bit-unsigned-immediate, the latter
+ being an immediate in the range 1 .. 63 inclusive. Used for doing
+ shift amounts. */
+
+static PPC32RH* iselIntExpr_RH6u ( ISelEnv* env, IRExpr* e )
+{
+ PPC32RH* ri = iselIntExpr_RH6u_wrk(env, e);
+ /* sanity checks ... */
+ switch (ri->tag) {
+ case Prh_Imm:
+ vassert(ri->Prh.Imm.imm16 >= 1 && ri->Prh.Imm.imm16 <= 63);
+ vassert(!ri->Prh.Imm.syned);
+ return ri;
+ case Prh_Reg:
+ vassert(hregClass(ri->Prh.Reg.reg) == HRcIntWRDSZ);
+ vassert(hregIsVirtual(ri->Prh.Reg.reg));
+ return ri;
+ default:
+ vpanic("iselIntExpr_RH6u: unknown ppc64 RI tag");
+ }
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static PPC32RH* iselIntExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e )
+{
+ IRType ty = typeOfIRExpr(env->type_env,e);
+ vassert(ty == Ity_I8);
+
+ /* special case: immediate */
+ if (e->tag == Iex_Const
+ && e->Iex.Const.con->tag == Ico_U8
+ && e->Iex.Const.con->Ico.U8 >= 1
+ && e->Iex.Const.con->Ico.U8 <= 63) {
+ return PPC32RH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8);
+ }
+
+ /* default case: calculate into a register and return that */
+ {
+ HReg r = iselIntExpr_R ( env, e );
+ return PPC32RH_Reg(r);
+ }
+}
+
+
/* --------------------- CONDCODE --------------------- */
/* Generate code to evaluated a bit-typed expression, returning the
if (e->tag == Iex_Const && e->Iex.Const.con->Ico.U1 == True) {
// Make a compare that will always be true:
HReg r_zero = newVRegI(env);
- addInstr(env, PPC32Instr_LI32(r_zero, 0));
- addInstr(env, PPC32Instr_Cmp32(False/*unsigned*/, /*cr*/7,
- r_zero, PPC32RH_Reg(r_zero)));
+ addInstr(env, PPC32Instr_LI(r_zero, 0, mode64));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, /*cr*/7,
+ r_zero, PPC32RH_Reg(r_zero)));
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
}
HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
HReg tmp = newVRegI(env);
/* could do better, probably -- andi. */
- addInstr(env, PPC32Instr_Alu32(
+ addInstr(env, PPC32Instr_Alu(
Palu_AND, tmp, src, PPC32RH_Imm(False,1)));
- addInstr(env, PPC32Instr_Cmp32(
+ addInstr(env, PPC32Instr_Cmp(
False/*unsigned*/, 7/*cr*/,
tmp, PPC32RH_Imm(False,1)));
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
&& e->Iex.Unop.op == Iop_CmpNEZ8) {
HReg r_32 = iselIntExpr_R(env, e->Iex.Unop.arg);
HReg r_l = newVRegI(env);
- addInstr(env, PPC32Instr_Alu32(Palu_AND, r_l, r_32, PPC32RH_Imm(False,0xFF)));
- addInstr(env, PPC32Instr_Cmp32(False/*unsigned*/, 7/*cr*/,
-r_l, PPC32RH_Imm(False,0)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r_l, r_32, PPC32RH_Imm(False,0xFF)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7/*cr*/,
+ r_l, PPC32RH_Imm(False,0)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
}
if (e->tag == Iex_Unop
&& e->Iex.Unop.op == Iop_CmpNEZ32) {
HReg r1 = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Cmp32(False/*unsigned*/, 7, r1, PPC32RH_Imm(False,0)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7, r1, PPC32RH_Imm(False,0)));
return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
}
//.. || e->Iex.Binop.op == Iop_CmpNE8)) {
//.. HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
//.. X86RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
-//.. HReg r = newVRegI(env);
+//.. HReg r = newVRegI32(env);
//.. addInstr(env, mk_iMOVsd_RR(r1,r));
//.. addInstr(env, X86Instr_Alu32R(Xalu_XOR,rmi2,r));
//.. addInstr(env, X86Instr_Alu32R(Xalu_AND,X86RMI_Imm(0xFF),r));
//.. || e->Iex.Binop.op == Iop_CmpNE16)) {
//.. HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
//.. X86RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
-//.. HReg r = newVRegI(env);
+//.. HReg r = newVRegI32(env);
//.. addInstr(env, mk_iMOVsd_RR(r1,r));
//.. addInstr(env, X86Instr_Alu32R(Xalu_XOR,rmi2,r));
//.. addInstr(env, X86Instr_Alu32R(Xalu_AND,X86RMI_Imm(0xFFFF),r));
syned = True;
}
ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Cmp32(syned,7,r1,ri2));
+ addInstr(env, PPC32Instr_Cmp(syned,7,r1,ri2));
switch (e->Iex.Binop.op) {
- case Iop_CmpEQ32: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
- case Iop_CmpNE32: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
-// case Iop_CmpLT32S: return mk_PPCCondCode( Pct_TRUE, Pcf_LT );
- case Iop_CmpLT32U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
-// case Iop_CmpLE32S: return mk_PPCCondCode( Pct_FALSE, Pcf_GT );
- case Iop_CmpLE32U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
+ case Iop_CmpEQ32: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
+ case Iop_CmpNE32: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+// case Iop_CmpLT32S: return mk_PPCCondCode( Pct_TRUE, Pcf_LT );
+ case Iop_CmpLT32U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
+// case Iop_CmpLE32S: return mk_PPCCondCode( Pct_FALSE, Pcf_GT );
+ case Iop_CmpLE32U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
default: vpanic("iselCondCode(ppc32): CmpXX32");
}
}
+ /* Cmp*64*(x,y) */
+ if (e->tag == Iex_Binop
+ && (e->Iex.Binop.op == Iop_CmpEQ64
+ || e->Iex.Binop.op == Iop_CmpNE64
+ || e->Iex.Binop.op == Iop_CmpLT64S
+ || e->Iex.Binop.op == Iop_CmpLT64U
+ || e->Iex.Binop.op == Iop_CmpLE64S
+ || e->Iex.Binop.op == Iop_CmpLE64U)) {
+ PPC32RH* ri2;
+ HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ Bool syned = False;
+ if (e->Iex.Binop.op == Iop_CmpLT64S ||
+ e->Iex.Binop.op == Iop_CmpLE64S) {
+ syned = True;
+ }
+ ri2 = iselIntExpr_RH(env, syned, e->Iex.Binop.arg2);
+ addInstr(env, PPC32Instr_Cmp(syned,7,r1,ri2));
+
+ switch (e->Iex.Binop.op) {
+ case Iop_CmpEQ64: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
+ case Iop_CmpNE64: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+// case Iop_CmpLT64S: return mk_PPCCondCode( Pct_TRUE, Pcf_LT );
+ case Iop_CmpLT64U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
+// case Iop_CmpLE64S: return mk_PPCCondCode( Pct_FALSE, Pcf_GT );
+ case Iop_CmpLE64U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
+ default: vpanic("iselCondCode(ppc32): CmpXX64");
+ }
+ }
+
//.. /* CmpNE64(1Sto64(b), 0) ==> b */
//.. {
//.. DECLARE_PATTERN(p_CmpNE64_1Sto64);
//.. if (matchIRExpr(&mi, p_CmpNE64_x_zero, e)) {
//.. HReg hi, lo;
//.. IRExpr* x = mi.bindee[0];
-//.. HReg tmp = newVRegI(env);
+//.. HReg tmp = newVRegI32(env);
//.. iselInt64Expr( &hi, &lo, env, x );
//.. addInstr(env, mk_iMOVsd_RR(hi, tmp));
//.. addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(lo), tmp));
//.. if (e->tag == Iex_Binop
//.. && e->Iex.Binop.op == Iop_CmpNE64) {
//.. HReg hi1, hi2, lo1, lo2;
-//.. HReg tHi = newVRegI(env);
-//.. HReg tLo = newVRegI(env);
+//.. HReg tHi = newVRegI32(env);
+//.. HReg tLo = newVRegI32(env);
//.. iselInt64Expr( &hi1, &lo1, env, e->Iex.Binop.arg1 );
//.. iselInt64Expr( &hi2, &lo2, env, e->Iex.Binop.arg2 );
//.. addInstr(env, mk_iMOVsd_RR(hi1, tHi));
/* CmpNEZ64 */
if (e->tag == Iex_Unop
&& e->Iex.Unop.op == Iop_CmpNEZ64) {
- HReg hi, lo;
- HReg tmp = newVRegI(env);
- iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg );
- addInstr(env, mk_iMOVds_RR(tmp, lo));
- addInstr(env, PPC32Instr_Alu32(Palu_OR, tmp, tmp, PPC32RH_Reg(hi)));
- addInstr(env, PPC32Instr_Cmp32(False/*sign*/,7/*cr*/,tmp,PPC32RH_Imm(False,0)));
- return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+ if (!mode64) {
+ HReg hi, lo;
+ HReg tmp = newVRegI(env);
+ iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg );
+ addInstr(env, mk_iMOVds_RR(tmp, lo));
+ addInstr(env, PPC32Instr_Alu(Palu_OR, tmp, tmp, PPC32RH_Reg(hi)));
+ addInstr(env, PPC32Instr_Cmp(False/*sign*/,7/*cr*/,tmp,PPC32RH_Imm(False,0)));
+ return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+ } else { // mode64
+         HReg r_src = iselIntExpr_R(env, e->Iex.Unop.arg);
+ addInstr(env, PPC32Instr_Cmp(False/*sign*/,7/*cr*/,r_src,PPC32RH_Imm(False,0)));
+ return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+ }
}
/* var */
if (e->tag == Iex_Tmp) {
HReg r_src = lookupIRTemp(env, e->Iex.Tmp.tmp);
HReg src_masked = newVRegI(env);
- addInstr(env, PPC32Instr_Alu32(Palu_AND, src_masked, r_src, PPC32RH_Imm(False,1)));
- addInstr(env, PPC32Instr_Cmp32(False/*unsigned*/, 7/*cr*/, src_masked, PPC32RH_Imm(False,1)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, src_masked, r_src, PPC32RH_Imm(False,1)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7/*cr*/, src_masked, PPC32RH_Imm(False,1)));
return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
}
+ vex_printf("iselCondCode(ppc): No such tag(%u)\n", e->tag);
ppIRExpr(e);
- vpanic("iselCondCode(ppc32)");
+ vpanic("iselCondCode(ppc)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (128 bit) ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 128-bit value into a register pair, which is returned as
+ the first two parameters. As with iselIntExpr_R, these may be
+ either real or virtual regs; in any case they must not be changed
+ by subsequent code emitted by the caller. */
+
+static void iselInt128Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+{
+ vassert(mode64);
+ iselInt128Expr_wrk(rHi, rLo, env, e);
+# if 0
+ vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+# endif
+ vassert(hregClass(*rHi) == HRcIntWRDSZ);
+ vassert(hregIsVirtual(*rHi));
+ vassert(hregClass(*rLo) == HRcIntWRDSZ);
+ vassert(hregIsVirtual(*rLo));
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+{
+ vassert(e);
+ vassert(typeOfIRExpr(env->type_env,e) == Ity_I128);
+
+ /* read 128-bit IRTemp */
+ if (e->tag == Iex_Tmp) {
+ lookupIRTemp128( rHi, rLo, env, e->Iex.Tmp.tmp);
+ return;
+ }
+
+ /* --------- BINARY ops --------- */
+ if (e->tag == Iex_Binop) {
+ switch (e->Iex.Binop.op) {
+ /* 64 x 64 -> 128 multiply */
+ case Iop_MullU64:
+ case Iop_MullS64: {
+ HReg tLo = newVRegI(env);
+ HReg tHi = newVRegI(env);
+ Bool syned = toBool(e->Iex.Binop.op == Iop_MullS64);
+ HReg r_srcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ HReg r_srcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+ addInstr(env, PPC32Instr_MulL(False/*signedness irrelevant*/,
+ False/*lo64*/, tLo, r_srcL, r_srcR));
+ addInstr(env, PPC32Instr_MulL(syned,
+ True/*hi64*/, tHi, r_srcL, r_srcR));
+ *rHi = tHi;
+ *rLo = tLo;
+ return;
+ }
+
+ default:
+ break;
+ }
+ } /* if (e->tag == Iex_Binop) */
+
+
+ /* --------- UNARY ops --------- */
+ if (e->tag == Iex_Unop) {
+ switch (e->Iex.Unop.op) {
+ default:
+ break;
+ }
+ } /* if (e->tag == Iex_Unop) */
+
+ vex_printf("iselInt128Expr(ppc64): No such tag(%u)\n", e->tag);
+ ppIRExpr(e);
+ vpanic("iselInt128Expr(ppc64)");
}
static void iselInt64Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
{
+ vassert(!mode64);
iselInt64Expr_wrk(rHi, rLo, env, e);
# if 0
vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
vassert(e->Iex.Const.con->tag == Ico_U64);
- addInstr(env, PPC32Instr_LI32(tHi, wHi));
- addInstr(env, PPC32Instr_LI32(tLo, wLo));
+ addInstr(env, PPC32Instr_LI(tHi, wHi, mode64));
+ addInstr(env, PPC32Instr_LI(tLo, wLo, mode64));
*rHi = tHi;
*rLo = tLo;
return;
//.. HReg tLo, tHi;
//.. X86AMode *am0, *am4;
//.. vassert(e->Iex.LDle.ty == Ity_I64);
-//.. tLo = newVRegI(env);
-//.. tHi = newVRegI(env);
+//.. tLo = newVRegI32(env);
+//.. tHi = newVRegI32(env);
//.. am0 = iselIntExpr_AMode(env, e->Iex.LDle.addr);
//.. am4 = advance4(am0);
//.. addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am0), tLo ));
/* 64-bit GET */
if (e->tag == Iex_Get) {
- PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset, GuestStatePtr );
+ PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset,
+ GuestStatePtr(mode64) );
PPC32AMode* am_addr4 = advance4(env, am_addr);
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
- addInstr(env, PPC32Instr_Load( 4, False, tHi, am_addr ));
- addInstr(env, PPC32Instr_Load( 4, False, tLo, am_addr4 ));
+ addInstr(env, PPC32Instr_Load( 4, False, tHi, am_addr, mode64 ));
+ addInstr(env, PPC32Instr_Load( 4, False, tLo, am_addr4, mode64 ));
*rHi = tHi;
*rLo = tLo;
return;
//.. = genGuestArrayOffset( env, e->Iex.GetI.descr,
//.. e->Iex.GetI.ix, e->Iex.GetI.bias );
//.. X86AMode* am4 = advance4(am);
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
+//.. HReg tLo = newVRegI32(env);
+//.. HReg tHi = newVRegI32(env);
//.. addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am), tLo ));
//.. addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am4), tHi ));
//.. *rHi = tHi;
HReg e0Lo, e0Hi, eXLo, eXHi;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
-
+
PPC32CondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
HReg r_cond = iselIntExpr_R(env, e->Iex.Mux0X.cond);
HReg r_tmp = newVRegI(env);
-
+
iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0);
iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX);
addInstr(env, mk_iMOVds_RR(tHi,eXHi));
addInstr(env, mk_iMOVds_RR(tLo,eXLo));
-
- addInstr(env, PPC32Instr_Alu32(Palu_AND,
- r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
- addInstr(env, PPC32Instr_Cmp32(False/*unsigned*/,
- 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
-
- addInstr(env, PPC32Instr_CMov32(cc,tHi,PPC32RI_Reg(e0Hi)));
- addInstr(env, PPC32Instr_CMov32(cc,tLo,PPC32RI_Reg(e0Lo)));
+
+ addInstr(env, PPC32Instr_Alu(Palu_AND,
+ r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/,
+ 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
+
+ addInstr(env, PPC32Instr_CMov(cc,tHi,PPC32RI_Reg(e0Hi)));
+ addInstr(env, PPC32Instr_CMov(cc,tLo,PPC32RI_Reg(e0Lo)));
*rHi = tHi;
*rLo = tLo;
return;
//.. /* Get the 64-bit operand into edx:eax, and the other into
//.. any old R/M. */
//.. HReg sHi, sLo;
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
+//.. HReg tLo = newVRegI32(env);
+//.. HReg tHi = newVRegI32(env);
//.. Bool syned = e->Iex.Binop.op == Iop_DivModS64to32;
//.. X86RM* rmRight = iselIntExpr_RM(env, e->Iex.Binop.arg2);
//.. iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
: Palu_XOR;
iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Alu32(op, tHi, xHi, PPC32RH_Reg(yHi)));
- addInstr(env, PPC32Instr_Alu32(op, tLo, xLo, PPC32RH_Reg(yLo)));
+ addInstr(env, PPC32Instr_Alu(op, tHi, xHi, PPC32RH_Reg(yHi)));
+ addInstr(env, PPC32Instr_Alu(op, tLo, xLo, PPC32RH_Reg(yLo)));
*rHi = tHi;
*rLo = tLo;
return;
//.. cmovnz %tmp, %lo
//.. */
//.. HReg rAmt, sHi, sLo, tHi, tLo, tTemp;
-//.. tLo = newVRegI(env);
-//.. tHi = newVRegI(env);
-//.. tTemp = newVRegI(env);
+//.. tLo = newVRegI32(env);
+//.. tHi = newVRegI32(env);
+//.. tTemp = newVRegI32(env);
//.. rAmt = iselIntExpr_R(env, e->Iex.Binop.arg2);
//.. iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
//.. addInstr(env, mk_iMOVsd_RR(rAmt, hregX86_ECX()));
//.. cmovnz %tmp, %hi
//.. */
//.. HReg rAmt, sHi, sLo, tHi, tLo, tTemp;
-//.. tLo = newVRegI(env);
-//.. tHi = newVRegI(env);
-//.. tTemp = newVRegI(env);
+//.. tLo = newVRegI32(env);
+//.. tHi = newVRegI32(env);
+//.. tTemp = newVRegI32(env);
//.. rAmt = iselIntExpr_R(env, e->Iex.Binop.arg2);
//.. iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
//.. addInstr(env, mk_iMOVsd_RR(rAmt, hregX86_ECX()));
//.. duplication. */
//.. case Iop_F64toI64: {
//.. HReg rf = iselDblExpr(env, e->Iex.Binop.arg2);
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
+//.. HReg tLo = newVRegI32(env);
+//.. HReg tHi = newVRegI32(env);
//..
//.. /* Used several times ... */
//.. /* Careful ... this sharing is only safe because
//.. not marked as regparm functions.
//.. */
//.. HReg xLo, xHi, yLo, yHi;
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
+//.. HReg tLo = newVRegI32(env);
+//.. HReg tHi = newVRegI32(env);
//.. iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
//.. addInstr(env, X86Instr_Push(X86RMI_Reg(yHi)));
//.. addInstr(env, X86Instr_Push(X86RMI_Reg(yLo)));
//.. not marked as regparm functions.
//.. */
//.. HReg xLo, xHi;
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
+//.. HReg tLo = newVRegI32(env);
+//.. HReg tHi = newVRegI32(env);
//.. X86RMI* y = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
//.. addInstr(env, X86Instr_Push(y));
//.. iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
if (e->tag == Iex_Unop) {
switch (e->Iex.Unop.op) {
- /* 32Sto64(e) */
- case Iop_32Sto64: {
- HReg tHi = newVRegI(env);
- HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Alu32(Palu_SAR, tHi, src, PPC32RH_Imm(False,31)));
- *rHi = tHi;
- *rLo = src;
- return;
- }
-
- /* 32Uto64(e) */
- case Iop_32Uto64: {
- HReg tHi = newVRegI(env);
- HReg tLo = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_LI32(tHi, 0));
- *rHi = tHi;
- *rLo = tLo;
- return;
- }
-
- /* V128{HI}to64 */
- case Iop_V128HIto64:
- case Iop_V128to64: {
- HReg r_aligned16;
- Int off = e->Iex.Unop.op==Iop_V128HIto64 ? 0 : 8;
- HReg tLo = newVRegI(env);
- HReg tHi = newVRegI(env);
- HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
- PPC32AMode *am_off0, *am_offLO, *am_offHI;
- sub_from_sp( env, 32 ); // Move SP down 32 bytes
-
- // get a quadword aligned address within our stack space
- r_aligned16 = get_sp_aligned16( env );
- am_off0 = PPC32AMode_IR( 0, r_aligned16 );
- am_offHI = PPC32AMode_IR( off, r_aligned16 );
- am_offLO = PPC32AMode_IR( off+4, r_aligned16 );
-
- // store as Vec128
- addInstr(env, PPC32Instr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
-
- // load hi,lo words (of hi/lo half of vec) as Ity_I32's
- addInstr(env, PPC32Instr_Load( 4, False, tHi, am_offHI ));
- addInstr(env, PPC32Instr_Load( 4, False, tLo, am_offLO ));
+ /* 32Sto64(e) */
+ case Iop_32Sto64: {
+ HReg tHi = newVRegI(env);
+ HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+ addInstr(env, PPC32Instr_Alu(Palu_SAR, tHi, src, PPC32RH_Imm(False,31)));
+ *rHi = tHi;
+ *rLo = src;
+ return;
+ }
- add_to_sp( env, 32 ); // Reset SP
- *rHi = tHi;
- *rLo = tLo;
- return;
- }
+ /* 32Uto64(e) */
+ case Iop_32Uto64: {
+ HReg tHi = newVRegI(env);
+ HReg tLo = iselIntExpr_R(env, e->Iex.Unop.arg);
+ addInstr(env, PPC32Instr_LI(tHi, 0, mode64));
+ *rHi = tHi;
+ *rLo = tLo;
+ return;
+ }
- /* could do better than this, but for now ... */
- case Iop_1Sto64: {
- HReg tLo = newVRegI(env);
- HReg tHi = newVRegI(env);
- PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_Set32(cond,tLo));
- addInstr(env, PPC32Instr_Alu32(Palu_SHL, tLo, tLo, PPC32RH_Imm(False,31)));
- addInstr(env, PPC32Instr_Alu32(Palu_SAR, tLo, tLo, PPC32RH_Imm(False,31)));
- addInstr(env, mk_iMOVds_RR(tHi, tLo));
- *rHi = tHi;
- *rLo = tLo;
- return;
- }
+ /* V128{HI}to64 */
+ case Iop_V128HIto64:
+ case Iop_V128to64: {
+ HReg r_aligned16;
+ Int off = e->Iex.Unop.op==Iop_V128HIto64 ? 0 : 8;
+ HReg tLo = newVRegI(env);
+ HReg tHi = newVRegI(env);
+ HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
+ PPC32AMode *am_off0, *am_offLO, *am_offHI;
+ sub_from_sp( env, 32 ); // Move SP down 32 bytes
+
+ // get a quadword aligned address within our stack space
+ r_aligned16 = get_sp_aligned16( env );
+ am_off0 = PPC32AMode_IR( 0, r_aligned16 );
+ am_offHI = PPC32AMode_IR( off, r_aligned16 );
+ am_offLO = PPC32AMode_IR( off+4, r_aligned16 );
+
+ // store as Vec128
+ addInstr(env, PPC32Instr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
+
+ // load hi,lo words (of hi/lo half of vec) as Ity_I32's
+ addInstr(env, PPC32Instr_Load( 4, False, tHi, am_offHI, mode64 ));
+ addInstr(env, PPC32Instr_Load( 4, False, tLo, am_offLO, mode64 ));
+
+ add_to_sp( env, 32 ); // Reset SP
+ *rHi = tHi;
+ *rLo = tLo;
+ return;
+ }
- case Iop_Neg64: {
- HReg yLo, yHi;
- HReg zero = newVRegI(env);
- HReg tLo = newVRegI(env);
- HReg tHi = newVRegI(env);
- iselInt64Expr(&yHi, &yLo, env, e->Iex.Unop.arg);
- addInstr(env, PPC32Instr_LI32(zero, 0));
- addInstr(env, PPC32Instr_AddSubC32( False/*sub*/, True /*set carry*/,
- tLo, zero, yLo));
- addInstr(env, PPC32Instr_AddSubC32( False/*sub*/, False/*read carry*/,
- tHi, zero, yHi));
- *rHi = tHi;
- *rLo = tLo;
- return;
- }
+ /* could do better than this, but for now ... */
+ case Iop_1Sto64: {
+ HReg tLo = newVRegI(env);
+ HReg tHi = newVRegI(env);
+ PPC32CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+ addInstr(env, PPC32Instr_Set32(cond,tLo));
+ addInstr(env, PPC32Instr_Alu(Palu_SHL, tLo, tLo, PPC32RH_Imm(False,31)));
+ addInstr(env, PPC32Instr_Alu(Palu_SAR, tLo, tLo, PPC32RH_Imm(False,31)));
+ addInstr(env, mk_iMOVds_RR(tHi, tLo));
+ *rHi = tHi;
+ *rLo = tLo;
+ return;
+ }
+
+ case Iop_Neg64: {
+ HReg yLo, yHi;
+ HReg zero = newVRegI(env);
+ HReg tLo = newVRegI(env);
+ HReg tHi = newVRegI(env);
+ iselInt64Expr(&yHi, &yLo, env, e->Iex.Unop.arg);
+ addInstr(env, PPC32Instr_LI(zero, 0, mode64));
+ addInstr(env, PPC32Instr_AddSubC32( False/*sub*/, True /*set carry*/,
+ tLo, zero, yLo));
+ addInstr(env, PPC32Instr_AddSubC32( False/*sub*/, False/*read carry*/,
+ tHi, zero, yHi));
+ *rHi = tHi;
+ *rLo = tLo;
+ return;
+ }
//.. /* Not64(e) */
//.. case Iop_Not64: {
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
+//.. HReg tLo = newVRegI32(env);
+//.. HReg tHi = newVRegI32(env);
//.. HReg sHi, sLo;
//.. iselInt64Expr(&sHi, &sLo, env, e->Iex.Unop.arg);
//.. addInstr(env, mk_iMOVsd_RR(sHi, tHi));
//.. return;
//.. }
- /* ReinterpF64asI64(e) */
- /* Given an IEEE754 double, produce an I64 with the same bit
- pattern. */
- case Iop_ReinterpF64asI64: {
- PPC32AMode *am_addr0, *am_addr1;
- HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
- HReg r_dstLo = newVRegI(env);
- HReg r_dstHi = newVRegI(env);
-
- sub_from_sp( env, 16 ); // Move SP down 16 bytes
- am_addr0 = PPC32AMode_IR(0, StackFramePtr);
- am_addr1 = PPC32AMode_IR(4, StackFramePtr);
-
- // store as F64
- addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr0 ));
-
- // load hi,lo as Ity_I32's
- addInstr(env, PPC32Instr_Load( 4, False, r_dstHi, am_addr0 ));
- addInstr(env, PPC32Instr_Load( 4, False, r_dstLo, am_addr1 ));
- *rHi = r_dstHi;
- *rLo = r_dstLo;
-
- add_to_sp( env, 16 ); // Reset SP
- return;
- }
+ /* ReinterpF64asI64(e) */
+ /* Given an IEEE754 double, produce an I64 with the same bit
+ pattern. */
+ case Iop_ReinterpF64asI64: {
+ PPC32AMode *am_addr0, *am_addr1;
+ HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
+ HReg r_dstLo = newVRegI(env);
+ HReg r_dstHi = newVRegI(env);
+
+ sub_from_sp( env, 16 ); // Move SP down 16 bytes
+ am_addr0 = PPC32AMode_IR(0, StackFramePtr(mode64));
+ am_addr1 = PPC32AMode_IR(4, StackFramePtr(mode64));
+
+ // store as F64
+ addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr0 ));
+
+ // load hi,lo as Ity_I32's
+ addInstr(env, PPC32Instr_Load( 4, False, r_dstHi, am_addr0, mode64 ));
+ addInstr(env, PPC32Instr_Load( 4, False, r_dstLo, am_addr1, mode64 ));
+ *rHi = r_dstHi;
+ *rLo = r_dstLo;
+
+ add_to_sp( env, 16 ); // Reset SP
+ return;
+ }
//.. case Iop_CmpNEZ32x2:
//.. fn = (HWord)h_generic_calc_CmpNEZ32x2; goto unish;
//.. not marked as regparm functions.
//.. */
//.. HReg xLo, xHi;
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
+//.. HReg tLo = newVRegI32(env);
+//.. HReg tHi = newVRegI32(env);
//.. iselInt64Expr(&xHi, &xLo, env, e->Iex.Unop.arg);
//.. addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
//.. addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
//.. return;
//.. }
- default:
- break;
+ default:
+ break;
}
} /* if (e->tag == Iex_Unop) */
//.. /* --------- CCALL --------- */
//.. if (e->tag == Iex_CCall) {
-//.. HReg tLo = newVRegI(env);
-//.. HReg tHi = newVRegI(env);
+//.. HReg tLo = newVRegI32(env);
+//.. HReg tHi = newVRegI32(env);
//..
//.. /* Marshal args, do the call, clear stack. */
//.. doHelperCall( env, False, NULL, e->Iex.CCall.cee, e->Iex.CCall.args );
if (e->tag == Iex_Get) {
HReg r_dst = newVRegF(env);
- PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset, GuestStatePtr );
+ PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset,
+ GuestStatePtr(mode64) );
addInstr(env, PPC32Instr_FpLdSt( True/*load*/, 4, r_dst, am_addr ));
return r_dst;
}
/* --------- LITERAL --------- */
if (e->tag == Iex_Const) {
- HReg r_srcHi, r_srcLo;
union { UInt u32x2[2]; ULong u64; Double f64; } u;
vassert(sizeof(u) == 8);
vassert(sizeof(u.u64) == 8);
else
vpanic("iselDblExpr(ppc32): const");
- r_srcHi = newVRegI(env);
- r_srcLo = newVRegI(env);
- addInstr(env, PPC32Instr_LI32(r_srcHi, u.u32x2[1]));
- addInstr(env, PPC32Instr_LI32(r_srcLo, u.u32x2[0]));
- return mk_LoadRRtoFPR( env, r_srcHi, r_srcLo );
+ if (mode64) {
+ HReg r_src = newVRegI(env);
+ vassert(0);
+ // AWAITING TEST CASE
+ addInstr(env, PPC32Instr_LI(r_src, u.u64, mode64));
+ return mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64
+ } else { // mode32
+ HReg r_srcHi = newVRegI(env);
+ HReg r_srcLo = newVRegI(env);
+ addInstr(env, PPC32Instr_LI(r_srcHi, u.u32x2[1], mode64));
+ addInstr(env, PPC32Instr_LI(r_srcLo, u.u32x2[0], mode64));
+ return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
+ }
}
if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
if (e->tag == Iex_Get) {
HReg r_dst = newVRegF(env);
- PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset, GuestStatePtr );
+ PPC32AMode* am_addr = PPC32AMode_IR(e->Iex.Get.offset,
+ GuestStatePtr(mode64) );
addInstr(env, PPC32Instr_FpLdSt( True/*load*/, 8, r_dst, am_addr ));
return r_dst;
}
if (e->tag == Iex_Binop) {
PPC32FpOp fpop = Pfp_INVALID;
switch (e->Iex.Binop.op) {
- case Iop_AddF64: fpop = Pfp_ADD; break;
- case Iop_SubF64: fpop = Pfp_SUB; break;
- case Iop_MulF64: fpop = Pfp_MUL; break;
- case Iop_DivF64: fpop = Pfp_DIV; break;
- default: break;
+ case Iop_AddF64: fpop = Pfp_ADD; break;
+ case Iop_SubF64: fpop = Pfp_SUB; break;
+ case Iop_MulF64: fpop = Pfp_MUL; break;
+ case Iop_DivF64: fpop = Pfp_DIV; break;
+ default: break;
}
if (fpop != Pfp_INVALID) {
HReg r_dst = newVRegF(env);
if (e->tag == Iex_Unop) {
PPC32FpOp fpop = Pfp_INVALID;
switch (e->Iex.Unop.op) {
- case Iop_NegF64: fpop = Pfp_NEG; break;
- case Iop_AbsF64: fpop = Pfp_ABS; break;
- case Iop_SqrtF64: fpop = Pfp_SQRT; break;
+ case Iop_NegF64: fpop = Pfp_NEG; break;
+ case Iop_AbsF64: fpop = Pfp_ABS; break;
+ case Iop_SqrtF64: fpop = Pfp_SQRT; break;
//.. case Iop_SinF64: fpop = Xfp_SIN; break;
//.. case Iop_CosF64: fpop = Xfp_COS; break;
//.. case Iop_TanF64: fpop = Xfp_TAN; break;
//.. add_to_esp(env, 4);
//.. return dst;
//.. }
- case Iop_ReinterpI64asF64: {
- /* Given an I64, produce an IEEE754 double with the same
- bit pattern. */
+ case Iop_ReinterpI64asF64: {
+ /* Given an I64, produce an IEEE754 double with the same
+ bit pattern. */
+ if (!mode64) {
HReg r_srcHi, r_srcLo;
iselInt64Expr( &r_srcHi, &r_srcLo, env, e->Iex.Unop.arg);
- return mk_LoadRRtoFPR( env, r_srcHi, r_srcLo );
- }
- case Iop_F32toF64: {
- /* this is a no-op */
- HReg res = iselFltExpr(env, e->Iex.Unop.arg);
- return res;
+ return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
+ } else {
+ // TODO
+ vassert(0);
}
- default:
- break;
+ }
+ case Iop_F32toF64: {
+ /* this is a no-op */
+ HReg res = iselFltExpr(env, e->Iex.Unop.arg);
+ return res;
+ }
+ default:
+ break;
}
}
HReg fr0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
HReg fr_dst = newVRegF(env);
HReg r_tmp = newVRegI(env);
- addInstr(env, PPC32Instr_Alu32(Palu_AND, r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
+ addInstr(env, PPC32Instr_Alu(Palu_AND, r_tmp, r_cond, PPC32RH_Imm(False,0xFF)));
addInstr(env, PPC32Instr_FpUnary( Pfp_MOV, fr_dst, frX ));
- addInstr(env, PPC32Instr_Cmp32(False/*unsigned*/, 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
+ addInstr(env, PPC32Instr_Cmp(False/*unsigned*/, 7/*cr*/, r_tmp, PPC32RH_Imm(False,0)));
addInstr(env, PPC32Instr_FpCMov( cc, fr_dst, fr0 ));
return fr_dst;
}
HReg dst = newVRegV(env);
addInstr(env,
PPC32Instr_AvLdSt( True/*load*/, 16, dst,
- PPC32AMode_IR(e->Iex.Get.offset, GuestStatePtr)));
+ PPC32AMode_IR(e->Iex.Get.offset,
+ GuestStatePtr(mode64))));
return dst;
}
/* Store zeros */
r_zeros = newVRegI(env);
- addInstr(env, PPC32Instr_LI32(r_zeros, 0x0));
- addInstr(env, PPC32Instr_Store( 4, am_off0, r_zeros ));
- addInstr(env, PPC32Instr_Store( 4, am_off4, r_zeros ));
- addInstr(env, PPC32Instr_Store( 4, am_off8, r_zeros ));
+ addInstr(env, PPC32Instr_LI(r_zeros, 0x0, mode64));
+ addInstr(env, PPC32Instr_Store( 4, am_off0, r_zeros, mode64 ));
+ addInstr(env, PPC32Instr_Store( 4, am_off4, r_zeros, mode64 ));
+ addInstr(env, PPC32Instr_Store( 4, am_off8, r_zeros, mode64 ));
/* Store r_src in low word of quadword-aligned mem */
- addInstr(env, PPC32Instr_Store( 4, am_off12, r_src ));
+ addInstr(env, PPC32Instr_Store( 4, am_off12, r_src, mode64 ));
/* Load word into low word of quadword vector reg */
addInstr(env, PPC32Instr_AvLdSt( True/*load*/, 4, dst, am_off12 ));
//.. }
//..
case Iop_64HLtoV128: {
- HReg r3, r2, r1, r0, r_aligned16;
- PPC32AMode *am_off0, *am_off4, *am_off8, *am_off12;
- HReg dst = newVRegV(env);
- /* do this via the stack (easy, convenient, etc) */
- sub_from_sp( env, 32 ); // Move SP down
-
- // get a quadword aligned address within our stack space
- r_aligned16 = get_sp_aligned16( env );
- am_off0 = PPC32AMode_IR( 0, r_aligned16);
- am_off4 = PPC32AMode_IR( 4, r_aligned16);
- am_off8 = PPC32AMode_IR( 8, r_aligned16);
- am_off12 = PPC32AMode_IR( 12, r_aligned16);
-
- /* Do the less significant 64 bits */
- iselInt64Expr(&r1, &r0, env, e->Iex.Binop.arg2);
- addInstr(env, PPC32Instr_Store( 4, am_off12, r0 ));
- addInstr(env, PPC32Instr_Store( 4, am_off8, r1 ));
- /* Do the more significant 64 bits */
- iselInt64Expr(&r3, &r2, env, e->Iex.Binop.arg1);
- addInstr(env, PPC32Instr_Store( 4, am_off4, r2 ));
- addInstr(env, PPC32Instr_Store( 4, am_off0, r3 ));
-
- /* Fetch result back from stack. */
- addInstr(env, PPC32Instr_AvLdSt(True/*load*/, 16, dst, am_off0));
-
- add_to_sp( env, 32 ); // Reset SP
- return dst;
+ if (!mode64) {
+ HReg r3, r2, r1, r0, r_aligned16;
+ PPC32AMode *am_off0, *am_off4, *am_off8, *am_off12;
+ HReg dst = newVRegV(env);
+ /* do this via the stack (easy, convenient, etc) */
+ sub_from_sp( env, 32 ); // Move SP down
+
+ // get a quadword aligned address within our stack space
+ r_aligned16 = get_sp_aligned16( env );
+ am_off0 = PPC32AMode_IR( 0, r_aligned16);
+ am_off4 = PPC32AMode_IR( 4, r_aligned16);
+ am_off8 = PPC32AMode_IR( 8, r_aligned16);
+ am_off12 = PPC32AMode_IR( 12, r_aligned16);
+
+ /* Do the less significant 64 bits */
+ iselInt64Expr(&r1, &r0, env, e->Iex.Binop.arg2);
+ addInstr(env, PPC32Instr_Store( 4, am_off12, r0, mode64 ));
+ addInstr(env, PPC32Instr_Store( 4, am_off8, r1, mode64 ));
+ /* Do the more significant 64 bits */
+ iselInt64Expr(&r3, &r2, env, e->Iex.Binop.arg1);
+ addInstr(env, PPC32Instr_Store( 4, am_off4, r2, mode64 ));
+ addInstr(env, PPC32Instr_Store( 4, am_off0, r3, mode64 ));
+
+ /* Fetch result back from stack. */
+ addInstr(env, PPC32Instr_AvLdSt(True/*load*/, 16, dst, am_off0));
+
+ add_to_sp( env, 32 ); // Reset SP
+ return dst;
+ } else {
+ // TODO
+ vassert(0);
+ }
}
case Iop_Add32Fx4: op = Pavfp_ADDF; goto do_32Fx4;
IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
IREndness end = stmt->Ist.Store.end;
- if (tya != Ity_I32 || end != Iend_BE)
+ if ( end != Iend_BE ||
+ (!mode64 && (tya != Ity_I32)) ||
+ ( mode64 && (tya != Ity_I64)) )
goto stmt_fail;
am_addr = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
- if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32) {
+ if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
+ (mode64 && (tyd == Ity_I64))) {
HReg r_src = iselIntExpr_R(env, stmt->Ist.Store.data);
addInstr(env, PPC32Instr_Store( toUChar(sizeofIRType(tyd)),
- am_addr, r_src));
+ am_addr, r_src, mode64 ));
return;
}
if (tyd == Ity_F64) {
/* --------- PUT --------- */
case Ist_Put: {
IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
- if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32) {
+ if (ty == Ity_I8 || ty == Ity_I16 ||
+ ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
HReg r_src = iselIntExpr_R(env, stmt->Ist.Put.data);
- PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset, GuestStatePtr);
+ PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset,
+ GuestStatePtr(mode64));
addInstr(env, PPC32Instr_Store( toUChar(sizeofIRType(ty)),
- am_addr, r_src ));
+ am_addr, r_src, mode64 ));
return;
}
- if (ty == Ity_I64) {
+ if (!mode64 && ty == Ity_I64) {
HReg rHi, rLo;
- PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset, GuestStatePtr);
+ PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset,
+ GuestStatePtr(mode64));
PPC32AMode* am_addr4 = advance4(env, am_addr);
iselInt64Expr(&rHi,&rLo, env, stmt->Ist.Put.data);
- addInstr(env, PPC32Instr_Store( 4, am_addr, rHi ));
- addInstr(env, PPC32Instr_Store( 4, am_addr4, rLo ));
+ addInstr(env, PPC32Instr_Store( 4, am_addr, rHi, mode64 ));
+ addInstr(env, PPC32Instr_Store( 4, am_addr4, rLo, mode64 ));
return;
}
if (ty == Ity_V128) {
/* Guest state vectors are 16byte aligned, so don't need to worry here */
HReg v_src = iselVecExpr(env, stmt->Ist.Put.data);
- PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset, GuestStatePtr);
+ PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset,
+ GuestStatePtr(mode64));
addInstr(env, PPC32Instr_AvLdSt(False/*store*/, 16, v_src, am_addr));
return;
}
//.. }
if (ty == Ity_F64) {
HReg fr_src = iselDblExpr(env, stmt->Ist.Put.data);
- PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset, GuestStatePtr);
+ PPC32AMode* am_addr = PPC32AMode_IR(stmt->Ist.Put.offset,
+ GuestStatePtr(mode64));
addInstr(env, PPC32Instr_FpLdSt( False/*store*/, 8, fr_src, am_addr ));
return;
}
case Ist_Tmp: {
IRTemp tmp = stmt->Ist.Tmp.tmp;
IRType ty = typeOfIRTemp(env->type_env, tmp);
- if (ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
+ if (ty == Ity_I8 || ty == Ity_I16 ||
+ ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
HReg r_dst = lookupIRTemp(env, tmp);
HReg r_src = iselIntExpr_R(env, stmt->Ist.Tmp.data);
addInstr(env, mk_iMOVds_RR( r_dst, r_src ));
return;
}
- if (ty == Ity_I64) {
+ if (!mode64 && ty == Ity_I64) {
HReg r_srcHi, r_srcLo, r_dstHi, r_dstLo;
iselInt64Expr(&r_srcHi,&r_srcLo, env, stmt->Ist.Tmp.data);
lookupIRTemp64( &r_dstHi, &r_dstLo, env, tmp);
return;
retty = typeOfIRTemp(env->type_env, d->tmp);
- if (retty == Ity_I64) {
+ if (!mode64 && retty == Ity_I64) {
HReg r_dstHi, r_dstLo;
/* The returned value is in %r3:%r4. Park it in the
register-pair associated with tmp. */
lookupIRTemp64( &r_dstHi, &r_dstLo, env, d->tmp);
- addInstr(env, mk_iMOVds_RR(r_dstHi, hregPPC32_GPR3()));
- addInstr(env, mk_iMOVds_RR(r_dstLo, hregPPC32_GPR4()));
+ addInstr(env, mk_iMOVds_RR(r_dstHi, hregPPC_GPR3(mode64)));
+ addInstr(env, mk_iMOVds_RR(r_dstLo, hregPPC_GPR4(mode64)));
return;
}
- if (retty == Ity_I32 || retty == Ity_I16 || retty == Ity_I8) {
+ if (retty == Ity_I8 || retty == Ity_I16 ||
+ retty == Ity_I32 || ((retty == Ity_I64) && mode64)) {
/* The returned value is in %r3. Park it in the register
associated with tmp. */
HReg r_dst = lookupIRTemp(env, d->tmp);
- addInstr(env, mk_iMOVds_RR(r_dst, hregPPC32_GPR3()));
+ addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
return;
}
break;
case Ist_Exit: {
PPC32RI* ri_dst;
PPC32CondCode cc;
- if (stmt->Ist.Exit.dst->tag != Ico_U32)
- vpanic("isel_ppc32: Ist_Exit: dst is not a 32-bit value");
+ IRConstTag tag = stmt->Ist.Exit.dst->tag;
+ if (!mode64 && (tag != Ico_U32))
+ vpanic("iselStmt(ppc32): Ist_Exit: dst is not a 32-bit value");
+ if (mode64 && (tag != Ico_U64))
+ vpanic("iselStmt(ppc64): Ist_Exit: dst is not a 64-bit value");
ri_dst = iselIntExpr_RI(env, IRExpr_Const(stmt->Ist.Exit.dst));
cc = iselCondCode(env,stmt->Ist.Exit.guard);
addInstr(env, PPC32Instr_RdWrLR(True, env->savedLR));
ISelEnv* env;
VexSubArch subarch_host = archinfo_host->subarch;
- /* sanity ... */
- vassert(subarch_host == VexSubArchPPC32_I
- || subarch_host == VexSubArchPPC32_FI
- || subarch_host == VexSubArchPPC32_VFI);
+ /* Figure out whether we're being ppc32 or ppc64 today. */
+ switch (subarch_host) {
+ case VexSubArchPPC32_VFI:
+ case VexSubArchPPC32_FI:
+ case VexSubArchPPC32_I:
+ mode64 = False;
+ break;
+ case VexSubArchPPC64_VFI:
+ case VexSubArchPPC64_FI:
+ mode64 = True;
+ break;
+ default:
+ vpanic("iselBB_PPC32: illegal subarch");
+ }
/* Make up an initial environment to use. */
env = LibVEX_Alloc(sizeof(ISelEnv));
case Ity_I1:
case Ity_I8:
case Ity_I16:
- case Ity_I32: hreg = mkHReg(j++, HRcInt32, True); break;
- case Ity_I64: hreg = mkHReg(j++, HRcInt32, True);
- hregHI = mkHReg(j++, HRcInt32, True); break;
+ case Ity_I32: hreg = mkHReg(j++, HRcIntWRDSZ, True); break;
+ case Ity_I64:
+ if (mode64) {
+ hreg = mkHReg(j++, HRcInt64, True);
+ } else {
+ hreg = mkHReg(j++, HRcInt32, True);
+ hregHI = mkHReg(j++, HRcInt32, True);
+ }
+ break;
+ case Ity_I128: vassert(mode64);
+ hreg = mkHReg(j++, HRcInt64, True);
+ hregHI = mkHReg(j++, HRcInt64, True);
+ break;
+
case Ity_F32:
case Ity_F64: hreg = mkHReg(j++, HRcFlt64, True); break;
case Ity_V128: hreg = mkHReg(j++, HRcVec128, True); break;
default:
ppIRType(bb->tyenv->types[i]);
- vpanic("iselBB(ppc32): IRTemp type");
+ if (mode64)
+ vpanic("iselBB(ppc64): IRTemp type");
+ else
+ vpanic("iselBB(ppc32): IRTemp type");
}
env->vregmap[i] = hreg;
env->vregmapHI[i] = hregHI;
case Iop_DivU32: vex_printf("DivU32"); return;
case Iop_DivS32: vex_printf("DivS32"); return;
+ case Iop_DivU64: vex_printf("DivU64"); return;
+ case Iop_DivS64: vex_printf("DivS64"); return;
case Iop_DivModU64to32: vex_printf("DivModU64to32"); return;
case Iop_DivModS64to32: vex_printf("DivModS64to32"); return;
case Iop_DivU32: case Iop_DivS32:
BINARY(Ity_I32, Ity_I32,Ity_I32);
+ case Iop_DivU64: case Iop_DivS64:
+ BINARY(Ity_I64, Ity_I64, Ity_I64);
+
case Iop_DivModU64to32: case Iop_DivModS64to32:
BINARY(Ity_I64, Ity_I64,Ity_I32);
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_ppc32.h"
+#include "libvex_guest_ppc64.h"
#include "main/vex_globals.h"
#include "main/vex_util.h"
|| archinfo_guest->subarch == VexSubArchPPC32_VFI);
break;
+ case VexArchPPC64:
+ mode64 = True;
+ getAllocableRegs_PPC32 ( &n_available_real_regs,
+ &available_real_regs, mode64 );
+ isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPC32Instr;
+ getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPC32Instr;
+ mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPC32Instr;
+ genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_PPC32;
+ genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_PPC32;
+ ppInstr = (void(*)(HInstr*, Bool)) ppPPC32Instr;
+ ppReg = (void(*)(HReg)) ppHRegPPC32;
+ iselBB = iselBB_PPC32;
+ emit = (Int(*)(UChar*,Int,HInstr*, Bool)) emit_PPC32Instr;
+ host_is_bigendian = True;
+ host_word_type = Ity_I64;
+ vassert(archinfo_guest->subarch == VexSubArchPPC64_FI
+ || archinfo_guest->subarch == VexSubArchPPC64_VFI);
+ break;
+
default:
vpanic("LibVEX_Translate: unsupported target insn set");
}
vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TILEN ) == 4);
break;
+ case VexArchPPC64:
+ preciseMemExnsFn = guest_ppc64_state_requires_precise_mem_exns;
+ disInstrFn = disInstr_PPC32;
+ specHelper = guest_ppc64_spechelper;
+ guest_sizeB = sizeof(VexGuestPPC64State);
+ guest_word_type = Ity_I64;
+ guest_layout = &ppc64Guest_layout;
+ offB_TISTART = offsetof(VexGuestPPC64State,guest_TISTART);
+ offB_TILEN = offsetof(VexGuestPPC64State,guest_TILEN);
+ vassert(archinfo_guest->subarch == VexSubArchPPC64_FI
+ || archinfo_guest->subarch == VexSubArchPPC64_VFI);
+ vassert(0 == sizeof(VexGuestPPC64State) % 16);
+ vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TISTART ) == 8);
+ vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TILEN ) == 8);
+ break;
+
default:
vpanic("LibVEX_Translate: unsupported guest insn set");
}
UInt guest_bytes_read = (UInt)guest_extents->len[0];
vex_printf(". 0 %llx %u\n.", guest_bytes_addr, guest_bytes_read );
for (i = 0; i < guest_bytes_read; i++)
- vex_printf(" %02x", (Int)p[i] );
+ vex_printf(" %02x", (Int)p[i] );
vex_printf("\n\n");
}
}
/* Register allocate. */
rcode = doRegisterAllocation ( vcode, available_real_regs,
- n_available_real_regs,
+ n_available_real_regs,
isMove, getRegUsage, mapRegs,
genSpill, genReload, guest_sizeB,
ppInstr, ppReg, mode64 );
case VexArchAMD64: return "AMD64";
case VexArchARM: return "ARM";
case VexArchPPC32: return "PPC32";
+ case VexArchPPC64: return "PPC64";
default: return "VexArch???";
}
}
case VexSubArchPPC32_I: return "ppc32-int-only";
case VexSubArchPPC32_FI: return "ppc32-int-and-fp";
case VexSubArchPPC32_VFI: return "ppc32-int-fp-and-AV";
+ case VexSubArchPPC64_FI: return "ppc64-int-and-fp";
+ case VexSubArchPPC64_VFI: return "ppc64-int-fp-and-AV";
default: return "VexSubArch???";
}
}
/* TODO: clarify semantics wrt rounding, negative values, whatever */
Iop_DivU32, // :: I32,I32 -> I32 (simple div, no mod)
Iop_DivS32, // ditto, signed
+ Iop_DivU64, // :: I64,I64 -> I64 (simple div, no mod)
+ Iop_DivS64, // ditto, signed
Iop_DivModU64to32, // :: I64,I32 -> I64
// of which lo half is div and hi half is mod