Add a Bool 'mode64' parameter to the backend instruction printers, register-allocator callbacks and emitters, and thread it through the register allocator and translation driver. This will be needed for the ppc32/64 backend.
git-svn-id: svn://svn.valgrind.org/vex/trunk@1487
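In outline, the pattern applied throughout is the following (a minimal sketch; SomeInstr and ppSomeInstr are illustrative names, not from this patch):

   /* Every backend hook gains a trailing Bool mode64.  A backend that
      supports only one mode pins the flag with a vassert; a bi-modal
      backend (the forthcoming ppc32/64 port) will branch on it. */
   void ppSomeInstr ( SomeInstr* i, Bool mode64 )
   {
      vassert(mode64 == False);   /* e.g. a 32-bit-only backend */
      /* ... print i ... */
   }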
vassert(resteerOKfn(dres.continueAt));
delta = dres.continueAt - guest_IP_bbstart;
/* we now have to start a new extent slot. */
- vge->n_used++;
- vassert(vge->n_used <= 3);
+ vge->n_used++;
+ vassert(vge->n_used <= 3);
vge->base[vge->n_used-1] = dres.continueAt;
vge->len[vge->n_used-1] = 0;
n_resteers++;
return i;
}
-void ppAMD64Instr ( AMD64Instr* i )
+void ppAMD64Instr ( AMD64Instr* i, Bool mode64 )
{
+ vassert(mode64 == True);
switch (i->tag) {
case Ain_Imm64:
vex_printf("movabsq $0x%llx,", i->Ain.Imm64.imm64);
/* --------- Helpers for register allocation. --------- */
-void getRegUsage_AMD64Instr ( HRegUsage* u, AMD64Instr* i )
+void getRegUsage_AMD64Instr ( HRegUsage* u, AMD64Instr* i, Bool mode64 )
{
Bool unary;
+ vassert(mode64 == True);
initHRegUsage(u);
switch (i->tag) {
case Ain_Imm64:
addHRegUse(u, HRmWrite, i->Ain.SseShuf.dst);
return;
default:
- ppAMD64Instr(i);
+ ppAMD64Instr(i, mode64);
vpanic("getRegUsage_AMD64Instr");
}
}
*r = lookupHRegRemap(m, *r);
}
-void mapRegs_AMD64Instr ( HRegRemap* m, AMD64Instr* i )
+void mapRegs_AMD64Instr ( HRegRemap* m, AMD64Instr* i, Bool mode64 )
{
+ vassert(mode64 == True);
switch (i->tag) {
case Ain_Imm64:
mapReg(m, &i->Ain.Imm64.dst);
mapReg(m, &i->Ain.SseShuf.dst);
return;
default:
- ppAMD64Instr(i);
+ ppAMD64Instr(i, mode64);
vpanic("mapRegs_AMD64Instr");
}
}
register allocator. Note it's critical these don't write the
condition codes. */
-AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offsetB )
+AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offsetB, Bool mode64 )
{
AMD64AMode* am;
vassert(offsetB >= 0);
vassert(!hregIsVirtual(rreg));
+ vassert(mode64 == True);
am = AMD64AMode_IR(offsetB, hregAMD64_RBP());
switch (hregClass(rreg)) {
}
}
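An aside on the condition-code constraint above: both directions are plain MOVs, which leave %rflags untouched, so the allocator may safely insert them between a flag-setting insn and the conditional that consumes the flags. A minimal sketch of the HRcInt64 case (spillInt64/reloadInt64 are illustrative names; the constructors are this backend's own):

   static AMD64Instr* spillInt64 ( HReg rreg, AMD64AMode* am )
   {
      /* store rreg to the %rbp-relative spill slot */
      return AMD64Instr_Alu64M ( Aalu_MOV, AMD64RI_Reg(rreg), am );
   }
   static AMD64Instr* reloadInt64 ( HReg rreg, AMD64AMode* am )
   {
      /* load it back from the same slot */
      return AMD64Instr_Alu64R ( Aalu_MOV, AMD64RMI_Mem(am), rreg );
   }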
-AMD64Instr* genReload_AMD64 ( HReg rreg, Int offsetB )
+AMD64Instr* genReload_AMD64 ( HReg rreg, Int offsetB, Bool mode64 )
{
AMD64AMode* am;
vassert(offsetB >= 0);
vassert(!hregIsVirtual(rreg));
+ vassert(mode64 == True);
am = AMD64AMode_IR(offsetB, hregAMD64_RBP());
switch (hregClass(rreg)) {
case HRcInt64:
Note that buf is not the insn's final place, and therefore it is
imperative to emit position-independent code. */
-Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr* i )
+Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr* i, Bool mode64 )
{
UInt /*irno,*/ opc, opc_rr, subopc_imm, opc_imma, opc_cl, opc_imm, subopc;
UInt xtra;
UChar* ptmp;
Int j;
vassert(nbuf >= 32);
+ vassert(mode64 == True);
/* Wrap an integer as an int register, for use assembling
GrpN insns, in which the greg field is used as a sub-opcode
and does not really contain a register. */
# define fake(_n) mkHReg((_n), HRcInt64, False)
- /* vex_printf("asm "); ppAMD64Instr(i); vex_printf("\n"); */
+ /* vex_printf("asm "); ppAMD64Instr(i, mode64); vex_printf("\n"); */
switch (i->tag) {
}
bad:
- ppAMD64Instr(i);
+ ppAMD64Instr(i, mode64);
vpanic("emit_AMD64Instr");
/*NOTREACHED*/
extern AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst );
-extern void ppAMD64Instr ( AMD64Instr* );
+extern void ppAMD64Instr ( AMD64Instr*, Bool );
/* Some functions that insulate the register allocator from details
of the underlying instruction set. */
-extern void getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr* );
-extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr* );
+extern void getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr*, Bool );
+extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr*, Bool );
extern Bool isMove_AMD64Instr ( AMD64Instr*, HReg*, HReg* );
-extern Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr* );
-extern AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offset );
-extern AMD64Instr* genReload_AMD64 ( HReg rreg, Int offset );
+extern Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr*, Bool );
+extern AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offset, Bool );
+extern AMD64Instr* genReload_AMD64 ( HReg rreg, Int offset, Bool );
extern void getAllocableRegs_AMD64 ( Int*, HReg** );
extern HInstrArray* iselBB_AMD64 ( IRBB*, VexArchInfo* );
{
addHInstr(env->code, instr);
if (vex_traceflags & VEX_TRACE_VCODE) {
- ppAMD64Instr(instr);
+ ppAMD64Instr(instr, True);
vex_printf("\n");
}
}
Bool (*isMove) (HInstr*, HReg*, HReg*),
/* Get info about register usage in this insn. */
- void (*getRegUsage) (HRegUsage*, HInstr*),
+ void (*getRegUsage) (HRegUsage*, HInstr*, Bool),
/* Apply a reg-reg mapping to an insn. */
- void (*mapRegs) (HRegRemap*, HInstr*),
+ void (*mapRegs) (HRegRemap*, HInstr*, Bool),
/* Return an insn to spill/restore a real reg to a spill slot
offset. */
- HInstr* (*genSpill) ( HReg, Int ),
- HInstr* (*genReload) ( HReg, Int ),
+ HInstr* (*genSpill) ( HReg, Int, Bool ),
+ HInstr* (*genReload) ( HReg, Int, Bool ),
Int guest_sizeB,
/* For debug printing only. */
- void (*ppInstr) ( HInstr* ),
- void (*ppReg) ( HReg )
+ void (*ppInstr) ( HInstr*, Bool ),
+ void (*ppReg) ( HReg ),
+
+ /* 32/64-bit mode */
+ Bool mode64
);
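The allocator's single caller (the translation driver, in the hunks further down) just appends the flag it computed once per host architecture; in outline (a sketch, with names as they appear in the driver):

   rcode = doRegisterAllocation ( vcode,
                                  available_real_regs,
                                  n_available_real_regs,
                                  isMove, getRegUsage, mapRegs,
                                  genSpill, genReload, guest_sizeB,
                                  ppInstr, ppReg, mode64 );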
/* Does this instruction mention a particular reg? */
static Bool instrMentionsReg (
- void (*getRegUsage) (HRegUsage*, HInstr*),
+ void (*getRegUsage) (HRegUsage*, HInstr*, Bool),
HInstr* instr,
- HReg r
+ HReg r,
+ Bool mode64
)
{
Int i;
HRegUsage reg_usage;
- (*getRegUsage)(&reg_usage, instr);
+ (*getRegUsage)(&reg_usage, instr, mode64);
for (i = 0; i < reg_usage.n_used; i++)
if (reg_usage.hreg[i] == r)
return True;
spill, or -1 if none was found. */
static
Int findMostDistantlyMentionedVReg (
- void (*getRegUsage) (HRegUsage*, HInstr*),
+ void (*getRegUsage) (HRegUsage*, HInstr*, Bool),
HInstrArray* instrs_in,
Int search_from_instr,
RRegState* state,
- Int n_state
+ Int n_state,
+ Bool mode64
)
{
Int k, m;
vassert(state[k].disp == Bound);
for (m = search_from_instr; m < instrs_in->arr_used; m++) {
if (instrMentionsReg(getRegUsage,
- instrs_in->arr[m], state[k].vreg))
+ instrs_in->arr[m], state[k].vreg, mode64))
break;
}
if (m > furthest) {
/* Return True iff the given insn is a reg-reg move, in which
case also return the src and dst regs. */
- Bool (*isMove) (HInstr*, HReg*, HReg*),
+ Bool (*isMove) ( HInstr*, HReg*, HReg* ),
/* Get info about register usage in this insn. */
- void (*getRegUsage) (HRegUsage*, HInstr*),
+ void (*getRegUsage) ( HRegUsage*, HInstr*, Bool ),
/* Apply a reg-reg mapping to an insn. */
- void (*mapRegs) (HRegRemap*, HInstr*),
+ void (*mapRegs) ( HRegRemap*, HInstr*, Bool ),
/* Return an insn to spill/restore a real reg to a spill slot
byte offset. */
- HInstr* (*genSpill) ( HReg, Int ),
- HInstr* (*genReload) ( HReg, Int ),
+ HInstr* (*genSpill) ( HReg, Int, Bool ),
+ HInstr* (*genReload) ( HReg, Int, Bool ),
Int guest_sizeB,
/* For debug printing only. */
- void (*ppInstr) ( HInstr* ),
- void (*ppReg) ( HReg )
+ void (*ppInstr) ( HInstr*, Bool ),
+ void (*ppReg) ( HReg ),
+
+ /* 32/64-bit mode */
+ Bool mode64
)
{
# define N_SPILL64S (LibVEX_N_SPILL_BYTES / 8)
HInstr* _tmp = (_instr); \
if (DEBUG_REGALLOC) { \
vex_printf("** "); \
- (*ppInstr)(_tmp); \
+ (*ppInstr)(_tmp, mode64); \
vex_printf("\n\n"); \
} \
addHInstr ( instrs_out, _tmp ); \
for (ii = 0; ii < instrs_in->arr_used; ii++) {
- (*getRegUsage)( &reg_usage, instrs_in->arr[ii] );
+ (*getRegUsage)( &reg_usage, instrs_in->arr[ii], mode64 );
# if 0
vex_printf("\n%d stage1: ", ii);
- (*ppInstr)(instrs_in->arr[ii]);
+ (*ppInstr)(instrs_in->arr[ii], mode64);
vex_printf("\n");
ppHRegUsage(&reg_usage);
# endif
k = hregNumber(vreg);
if (k < 0 || k >= n_vregs) {
vex_printf("\n");
- (*ppInstr)(instrs_in->arr[ii]);
+ (*ppInstr)(instrs_in->arr[ii], mode64);
vex_printf("\n");
vex_printf("vreg %d, n_vregs %d\n", k, n_vregs);
vpanic("doRegisterAllocation: out-of-range vreg");
(*ppReg)(available_real_regs[k]);
vex_printf("\n");
vex_printf("\nOFFENDING instr = ");
- (*ppInstr)(instrs_in->arr[ii]);
+ (*ppInstr)(instrs_in->arr[ii], mode64);
vex_printf("\n");
vpanic("doRegisterAllocation: "
"first event for rreg is Read");
(*ppReg)(available_real_regs[k]);
vex_printf("\n");
vex_printf("\nOFFENDING instr = ");
- (*ppInstr)(instrs_in->arr[ii]);
+ (*ppInstr)(instrs_in->arr[ii], mode64);
vex_printf("\n");
vpanic("doRegisterAllocation: "
"first event for rreg is Modify");
# if DEBUG_REGALLOC
vex_printf("\n====----====---- Insn %d ----====----====\n", ii);
vex_printf("---- ");
- (*ppInstr)(instrs_in->arr[ii]);
+ (*ppInstr)(instrs_in->arr[ii], mode64);
vex_printf("\n\nInitial state:\n");
PRINT_STATE;
vex_printf("\n");
if (vreg_lrs[m].dead_before > ii) {
vassert(vreg_lrs[m].reg_class != HRcINVALID);
EMIT_INSTR( (*genSpill)( rreg_state[k].rreg,
- vreg_lrs[m].spill_offset ) );
+ vreg_lrs[m].spill_offset,
+ mode64 ) );
}
}
rreg_state[k].disp = Unavail;
We also build up the final vreg->rreg mapping to be applied
to the insn. */
- (*getRegUsage)( &reg_usage, instrs_in->arr[ii] );
+ (*getRegUsage)( &reg_usage, instrs_in->arr[ii], mode64 );
initHRegRemap(&remap);
if (reg_usage.mode[j] != HRmWrite) {
vassert(vreg_lrs[m].reg_class != HRcINVALID);
EMIT_INSTR( (*genReload)( rreg_state[k].rreg,
- vreg_lrs[m].spill_offset ) );
+ vreg_lrs[m].spill_offset,
+ mode64 ) );
}
continue;
}
of consequent reloads required. */
spillee
= findMostDistantlyMentionedVReg (
- getRegUsage, instrs_in, ii+1, rreg_state, n_rregs );
+ getRegUsage, instrs_in, ii+1, rreg_state, n_rregs, mode64 );
if (spillee == -1) {
/* Hmmmmm. There don't appear to be any spill candidates.
vassert(vreg_lrs[m].dead_before > ii);
vassert(vreg_lrs[m].reg_class != HRcINVALID);
EMIT_INSTR( (*genSpill)( rreg_state[spillee].rreg,
- vreg_lrs[m].spill_offset ) );
+ vreg_lrs[m].spill_offset,
+ mode64 ) );
/* Update the rreg_state to reflect the new assignment for this
rreg. */
if (reg_usage.mode[j] != HRmWrite) {
vassert(vreg_lrs[m].reg_class != HRcINVALID);
EMIT_INSTR( (*genReload)( rreg_state[spillee].rreg,
- vreg_lrs[m].spill_offset ) );
+ vreg_lrs[m].spill_offset,
+ mode64 ) );
}
/* So after much twisting and turning, we have vreg mapped to
*/
/* NOTE, DESTRUCTIVELY MODIFIES instrs_in->arr[ii]. */
- (*mapRegs)( &remap, instrs_in->arr[ii] );
+ (*mapRegs)( &remap, instrs_in->arr[ii], mode64 );
EMIT_INSTR( instrs_in->arr[ii] );
# if DEBUG_REGALLOC
HReg hregPPC32_VR30 ( void ) { return mkHReg(30, HRcVec128, False); }
HReg hregPPC32_VR31 ( void ) { return mkHReg(31, HRcVec128, False); }
-void getAllocableRegs_PPC32 ( Int* nregs, HReg** arr )
+void getAllocableRegs_PPC32 ( Int* nregs, HReg** arr, Bool mode64 )
{
UInt i=0;
*nregs = 90 - 24 - 24;
}
}
-void ppPPC32Instr ( PPC32Instr* i )
+void ppPPC32Instr ( PPC32Instr* i, Bool mode64 )
{
switch (i->tag) {
case Pin_LI32:
/* --------- Helpers for register allocation. --------- */
-void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i )
+void getRegUsage_PPC32Instr ( HRegUsage* u, PPC32Instr* i, Bool mode64 )
{
initHRegUsage(u);
switch (i->tag) {
return;
default:
- ppPPC32Instr(i);
+ ppPPC32Instr(i, mode64);
vpanic("getRegUsage_PPC32Instr");
}
}
/* local helper */
-static void mapReg(HRegRemap* m, HReg* r)
+static void mapReg( HRegRemap* m, HReg* r )
{
*r = lookupHRegRemap(m, *r);
}
-void mapRegs_PPC32Instr (HRegRemap* m, PPC32Instr* i)
+void mapRegs_PPC32Instr ( HRegRemap* m, PPC32Instr* i, Bool mode64 )
{
switch (i->tag) {
case Pin_LI32:
return;
default:
- ppPPC32Instr(i);
+ ppPPC32Instr(i, mode64);
vpanic("mapRegs_PPC32Instr");
}
}
/* Generate ppc32 spill/reload instructions under the direction of the
register allocator. Note it's critical these don't write the
condition codes. */
-PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB )
+PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 )
{
PPC32AMode* am;
vassert(!hregIsVirtual(rreg));
}
}
-PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB )
+PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 )
{
PPC32AMode* am;
vassert(!hregIsVirtual(rreg));
Note that buf is not the insn's final place, and therefore it is
imperative to emit position-independent code. */
-Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i )
+Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* i, Bool mode64 )
{
UChar* p = &buf[0];
UChar* ptmp = p;
vassert(nbuf >= 32);
-// vex_printf("asm ");ppPPC32Instr(i); vex_printf("\n");
+// vex_printf("asm ");ppPPC32Instr(i, mode64); vex_printf("\n");
switch (i->tag) {
bad:
vex_printf("\n=> ");
- ppPPC32Instr(i);
+ ppPPC32Instr(i, mode64);
vpanic("emit_PPC32Instr");
/*NOTREACHED*/
extern PPC32Instr* PPC32Instr_AvCMov ( PPC32CondCode, HReg dst, HReg src );
extern PPC32Instr* PPC32Instr_AvLdVSCR ( HReg src );
-extern void ppPPC32Instr ( PPC32Instr* );
+extern void ppPPC32Instr ( PPC32Instr*, Bool mode64 );
/* Some functions that insulate the register allocator from details
of the underlying instruction set. */
-extern void getRegUsage_PPC32Instr ( HRegUsage*, PPC32Instr* );
-extern void mapRegs_PPC32Instr ( HRegRemap*, PPC32Instr* );
+extern void getRegUsage_PPC32Instr ( HRegUsage*, PPC32Instr*, Bool mode64 );
+extern void mapRegs_PPC32Instr ( HRegRemap*, PPC32Instr*, Bool mode64 );
extern Bool isMove_PPC32Instr ( PPC32Instr*, HReg*, HReg* );
-extern Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr* );
-extern PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB );
-extern PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB );
-extern void getAllocableRegs_PPC32 ( Int*, HReg** );
+extern Int emit_PPC32Instr ( UChar* buf, Int nbuf, PPC32Instr*, Bool mode64 );
+extern PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 );
+extern PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB, Bool mode64 );
+extern void getAllocableRegs_PPC32 ( Int*, HReg**, Bool mode64 );
extern HInstrArray* iselBB_PPC32 ( IRBB*, VexArchInfo* );
#endif /* ndef __LIBVEX_HOST_PPC32_HDEFS_H */
{
addHInstr(env->code, instr);
if (vex_traceflags & VEX_TRACE_VCODE) {
- ppPPC32Instr(instr);
+ ppPPC32Instr(instr, False);
vex_printf("\n");
}
}
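Each isel unit passes a constant here, since every current backend is single-mode. A bi-modal port would presumably carry the mode in its ISelEnv and forward it; hypothetically (env->mode64 is not a field in this patch):

   static void addInstr ( ISelEnv* env, PPC32Instr* instr )
   {
      addHInstr(env->code, instr);
      if (vex_traceflags & VEX_TRACE_VCODE) {
         ppPPC32Instr(instr, env->mode64);   /* hypothetical field */
         vex_printf("\n");
      }
   }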
return i;
}
-void ppX86Instr ( X86Instr* i ) {
+void ppX86Instr ( X86Instr* i, Bool mode64 ) {
+ vassert(mode64 == False);
switch (i->tag) {
case Xin_Alu32R:
vex_printf("%sl ", showX86AluOp(i->Xin.Alu32R.op));
/* --------- Helpers for register allocation. --------- */
-void getRegUsage_X86Instr (HRegUsage* u, X86Instr* i)
+void getRegUsage_X86Instr (HRegUsage* u, X86Instr* i, Bool mode64)
{
Bool unary;
+ vassert(mode64 == False);
initHRegUsage(u);
switch (i->tag) {
case Xin_Alu32R:
addHRegUse(u, HRmWrite, i->Xin.SseShuf.dst);
return;
default:
- ppX86Instr(i);
+ ppX86Instr(i, mode64);
vpanic("getRegUsage_X86Instr");
}
}
/* local helper */
-static void mapReg(HRegRemap* m, HReg* r)
+static void mapReg( HRegRemap* m, HReg* r )
{
*r = lookupHRegRemap(m, *r);
}
-void mapRegs_X86Instr (HRegRemap* m, X86Instr* i)
+void mapRegs_X86Instr ( HRegRemap* m, X86Instr* i, Bool mode64 )
{
+ vassert(mode64 == False);
switch (i->tag) {
case Xin_Alu32R:
mapRegs_X86RMI(m, i->Xin.Alu32R.src);
mapReg(m, &i->Xin.SseShuf.dst);
return;
default:
- ppX86Instr(i);
+ ppX86Instr(i, mode64);
vpanic("mapRegs_X86Instr");
}
}
register allocator. Note it's critical these don't write the
condition codes. */
-X86Instr* genSpill_X86 ( HReg rreg, Int offsetB )
+X86Instr* genSpill_X86 ( HReg rreg, Int offsetB, Bool mode64 )
{
X86AMode* am;
vassert(offsetB >= 0);
vassert(!hregIsVirtual(rreg));
+ vassert(mode64 == False);
am = X86AMode_IR(offsetB, hregX86_EBP());
switch (hregClass(rreg)) {
}
}
-X86Instr* genReload_X86 ( HReg rreg, Int offsetB )
+X86Instr* genReload_X86 ( HReg rreg, Int offsetB, Bool mode64 )
{
X86AMode* am;
vassert(offsetB >= 0);
vassert(!hregIsVirtual(rreg));
+ vassert(mode64 == False);
am = X86AMode_IR(offsetB, hregX86_EBP());
switch (hregClass(rreg)) {
case HRcInt32:
Note that buf is not the insn's final place, and therefore it is
imperative to emit position-independent code. */
-Int emit_X86Instr ( UChar* buf, Int nbuf, X86Instr* i )
+Int emit_X86Instr ( UChar* buf, Int nbuf, X86Instr* i, Bool mode64 )
{
UInt irno, opc, opc_rr, subopc_imm, opc_imma, opc_cl, opc_imm, subopc;
UChar* p = &buf[0];
UChar* ptmp;
vassert(nbuf >= 32);
+ vassert(mode64 == False);
/* Wrap an integer as an int register, for use assembling
GrpN insns, in which the greg field is used as a sub-opcode
and does not really contain a register. */
# define fake(_n) mkHReg((_n), HRcInt32, False)
- /* vex_printf("asm ");ppX86Instr(i); vex_printf("\n"); */
+ /* vex_printf("asm ");ppX86Instr(i, mode64); vex_printf("\n"); */
switch (i->tag) {
}
bad:
- ppX86Instr(i);
+ ppX86Instr(i, mode64);
vpanic("emit_X86Instr");
/*NOTREACHED*/
extern X86Instr* X86Instr_SseShuf ( Int order, HReg src, HReg dst );
-extern void ppX86Instr ( X86Instr* );
+extern void ppX86Instr ( X86Instr*, Bool );
/* Some functions that insulate the register allocator from details
of the underlying instruction set. */
-extern void getRegUsage_X86Instr ( HRegUsage*, X86Instr* );
-extern void mapRegs_X86Instr ( HRegRemap*, X86Instr* );
+extern void getRegUsage_X86Instr ( HRegUsage*, X86Instr*, Bool );
+extern void mapRegs_X86Instr ( HRegRemap*, X86Instr*, Bool );
extern Bool isMove_X86Instr ( X86Instr*, HReg*, HReg* );
-extern Int emit_X86Instr ( UChar* buf, Int nbuf, X86Instr* );
-extern X86Instr* genSpill_X86 ( HReg rreg, Int offset );
-extern X86Instr* genReload_X86 ( HReg rreg, Int offset );
+extern Int emit_X86Instr ( UChar* buf, Int nbuf, X86Instr*, Bool );
+extern X86Instr* genSpill_X86 ( HReg rreg, Int offset, Bool );
+extern X86Instr* genReload_X86 ( HReg rreg, Int offset, Bool );
extern void getAllocableRegs_X86 ( Int*, HReg** );
extern HInstrArray* iselBB_X86 ( IRBB*, VexArchInfo* );
{
addHInstr(env->code, instr);
if (vex_traceflags & VEX_TRACE_VCODE) {
- ppX86Instr(instr);
+ ppX86Instr(instr, False);
vex_printf("\n");
}
}
from the target instruction set. */
HReg* available_real_regs;
Int n_available_real_regs;
- Bool (*isMove) (HInstr*, HReg*, HReg*);
- void (*getRegUsage) (HRegUsage*, HInstr*);
- void (*mapRegs) (HRegRemap*, HInstr*);
- HInstr* (*genSpill) ( HReg, Int );
- HInstr* (*genReload) ( HReg, Int );
- void (*ppInstr) ( HInstr* );
+ Bool (*isMove) ( HInstr*, HReg*, HReg* );
+ void (*getRegUsage) ( HRegUsage*, HInstr*, Bool );
+ void (*mapRegs) ( HRegRemap*, HInstr*, Bool );
+ HInstr* (*genSpill) ( HReg, Int, Bool );
+ HInstr* (*genReload) ( HReg, Int, Bool );
+ void (*ppInstr) ( HInstr*, Bool );
void (*ppReg) ( HReg );
HInstrArray* (*iselBB) ( IRBB*, VexArchInfo* );
- Int (*emit) ( UChar*, Int, HInstr* );
+ Int (*emit) ( UChar*, Int, HInstr*, Bool );
IRExpr* (*specHelper) ( HChar*, IRExpr** );
Bool (*preciseMemExnsFn) ( Int, Int );
UChar insn_bytes[32];
IRType guest_word_type;
IRType host_word_type;
+ Bool mode64;
guest_layout = NULL;
available_real_regs = NULL;
host_word_type = Ity_INVALID;
offB_TISTART = 0;
offB_TILEN = 0;
+ mode64 = False;
vex_traceflags = traceflags;
switch (arch_host) {
case VexArchX86:
+ mode64 = False;
getAllocableRegs_X86 ( &n_available_real_regs,
&available_real_regs );
isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_X86Instr;
- getRegUsage = (void(*)(HRegUsage*,HInstr*)) getRegUsage_X86Instr;
- mapRegs = (void(*)(HRegRemap*,HInstr*)) mapRegs_X86Instr;
- genSpill = (HInstr*(*)(HReg,Int)) genSpill_X86;
- genReload = (HInstr*(*)(HReg,Int)) genReload_X86;
- ppInstr = (void(*)(HInstr*)) ppX86Instr;
+ getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_X86Instr;
+ mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_X86Instr;
+ genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_X86;
+ genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_X86;
+ ppInstr = (void(*)(HInstr*, Bool)) ppX86Instr;
ppReg = (void(*)(HReg)) ppHRegX86;
iselBB = iselBB_X86;
- emit = (Int(*)(UChar*,Int,HInstr*)) emit_X86Instr;
+ emit = (Int(*)(UChar*,Int,HInstr*, Bool)) emit_X86Instr;
host_is_bigendian = False;
host_word_type = Ity_I32;
vassert(archinfo_host->subarch == VexSubArchX86_sse0
break;
case VexArchAMD64:
+ mode64 = True;
getAllocableRegs_AMD64 ( &n_available_real_regs,
&available_real_regs );
isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_AMD64Instr;
- getRegUsage = (void(*)(HRegUsage*,HInstr*)) getRegUsage_AMD64Instr;
- mapRegs = (void(*)(HRegRemap*,HInstr*)) mapRegs_AMD64Instr;
- genSpill = (HInstr*(*)(HReg,Int)) genSpill_AMD64;
- genReload = (HInstr*(*)(HReg,Int)) genReload_AMD64;
- ppInstr = (void(*)(HInstr*)) ppAMD64Instr;
+ getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_AMD64Instr;
+ mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_AMD64Instr;
+ genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_AMD64;
+ genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_AMD64;
+ ppInstr = (void(*)(HInstr*, Bool)) ppAMD64Instr;
ppReg = (void(*)(HReg)) ppHRegAMD64;
iselBB = iselBB_AMD64;
- emit = (Int(*)(UChar*,Int,HInstr*)) emit_AMD64Instr;
+ emit = (Int(*)(UChar*,Int,HInstr*, Bool)) emit_AMD64Instr;
host_is_bigendian = False;
host_word_type = Ity_I64;
vassert(archinfo_host->subarch == VexSubArch_NONE);
break;
case VexArchPPC32:
+ mode64 = False;
getAllocableRegs_PPC32 ( &n_available_real_regs,
- &available_real_regs );
+ &available_real_regs, mode64 );
isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPC32Instr;
- getRegUsage = (void(*)(HRegUsage*,HInstr*)) getRegUsage_PPC32Instr;
- mapRegs = (void(*)(HRegRemap*,HInstr*)) mapRegs_PPC32Instr;
- genSpill = (HInstr*(*)(HReg,Int)) genSpill_PPC32;
- genReload = (HInstr*(*)(HReg,Int)) genReload_PPC32;
- ppInstr = (void(*)(HInstr*)) ppPPC32Instr;
+ getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPC32Instr;
+ mapRegs = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPC32Instr;
+ genSpill = (HInstr*(*)(HReg,Int,Bool)) genSpill_PPC32;
+ genReload = (HInstr*(*)(HReg,Int,Bool)) genReload_PPC32;
+ ppInstr = (void(*)(HInstr*,Bool)) ppPPC32Instr;
ppReg = (void(*)(HReg)) ppHRegPPC32;
iselBB = iselBB_PPC32;
- emit = (Int(*)(UChar*,Int,HInstr*)) emit_PPC32Instr;
+ emit = (Int(*)(UChar*,Int,HInstr*,Bool)) emit_PPC32Instr;
host_is_bigendian = True;
host_word_type = Ity_I32;
vassert(archinfo_guest->subarch == VexSubArchPPC32_I
if (vex_traceflags & VEX_TRACE_VCODE) {
for (i = 0; i < vcode->arr_used; i++) {
vex_printf("%3d ", i);
- ppInstr(vcode->arr[i]);
+ ppInstr(vcode->arr[i], mode64);
vex_printf("\n");
}
vex_printf("\n");
n_available_real_regs,
isMove, getRegUsage, mapRegs,
genSpill, genReload, guest_sizeB,
- ppInstr, ppReg );
+ ppInstr, ppReg, mode64 );
vexAllocSanityCheck();
"------------------------\n\n");
for (i = 0; i < rcode->arr_used; i++) {
vex_printf("%3d ", i);
- ppInstr(rcode->arr[i]);
+ ppInstr(rcode->arr[i], mode64);
vex_printf("\n");
}
vex_printf("\n");
out_used = 0; /* tracks along the host_bytes array */
for (i = 0; i < rcode->arr_used; i++) {
if (vex_traceflags & VEX_TRACE_ASM) {
- ppInstr(rcode->arr[i]);
+ ppInstr(rcode->arr[i], mode64);
vex_printf("\n");
}
- j = (*emit)( insn_bytes, 32, rcode->arr[i] );
+ j = (*emit)( insn_bytes, 32, rcode->arr[i], mode64 );
if (vex_traceflags & VEX_TRACE_ASM) {
for (k = 0; k < j; k++)
if (insn_bytes[k] < 16)