scheme.
git-svn-id: svn://svn.valgrind.org/vex/trunk@1495
vex_printf("if (%%rflags.%s) { ",
showAMD64CondCode(i->Ain.Goto.cond));
}
- if (i->Ain.Goto.jk != Ijk_Boring) {
+ if (i->Ain.Goto.jk != Ijk_Boring
+ && i->Ain.Goto.jk != Ijk_Call
+ && i->Ain.Goto.jk != Ijk_Ret) {
vex_printf("movl $");
ppIRJumpKind(i->Ain.Goto.jk);
vex_printf(",%%ebp ; ");
}
vex_printf("movq ");
ppAMD64RI(i->Ain.Goto.dst);
- vex_printf(",%%rax ; ret");
+ vex_printf(",%%rax ; movabsq $dispatcher_addr,%%rdx ; jmp *%%rdx");
if (i->Ain.Goto.cond != Acc_ALWAYS) {
vex_printf(" }");
}
return;
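
A standalone sketch (simplified stand-in types, not the real VEX AMD64Instr) of what the updated pretty-printer describes for a conditional Ain_Goto under the new scheme: load the next guest IP into %rax, then jump to the dispatcher through %rdx instead of "ret"ing back to it. The names pp_goto_sketch, JumpKindSketch and the placeholder "$<jumpkind>" are illustrative only.

#include <stdio.h>

typedef enum { Jk_Boring, Jk_Call, Jk_Ret, Jk_Other } JumpKindSketch;

static void pp_goto_sketch ( const char* cond /* NULL == Acc_ALWAYS */,
                             JumpKindSketch jk, const char* dst )
{
   if (cond)
      printf("if (%%rflags.%s) { ", cond);
   if (jk != Jk_Boring && jk != Jk_Call && jk != Jk_Ret)
      printf("movl $<jumpkind>,%%ebp ; ");
   /* new scheme: next guest IP into %rax, then jump to the dispatcher */
   printf("movq %s,%%rax ; movabsq $dispatcher_addr,%%rdx ; jmp *%%rdx", dst);
   if (cond)
      printf(" }");
   printf("\n");
}

int main ( void )
{
   /* prints: if (%rflags.z) { movl $<jumpkind>,%ebp ;
      movq $0x401000,%rax ; movabsq $dispatcher_addr,%rdx ; jmp *%rdx } */
   pp_goto_sketch("z", Jk_Other, "$0x401000");
   return 0;
}
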
case Ain_Goto:
addRegUsage_AMD64RI(u, i->Ain.Goto.dst);
- addHRegUse(u, HRmWrite, hregAMD64_RAX());
- if (i->Ain.Goto.jk != Ijk_Boring)
+ addHRegUse(u, HRmWrite, hregAMD64_RAX()); /* used for next guest addr */
+ addHRegUse(u, HRmWrite, hregAMD64_RDX()); /* used for dispatcher addr */
+ if (i->Ain.Goto.jk != Ijk_Boring
+ && i->Ain.Goto.jk != Ijk_Call
+ && i->Ain.Goto.jk != Ijk_Ret)
+ /* note: this is irrelevant since %rbp is not actually
+ available to the allocator, but record it anyway */
addHRegUse(u, HRmWrite, hregAMD64_RBP());
return;
case Ain_CMov64:
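
Why the register-usage change matters: the allocator must now be told that an Ain_Goto trashes %rdx (dispatcher address) as well as %rax (next guest address), otherwise it could keep a live value in %rdx across the jump. A tiny standalone model of that bookkeeping follows; it is not the real VEX HRegUsage API, just a bitmask illustration of the same idea.

#include <stdio.h>

enum HostReg { RAX = 0, RDX = 2, RBP = 5 };

typedef struct { unsigned written; } RegUsage;   /* bitmask of clobbered regs */

static void mark_written ( RegUsage* u, enum HostReg r ) { u->written |= 1u << r; }

static void regusage_of_goto_sketch ( RegUsage* u, int jk_needs_rbp )
{
   mark_written(u, RAX);       /* next guest address */
   mark_written(u, RDX);       /* dispatcher address (new in this change) */
   if (jk_needs_rbp)
      mark_written(u, RBP);    /* jump-kind marker, when not Boring/Call/Ret */
}

int main ( void )
{
   RegUsage u = { 0 };
   regusage_of_goto_sketch(&u, 1);
   printf("clobber mask = 0x%x\n", u.written);   /* expect 0x25: rax|rdx|rbp */
   return 0;
}
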
Note that buf is not the insn's final place, and therefore it is
imperative to emit position-independent code. */
-Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr* i, Bool mode64 )
+Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr* i,
+ Bool mode64, void* dispatch )
{
UInt /*irno,*/ opc, opc_rr, subopc_imm, opc_imma, opc_cl, opc_imm, subopc;
UInt xtra;
}
}
- /* ret */
- *p++ = 0xC3;
+ /* Get the dispatcher address into %rdx. This has to happen
+ after the load of %rax since %rdx might be carrying the value
+ destined for %rax immediately prior to this Ain_Goto. */
+ vassert(sizeof(ULong) == sizeof(void*));
+ vassert(dispatch != NULL);
+ /* movabsq $imm64, %rdx */
+ *p++ = 0x48;
+ *p++ = 0xBA;
+ p = emit64(p, Ptr_to_ULong(dispatch));
+
+ /* jmp *%rdx */
+ *p++ = 0xFF;
+ *p++ = 0xE2;
/* Fix up the conditional jump, if there was one. */
if (i->Ain.Goto.cond != Acc_ALWAYS) {
Int delta = p - ptmp;
- vassert(delta > 0 && delta < 20);
+ vassert(delta > 0 && delta < 30);
*ptmp = toUChar(delta-1);
}
goto done;
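
The removed one-byte "ret" becomes a twelve-byte tail: REX.W (0x48) plus 0xBA plus an 8-byte immediate encodes "movabsq $imm64,%rdx", and 0xFF 0xE2 encodes "jmp *%rdx". That growth is why the conditional-jump fixup bound is relaxed from 20 to 30. Below is a minimal standalone sketch of the same byte sequence, with a local little-endian helper (emit64_sketch) standing in for VEX's own emit64.

#include <stdio.h>
#include <stdint.h>

static unsigned char* emit64_sketch ( unsigned char* p, uint64_t w64 )
{
   int i;
   for (i = 0; i < 8; i++)                /* little-endian immediate */
      *p++ = (unsigned char)((w64 >> (8*i)) & 0xFF);
   return p;
}

int main ( void )
{
   unsigned char buf[16], *p = buf;
   uint64_t dispatch = 0x0000000012345678ULL;   /* stand-in address */

   *p++ = 0x48;                           /* REX.W */
   *p++ = 0xBA;                           /* MOV imm64 -> %rdx */
   p = emit64_sketch(p, dispatch);
   *p++ = 0xFF;                           /* JMP r/m64 ... */
   *p++ = 0xE2;                           /* ... ModRM = *%rdx */

   printf("emitted %d bytes\n", (int)(p - buf));   /* expect 12 */
   return 0;
}
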
extern void getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr*, Bool );
extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr*, Bool );
extern Bool isMove_AMD64Instr ( AMD64Instr*, HReg*, HReg* );
-extern Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr*, Bool );
+extern Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr*,
+ Bool, void* dispatch );
extern AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offset, Bool );
extern AMD64Instr* genReload_AMD64 ( HReg rreg, Int offset, Bool );
extern void getAllocableRegs_AMD64 ( Int*, HReg** );
vassert(dispatch != NULL);
/* movl $imm32, %edx */
*p++ = 0xBA;
- p = emit32(p, (UInt)dispatch);
+ p = emit32(p, (UInt)Ptr_to_ULong(dispatch));
/* jmp *%edx */
*p++ = 0xFF;
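
The x86 backend gets the analogous, shorter tail: 0xBA plus a 4-byte little-endian immediate encodes "movl $imm32,%edx", and 0xFF 0xE2 encodes "jmp *%edx", seven bytes in total. The detour through Ptr_to_ULong just makes the pointer-to-integer narrowing explicit. A standalone sketch of that sequence, with an illustrative stand-in address:

#include <stdio.h>
#include <stdint.h>

int main ( void )
{
   unsigned char buf[8], *p = buf;
   uint32_t dispatch = 0x08048000u;       /* stand-in 32-bit address */
   int i;

   *p++ = 0xBA;                           /* MOV imm32 -> %edx */
   for (i = 0; i < 4; i++)                /* little-endian immediate */
      *p++ = (unsigned char)((dispatch >> (8*i)) & 0xFF);
   *p++ = 0xFF;                           /* JMP r/m32 ... */
   *p++ = 0xE2;                           /* ... ModRM = *%edx */

   printf("emitted %d bytes\n", (int)(p - buf));   /* expect 7 */
   return 0;
}
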
ppInstr = (void(*)(HInstr*, Bool)) ppX86Instr;
ppReg = (void(*)(HReg)) ppHRegX86;
iselBB = iselBB_X86;
- emit = emit_X86Instr;
+ emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_X86Instr;
host_is_bigendian = False;
host_word_type = Ity_I32;
vassert(vta->archinfo_host.subarch == VexSubArchX86_sse0
ppInstr = (void(*)(HInstr*, Bool)) ppAMD64Instr;
ppReg = (void(*)(HReg)) ppHRegAMD64;
iselBB = iselBB_AMD64;
- emit = (Int(*)(UChar*,Int,HInstr*, Bool)) emit_AMD64Instr;
+ emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_AMD64Instr;
host_is_bigendian = False;
host_word_type = Ity_I64;
vassert(vta->archinfo_host.subarch == VexSubArch_NONE);
ppInstr = (void(*)(HInstr*,Bool)) ppPPC32Instr;
ppReg = (void(*)(HReg)) ppHRegPPC32;
iselBB = iselBB_PPC32;
- emit = (Int(*)(UChar*,Int,HInstr*,Bool)) emit_PPC32Instr;
+ emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPC32Instr;
host_is_bigendian = True;
host_word_type = Ity_I32;
vassert(vta->archinfo_guest.subarch == VexSubArchPPC32_I
ppInstr = (void(*)(HInstr*, Bool)) ppPPC32Instr;
ppReg = (void(*)(HReg)) ppHRegPPC32;
iselBB = iselBB_PPC32;
- emit = (Int(*)(UChar*,Int,HInstr*, Bool)) emit_PPC32Instr;
+ emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPC32Instr;
host_is_bigendian = True;
host_word_type = Ity_I64;
vassert(vta->archinfo_guest.subarch == VexSubArchPPC64_FI
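
The four main_main.c hunks all make the same adjustment: the generic emit function pointer now carries a fifth void* dispatch argument, so each backend emitter is cast to the common HInstr-based shape. Below is a standalone sketch of that cast pattern; the typedef name EmitFn and the dummy types and emitter are illustrative only, since the real code writes the cast out inline as shown above.

#include <stdio.h>

typedef struct HInstr_ HInstr;               /* opaque generic host instruction */
typedef struct { int opc; } FakeInstr;       /* stand-in backend instruction type */

typedef int (*EmitFn)(unsigned char* buf, int nbuf, HInstr* i,
                      int mode64, void* dispatch);

/* Backend-specific emitter: same shape, but takes its own instr type,
   which is what makes the explicit cast necessary. */
static int emit_Fake ( unsigned char* buf, int nbuf, FakeInstr* i,
                       int mode64, void* dispatch )
{
   (void)buf; (void)nbuf; (void)i; (void)mode64; (void)dispatch;
   return 0;   /* would return the number of bytes emitted */
}

int main ( void )
{
   /* The cast reconciles FakeInstr* with the generic HInstr*, exactly
      as main_main.c does for each backend above. */
   EmitFn emit = (EmitFn) emit_Fake;
   printf("emit hook installed: %d\n", emit != 0);
   return 0;
}
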