From: Julian Seward
Date: Thu, 15 Dec 2005 15:45:20 +0000 (+0000)
Subject: Modify amd64 backend to use jump-jump scheme rather than call-return
X-Git-Tag: svn/VALGRIND_3_2_3^2~159
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=664f3b0f1fffb61eff96ec407425a1e0b39ef6ec;p=thirdparty%2Fvalgrind.git

Modify amd64 backend to use jump-jump scheme rather than call-return
scheme.
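
As a sketch of the two schemes (next_guest_addr and dispatcher_addr here
stand for whatever values the instruction selector supplies; the exact
sequences are the ones printed by ppAMD64Instr below), a translated
block previously handed control back to the dispatcher with

   movq  $next_guest_addr, %rax   /* tell dispatcher where to go next */
   ret                            /* 0xC3: return to dispatcher */

whereas under the jump-jump scheme it ends with

   movq    $next_guest_addr, %rax   /* as before */
   movabsq $dispatcher_addr, %rdx   /* 0x48 0xBA imm64 */
   jmp     *%rdx                    /* 0xFF 0xE2 */

The dispatcher address is a full 64-bit immediate, hence the movabsq
through a scratch register; %rdx is loaded only after %rax, since %rdx
may still be carrying the value destined for %rax.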

git-svn-id: svn://svn.valgrind.org/vex/trunk@1495
---

diff --git a/VEX/priv/host-amd64/hdefs.c b/VEX/priv/host-amd64/hdefs.c
index 5589bc807c..acc5a116df 100644
--- a/VEX/priv/host-amd64/hdefs.c
+++ b/VEX/priv/host-amd64/hdefs.c
@@ -1091,14 +1091,16 @@ void ppAMD64Instr ( AMD64Instr* i, Bool mode64 )
          vex_printf("if (%%rflags.%s) { ",
                     showAMD64CondCode(i->Ain.Goto.cond));
       }
-      if (i->Ain.Goto.jk != Ijk_Boring) {
+      if (i->Ain.Goto.jk != Ijk_Boring
+          && i->Ain.Goto.jk != Ijk_Call
+          && i->Ain.Goto.jk != Ijk_Ret) {
          vex_printf("movl $");
          ppIRJumpKind(i->Ain.Goto.jk);
          vex_printf(",%%ebp ; ");
       }
       vex_printf("movq ");
       ppAMD64RI(i->Ain.Goto.dst);
-      vex_printf(",%%rax ; ret");
+      vex_printf(",%%rax ; movabsq $dispatcher_addr,%%rdx ; jmp *%%rdx");
       if (i->Ain.Goto.cond != Acc_ALWAYS) {
          vex_printf(" }");
       }
@@ -1447,8 +1449,13 @@ void getRegUsage_AMD64Instr ( HRegUsage* u, AMD64Instr* i, Bool mode64 )
          return;
       case Ain_Goto:
          addRegUsage_AMD64RI(u, i->Ain.Goto.dst);
-         addHRegUse(u, HRmWrite, hregAMD64_RAX());
-         if (i->Ain.Goto.jk != Ijk_Boring)
+         addHRegUse(u, HRmWrite, hregAMD64_RAX()); /* used for next guest addr */
+         addHRegUse(u, HRmWrite, hregAMD64_RDX()); /* used for dispatcher addr */
+         if (i->Ain.Goto.jk != Ijk_Boring
+             && i->Ain.Goto.jk != Ijk_Call
+             && i->Ain.Goto.jk != Ijk_Ret)
+            /* note, this is irrelevant since rbp is not actually
+               available to the allocator.  But still .. */
             addHRegUse(u, HRmWrite, hregAMD64_RBP());
          return;
       case Ain_CMov64:
@@ -2200,7 +2207,8 @@ static UChar* do_ffree_st ( UChar* p, Int n )
    Note that buf is not the insn's final place, and therefore it is
    imperative to emit position-independent code. */
 
-Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr* i, Bool mode64 )
+Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr* i,
+                      Bool mode64, void* dispatch )
 {
    UInt /*irno,*/ opc, opc_rr, subopc_imm, opc_imma, opc_cl, opc_imm, subopc;
    UInt xtra;
@@ -2638,13 +2646,24 @@
         }
      }
 
-      /* ret */
-      *p++ = 0xC3;
+      /* Get the dispatcher address into %rdx.  This has to happen
+         after the load of %rax since %rdx might be carrying the value
+         destined for %rax immediately prior to this Ain_Goto. */
+      vassert(sizeof(ULong) == sizeof(void*));
+      vassert(dispatch != NULL);
+      /* movabsq $imm64, %rdx */
+      *p++ = 0x48;
+      *p++ = 0xBA;
+      p = emit64(p, Ptr_to_ULong(dispatch));
+
+      /* jmp *%rdx */
+      *p++ = 0xFF;
+      *p++ = 0xE2;
 
       /* Fix up the conditional jump, if there was one. */
       if (i->Ain.Goto.cond != Acc_ALWAYS) {
          Int delta = p - ptmp;
-         vassert(delta > 0 && delta < 20);
+         vassert(delta > 0 && delta < 30);
          *ptmp = toUChar(delta-1);
       }
       goto done;
diff --git a/VEX/priv/host-amd64/hdefs.h b/VEX/priv/host-amd64/hdefs.h
index b7a05804fb..ab1a5fb4ef 100644
--- a/VEX/priv/host-amd64/hdefs.h
+++ b/VEX/priv/host-amd64/hdefs.h
@@ -715,7 +715,8 @@ extern void ppAMD64Instr ( AMD64Instr*, Bool );
 extern void getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr*, Bool );
 extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr*, Bool );
 extern Bool isMove_AMD64Instr ( AMD64Instr*, HReg*, HReg* );
-extern Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr*, Bool );
+extern Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr*,
+                             Bool, void* dispatch );
 extern AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offset, Bool );
 extern AMD64Instr* genReload_AMD64 ( HReg rreg, Int offset, Bool );
 extern void getAllocableRegs_AMD64 ( Int*, HReg** );
diff --git a/VEX/priv/host-x86/hdefs.c b/VEX/priv/host-x86/hdefs.c
index ca946054f9..f1a67b8fc6 100644
--- a/VEX/priv/host-x86/hdefs.c
+++ b/VEX/priv/host-x86/hdefs.c
@@ -2212,7 +2212,7 @@ Int emit_X86Instr ( UChar* buf, Int nbuf, X86Instr* i,
       vassert(dispatch != NULL);
       /* movl $imm32, %edx */
       *p++ = 0xBA;
-      p = emit32(p, (UInt)dispatch);
+      p = emit32(p, (UInt)Ptr_to_ULong(dispatch));
 
       /* jmp *%edx */
       *p++ = 0xFF;
diff --git a/VEX/priv/main/vex_main.c b/VEX/priv/main/vex_main.c
index 24b6fe3817..1db2282928 100644
--- a/VEX/priv/main/vex_main.c
+++ b/VEX/priv/main/vex_main.c
@@ -250,7 +250,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr = (void(*)(HInstr*, Bool)) ppX86Instr;
          ppReg   = (void(*)(HReg)) ppHRegX86;
          iselBB  = iselBB_X86;
-         emit    = emit_X86Instr;
+         emit    = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_X86Instr;
          host_is_bigendian = False;
          host_word_type    = Ity_I32;
          vassert(vta->archinfo_host.subarch == VexSubArchX86_sse0
@@ -271,7 +271,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr = (void(*)(HInstr*, Bool)) ppAMD64Instr;
          ppReg   = (void(*)(HReg)) ppHRegAMD64;
          iselBB  = iselBB_AMD64;
-         emit    = (Int(*)(UChar*,Int,HInstr*, Bool)) emit_AMD64Instr;
+         emit    = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_AMD64Instr;
          host_is_bigendian = False;
          host_word_type    = Ity_I64;
          vassert(vta->archinfo_host.subarch == VexSubArch_NONE);
@@ -290,7 +290,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr = (void(*)(HInstr*,Bool)) ppPPC32Instr;
          ppReg   = (void(*)(HReg)) ppHRegPPC32;
          iselBB  = iselBB_PPC32;
-         emit    = (Int(*)(UChar*,Int,HInstr*,Bool)) emit_PPC32Instr;
+         emit    = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPC32Instr;
          host_is_bigendian = True;
          host_word_type    = Ity_I32;
          vassert(vta->archinfo_guest.subarch == VexSubArchPPC32_I
@@ -311,7 +311,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr = (void(*)(HInstr*, Bool)) ppPPC32Instr;
          ppReg   = (void(*)(HReg)) ppHRegPPC32;
          iselBB  = iselBB_PPC32;
-         emit    = (Int(*)(UChar*,Int,HInstr*, Bool)) emit_PPC32Instr;
+         emit    = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPC32Instr;
          host_is_bigendian = True;
          host_word_type    = Ity_I64;
          vassert(vta->archinfo_guest.subarch == VexSubArchPPC64_FI