From: Julian Seward
Date: Mon, 3 Apr 2017 13:24:05 +0000 (+0000)
Subject: Split LibVEX_Translate into front- and back-end parts.  Also, remove use
X-Git-Tag: svn/VALGRIND_3_13_0^2~38
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=43de64aa25b697242d82e3894c28d1e0a97ee471;p=thirdparty%2Fvalgrind.git

Split LibVEX_Translate into front- and back-end parts.  Also, remove use
of __typeof__ when built with MSVC.  A combination of parts of two patches
from Andrew Dutcher.

git-svn-id: svn://svn.valgrind.org/vex/trunk@3341
---

diff --git a/VEX/priv/main_main.c b/VEX/priv/main_main.c
index f5afc87850..a13d2abc82 100644
--- a/VEX/priv/main_main.c
+++ b/VEX/priv/main_main.c
@@ -173,7 +173,7 @@ static void check_hwcaps ( VexArch arch, UInt hwcaps );
 static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps );
 
-
+static IRType arch_word_size ( VexArch arch );
 
 /* --------- helpers --------- */
 
@@ -306,6 +306,7 @@ void LibVEX_Init (
 
 /* --------- Make a translation. --------- */
 
+
 /* KLUDGE: S390 need to know the hwcaps of the host when generating
    code. But that info is not passed to emit_S390Instr. Only mode64 is
    being passed. So, ideally, we want this passed as an argument, too.
@@ -318,72 +319,31 @@ UInt s390_host_hwcaps;
 
 /* Exported to library client. */
 
-VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
+IRSB* LibVEX_FrontEnd ( /*MOD*/ VexTranslateArgs* vta,
+                        /*OUT*/ VexTranslateResult* res,
+                        /*OUT*/ VexRegisterUpdates* pxControl)
 {
-   /* This the bundle of functions we need to do the back-end stuff
-      (insn selection, reg-alloc, assembly) whilst being insulated
-      from the target instruction set. */
-   Bool         (*isMove)       ( const HInstr*, HReg*, HReg* );
-   void         (*getRegUsage)  ( HRegUsage*, const HInstr*, Bool );
-   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
-   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
-   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
-   HInstr*      (*directReload) ( HInstr*, HReg, Short );
-   void         (*ppInstr)      ( const HInstr*, Bool );
-   void         (*ppReg)        ( HReg );
-   HInstrArray* (*iselSB)       ( const IRSB*, VexArch, const VexArchInfo*,
-                                  const VexAbiInfo*, Int, Int, Bool, Bool,
-                                  Addr );
-   Int          (*emit)         ( /*MB_MOD*/Bool*,
-                                  UChar*, Int, const HInstr*, Bool, VexEndness,
-                                  const void*, const void*, const void*,
-                                  const void* );
    IRExpr*      (*specHelper)   ( const HChar*, IRExpr**, IRStmt**, Int );
-   Bool (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );
-
-   const RRegUniverse* rRegUniv = NULL;
-
+   Bool         (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );
 
    DisOneInstrFn disInstrFn;
 
    VexGuestLayout* guest_layout;
    IRSB*           irsb;
-   HInstrArray*    vcode;
-   HInstrArray*    rcode;
-   Int             i, j, k, out_used, guest_sizeB;
+   Int             i;
    Int             offB_CMSTART, offB_CMLEN, offB_GUEST_IP, szB_GUEST_IP;
-   Int             offB_HOST_EvC_COUNTER, offB_HOST_EvC_FAILADDR;
-   UChar           insn_bytes[128];
    IRType          guest_word_type;
    IRType          host_word_type;
-   Bool            mode64, chainingAllowed;
-   Addr            max_ga;
 
-   guest_layout = NULL;
-   isMove = NULL;
-   getRegUsage = NULL;
-   mapRegs = NULL;
-   genSpill = NULL;
-   genReload = NULL;
-   directReload = NULL;
-   ppInstr = NULL;
-   ppReg = NULL;
-   iselSB = NULL;
-   emit = NULL;
-   specHelper = NULL;
-   preciseMemExnsFn = NULL;
-   disInstrFn = NULL;
-   guest_word_type = Ity_INVALID;
-   host_word_type = Ity_INVALID;
-   offB_CMSTART = 0;
-   offB_CMLEN = 0;
-   offB_GUEST_IP = 0;
-   szB_GUEST_IP = 0;
-   offB_HOST_EvC_COUNTER = 0;
-   offB_HOST_EvC_FAILADDR = 0;
-   mode64 = False;
-   chainingAllowed = False;
-
-   vex_traceflags = vta->traceflags;
+   guest_layout     = NULL;
+   specHelper       = NULL;
+   disInstrFn       = NULL;
+   preciseMemExnsFn = NULL;
+   guest_word_type  = arch_word_size(vta->arch_guest);
+   host_word_type   = arch_word_size(vta->arch_host);
+   offB_CMSTART     = 0;
+   offB_CMLEN       = 0;
+   offB_GUEST_IP    = 0;
+   szB_GUEST_IP     = 0;
 
    vassert(vex_initdone);
    vassert(vta->needs_self_check != NULL);
@@ -392,7 +352,6 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    if (vta->disp_cp_chain_me_to_slowEP != NULL) {
       vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
       vassert(vta->disp_cp_xindir != NULL);
-      chainingAllowed = True;
    } else {
       vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
       vassert(vta->disp_cp_xindir == NULL);
@@ -401,213 +360,23 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    vexSetAllocModeTEMP_and_clear();
    vexAllocSanityCheck();
 
+   vex_traceflags = vta->traceflags;
+
    /* First off, check that the guest and host insn sets
       are supported. */
-   switch (vta->arch_host) {
-
-   case VexArchX86:
-      mode64       = False;
-      rRegUniv     = X86FN(getRRegUniverse_X86());
-      isMove       = (__typeof__(isMove)) X86FN(isMove_X86Instr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) X86FN(getRegUsage_X86Instr);
-      mapRegs      = (__typeof__(mapRegs)) X86FN(mapRegs_X86Instr);
-      genSpill     = (__typeof__(genSpill)) X86FN(genSpill_X86);
-      genReload    = (__typeof__(genReload)) X86FN(genReload_X86);
-      directReload = (__typeof__(directReload)) X86FN(directReload_X86);
-      ppInstr      = (__typeof__(ppInstr)) X86FN(ppX86Instr);
-      ppReg        = (__typeof__(ppReg)) X86FN(ppHRegX86);
-      iselSB       = X86FN(iselSB_X86);
-      emit         = (__typeof__(emit)) X86FN(emit_X86Instr);
-      host_word_type = Ity_I32;
-      vassert(vta->archinfo_host.endness == VexEndnessLE);
-      break;
-
-   case VexArchAMD64:
-      mode64       = True;
-      rRegUniv     = AMD64FN(getRRegUniverse_AMD64());
-      isMove       = (__typeof__(isMove)) AMD64FN(isMove_AMD64Instr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) AMD64FN(getRegUsage_AMD64Instr);
-      mapRegs      = (__typeof__(mapRegs)) AMD64FN(mapRegs_AMD64Instr);
-      genSpill     = (__typeof__(genSpill)) AMD64FN(genSpill_AMD64);
-      genReload    = (__typeof__(genReload)) AMD64FN(genReload_AMD64);
-      directReload = (__typeof__(directReload)) AMD64FN(directReload_AMD64);
-      ppInstr      = (__typeof__(ppInstr)) AMD64FN(ppAMD64Instr);
-      ppReg        = (__typeof__(ppReg)) AMD64FN(ppHRegAMD64);
-      iselSB       = AMD64FN(iselSB_AMD64);
-      emit         = (__typeof__(emit)) AMD64FN(emit_AMD64Instr);
-      host_word_type = Ity_I64;
-      vassert(vta->archinfo_host.endness == VexEndnessLE);
-      break;
-
-   case VexArchPPC32:
-      mode64       = False;
-      rRegUniv     = PPC32FN(getRRegUniverse_PPC(mode64));
-      isMove       = (__typeof__(isMove)) PPC32FN(isMove_PPCInstr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) PPC32FN(getRegUsage_PPCInstr);
-      mapRegs      = (__typeof__(mapRegs)) PPC32FN(mapRegs_PPCInstr);
-      genSpill     = (__typeof__(genSpill)) PPC32FN(genSpill_PPC);
-      genReload    = (__typeof__(genReload)) PPC32FN(genReload_PPC);
-      ppInstr      = (__typeof__(ppInstr)) PPC32FN(ppPPCInstr);
-      ppReg        = (__typeof__(ppReg)) PPC32FN(ppHRegPPC);
-      iselSB       = PPC32FN(iselSB_PPC);
-      emit         = (__typeof__(emit)) PPC32FN(emit_PPCInstr);
-      host_word_type = Ity_I32;
-      vassert(vta->archinfo_host.endness == VexEndnessBE);
-      break;
-
-   case VexArchPPC64:
-      mode64       = True;
-      rRegUniv     = PPC64FN(getRRegUniverse_PPC(mode64));
-      isMove       = (__typeof__(isMove)) PPC64FN(isMove_PPCInstr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) PPC64FN(getRegUsage_PPCInstr);
-      mapRegs      = (__typeof__(mapRegs)) PPC64FN(mapRegs_PPCInstr);
-      genSpill     = (__typeof__(genSpill)) PPC64FN(genSpill_PPC);
-      genReload    = (__typeof__(genReload)) PPC64FN(genReload_PPC);
-      ppInstr      = (__typeof__(ppInstr)) PPC64FN(ppPPCInstr);
-      ppReg        = (__typeof__(ppReg)) PPC64FN(ppHRegPPC);
-      iselSB       = PPC64FN(iselSB_PPC);
-      emit         = (__typeof__(emit)) PPC64FN(emit_PPCInstr);
-      host_word_type = Ity_I64;
-      vassert(vta->archinfo_host.endness == VexEndnessBE ||
-              vta->archinfo_host.endness == VexEndnessLE );
-      break;
-
-   case VexArchS390X:
-      mode64       = True;
-      /* KLUDGE: export hwcaps. */
-      s390_host_hwcaps = vta->archinfo_host.hwcaps;
-      rRegUniv     = S390FN(getRRegUniverse_S390());
-      isMove       = (__typeof__(isMove)) S390FN(isMove_S390Instr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) S390FN(getRegUsage_S390Instr);
-      mapRegs      = (__typeof__(mapRegs)) S390FN(mapRegs_S390Instr);
-      genSpill     = (__typeof__(genSpill)) S390FN(genSpill_S390);
-      genReload    = (__typeof__(genReload)) S390FN(genReload_S390);
-      // fixs390: consider implementing directReload_S390
-      ppInstr      = (__typeof__(ppInstr)) S390FN(ppS390Instr);
-      ppReg        = (__typeof__(ppReg)) S390FN(ppHRegS390);
-      iselSB       = S390FN(iselSB_S390);
-      emit         = (__typeof__(emit)) S390FN(emit_S390Instr);
-      host_word_type = Ity_I64;
-      vassert(vta->archinfo_host.endness == VexEndnessBE);
-      break;
-
-   case VexArchARM:
-      mode64       = False;
-      rRegUniv     = ARMFN(getRRegUniverse_ARM());
-      isMove       = (__typeof__(isMove)) ARMFN(isMove_ARMInstr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) ARMFN(getRegUsage_ARMInstr);
-      mapRegs      = (__typeof__(mapRegs)) ARMFN(mapRegs_ARMInstr);
-      genSpill     = (__typeof__(genSpill)) ARMFN(genSpill_ARM);
-      genReload    = (__typeof__(genReload)) ARMFN(genReload_ARM);
-      ppInstr      = (__typeof__(ppInstr)) ARMFN(ppARMInstr);
-      ppReg        = (__typeof__(ppReg)) ARMFN(ppHRegARM);
-      iselSB       = ARMFN(iselSB_ARM);
-      emit         = (__typeof__(emit)) ARMFN(emit_ARMInstr);
-      host_word_type = Ity_I32;
-      vassert(vta->archinfo_host.endness == VexEndnessLE);
-      break;
-
-   case VexArchARM64:
-      mode64       = True;
-      rRegUniv     = ARM64FN(getRRegUniverse_ARM64());
-      isMove       = (__typeof__(isMove)) ARM64FN(isMove_ARM64Instr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) ARM64FN(getRegUsage_ARM64Instr);
-      mapRegs      = (__typeof__(mapRegs)) ARM64FN(mapRegs_ARM64Instr);
-      genSpill     = (__typeof__(genSpill)) ARM64FN(genSpill_ARM64);
-      genReload    = (__typeof__(genReload)) ARM64FN(genReload_ARM64);
-      ppInstr      = (__typeof__(ppInstr)) ARM64FN(ppARM64Instr);
-      ppReg        = (__typeof__(ppReg)) ARM64FN(ppHRegARM64);
-      iselSB       = ARM64FN(iselSB_ARM64);
-      emit         = (__typeof__(emit)) ARM64FN(emit_ARM64Instr);
-      host_word_type = Ity_I64;
-      vassert(vta->archinfo_host.endness == VexEndnessLE);
-      break;
-
-   case VexArchMIPS32:
-      mode64       = False;
-      rRegUniv     = MIPS32FN(getRRegUniverse_MIPS(mode64));
-      isMove       = (__typeof__(isMove)) MIPS32FN(isMove_MIPSInstr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) MIPS32FN(getRegUsage_MIPSInstr);
-      mapRegs      = (__typeof__(mapRegs)) MIPS32FN(mapRegs_MIPSInstr);
-      genSpill     = (__typeof__(genSpill)) MIPS32FN(genSpill_MIPS);
-      genReload    = (__typeof__(genReload)) MIPS32FN(genReload_MIPS);
-      ppInstr      = (__typeof__(ppInstr)) MIPS32FN(ppMIPSInstr);
-      ppReg        = (__typeof__(ppReg)) MIPS32FN(ppHRegMIPS);
-      iselSB       = MIPS32FN(iselSB_MIPS);
-      emit         = (__typeof__(emit)) MIPS32FN(emit_MIPSInstr);
-      host_word_type = Ity_I32;
-      vassert(vta->archinfo_host.endness == VexEndnessLE
-              || vta->archinfo_host.endness == VexEndnessBE);
-      break;
-
-   case VexArchMIPS64:
-      mode64       = True;
-      rRegUniv     = MIPS64FN(getRRegUniverse_MIPS(mode64));
-      isMove       = (__typeof__(isMove)) MIPS64FN(isMove_MIPSInstr);
-      getRegUsage
-         = (__typeof__(getRegUsage)) MIPS64FN(getRegUsage_MIPSInstr);
-      mapRegs      = (__typeof__(mapRegs)) MIPS64FN(mapRegs_MIPSInstr);
-      genSpill     = (__typeof__(genSpill)) MIPS64FN(genSpill_MIPS);
-      genReload    = (__typeof__(genReload)) MIPS64FN(genReload_MIPS);
-      ppInstr      = (__typeof__(ppInstr)) MIPS64FN(ppMIPSInstr);
-      ppReg        = (__typeof__(ppReg)) MIPS64FN(ppHRegMIPS);
-      iselSB       = MIPS64FN(iselSB_MIPS);
-      emit         = (__typeof__(emit)) MIPS64FN(emit_MIPSInstr);
-      host_word_type = Ity_I64;
-      vassert(vta->archinfo_host.endness == VexEndnessLE
-              || vta->archinfo_host.endness == VexEndnessBE);
-      break;
-
-   case VexArchTILEGX:
-      mode64       = True;
-      rRegUniv     = TILEGXFN(getRRegUniverse_TILEGX());
-      isMove       = (__typeof__(isMove)) TILEGXFN(isMove_TILEGXInstr);
-      getRegUsage =
-         (__typeof__(getRegUsage)) TILEGXFN(getRegUsage_TILEGXInstr);
-      mapRegs      = (__typeof__(mapRegs)) TILEGXFN(mapRegs_TILEGXInstr);
-      genSpill     = (__typeof__(genSpill)) TILEGXFN(genSpill_TILEGX);
-      genReload    = (__typeof__(genReload)) TILEGXFN(genReload_TILEGX);
-      ppInstr      = (__typeof__(ppInstr)) TILEGXFN(ppTILEGXInstr);
-      ppReg        = (__typeof__(ppReg)) TILEGXFN(ppHRegTILEGX);
-      iselSB       = TILEGXFN(iselSB_TILEGX);
-      emit         = (__typeof__(emit)) TILEGXFN(emit_TILEGXInstr);
-      host_word_type = Ity_I64;
-      vassert(vta->archinfo_host.endness == VexEndnessLE);
-      break;
-
-   default:
-      vpanic("LibVEX_Translate: unsupported host insn set");
-   }
-
-   // Are the host's hardware capabilities feasible. The function will
-   // not return if hwcaps are infeasible in some sense.
-   check_hwcaps(vta->arch_host, vta->archinfo_host.hwcaps);
-
    switch (vta->arch_guest) {
 
    case VexArchX86:
       preciseMemExnsFn
          = X86FN(guest_x86_state_requires_precise_mem_exns);
-      disInstrFn       = X86FN(disInstr_X86);
-      specHelper       = X86FN(guest_x86_spechelper);
-      guest_sizeB      = sizeof(VexGuestX86State);
-      guest_word_type  = Ity_I32;
-      guest_layout     = X86FN(&x86guest_layout);
-      offB_CMSTART     = offsetof(VexGuestX86State,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestX86State,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestX86State,guest_EIP);
-      szB_GUEST_IP     = sizeof( ((VexGuestX86State*)0)->guest_EIP );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
+      disInstrFn    = X86FN(disInstr_X86);
+      specHelper    = X86FN(guest_x86_spechelper);
+      guest_layout  = X86FN(&x86guest_layout);
+      offB_CMSTART  = offsetof(VexGuestX86State,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestX86State,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestX86State,guest_EIP);
+      szB_GUEST_IP  = sizeof( ((VexGuestX86State*)0)->guest_EIP );
       vassert(vta->archinfo_guest.endness == VexEndnessLE);
       vassert(0 == sizeof(VexGuestX86State) % LibVEX_GUEST_STATE_ALIGN);
       vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
@@ -618,17 +387,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchAMD64:
       preciseMemExnsFn
         = AMD64FN(guest_amd64_state_requires_precise_mem_exns);
-      disInstrFn       = AMD64FN(disInstr_AMD64);
-      specHelper       = AMD64FN(guest_amd64_spechelper);
-      guest_sizeB      = sizeof(VexGuestAMD64State);
-      guest_word_type  = Ity_I64;
-      guest_layout     = AMD64FN(&amd64guest_layout);
-      offB_CMSTART     = offsetof(VexGuestAMD64State,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestAMD64State,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestAMD64State,guest_RIP);
-      szB_GUEST_IP     = sizeof( ((VexGuestAMD64State*)0)->guest_RIP );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
+      disInstrFn    = AMD64FN(disInstr_AMD64);
+      specHelper    = AMD64FN(guest_amd64_spechelper);
+      guest_layout  = AMD64FN(&amd64guest_layout);
+      offB_CMSTART  = offsetof(VexGuestAMD64State,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestAMD64State,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestAMD64State,guest_RIP);
+      szB_GUEST_IP  = sizeof( ((VexGuestAMD64State*)0)->guest_RIP );
       vassert(vta->archinfo_guest.endness == VexEndnessLE);
       vassert(0 == sizeof(VexGuestAMD64State) % LibVEX_GUEST_STATE_ALIGN);
       vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
@@ -639,17 +404,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchPPC32:
       preciseMemExnsFn
         = PPC32FN(guest_ppc32_state_requires_precise_mem_exns);
-      disInstrFn       = PPC32FN(disInstr_PPC);
-      specHelper       = PPC32FN(guest_ppc32_spechelper);
-      guest_sizeB      = sizeof(VexGuestPPC32State);
-      guest_word_type  = Ity_I32;
-      guest_layout     = PPC32FN(&ppc32Guest_layout);
-      offB_CMSTART     = offsetof(VexGuestPPC32State,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestPPC32State,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestPPC32State,guest_CIA);
-      szB_GUEST_IP     = sizeof( ((VexGuestPPC32State*)0)->guest_CIA );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
+      disInstrFn    = PPC32FN(disInstr_PPC);
+      specHelper    = PPC32FN(guest_ppc32_spechelper);
+      guest_layout  = PPC32FN(&ppc32Guest_layout);
+      offB_CMSTART  = offsetof(VexGuestPPC32State,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestPPC32State,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestPPC32State,guest_CIA);
+      szB_GUEST_IP  = sizeof( ((VexGuestPPC32State*)0)->guest_CIA );
       vassert(vta->archinfo_guest.endness == VexEndnessBE);
       vassert(0 == sizeof(VexGuestPPC32State) % LibVEX_GUEST_STATE_ALIGN);
       vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
@@ -660,17 +421,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchPPC64:
       preciseMemExnsFn
         = PPC64FN(guest_ppc64_state_requires_precise_mem_exns);
-      disInstrFn       = PPC64FN(disInstr_PPC);
-      specHelper       = PPC64FN(guest_ppc64_spechelper);
-      guest_sizeB      = sizeof(VexGuestPPC64State);
-      guest_word_type  = Ity_I64;
-      guest_layout     = PPC64FN(&ppc64Guest_layout);
-      offB_CMSTART     = offsetof(VexGuestPPC64State,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestPPC64State,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestPPC64State,guest_CIA);
-      szB_GUEST_IP     = sizeof( ((VexGuestPPC64State*)0)->guest_CIA );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
+      disInstrFn    = PPC64FN(disInstr_PPC);
+      specHelper    = PPC64FN(guest_ppc64_spechelper);
+      guest_layout  = PPC64FN(&ppc64Guest_layout);
+      offB_CMSTART  = offsetof(VexGuestPPC64State,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestPPC64State,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestPPC64State,guest_CIA);
+      szB_GUEST_IP  = sizeof( ((VexGuestPPC64State*)0)->guest_CIA );
       vassert(vta->archinfo_guest.endness == VexEndnessBE ||
               vta->archinfo_guest.endness == VexEndnessLE );
       vassert(0 == sizeof(VexGuestPPC64State) % LibVEX_GUEST_STATE_ALIGN);
@@ -683,17 +440,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchS390X:
       preciseMemExnsFn
         = S390FN(guest_s390x_state_requires_precise_mem_exns);
-      disInstrFn       = S390FN(disInstr_S390);
-      specHelper       = S390FN(guest_s390x_spechelper);
-      guest_sizeB      = sizeof(VexGuestS390XState);
-      guest_word_type  = Ity_I64;
-      guest_layout     = S390FN(&s390xGuest_layout);
-      offB_CMSTART     = offsetof(VexGuestS390XState,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestS390XState,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestS390XState,guest_IA);
-      szB_GUEST_IP     = sizeof( ((VexGuestS390XState*)0)->guest_IA);
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
+      disInstrFn    = S390FN(disInstr_S390);
+      specHelper    = S390FN(guest_s390x_spechelper);
+      guest_layout  = S390FN(&s390xGuest_layout);
+      offB_CMSTART  = offsetof(VexGuestS390XState,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestS390XState,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestS390XState,guest_IA);
+      szB_GUEST_IP  = sizeof( ((VexGuestS390XState*)0)->guest_IA);
       vassert(vta->archinfo_guest.endness == VexEndnessBE);
       vassert(0 == sizeof(VexGuestS390XState) % LibVEX_GUEST_STATE_ALIGN);
       vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART ) == 8);
@@ -704,17 +457,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchARM:
      preciseMemExnsFn
        = ARMFN(guest_arm_state_requires_precise_mem_exns);
-      disInstrFn       = ARMFN(disInstr_ARM);
-      specHelper       = ARMFN(guest_arm_spechelper);
-      guest_sizeB      = sizeof(VexGuestARMState);
-      guest_word_type  = Ity_I32;
-      guest_layout     = ARMFN(&armGuest_layout);
-      offB_CMSTART     = offsetof(VexGuestARMState,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestARMState,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestARMState,guest_R15T);
-      szB_GUEST_IP     = sizeof( ((VexGuestARMState*)0)->guest_R15T );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
+      disInstrFn    = ARMFN(disInstr_ARM);
+      specHelper    = ARMFN(guest_arm_spechelper);
+      guest_layout  = ARMFN(&armGuest_layout);
+      offB_CMSTART  = offsetof(VexGuestARMState,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestARMState,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestARMState,guest_R15T);
+      szB_GUEST_IP  = sizeof( ((VexGuestARMState*)0)->guest_R15T );
       vassert(vta->archinfo_guest.endness == VexEndnessLE);
       vassert(0 == sizeof(VexGuestARMState) % LibVEX_GUEST_STATE_ALIGN);
       vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
@@ -725,17 +474,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchARM64:
       preciseMemExnsFn
         = ARM64FN(guest_arm64_state_requires_precise_mem_exns);
-      disInstrFn     = ARM64FN(disInstr_ARM64);
-      specHelper     = ARM64FN(guest_arm64_spechelper);
-      guest_sizeB    = sizeof(VexGuestARM64State);
-      guest_word_type  = Ity_I64;
-      guest_layout     = ARM64FN(&arm64Guest_layout);
-      offB_CMSTART     = offsetof(VexGuestARM64State,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestARM64State,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestARM64State,guest_PC);
-      szB_GUEST_IP     = sizeof( ((VexGuestARM64State*)0)->guest_PC );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
+      disInstrFn    = ARM64FN(disInstr_ARM64);
+      specHelper    = ARM64FN(guest_arm64_spechelper);
+      guest_layout  = ARM64FN(&arm64Guest_layout);
+      offB_CMSTART  = offsetof(VexGuestARM64State,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestARM64State,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestARM64State,guest_PC);
+      szB_GUEST_IP  = sizeof( ((VexGuestARM64State*)0)->guest_PC );
       vassert(vta->archinfo_guest.endness == VexEndnessLE);
       vassert(0 == sizeof(VexGuestARM64State) % LibVEX_GUEST_STATE_ALIGN);
       vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
@@ -746,17 +491,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchMIPS32:
       preciseMemExnsFn
         = MIPS32FN(guest_mips32_state_requires_precise_mem_exns);
-      disInstrFn       = MIPS32FN(disInstr_MIPS);
-      specHelper       = MIPS32FN(guest_mips32_spechelper);
-      guest_sizeB      = sizeof(VexGuestMIPS32State);
-      guest_word_type  = Ity_I32;
-      guest_layout     = MIPS32FN(&mips32Guest_layout);
-      offB_CMSTART     = offsetof(VexGuestMIPS32State,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestMIPS32State,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestMIPS32State,guest_PC);
-      szB_GUEST_IP     = sizeof( ((VexGuestMIPS32State*)0)->guest_PC );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
+      disInstrFn    = MIPS32FN(disInstr_MIPS);
+      specHelper    = MIPS32FN(guest_mips32_spechelper);
+      guest_layout  = MIPS32FN(&mips32Guest_layout);
+      offB_CMSTART  = offsetof(VexGuestMIPS32State,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestMIPS32State,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestMIPS32State,guest_PC);
+      szB_GUEST_IP  = sizeof( ((VexGuestMIPS32State*)0)->guest_PC );
       vassert(vta->archinfo_guest.endness == VexEndnessLE
               || vta->archinfo_guest.endness == VexEndnessBE);
       vassert(0 == sizeof(VexGuestMIPS32State) % LibVEX_GUEST_STATE_ALIGN);
@@ -768,17 +509,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchMIPS64:
       preciseMemExnsFn
         = MIPS64FN(guest_mips64_state_requires_precise_mem_exns);
-      disInstrFn       = MIPS64FN(disInstr_MIPS);
-      specHelper       = MIPS64FN(guest_mips64_spechelper);
-      guest_sizeB      = sizeof(VexGuestMIPS64State);
-      guest_word_type  = Ity_I64;
-      guest_layout     = MIPS64FN(&mips64Guest_layout);
-      offB_CMSTART     = offsetof(VexGuestMIPS64State,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestMIPS64State,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestMIPS64State,guest_PC);
-      szB_GUEST_IP     = sizeof( ((VexGuestMIPS64State*)0)->guest_PC );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
+      disInstrFn    = MIPS64FN(disInstr_MIPS);
+      specHelper    = MIPS64FN(guest_mips64_spechelper);
+      guest_layout  = MIPS64FN(&mips64Guest_layout);
+      offB_CMSTART  = offsetof(VexGuestMIPS64State,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestMIPS64State,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestMIPS64State,guest_PC);
+      szB_GUEST_IP  = sizeof( ((VexGuestMIPS64State*)0)->guest_PC );
       vassert(vta->archinfo_guest.endness == VexEndnessLE
               || vta->archinfo_guest.endness == VexEndnessBE);
       vassert(0 == sizeof(VexGuestMIPS64State) % LibVEX_GUEST_STATE_ALIGN);
@@ -790,17 +527,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
    case VexArchTILEGX:
       preciseMemExnsFn =
        TILEGXFN(guest_tilegx_state_requires_precise_mem_exns);
-      disInstrFn       = TILEGXFN(disInstr_TILEGX);
-      specHelper       = TILEGXFN(guest_tilegx_spechelper);
-      guest_sizeB      = sizeof(VexGuestTILEGXState);
-      guest_word_type  = Ity_I64;
-      guest_layout     = TILEGXFN(&tilegxGuest_layout);
-      offB_CMSTART     = offsetof(VexGuestTILEGXState,guest_CMSTART);
-      offB_CMLEN       = offsetof(VexGuestTILEGXState,guest_CMLEN);
-      offB_GUEST_IP    = offsetof(VexGuestTILEGXState,guest_pc);
-      szB_GUEST_IP     = sizeof( ((VexGuestTILEGXState*)0)->guest_pc );
-      offB_HOST_EvC_COUNTER  = offsetof(VexGuestTILEGXState,host_EvC_COUNTER);
-      offB_HOST_EvC_FAILADDR = offsetof(VexGuestTILEGXState,host_EvC_FAILADDR);
+      disInstrFn    = TILEGXFN(disInstr_TILEGX);
+      specHelper    = TILEGXFN(guest_tilegx_spechelper);
+      guest_layout  = TILEGXFN(&tilegxGuest_layout);
+      offB_CMSTART  = offsetof(VexGuestTILEGXState,guest_CMSTART);
+      offB_CMLEN    = offsetof(VexGuestTILEGXState,guest_CMLEN);
+      offB_GUEST_IP = offsetof(VexGuestTILEGXState,guest_pc);
+      szB_GUEST_IP  = sizeof( ((VexGuestTILEGXState*)0)->guest_pc );
       vassert(vta->archinfo_guest.endness == VexEndnessLE);
       vassert(0 == sizeof(VexGuestTILEGXState) % LibVEX_GUEST_STATE_ALIGN);
@@ -818,13 +551,12 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    // FIXME: how can we know the guest's hardware capabilities?
    check_hwcaps(vta->arch_guest, vta->archinfo_guest.hwcaps);
 
-   /* Set up result struct. */
-   VexTranslateResult res;
-   res.status         = VexTransOK;
-   res.n_sc_extents   = 0;
-   res.offs_profInc   = -1;
-   res.n_guest_instrs = 0;
+   res->status         = VexTransOK;
+   res->n_sc_extents   = 0;
+   res->offs_profInc   = -1;
+   res->n_guest_instrs = 0;
 
+#ifndef VEXMULTIARCH
    /* yet more sanity checks ... */
    if (vta->arch_guest == vta->arch_host) {
       /* doesn't necessarily have to be true, but if it isn't it means
@@ -834,6 +566,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
       /* ditto */
       vassert(vta->archinfo_guest.endness == vta->archinfo_host.endness);
    }
+#endif
 
    vexAllocSanityCheck();
 
@@ -842,14 +575,14 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
               " Front end "
              "------------------------\n\n");
 
-   VexRegisterUpdates pxControl = vex_control.iropt_register_updates_default;
-   vassert(pxControl >= VexRegUpdSpAtMemAccess
-           && pxControl <= VexRegUpdAllregsAtEachInsn);
+   *pxControl = vex_control.iropt_register_updates_default;
+   vassert(*pxControl >= VexRegUpdSpAtMemAccess
+           && *pxControl <= VexRegUpdAllregsAtEachInsn);
 
    irsb = bb_to_IR ( vta->guest_extents,
-                     &res.n_sc_extents,
-                     &res.n_guest_instrs,
-                     &pxControl,
+                     &res->n_sc_extents,
+                     &res->n_guest_instrs,
+                     pxControl,
                      vta->callback_opaque,
                      disInstrFn,
                      vta->guest_bytes,
@@ -873,8 +606,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    if (irsb == NULL) {
       /* Access failure. */
       vexSetAllocModeTEMP_and_clear();
-      vex_traceflags = 0;
-      res.status = VexTransAccessFail; return res;
+      return NULL;
    }
 
    vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
@@ -884,8 +616,8 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    }
 
    /* bb_to_IR() could have caused pxControl to change. */
-   vassert(pxControl >= VexRegUpdSpAtMemAccess
-           && pxControl <= VexRegUpdAllregsAtEachInsn);
+   vassert(*pxControl >= VexRegUpdSpAtMemAccess
+           && *pxControl <= VexRegUpdAllregsAtEachInsn);
 
    /* If debugging, show the raw guest bytes for this bb. */
    if (0 || (vex_traceflags & VEX_TRACE_FE)) {
@@ -914,7 +646,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    vexAllocSanityCheck();
 
    /* Clean it up, hopefully a lot. */
-   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn, pxControl,
+   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn, *pxControl,
                         vta->guest_bytes_addr,
                         vta->arch_guest );
 
@@ -985,6 +717,348 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
       vex_printf("\n");
    }
 
+   return irsb;
+}
+
+
+/* Back end of the compilation pipeline. Is not exported. */
+
+static void libvex_BackEnd ( const VexTranslateArgs *vta,
+                             /*MOD*/ VexTranslateResult* res,
+                             /*MOD*/ IRSB* irsb,
+                             VexRegisterUpdates pxControl )
+{
+   /* This is the bundle of functions we need to do the back-end stuff
+      (insn selection, reg-alloc, assembly) whilst being insulated
+      from the target instruction set. */
+   Bool         (*isMove)       ( const HInstr*, HReg*, HReg* );
+   void         (*getRegUsage)  ( HRegUsage*, const HInstr*, Bool );
+   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
+   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
+   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
+   HInstr*      (*directReload) ( HInstr*, HReg, Short );
+   void         (*ppInstr)      ( const HInstr*, Bool );
+   void         (*ppReg)        ( HReg );
+   HInstrArray* (*iselSB)       ( const IRSB*, VexArch, const VexArchInfo*,
+                                  const VexAbiInfo*, Int, Int, Bool, Bool,
+                                  Addr );
+   Int          (*emit)         ( /*MB_MOD*/Bool*,
+                                  UChar*, Int, const HInstr*, Bool, VexEndness,
+                                  const void*, const void*, const void*,
+                                  const void* );
+   Bool (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );
+
+   const RRegUniverse* rRegUniv = NULL;
+
+   Bool            mode64, chainingAllowed;
+   Int             i, j, k, out_used;
+   Int             guest_sizeB;
+   Int             offB_HOST_EvC_COUNTER;
+   Int             offB_HOST_EvC_FAILADDR;
+   Addr            max_ga;
+   UChar           insn_bytes[128];
+   HInstrArray*    vcode;
+   HInstrArray*    rcode;
+
+   isMove       = NULL;
+   getRegUsage  = NULL;
+   mapRegs      = NULL;
+   genSpill     = NULL;
+   genReload    = NULL;
+   directReload = NULL;
+   ppInstr      = NULL;
+   ppReg        = NULL;
+   iselSB       = NULL;
+   emit         = NULL;
+
+   mode64                 = False;
+   chainingAllowed        = False;
+   guest_sizeB            = 0;
+   offB_HOST_EvC_COUNTER  = 0;
+   offB_HOST_EvC_FAILADDR = 0;
+   preciseMemExnsFn       = NULL;
+
+   vassert(vex_initdone);
+   vassert(vta->disp_cp_xassisted != NULL);
+
+   vex_traceflags = vta->traceflags;
+
+   /* Both the chainers and the indir are either NULL or non-NULL. */
+   if (vta->disp_cp_chain_me_to_slowEP != NULL) {
+      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
+      vassert(vta->disp_cp_xindir != NULL);
+      chainingAllowed = True;
+   } else {
+      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
+      vassert(vta->disp_cp_xindir == NULL);
+   }
+
+   switch (vta->arch_guest) {
+
+   case VexArchX86:
+      preciseMemExnsFn
+         = X86FN(guest_x86_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestX86State);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
+      break;
+
+   case VexArchAMD64:
+      preciseMemExnsFn
+         = AMD64FN(guest_amd64_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestAMD64State);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
+      break;
+
+   case VexArchPPC32:
+      preciseMemExnsFn
+         = PPC32FN(guest_ppc32_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestPPC32State);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
+      break;
+
+   case VexArchPPC64:
+      preciseMemExnsFn
+         = PPC64FN(guest_ppc64_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestPPC64State);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
+      break;
+
+   case VexArchS390X:
+      preciseMemExnsFn
+         = S390FN(guest_s390x_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestS390XState);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
+      break;
+
+   case VexArchARM:
+      preciseMemExnsFn
+         = ARMFN(guest_arm_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestARMState);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
+      break;
+
+   case VexArchARM64:
+      preciseMemExnsFn
+         = ARM64FN(guest_arm64_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestARM64State);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
+      break;
+
+   case VexArchMIPS32:
+      preciseMemExnsFn
+         = MIPS32FN(guest_mips32_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestMIPS32State);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
+      break;
+
+   case VexArchMIPS64:
+      preciseMemExnsFn
+         = MIPS64FN(guest_mips64_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestMIPS64State);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
+      break;
+
+   case VexArchTILEGX:
+      preciseMemExnsFn =
+         TILEGXFN(guest_tilegx_state_requires_precise_mem_exns);
+      guest_sizeB            = sizeof(VexGuestTILEGXState);
+      offB_HOST_EvC_COUNTER  = offsetof(VexGuestTILEGXState,host_EvC_COUNTER);
+      offB_HOST_EvC_FAILADDR = offsetof(VexGuestTILEGXState,host_EvC_FAILADDR);
+      break;
+
+   default:
+      vpanic("LibVEX_Codegen: unsupported guest insn set");
+   }
+
+
+   switch (vta->arch_host) {
+
+   case VexArchX86:
+      mode64       = False;
+      rRegUniv     = X86FN(getRRegUniverse_X86());
+      isMove       = CAST_TO_TYPEOF(isMove) X86FN(isMove_X86Instr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) X86FN(getRegUsage_X86Instr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) X86FN(mapRegs_X86Instr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) X86FN(genSpill_X86);
+      genReload    = CAST_TO_TYPEOF(genReload) X86FN(genReload_X86);
+      directReload = CAST_TO_TYPEOF(directReload) X86FN(directReload_X86);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) X86FN(ppX86Instr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) X86FN(ppHRegX86);
+      iselSB       = X86FN(iselSB_X86);
+      emit         = CAST_TO_TYPEOF(emit) X86FN(emit_X86Instr);
+      vassert(vta->archinfo_host.endness == VexEndnessLE);
+      break;
+
+   case VexArchAMD64:
+      mode64       = True;
+      rRegUniv     = AMD64FN(getRRegUniverse_AMD64());
+      isMove       = CAST_TO_TYPEOF(isMove) AMD64FN(isMove_AMD64Instr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) AMD64FN(getRegUsage_AMD64Instr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) AMD64FN(mapRegs_AMD64Instr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) AMD64FN(genSpill_AMD64);
+      genReload    = CAST_TO_TYPEOF(genReload) AMD64FN(genReload_AMD64);
+      directReload = CAST_TO_TYPEOF(directReload) AMD64FN(directReload_AMD64);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) AMD64FN(ppAMD64Instr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) AMD64FN(ppHRegAMD64);
+      iselSB       = AMD64FN(iselSB_AMD64);
+      emit         = CAST_TO_TYPEOF(emit) AMD64FN(emit_AMD64Instr);
+      vassert(vta->archinfo_host.endness == VexEndnessLE);
+      break;
+
+   case VexArchPPC32:
+      mode64       = False;
+      rRegUniv     = PPC32FN(getRRegUniverse_PPC(mode64));
+      isMove       = CAST_TO_TYPEOF(isMove) PPC32FN(isMove_PPCInstr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) PPC32FN(getRegUsage_PPCInstr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) PPC32FN(mapRegs_PPCInstr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) PPC32FN(genSpill_PPC);
+      genReload    = CAST_TO_TYPEOF(genReload) PPC32FN(genReload_PPC);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) PPC32FN(ppPPCInstr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) PPC32FN(ppHRegPPC);
+      iselSB       = PPC32FN(iselSB_PPC);
+      emit         = CAST_TO_TYPEOF(emit) PPC32FN(emit_PPCInstr);
+      vassert(vta->archinfo_host.endness == VexEndnessBE);
+      break;
+
+   case VexArchPPC64:
+      mode64       = True;
+      rRegUniv     = PPC64FN(getRRegUniverse_PPC(mode64));
+      isMove       = CAST_TO_TYPEOF(isMove) PPC64FN(isMove_PPCInstr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) PPC64FN(getRegUsage_PPCInstr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) PPC64FN(mapRegs_PPCInstr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) PPC64FN(genSpill_PPC);
+      genReload    = CAST_TO_TYPEOF(genReload) PPC64FN(genReload_PPC);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) PPC64FN(ppPPCInstr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) PPC64FN(ppHRegPPC);
+      iselSB       = PPC64FN(iselSB_PPC);
+      emit         = CAST_TO_TYPEOF(emit) PPC64FN(emit_PPCInstr);
+      vassert(vta->archinfo_host.endness == VexEndnessBE ||
+              vta->archinfo_host.endness == VexEndnessLE );
+      break;
+
+   case VexArchS390X:
+      mode64       = True;
+      /* KLUDGE: export hwcaps. */
+      s390_host_hwcaps = vta->archinfo_host.hwcaps;
+      rRegUniv     = S390FN(getRRegUniverse_S390());
+      isMove       = CAST_TO_TYPEOF(isMove) S390FN(isMove_S390Instr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) S390FN(getRegUsage_S390Instr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) S390FN(mapRegs_S390Instr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) S390FN(genSpill_S390);
+      genReload    = CAST_TO_TYPEOF(genReload) S390FN(genReload_S390);
+      // fixs390: consider implementing directReload_S390
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) S390FN(ppS390Instr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) S390FN(ppHRegS390);
+      iselSB       = S390FN(iselSB_S390);
+      emit         = CAST_TO_TYPEOF(emit) S390FN(emit_S390Instr);
+      vassert(vta->archinfo_host.endness == VexEndnessBE);
+      break;
+
+   case VexArchARM:
+      mode64       = False;
+      rRegUniv     = ARMFN(getRRegUniverse_ARM());
+      isMove       = CAST_TO_TYPEOF(isMove) ARMFN(isMove_ARMInstr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) ARMFN(getRegUsage_ARMInstr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) ARMFN(mapRegs_ARMInstr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) ARMFN(genSpill_ARM);
+      genReload    = CAST_TO_TYPEOF(genReload) ARMFN(genReload_ARM);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) ARMFN(ppARMInstr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) ARMFN(ppHRegARM);
+      iselSB       = ARMFN(iselSB_ARM);
+      emit         = CAST_TO_TYPEOF(emit) ARMFN(emit_ARMInstr);
+      vassert(vta->archinfo_host.endness == VexEndnessLE);
+      break;
+
+   case VexArchARM64:
+      mode64       = True;
+      rRegUniv     = ARM64FN(getRRegUniverse_ARM64());
+      isMove       = CAST_TO_TYPEOF(isMove) ARM64FN(isMove_ARM64Instr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) ARM64FN(getRegUsage_ARM64Instr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) ARM64FN(mapRegs_ARM64Instr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) ARM64FN(genSpill_ARM64);
+      genReload    = CAST_TO_TYPEOF(genReload) ARM64FN(genReload_ARM64);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) ARM64FN(ppARM64Instr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) ARM64FN(ppHRegARM64);
+      iselSB       = ARM64FN(iselSB_ARM64);
+      emit         = CAST_TO_TYPEOF(emit) ARM64FN(emit_ARM64Instr);
+      vassert(vta->archinfo_host.endness == VexEndnessLE);
+      break;
+
+   case VexArchMIPS32:
+      mode64       = False;
+      rRegUniv     = MIPS32FN(getRRegUniverse_MIPS(mode64));
+      isMove       = CAST_TO_TYPEOF(isMove) MIPS32FN(isMove_MIPSInstr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) MIPS32FN(getRegUsage_MIPSInstr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) MIPS32FN(mapRegs_MIPSInstr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) MIPS32FN(genSpill_MIPS);
+      genReload    = CAST_TO_TYPEOF(genReload) MIPS32FN(genReload_MIPS);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) MIPS32FN(ppMIPSInstr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) MIPS32FN(ppHRegMIPS);
+      iselSB       = MIPS32FN(iselSB_MIPS);
+      emit         = CAST_TO_TYPEOF(emit) MIPS32FN(emit_MIPSInstr);
+      vassert(vta->archinfo_host.endness == VexEndnessLE
+              || vta->archinfo_host.endness == VexEndnessBE);
+      break;
+
+   case VexArchMIPS64:
+      mode64       = True;
+      rRegUniv     = MIPS64FN(getRRegUniverse_MIPS(mode64));
+      isMove       = CAST_TO_TYPEOF(isMove) MIPS64FN(isMove_MIPSInstr);
+      getRegUsage
+         = CAST_TO_TYPEOF(getRegUsage) MIPS64FN(getRegUsage_MIPSInstr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) MIPS64FN(mapRegs_MIPSInstr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) MIPS64FN(genSpill_MIPS);
+      genReload    = CAST_TO_TYPEOF(genReload) MIPS64FN(genReload_MIPS);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) MIPS64FN(ppMIPSInstr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) MIPS64FN(ppHRegMIPS);
+      iselSB       = MIPS64FN(iselSB_MIPS);
+      emit         = CAST_TO_TYPEOF(emit) MIPS64FN(emit_MIPSInstr);
+      vassert(vta->archinfo_host.endness == VexEndnessLE
+              || vta->archinfo_host.endness == VexEndnessBE);
+      break;
+
+   case VexArchTILEGX:
+      mode64       = True;
+      rRegUniv     = TILEGXFN(getRRegUniverse_TILEGX());
+      isMove       = CAST_TO_TYPEOF(isMove) TILEGXFN(isMove_TILEGXInstr);
+      getRegUsage =
+         CAST_TO_TYPEOF(getRegUsage) TILEGXFN(getRegUsage_TILEGXInstr);
+      mapRegs      = CAST_TO_TYPEOF(mapRegs) TILEGXFN(mapRegs_TILEGXInstr);
+      genSpill     = CAST_TO_TYPEOF(genSpill) TILEGXFN(genSpill_TILEGX);
+      genReload    = CAST_TO_TYPEOF(genReload) TILEGXFN(genReload_TILEGX);
+      ppInstr      = CAST_TO_TYPEOF(ppInstr) TILEGXFN(ppTILEGXInstr);
+      ppReg        = CAST_TO_TYPEOF(ppReg) TILEGXFN(ppHRegTILEGX);
+      iselSB       = TILEGXFN(iselSB_TILEGX);
+      emit         = CAST_TO_TYPEOF(emit) TILEGXFN(emit_TILEGXInstr);
+      vassert(vta->archinfo_host.endness == VexEndnessLE);
+      break;
+
+   default:
+      vpanic("LibVEX_Translate: unsupported host insn set");
+   }
+
+   // Are the host's hardware capabilities feasible? The function will
+   // not return if hwcaps are infeasible in some sense.
+   check_hwcaps(vta->arch_host, vta->archinfo_host.hwcaps);
+
 
    /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
    max_ga = ado_treebuild_BB( irsb, preciseMemExnsFn, pxControl );
 
@@ -1006,7 +1080,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    /* HACK */
    if (0) {
      *(vta->host_bytes_used) = 0;
-      res.status = VexTransOK; return res;
+      res->status = VexTransOK; return;
    }
    /* end HACK */
 
@@ -1067,7 +1141,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    /* HACK */
    if (0) {
      *(vta->host_bytes_used) = 0;
-      res.status = VexTransOK; return res;
+      res->status = VexTransOK; return;
    }
    /* end HACK */
 
@@ -1101,14 +1175,14 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
         if (UNLIKELY(out_used + j > vta->host_bytes_size)) {
            vexSetAllocModeTEMP_and_clear();
            vex_traceflags = 0;
-           res.status = VexTransOutputFull;
-           return res;
+           res->status = VexTransOutputFull;
+           return;
        }
        if (UNLIKELY(hi_isProfInc)) {
           vassert(vta->addProfInc); /* else where did it come from? */
-          vassert(res.offs_profInc == -1); /* there can be only one (tm) */
+          vassert(res->offs_profInc == -1); /* there can be only one (tm) */
          vassert(out_used >= 0);
-          res.offs_profInc = out_used;
+          res->offs_profInc = out_used;
        }
        { UChar* dst = &vta->host_bytes[out_used];
          for (k = 0; k < j; k++) {
@@ -1134,7 +1208,20 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    }
 
    vex_traceflags = 0;
-   res.status = VexTransOK;
+   res->status = VexTransOK;
+   return;
+}
+
+
+/* Exported to library client. */
+
+VexTranslateResult LibVEX_Translate ( /*MOD*/ VexTranslateArgs* vta )
+{
+   VexTranslateResult res = { 0 };
+   VexRegisterUpdates pxControl = VexRegUpd_INVALID;
+
+   IRSB* irsb = LibVEX_FrontEnd(vta, &res, &pxControl);
+   libvex_BackEnd(vta, &res, irsb, pxControl);
    return res;
 }
 
@@ -1470,6 +1557,29 @@ void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
 }
 
 
+static IRType arch_word_size (VexArch arch) {
+   switch (arch) {
+      case VexArchX86:
+      case VexArchARM:
+      case VexArchMIPS32:
+      case VexArchPPC32:
+         return Ity_I32;
+
+      case VexArchAMD64:
+      case VexArchARM64:
+      case VexArchMIPS64:
+      case VexArchPPC64:
+      case VexArchS390X:
+      case VexArchTILEGX:
+         return Ity_I64;
+
+      default:
+         vex_printf("Fatal: unknown arch in arch_word_size\n");
+         vassert(0);
+   }
+}
+
+
 /* Convenience macro to be used in show_hwcaps_ARCH functions */
 #define NUM_HWCAPS (sizeof hwcaps_list / sizeof hwcaps_list[0])
 
diff --git a/VEX/priv/main_util.h b/VEX/priv/main_util.h
index 57f45ed80b..a7c37c8615 100644
--- a/VEX/priv/main_util.h
+++ b/VEX/priv/main_util.h
@@ -43,8 +43,15 @@
 
 #define NULL ((void*)0)
 
-#define LIKELY(x)     __builtin_expect(!!(x), 1)
-#define UNLIKELY(x)   __builtin_expect(!!(x), 0)
+#if defined(_MSC_VER) // building with MSVC
+# define LIKELY(x)          (x)
+# define UNLIKELY(x)        (x)
+# define CAST_TO_TYPEOF(x)  /**/
+#else
+# define LIKELY(x)          __builtin_expect(!!(x), 1)
+# define UNLIKELY(x)        __builtin_expect(!!(x), 0)
+# define CAST_TO_TYPEOF(x)  (__typeof__(x))
+#endif // defined(_MSC_VER)
 
 #if !defined(offsetof)
 #   define offsetof(type,memb) ((SizeT)(HWord)&((type*)0)->memb)
 
diff --git a/VEX/pub/libvex.h b/VEX/pub/libvex.h
index fdb772fee0..8ac3d9fee4 100644
--- a/VEX/pub/libvex.h
+++ b/VEX/pub/libvex.h
@@ -776,8 +776,17 @@ typedef
    VexTranslateArgs;
 
 
+/* Runs the entire compilation pipeline. */
 extern
-VexTranslateResult LibVEX_Translate ( VexTranslateArgs* );
+VexTranslateResult LibVEX_Translate ( /*MOD*/ VexTranslateArgs* );
+
+/* Runs the first half of the compilation pipeline: lifts guest code to IR,
+   optimises, instruments and optimises it some more. */
+extern
+IRSB* LibVEX_FrontEnd ( /*MOD*/ VexTranslateArgs*,
+                        /*OUT*/ VexTranslateResult* res,
+                        /*OUT*/ VexRegisterUpdates* pxControl );
+
 
 /* A subtlety re interaction between self-checking translations and
    bb-chasing.  The supplied chase_into_ok function should say NO
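
For library clients, the net effect of the patch is one new public entry
point, LibVEX_FrontEnd, which runs translation only as far as the optimised
and instrumented IRSB; the back half (libvex_BackEnd) stays private to
main_main.c, and LibVEX_Translate now simply composes the two.  The sketch
below shows how the two entry points might be driven.  It is illustrative
only and not part of the patch: inspect_irsb() is a hypothetical IR
consumer, and 'vta' is assumed to be populated exactly as for a plain
LibVEX_Translate call (arches, archinfo, guest_bytes, chaining callbacks,
output buffer, and so on).

    #include "libvex.h"

    /* Hypothetical IR consumer, e.g. an analysis or instrumentation pass. */
    extern void inspect_irsb ( IRSB* irsb );

    /* Front half only: lift the guest bytes to an IRSB, optimise and
       instrument them, then hand the IR to the caller.  A NULL result is
       the old VexTransAccessFail case (guest bytes unreadable). */
    Bool demo_lift_only ( VexTranslateArgs* vta )
    {
       VexTranslateResult res;
       VexRegisterUpdates pxControl = VexRegUpd_INVALID;
       IRSB* irsb = LibVEX_FrontEnd ( vta, &res, &pxControl );
       if (irsb == NULL)
          return False;
       inspect_irsb(irsb);
       return True;
    }

    /* Whole pipeline: behaves exactly as LibVEX_Translate did before the
       split, since it now just runs LibVEX_FrontEnd + libvex_BackEnd. */
    Bool demo_translate_whole ( VexTranslateArgs* vta )
    {
       VexTranslateResult res = LibVEX_Translate ( vta );
       return res.status == VexTransOK;
    }

Note that, as the calls to vexSetAllocModeTEMP_and_clear() in the patch
suggest, the returned IRSB lives in VEX's temporary allocation area and is
only valid until the next LibVEX_FrontEnd / LibVEX_Translate call.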
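The MSVC side of the change is the CAST_TO_TYPEOF macro in main_util.h:
under GCC/Clang it expands to a (__typeof__(x)) cast, while under MSVC,
which has no __typeof__, it expands to nothing, leaving an implicit (and
typically warned-about) function-pointer conversion instead of a parse
error.  A standalone toy illustration follows; this is not VEX code, and
ppDemoInstr and the types are made up for the demo.

    #include <stdio.h>

    /* Same shape as the macro added to VEX/priv/main_util.h. */
    #if defined(_MSC_VER)
    # define CAST_TO_TYPEOF(x) /**/
    #else
    # define CAST_TO_TYPEOF(x) (__typeof__(x))
    #endif

    /* Stand-in for an arch-specific printer such as ppX86Instr: its
       parameter type is more specific than the generic hook below. */
    static void ppDemoInstr ( const int* insn, int mode64 )
    {
       printf("insn %d (mode64=%d)\n", *insn, mode64);
    }

    int main ( void )
    {
       /* Generic hook, analogous to ppInstr in libvex_BackEnd. */
       void (*ppInstr) ( const void*, int );

       /* GCC/Clang: expands to
             ppInstr = (void (*)(const void*, int)) ppDemoInstr;
          without spelling the pointer type out again.  MSVC: the macro
          vanishes and the mismatched assignment is merely diagnosed --
          but at least it parses, which __typeof__ does not there. */
       ppInstr = CAST_TO_TYPEOF(ppInstr) ppDemoInstr;

       int insn = 7;
       ppInstr(&insn, 1);  /* same pointer-type punning VEX itself relies on */
       return 0;
    }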