Bool (*byte_accessible)(Addr64),
Bool (*resteerOkFn)(Addr64),
Bool host_bigendian,
- VexSubArch subarch_guest );
+ VexArchInfo* archinfo_guest );
/* Used by the optimiser to specialise calls to helpers. */
extern
resteer into, returns False. */
static
-DisResult disInstr ( /*IN*/ Bool resteerOK,
- /*IN*/ Bool (*resteerOkFn) ( Addr64 ),
- /*IN*/ ULong delta,
- /*IN*/ VexSubArch subarch,
- /*OUT*/ Long* size,
- /*OUT*/ Addr64* whereNext );
+DisResult disInstr ( /*IN*/ Bool resteerOK,
+ /*IN*/ Bool (*resteerOkFn) ( Addr64 ),
+ /*IN*/ ULong delta,
+ /*IN*/ VexArchInfo* archinfo,
+ /*OUT*/ Long* size,
+ /*OUT*/ Addr64* whereNext );
/* This is the main (only, in fact) entry point for this module. */
Bool (*byte_accessible)(Addr64),
Bool (*chase_into_ok)(Addr64),
Bool host_bigendian,
- VexSubArch subarch_guest )
+ VexArchInfo* archinfo_guest )
{
Long delta, size;
Int i, n_instrs, first_stmt_idx;
vassert(vex_control.guest_chase_thresh >= 0);
vassert(vex_control.guest_chase_thresh < vex_control.guest_max_insns);
- vassert(subarch_guest == VexSubArch_NONE);
+ vassert(archinfo_guest->subarch == VexSubArch_NONE);
/* Start a new, empty extent. */
vge->n_used = 1;
guest_rip_next_assumed = 0;
guest_rip_next_mustcheck = False;
dres = disInstr( resteerOK, chase_into_ok,
- delta, subarch_guest, &size, &guest_next );
+ delta, archinfo_guest, &size, &guest_next );
insn_verbose = False;
/* stay sane ... */
is False, disInstr may not return Dis_Resteer. */
static
-DisResult disInstr ( /*IN*/ Bool resteerOK,
- /*IN*/ Bool (*resteerOkFn) ( Addr64 ),
- /*IN*/ ULong delta,
- /*IN*/ VexSubArch subarch,
- /*OUT*/ Long* size,
- /*OUT*/ Addr64* whereNext )
+DisResult disInstr ( /*IN*/ Bool resteerOK,
+ /*IN*/ Bool (*resteerOkFn) ( Addr64 ),
+ /*IN*/ ULong delta,
+ /*IN*/ VexArchInfo* archinfo,
+ /*OUT*/ Long* size,
+ /*OUT*/ Addr64* whereNext )
{
IRType ty;
IRTemp addr, t0, t1, t2, t3, t4, t5, t6;
HChar* fName = NULL;
void* fAddr = NULL;
if (haveF2orF3(pfx)) goto decode_failure;
- switch (subarch) {
+ switch (archinfo->subarch) {
case VexSubArch_NONE:
fName = "amd64g_dirtyhelper_CPUID";
fAddr = &amd64g_dirtyhelper_CPUID;
Bool (*byte_accessible)(Addr64),
Bool (*resteerOkFn)(Addr64),
Bool host_bigendian,
- VexSubArch subarch_guest );
+ VexArchInfo* archinfo_guest );
/* Used by the optimiser to specialise calls to helpers. */
extern
Bool (*byte_accessible)(Addr64),
Bool (*chase_into_ok)(Addr64),
Bool host_bigendian,
- VexSubArch subarch_guest )
+ VexArchInfo* archinfo_guest )
{
UInt delta;
Int i, n_instrs, size, first_stmt_idx;
vassert(vex_control.guest_chase_thresh >= 0);
vassert(vex_control.guest_chase_thresh < vex_control.guest_max_insns);
- vassert(subarch_guest == VexSubArchPPC32_noAV
- || subarch_guest == VexSubArchPPC32_AV);
+ vassert(archinfo_guest->subarch == VexSubArchPPC32_noAV
+ || archinfo_guest->subarch == VexSubArchPPC32_AV);
/* Start a new, empty extent. */
vge->n_used = 1;
Bool (*byte_accessible)(Addr64),
Bool (*resteerOkFn)(Addr64),
Bool host_bigendian,
- VexSubArch subarch_guest );
+ VexArchInfo* archinfo_guest );
/* Used by the optimiser to specialise calls to helpers. */
extern
resteer into, returns False. */
static
-DisResult disInstr ( /*IN*/ Bool resteerOK,
- /*IN*/ Bool (*resteerOkFn) ( Addr64 ),
- /*IN*/ UInt delta,
- /*IN*/ VexSubArch subarch,
- /*OUT*/ Int* size,
- /*OUT*/ Addr64* whereNext );
+DisResult disInstr ( /*IN*/ Bool resteerOK,
+ /*IN*/ Bool (*resteerOkFn) ( Addr64 ),
+ /*IN*/ UInt delta,
+ /*IN*/ VexArchInfo* archinfo,
+ /*OUT*/ Int* size,
+ /*OUT*/ Addr64* whereNext );
/* This is the main (only, in fact) entry point for this module. */
Bool (*byte_accessible)(Addr64),
Bool (*chase_into_ok)(Addr64),
Bool host_bigendian,
- VexSubArch subarch_guest )
+ VexArchInfo* archinfo_guest )
{
UInt delta;
Int i, n_instrs, size, first_stmt_idx;
vassert(vex_control.guest_chase_thresh >= 0);
vassert(vex_control.guest_chase_thresh < vex_control.guest_max_insns);
- vassert(subarch_guest == VexSubArchX86_sse0
- || subarch_guest == VexSubArchX86_sse1
- || subarch_guest == VexSubArchX86_sse2);
+ vassert(archinfo_guest->subarch == VexSubArchX86_sse0
+ || archinfo_guest->subarch == VexSubArchX86_sse1
+ || archinfo_guest->subarch == VexSubArchX86_sse2);
vassert((guest_eip_start >> 32) == 0);
needs to be annulled. */
size = 0; /* just in case disInstr doesn't set it */
dres = disInstr( resteerOK, chase_into_ok,
- delta, subarch_guest, &size, &guest_next );
+ delta, archinfo_guest, &size, &guest_next );
insn_verbose = False;
/* stay sane ... */
is False, disInstr may not return Dis_Resteer. */
static
-DisResult disInstr ( /*IN*/ Bool resteerOK,
- /*IN*/ Bool (*resteerOkFn) ( Addr64 ),
- /*IN*/ UInt delta,
- /*IN*/ VexSubArch subarch,
- /*OUT*/ Int* size,
- /*OUT*/ Addr64* whereNext )
+DisResult disInstr ( /*IN*/ Bool resteerOK,
+ /*IN*/ Bool (*resteerOkFn) ( Addr64 ),
+ /*IN*/ UInt delta,
+ /*IN*/ VexArchInfo* archinfo,
+ /*OUT*/ Int* size,
+ /*OUT*/ Addr64* whereNext )
{
IRType ty;
IRTemp addr, t0, t1, t2, t3, t4, t5, t6;
/* Skip parts of the decoder which don't apply given the stated
guest subarchitecture. */
- if (subarch == VexSubArchX86_sse0)
+ if (archinfo->subarch == VexSubArchX86_sse0)
goto after_sse_decoders;
/* Otherwise we must be doing sse1 or sse2, so we can at least try
/* Skip parts of the decoder which don't apply given the stated
guest subarchitecture. */
- if (subarch == VexSubArchX86_sse0 || subarch == VexSubArchX86_sse1)
+ if (archinfo->subarch == VexSubArchX86_sse0
+ || archinfo->subarch == VexSubArchX86_sse1)
goto after_sse_decoders;
insn = (UChar*)&guest_code[delta];
IRDirty* d = NULL;
HChar* fName = NULL;
void* fAddr = NULL;
- switch (subarch) {
+ switch (archinfo->subarch) {
case VexSubArchX86_sse0:
fName = "x86g_dirtyhelper_CPUID_sse0";
fAddr = &x86g_dirtyhelper_CPUID_sse0;
extern AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offset );
extern AMD64Instr* genReload_AMD64 ( HReg rreg, Int offset );
extern void getAllocableRegs_AMD64 ( Int*, HReg** );
-extern HInstrArray* iselBB_AMD64 ( IRBB*, VexSubArch );
+extern HInstrArray* iselBB_AMD64 ( IRBB*, VexArchInfo* );
#endif /* ndef __LIBVEX_HOST_AMD64_HDEFS_H */
/* Translate an entire BB to amd64 code. */
-HInstrArray* iselBB_AMD64 ( IRBB* bb, VexSubArch subarch_host )
+HInstrArray* iselBB_AMD64 ( IRBB* bb, VexArchInfo* archinfo_host )
{
- Int i, j;
- HReg hreg, hregHI;
- ISelEnv* env;
+ Int i, j;
+ HReg hreg, hregHI;
+ ISelEnv* env;
+ VexSubArch subarch_host = archinfo_host->subarch;
/* sanity ... */
vassert(subarch_host == VexSubArch_NONE);
extern PPC32Instr* genSpill_PPC32 ( HReg rreg, UShort offsetB );
extern PPC32Instr* genReload_PPC32 ( HReg rreg, UShort offsetB );
extern void getAllocableRegs_PPC32 ( Int*, HReg** );
-extern HInstrArray* iselBB_PPC32 ( IRBB*, VexSubArch );
+extern HInstrArray* iselBB_PPC32 ( IRBB*, VexArchInfo* );
#endif /* ndef __LIBVEX_HOST_PPC32_HDEFS_H */
/* Translate an entire BB to ppc32 code. */
-HInstrArray* iselBB_PPC32 ( IRBB* bb, VexSubArch subarch_host )
+HInstrArray* iselBB_PPC32 ( IRBB* bb, VexArchInfo* archinfo_host )
{
- Int i, j;
- HReg hreg, hregHI;
- ISelEnv* env;
+ Int i, j;
+ HReg hreg, hregHI;
+ ISelEnv* env;
+ VexSubArch subarch_host = archinfo_host->subarch;
/* sanity ... */
vassert(subarch_host == VexSubArchPPC32_noAV
extern X86Instr* genSpill_X86 ( HReg rreg, Int offset );
extern X86Instr* genReload_X86 ( HReg rreg, Int offset );
extern void getAllocableRegs_X86 ( Int*, HReg** );
-extern HInstrArray* iselBB_X86 ( IRBB*, VexSubArch );
+extern HInstrArray* iselBB_X86 ( IRBB*, VexArchInfo* );
#endif /* ndef __LIBVEX_HOST_X86_HDEFS_H */
/* Translate an entire BB to x86 code. */
-HInstrArray* iselBB_X86 ( IRBB* bb, VexSubArch subarch_host )
+HInstrArray* iselBB_X86 ( IRBB* bb, VexArchInfo* archinfo_host )
{
- Int i, j;
- HReg hreg, hregHI;
- ISelEnv* env;
+ Int i, j;
+ HReg hreg, hregHI;
+ ISelEnv* env;
+ VexSubArch subarch_host = archinfo_host->subarch;
/* sanity ... */
vassert(subarch_host == VexSubArchX86_sse0
VexTranslateResult LibVEX_Translate (
/* The instruction sets we are translating from and to. */
- VexArch arch_guest,
- VexSubArch subarch_guest,
- VexArch arch_host,
- VexSubArch subarch_host,
+ VexArch arch_guest,
+ VexArchInfo* archinfo_guest,
+ VexArch arch_host,
+ VexArchInfo* archinfo_host,
/* IN: the block to translate, and its guest address. */
UChar* guest_bytes,
Addr64 guest_bytes_addr,
HInstr* (*genReload) ( HReg, Int );
void (*ppInstr) ( HInstr* );
void (*ppReg) ( HReg );
- HInstrArray* (*iselBB) ( IRBB*, VexSubArch );
+ HInstrArray* (*iselBB) ( IRBB*, VexArchInfo* );
IRBB* (*bbToIR) ( UChar*, Addr64,
VexGuestExtents*,
Bool(*)(Addr64),
Bool(*)(Addr64),
- Bool, VexSubArch );
+ Bool, VexArchInfo* );
Int (*emit) ( UChar*, Int, HInstr* );
IRExpr* (*specHelper) ( HChar*, IRExpr** );
Bool (*preciseMemExnsFn) ( Int, Int );
emit = (Int(*)(UChar*,Int,HInstr*)) emit_X86Instr;
host_is_bigendian = False;
host_word_type = Ity_I32;
- vassert(subarch_host == VexSubArchX86_sse0
- || subarch_host == VexSubArchX86_sse1
- || subarch_host == VexSubArchX86_sse2);
+ vassert(archinfo_host->subarch == VexSubArchX86_sse0
+ || archinfo_host->subarch == VexSubArchX86_sse1
+ || archinfo_host->subarch == VexSubArchX86_sse2);
break;
case VexArchAMD64:
emit = (Int(*)(UChar*,Int,HInstr*)) emit_AMD64Instr;
host_is_bigendian = False;
host_word_type = Ity_I64;
- vassert(subarch_host == VexSubArch_NONE);
+ vassert(archinfo_host->subarch == VexSubArch_NONE);
break;
case VexArchPPC32:
emit = (Int(*)(UChar*,Int,HInstr*)) emit_PPC32Instr;
host_is_bigendian = True;
host_word_type = Ity_I32;
- vassert(subarch_guest == VexSubArchPPC32_noAV
- || subarch_guest == VexSubArchPPC32_AV);
+ vassert(archinfo_guest->subarch == VexSubArchPPC32_noAV
+ || archinfo_guest->subarch == VexSubArchPPC32_AV);
break;
default:
guest_sizeB = sizeof(VexGuestX86State);
guest_word_type = Ity_I32;
guest_layout = &x86guest_layout;
- vassert(subarch_guest == VexSubArchX86_sse0
- || subarch_guest == VexSubArchX86_sse1
- || subarch_guest == VexSubArchX86_sse2);
+ vassert(archinfo_guest->subarch == VexSubArchX86_sse0
+ || archinfo_guest->subarch == VexSubArchX86_sse1
+ || archinfo_guest->subarch == VexSubArchX86_sse2);
break;
case VexArchAMD64:
guest_sizeB = sizeof(VexGuestAMD64State);
guest_word_type = Ity_I64;
guest_layout = &amd64guest_layout;
- vassert(subarch_guest == VexSubArch_NONE);
+ vassert(archinfo_guest->subarch == VexSubArch_NONE);
break;
case VexArchARM:
guest_sizeB = sizeof(VexGuestARMState);
guest_word_type = Ity_I32;
guest_layout = &armGuest_layout;
- vassert(subarch_guest == VexSubArchARM_v4);
+ vassert(archinfo_guest->subarch == VexSubArchARM_v4);
break;
case VexArchPPC32:
guest_sizeB = sizeof(VexGuestPPC32State);
guest_word_type = Ity_I32;
guest_layout = &ppc32Guest_layout;
- vassert(subarch_guest == VexSubArchPPC32_noAV
- || subarch_guest == VexSubArchPPC32_AV);
+ vassert(archinfo_guest->subarch == VexSubArchPPC32_noAV
+ || archinfo_guest->subarch == VexSubArchPPC32_AV);
break;
default:
/* doesn't necessarily have to be true, but if it isn't it means
we are simulating one flavour of an architecture on a different
flavour of the same architecture, which is pretty strange. */
- vassert(subarch_guest == subarch_host);
+ vassert(archinfo_guest->subarch == archinfo_host->subarch);
}
if (vex_traceflags & VEX_TRACE_FE)
byte_accessible,
chase_into_ok,
host_is_bigendian,
- subarch_guest );
+ archinfo_guest );
if (irbb == NULL) {
/* Access failure. */
" Instruction selection "
"------------------------\n");
- vcode = iselBB ( irbb, subarch_host );
+ vcode = iselBB ( irbb, archinfo_host );
if (vex_traceflags & VEX_TRACE_VCODE)
vex_printf("\n");
}
}
-/* --------- Arch/Subarch names. --------- */
+/* --------- Arch/Subarch stuff. --------- */
const HChar* LibVEX_ppVexArch ( VexArch arch )
{
}
}
+/* Write default settings into *vai. */
+void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
+{
+ vai->subarch = VexSubArch_INVALID;
+ vai->ppc32_cache_line_szB = 0;
+}
+
+
/*---------------------------------------------------------------*/
/*--- end main/vex_main.c ---*/
/*---------------------------------------------------------------*/
/*---------------------------------------------------------------*/
/*-------------------------------------------------------*/
-/*--- Architectures and architecture variants ---*/
+/*--- Architectures, variants, and other arch info ---*/
/*-------------------------------------------------------*/
typedef
extern const HChar* LibVEX_ppVexSubArch ( VexSubArch );
+/* This struct is a bit of a hack, but is needed to carry misc
+ important bits of info about an arch. Fields which are optional or
+   ignored on some archs should be set to zero. */
+
+typedef
+ struct {
+ /* This is the only mandatory field. */
+ VexSubArch subarch;
+      /* PPC32 only: size of a cache line, in bytes */
+ Int ppc32_cache_line_szB;
+ }
+ VexArchInfo;
+
+/* Write default settings into *vai. */
+extern
+void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai );
+
+
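(Illustration only, not part of this patch: a minimal sketch of how a
client would use the new VexArchInfo API, assuming the remaining
LibVEX_Translate arguments are unchanged and therefore elided below.)

   VexArchInfo vai_guest, vai_host;

   /* Start from the library defaults, then set the one mandatory
      field.  An SSE2-capable x86 guest and host are assumed here
      purely for the example. */
   LibVEX_default_VexArchInfo( &vai_guest );
   vai_guest.subarch = VexSubArchX86_sse2;

   LibVEX_default_VexArchInfo( &vai_host );
   vai_host.subarch = VexSubArchX86_sse2;

   /* Pointers to these now go where the bare VexSubArch values
      used to, e.g.
         LibVEX_Translate( VexArchX86, &vai_guest,
                           VexArchX86, &vai_host, ... );           */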
/*-------------------------------------------------------*/
/*--- Control of Vex's optimiser (iropt). ---*/
/*-------------------------------------------------------*/
extern
VexTranslateResult LibVEX_Translate (
/* The instruction sets we are translating from and to. */
- VexArch arch_guest,
- VexSubArch subarch_guest,
- VexArch arch_host,
- VexSubArch subarch_host,
+ VexArch arch_guest,
+ VexArchInfo* archinfo_guest,
+ VexArch arch_host,
+ VexArchInfo* archinfo_host,
/* IN: the block to translate, and its guest address. */
UChar* guest_bytes,
Addr64 guest_bytes_addr,