VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
that we don't have to pass them around endlessly. */
/* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
/* Pointer to the guest code area (points to start of BB, not to the
insn being processed). */
static IRExpr* getIRegCL ( void )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
return IRExpr_Get( OFFB_RCX, Ity_I8 );
}
static void putIRegAH ( IRExpr* e )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
stmt( IRStmt_Put( OFFB_RAX+1, e ) );
}
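The asserts above encode a real layout dependence. A toy illustration (standalone C, not VEX code) of why OFFB_RAX and OFFB_RAX+1 name AL and AH only on a little-endian host:

    #include <stdio.h>
    #include <string.h>

    /* Toy demo, not VEX code: on a little-endian host, byte 0 of a
       64-bit value holds bits 7:0 and byte 1 holds bits 15:8 -- the
       layout the OFFB_RAX / OFFB_RAX+1 accesses for AL/AH assume. */
    int main(void)
    {
       unsigned long long rax = 0x1122334455667788ULL;
       unsigned char img[8];
       memcpy(img, &rax, sizeof rax);
       printf("AL=%02x AH=%02x\n", img[0], img[1]); /* LE: AL=88 AH=77 */
       return 0;
    }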
static IRExpr* getIRegRAX ( Int sz )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
switch (sz) {
case 1: return IRExpr_Get( OFFB_RAX, Ity_I8 );
case 2: return IRExpr_Get( OFFB_RAX, Ity_I16 );
static void putIRegRAX ( Int sz, IRExpr* e )
{
IRType ty = typeOfIRExpr(irsb->tyenv, e);
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
switch (sz) {
case 8: vassert(ty == Ity_I64);
stmt( IRStmt_Put( OFFB_RAX, e ));
static IRExpr* getIRegRDX ( Int sz )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
switch (sz) {
case 1: return IRExpr_Get( OFFB_RDX, Ity_I8 );
case 2: return IRExpr_Get( OFFB_RDX, Ity_I16 );
static void putIRegRDX ( Int sz, IRExpr* e )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(typeOfIRExpr(irsb->tyenv, e) == szToITy(sz));
switch (sz) {
case 8: stmt( IRStmt_Put( OFFB_RDX, e ));
static IRExpr* getIReg32 ( UInt regno )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
return unop(Iop_64to32,
IRExpr_Get( integerGuestReg64Offset(regno),
Ity_I64 ));
static IRExpr* getIReg16 ( UInt regno )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
return IRExpr_Get( integerGuestReg64Offset(regno),
Ity_I16 );
}
static UInt offsetIRegG ( Int sz, Prefix pfx, UChar mod_reg_rm )
{
UInt reg;
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(IS_VALID_PFX(pfx));
vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
reg = gregOfRexRM( pfx, mod_reg_rm );
static UInt offsetIRegE ( Int sz, Prefix pfx, UChar mod_reg_rm )
{
UInt reg;
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(IS_VALID_PFX(pfx));
vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
reg = eregOfRexRM( pfx, mod_reg_rm );
static Int xmmGuestRegOffset ( UInt xmmreg )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
return ymmGuestRegOffset( xmmreg );
}
static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 8);
return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
}
static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 4);
return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
}
static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 2);
return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
}
static Int ymmGuestRegLane128offset ( UInt ymmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 2);
return ymmGuestRegOffset( ymmreg ) + 16 * laneno;
}
static Int ymmGuestRegLane64offset ( UInt ymmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 4);
return ymmGuestRegOffset( ymmreg ) + 8 * laneno;
}
static Int ymmGuestRegLane32offset ( UInt ymmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 8);
return ymmGuestRegOffset( ymmreg ) + 4 * laneno;
}
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
Int i, x1, x2;
vassert(guest_arch == VexArchAMD64);
guest_code = guest_code_IN;
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
guest_RIP_curr_instr = guest_IP;
guest_RIP_bbstart = guest_IP - delta;
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
not change during translation of the instruction.
*/
-/* CONST: is the host bigendian? We need to know this in order to do
- sub-register accesses to the SIMD/FP registers correctly. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness? We need to know this in
+ order to do sub-register accesses to the SIMD/FP registers
+ correctly. */
+static VexEndness host_endness;
/* CONST: The guest address for the instruction currently being
translated. */
has the lowest offset. */
static Int offsetQRegLane ( UInt qregNo, IRType laneTy, UInt laneNo )
{
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
Int base = offsetQReg128(qregNo);
/* Since the host is little-endian, the least significant lane
will be at the lowest address. */
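A compilable restatement (illustrative, not patch code) of the lane arithmetic shared by this helper and the xmm/ymm lane helpers earlier, with `base` standing in for offsetQReg128(qregNo):

    /* Sketch: with lane 0 least significant and at the lowest offset,
       lane N of size S bytes lives at base + N*S. */
    static int lane_offset(int base, int lane_size_bytes, int laneno)
    {
       return base + laneno * lane_size_bytes;
    }
    /* e.g. lane_offset(base, 4, 3) names bits 127:96 of the register,
       matching the base + 4 * laneno computations above. */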
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
DisResult dres;
vassert(guest_arch == VexArchARM64);
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
guest_PC_curr_instr = (Addr64)guest_IP;
/* Sanity checks */
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
not change during translation of the instruction.
*/
-/* CONST: is the host bigendian? This has to do with float vs double
- register accesses on VFP, but it's complex and not properly thought
- out. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness? This has to do with float vs
+ double register accesses on VFP, but it's complex and not properly
+ thought out. */
+static VexEndness host_endness;
/* CONST: The guest address for the instruction currently being
translated. This is the real, "decoded" address (not subject
Int off;
vassert(fregNo < 32);
off = doubleGuestRegOffset(fregNo >> 1);
- if (host_is_bigendian) {
- vassert(0);
- } else {
+ if (host_endness == VexEndnessLE) {
if (fregNo & 1)
off += 4;
+ } else {
+ vassert(0);
}
return off;
}
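For reference, a sketch (not patch code) of the overlay this function assumes: d(N) is 8 bytes, and on a little-endian host s(2N) is its low word and s(2N+1) its high word, hence the +4 for odd fregNo. The big-endian layout is deliberately left as vassert(0), since it has never been worked out.

    /* Sketch only; d_off stands in for doubleGuestRegOffset(fregNo >> 1). */
    static int float_reg_offset(int d_off, int fregNo)
    {
       return d_off + ((fregNo & 1) ? 4 : 0);   /* LE host only */
    }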
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
DisResult dres;
/* Set globals (see top of this file) */
vassert(guest_arch == VexArchARM);
- irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
- __curr_is_Thumb = isThumb;
+ irsb = irsb_IN;
+ host_endness = host_endness_IN;
+ __curr_is_Thumb = isThumb;
if (isThumb) {
guest_R15_curr_instr_notENC = (Addr32)guest_IP_ENCODED - 1;
/*IN*/ UChar* guest_code,
/*IN*/ Addr64 guest_IP_bbstart,
/*IN*/ Bool (*chase_into_ok)(void*,Addr64),
- /*IN*/ Bool host_bigendian,
+ /*IN*/ VexEndness host_endness,
/*IN*/ Bool sigill_diag,
/*IN*/ VexArch arch_guest,
/*IN*/ VexArchInfo* archinfo_guest,
arch_guest,
archinfo_guest,
abiinfo_both,
- host_bigendian,
+ host_endness,
sigill_diag );
/* stay sane ... */
/* ABI info for both guest and host */
/*IN*/ VexAbiInfo* abiinfo,
- /* Is the host bigendian? */
- /*IN*/ Bool host_bigendian,
+ /* The endianness of the host */
+ /*IN*/ VexEndness host_endness,
/* Should diagnostics be printed for illegal instructions? */
/*IN*/ Bool sigill_diag
/*IN*/ UChar* guest_code,
/*IN*/ Addr64 guest_IP_bbstart,
/*IN*/ Bool (*chase_into_ok)(void*,Addr64),
- /*IN*/ Bool host_bigendian,
+ /*IN*/ VexEndness host_endness,
/*IN*/ Bool sigill_diag,
/*IN*/ VexArch arch_guest,
/*IN*/ VexArchInfo* archinfo_guest,
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
that we don't have to pass them around endlessly. CONST means does
not change during translation of the instruction. */
-/* CONST: is the host bigendian? This has to do with float vs double
- register accesses on VFP, but it's complex and not properly thought
- out. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness? This has to do with float vs
+ double register accesses on VFP, but it's complex and not properly
+ thought out. */
+static VexEndness host_endness;
/* Pointer to the guest code area. */
static UChar *guest_code;
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
DisResult dres;
guest_code = guest_code_IN;
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
#if defined(VGP_mips32_linux)
guest_PC_curr_instr = (Addr32)guest_IP;
#elif defined(VGP_mips64_linux)
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
given insn. */
/* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
/* Pointer to the guest code area. */
static UChar* guest_code;
// jrs: probably not necessary; only matters if we reference sub-parts
// of the ppc registers, but that isn't the case
// later: this might affect Altivec though?
- vassert(host_is_bigendian);
+ vassert(host_endness == VexEndnessBE);
switch (archreg) {
case 0: return offsetofPPCGuestState(guest_GPR0);
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
IRType ty;
/* Set globals (see top of this file) */
guest_code = guest_code_IN;
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
guest_CIA_curr_instr = mkSzAddr(ty, guest_IP);
guest_CIA_bbstart = mkSzAddr(ty, guest_IP - delta);
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
VexArch guest_arch,
VexArchInfo *archinfo,
VexAbiInfo *abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag_IN)
{
vassert(guest_arch == VexArchS390X);
/* The instruction decoder requires a big-endian machine. */
- vassert(host_bigendian == True);
+ vassert(host_endness == VexEndnessBE);
/* Set globals (see top of this file) */
guest_IA_curr_instr = guest_IP;
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian,
+ VexEndness host_endness,
Bool sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
given insn. */
/* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
/* Pointer to the guest code area (points to start of BB, not to the
insn being processed). */
vassert(archreg < 8);
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
if (sz == 4 || sz == 2 || (sz == 1 && archreg < 4)) {
switch (archreg) {
static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 8);
return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
}
static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 4);
return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
}
static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
{
/* Correct for little-endian host only. */
- vassert(!host_is_bigendian);
+ vassert(host_endness == VexEndnessLE);
vassert(laneno >= 0 && laneno < 2);
return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
}
VexArch guest_arch,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
- Bool host_bigendian_IN,
+ VexEndness host_endness_IN,
Bool sigill_diag_IN )
{
Int i, x1, x2;
vassert(guest_arch == VexArchX86);
guest_code = guest_code_IN;
irsb = irsb_IN;
- host_is_bigendian = host_bigendian_IN;
+ host_endness = host_endness_IN;
guest_EIP_curr_instr = (Addr32)guest_IP;
guest_EIP_bbstart = (Addr32)toUInt(guest_IP - delta);
Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, AMD64Instr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
p = doAMode_M(p, fake(4), i->Ain.EvCheck.amFailAddr);
vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
/* And crosscheck .. */
- vassert(evCheckSzB_AMD64() == 8);
+ vassert(evCheckSzB_AMD64(endness_host) == 8);
goto done;
}
/* How big is an event check? See case for Ain_EvCheck in
emit_AMD64Instr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_AMD64 ( void )
+Int evCheckSzB_AMD64 ( VexEndness endness_host )
{
return 8;
}
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
+VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movabsq $disp_cp_chain_me_EXPECTED, %r11
call *%r11
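For orientation, the byte-level shape of that sequence (encodings per the AMD64 ISA, shown here as an aid, not quoted from the source):

    49 BB <imm64>   movabsq $imm64, %r11    (10 bytes)
    41 FF D3        call    *%r11           ( 3 bytes)

so the whole chain site is 13 bytes, and the p[0] == 0x49 vassert in patchProfInc_AMD64 likewise keys off the leading REX prefix of a movabsq to %r11.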
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is either:
(general case)
movabsq $place_to_jump_to_EXPECTED, %r11
/* Patch the counter address into a profile inc point, as previously
created by the Ain_ProfInc case for emit_AMD64Instr. */
-VexInvalRange patchProfInc_AMD64 ( void* place_to_patch,
+VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter )
{
+ vassert(endness_host == VexEndnessLE);
vassert(sizeof(ULong*) == 8);
UChar* p = (UChar*)place_to_patch;
vassert(p[0] == 0x49);
extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr*, Bool );
extern Bool isMove_AMD64Instr ( AMD64Instr*, HReg*, HReg* );
extern Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
- UChar* buf, Int nbuf, AMD64Instr* i,
+ UChar* buf, Int nbuf,
+ AMD64Instr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
and so assumes that they are both <= 128, and so can use the short
offset encoding. This is all checked with assertions, so in the
worst case we will merely assert at startup. */
-extern Int evCheckSzB_AMD64 ( void );
+extern Int evCheckSzB_AMD64 ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to );
-extern VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_AMD64 ( void* place_to_patch,
+extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter );
| VEX_HWCAPS_AMD64_BMI
| VEX_HWCAPS_AMD64_AVX2)));
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE);
+
/* Make up an initial environment to use. */
env = LibVEX_Alloc(sizeof(ISelEnv));
env->vreg_ctr = 0;
Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARM64Instr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
/* nofail: */
/* Crosscheck */
- vassert(evCheckSzB_ARM64() == (UChar*)p - (UChar*)p0);
+ vassert(evCheckSzB_ARM64(endness_host) == (UChar*)p - (UChar*)p0);
goto done;
}
/* How big is an event check? See case for ARM64in_EvCheck in
emit_ARM64Instr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_ARM64 ( void )
+Int evCheckSzB_ARM64 ( VexEndness endness_host )
{
return 24;
}
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
+VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movw x9, disp_cp_chain_me_to_EXPECTED[15:0]
movk x9, disp_cp_chain_me_to_EXPECTED[31:16], lsl 16
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movw x9, place_to_jump_to_EXPECTED[15:0]
movk x9, place_to_jump_to_EXPECTED[31:16], lsl 16
//ZZ /* Patch the counter address into a profile inc point, as previously
//ZZ created by the ARMin_ProfInc case for emit_ARMInstr. */
-//ZZ VexInvalRange patchProfInc_ARM ( void* place_to_patch,
+//ZZ VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+//ZZ void* place_to_patch,
//ZZ ULong* location_of_counter )
//ZZ {
//ZZ vassert(sizeof(ULong*) == 4);
extern Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARM64Instr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
/* How big is an event check? This is kind of a kludge because it
depends on the offsets of host_EvC_FAILADDR and
host_EvC_COUNTER. */
-extern Int evCheckSzB_ARM64 ( void );
+extern Int evCheckSzB_ARM64 ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to );
-extern VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me );
//ZZ /* Patch the counter location into an existing ProfInc point. */
-//ZZ extern VexInvalRange patchProfInc_ARM ( void* place_to_patch,
+//ZZ extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+//ZZ void* place_to_patch,
//ZZ ULong* location_of_counter );
/* sanity ... */
vassert(arch_host == VexArchARM64);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE);
+
/* guard against unexpected space regressions */
vassert(sizeof(ARM64Instr) <= 32);
Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARMInstr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
/* nofail: */
/* Crosscheck */
- vassert(evCheckSzB_ARM() == (UChar*)p - (UChar*)p0);
+ vassert(evCheckSzB_ARM(endness_host) == (UChar*)p - (UChar*)p0);
goto done;
}
/* How big is an event check? See case for ARMin_EvCheck in
emit_ARMInstr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_ARM ( void )
+Int evCheckSzB_ARM ( VexEndness endness_host )
{
return 24;
}
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_ARM ( void* place_to_chain,
+VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movw r12, lo16(disp_cp_chain_me_to_EXPECTED)
movt r12, hi16(disp_cp_chain_me_to_EXPECTED)
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
+VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
(general case)
movw r12, lo16(place_to_jump_to_EXPECTED)
/* Patch the counter address into a profile inc point, as previously
created by the ARMin_ProfInc case for emit_ARMInstr. */
-VexInvalRange patchProfInc_ARM ( void* place_to_patch,
+VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter )
{
+ vassert(endness_host == VexEndnessLE);
vassert(sizeof(ULong*) == 4);
UInt* p = (UInt*)place_to_patch;
vassert(0 == (3 & (HWord)p));
extern Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARMInstr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
/* How big is an event check? This is kind of a kludge because it
depends on the offsets of host_EvC_FAILADDR and
host_EvC_COUNTER. */
-extern Int evCheckSzB_ARM ( void );
+extern Int evCheckSzB_ARM ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_ARM ( void* place_to_chain,
+extern VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to );
-extern VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_ARM ( void* place_to_patch,
+extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter );
/* sanity ... */
vassert(arch_host == VexArchARM);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE);
+
/* guard against unexpected space regressions */
vassert(sizeof(ARMInstr) <= 28);
Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, MIPSInstr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
/* nofail: */
/* Crosscheck */
- vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0);
+ vassert(evCheckSzB_MIPS(endness_host) == (UChar*)p - (UChar*)p0);
goto done;
}
/* How big is an event check? See case for Min_EvCheck in
emit_MIPSInstr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_MIPS ( void )
+Int evCheckSzB_MIPS ( VexEndness endness_host )
{
UInt kInstrSize = 4;
return 7*kInstrSize;
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to,
Bool mode64 )
{
+ vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
/* What we're expecting to see is:
move r9, disp_cp_chain_me_to_EXPECTED
jalr r9
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me,
Bool mode64 )
{
+ vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
/* What we're expecting to see is:
move r9, place_to_jump_to_EXPECTED
jalr r9
/* Patch the counter address into a profile inc point, as previously
created by the Min_ProfInc case for emit_MIPSInstr. */
-VexInvalRange patchProfInc_MIPS ( void* place_to_patch,
+VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter, Bool mode64 )
{
- if (mode64)
+ vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
+ if (mode64) {
vassert(sizeof(ULong*) == 8);
- else
+ } else {
vassert(sizeof(ULong*) == 4);
+ }
UChar* p = (UChar*)place_to_patch;
vassert(0 == (3 & (HWord)p));
vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
extern Int emit_MIPSInstr (/*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, MIPSInstr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
and so assumes that they are both <= 128, and so can use the short
offset encoding. This is all checked with assertions, so in the
worst case we will merely assert at startup. */
-extern Int evCheckSzB_MIPS ( void );
+extern Int evCheckSzB_MIPS ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+extern VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to,
Bool mode64 );
-extern VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me,
Bool mode64 );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_MIPS ( void* place_to_patch,
+extern VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter,
Bool mode64 );
-#endif /* ndef __LIBVEX_HOST_MIPS_HDEFS_H */
+#endif /* ndef __VEX_HOST_MIPS_DEFS_H */
/*---------------------------------------------------------------*/
/*--- end host-mips_defs.h ---*/
|| VEX_PRID_COMP_BROADCOM == hwcaps_host
|| VEX_PRID_COMP_NETLOGIC);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE
+ || archinfo_host->endness == VexEndnessBE);
+
mode64 = arch_host != VexArchMIPS32;
#if (__mips_fpr==64)
fp_mode64 = ((VEX_MIPS_REV(hwcaps_host) == VEX_PRID_CPU_32FPR)
*/
Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, PPCInstr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
/* nofail: */
/* Crosscheck */
- vassert(evCheckSzB_PPC() == (UChar*)p - (UChar*)p0);
+ vassert(evCheckSzB_PPC(endness_host) == (UChar*)p - (UChar*)p0);
goto done;
}
/* How big is an event check? See case for Pin_EvCheck in
emit_PPCInstr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_PPC ( void )
+Int evCheckSzB_PPC ( VexEndness endness_host )
{
return 28;
}
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_PPC ( void* place_to_chain,
+VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to,
Bool mode64 )
{
+ if (mode64) {
+ vassert(endness_host == VexEndnessBE); /* later: or LE */
+ } else {
+ vassert(endness_host == VexEndnessBE);
+ }
+
/* What we're expecting to see is:
imm32/64-fixed r30, disp_cp_chain_me_to_EXPECTED
mtctr r30
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
+VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me,
Bool mode64 )
{
+ if (mode64) {
+ vassert(endness_host == VexEndnessBE); /* later: or LE */
+ } else {
+ vassert(endness_host == VexEndnessBE);
+ }
+
/* What we're expecting to see is:
imm32/64-fixed r30, place_to_jump_to_EXPECTED
mtctr r30
/* Patch the counter address into a profile inc point, as previously
created by the Pin_ProfInc case for emit_PPCInstr. */
-VexInvalRange patchProfInc_PPC ( void* place_to_patch,
+VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter,
Bool mode64 )
{
+ if (mode64) {
+ vassert(endness_host == VexEndnessBE); /* later: or LE */
+ } else {
+ vassert(endness_host == VexEndnessBE);
+ }
+
UChar* p = (UChar*)place_to_patch;
vassert(0 == (3 & (HWord)p));
extern Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, PPCInstr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
/* How big is an event check? This is kind of a kludge because it
depends on the offsets of host_EvC_FAILADDR and
host_EvC_COUNTER. */
-extern Int evCheckSzB_PPC ( void );
+extern Int evCheckSzB_PPC ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_PPC ( void* place_to_chain,
+extern VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to,
Bool mode64 );
-extern VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me,
Bool mode64 );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_PPC ( void* place_to_patch,
+extern VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter,
Bool mode64 );
vassert((hwcaps_host & mask64) == 0);
}
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessBE);
+
/* Make up an initial environment to use. */
env = LibVEX_Alloc(sizeof(ISelEnv));
env->vreg_ctr = 0;
The dispatch counter is a 32-bit value. */
static UChar *
-s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn)
+s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn,
+ VexEndness endness_host)
{
s390_amode *amode;
UInt b, d;
/* Make sure the size of the generated code is identical to the size
returned by evCheckSzB_S390 */
- vassert(evCheckSzB_S390() == code_end - code_begin);
+ vassert(evCheckSzB_S390(endness_host) == code_end - code_begin);
return buf;
}
Int
emit_S390Instr(Bool *is_profinc, UChar *buf, Int nbuf, s390_insn *insn,
- Bool mode64, void *disp_cp_chain_me_to_slowEP,
+ Bool mode64, VexEndness endness_host,
+ void *disp_cp_chain_me_to_slowEP,
void *disp_cp_chain_me_to_fastEP, void *disp_cp_xindir,
void *disp_cp_xassisted)
{
break;
case S390_INSN_EVCHECK:
- end = s390_insn_evcheck_emit(buf, insn);
+ end = s390_insn_evcheck_emit(buf, insn, endness_host);
break;
case S390_INSN_XDIRECT:
/* Return the number of bytes emitted for an S390_INSN_EVCHECK.
See s390_insn_evcheck_emit */
Int
-evCheckSzB_S390(void)
+evCheckSzB_S390(VexEndness endness_host)
{
return s390_host_has_gie ? 18 : 24;
}
/* Patch the counter address into CODE_TO_PATCH as previously
generated by s390_insn_profinc_emit. */
VexInvalRange
-patchProfInc_S390(void *code_to_patch, ULong *location_of_counter)
+patchProfInc_S390(VexEndness endness_host,
+ void *code_to_patch, ULong *location_of_counter)
{
vassert(sizeof(ULong *) == 8);
/* NB: what goes on here has to be very closely coordinated with the
s390_insn_xdirect_emit code above. */
VexInvalRange
-chainXDirect_S390(void *place_to_chain,
+chainXDirect_S390(VexEndness endness_host,
+ void *place_to_chain,
void *disp_cp_chain_me_EXPECTED,
void *place_to_jump_to)
{
+ vassert(endness_host == VexEndnessBE);
+
/* What we're expecting to see @ PLACE_TO_CHAIN is:
load tchain_scratch, #disp_cp_chain_me_EXPECTED
/* NB: what goes on here has to be very closely coordinated with the
s390_insn_xdirect_emit code above. */
VexInvalRange
-unchainXDirect_S390(void *place_to_unchain,
+unchainXDirect_S390(VexEndness endness_host,
+ void *place_to_unchain,
void *place_to_jump_to_EXPECTED,
void *disp_cp_chain_me)
{
+ vassert(endness_host == VexEndnessBE);
+
/* What we're expecting to see @ PLACE_TO_UNCHAIN:
load tchain_scratch, #place_to_jump_to_EXPECTED
void mapRegs_S390Instr ( HRegRemap *, s390_insn *, Bool );
Bool isMove_S390Instr ( s390_insn *, HReg *, HReg * );
Int emit_S390Instr ( Bool *, UChar *, Int, s390_insn *, Bool,
- void *, void *, void *, void *);
+ VexEndness, void *, void *, void *, void *);
void getAllocableRegs_S390( Int *, HReg **, Bool );
void genSpill_S390 ( HInstr **, HInstr **, HReg , Int , Bool );
void genReload_S390 ( HInstr **, HInstr **, HReg , Int , Bool );
Int, Int, Bool, Bool, Addr64);
/* Return the number of bytes of code needed for an event check */
-Int evCheckSzB_S390(void);
+Int evCheckSzB_S390(VexEndness endness_host);
/* Perform a chaining and unchaining of an XDirect jump. */
-VexInvalRange chainXDirect_S390(void *place_to_chain,
+VexInvalRange chainXDirect_S390(VexEndness endness_host,
+ void *place_to_chain,
void *disp_cp_chain_me_EXPECTED,
void *place_to_jump_to);
-VexInvalRange unchainXDirect_S390(void *place_to_unchain,
+VexInvalRange unchainXDirect_S390(VexEndness endness_host,
+ void *place_to_unchain,
void *place_to_jump_to_EXPECTED,
void *disp_cp_chain_me);
/* Patch the counter location into an existing ProfInc point. */
-VexInvalRange patchProfInc_S390(void *code_to_patch,
+VexInvalRange patchProfInc_S390(VexEndness endness_host,
+ void *code_to_patch,
ULong *location_of_counter);
/* KLUDGE: See detailed comment in host_s390_defs.c. */
/* Do some sanity checks */
vassert((VEX_HWCAPS_S390X(hwcaps_host) & ~(VEX_HWCAPS_S390X_ALL)) == 0);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessBE);
+
/* Make up an initial environment to use. */
env = LibVEX_Alloc(sizeof(ISelEnv));
env->vreg_ctr = 0;
Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, X86Instr* i,
- Bool mode64,
+ Bool mode64, VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
p = doAMode_M(p, fake(4), i->Xin.EvCheck.amFailAddr);
vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
/* And crosscheck .. */
- vassert(evCheckSzB_X86() == 8);
+ vassert(evCheckSzB_X86(endness_host) == 8);
goto done;
}
/* How big is an event check? See case for Xin_EvCheck in
emit_X86Instr just above. That crosschecks what this returns, so
we can tell if we're inconsistent. */
-Int evCheckSzB_X86 ( void )
+Int evCheckSzB_X86 ( VexEndness endness_host )
{
return 8;
}
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_X86 ( void* place_to_chain,
+VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
movl $disp_cp_chain_me_EXPECTED, %edx
call *%edx
/* NB: what goes on here has to be very closely coordinated with the
emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me )
{
+ vassert(endness_host == VexEndnessLE);
+
/* What we're expecting to see is:
jmp d32
ud2;
/* Patch the counter address into a profile inc point, as previously
created by the Xin_ProfInc case for emit_X86Instr. */
-VexInvalRange patchProfInc_X86 ( void* place_to_patch,
+VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter )
{
+ vassert(endness_host == VexEndnessLE);
vassert(sizeof(ULong*) == 4);
UChar* p = (UChar*)place_to_patch;
vassert(p[0] == 0x83);
extern Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, X86Instr* i,
Bool mode64,
+ VexEndness endness_host,
void* disp_cp_chain_me_to_slowEP,
void* disp_cp_chain_me_to_fastEP,
void* disp_cp_xindir,
and so assumes that they are both <= 128, and so can use the short
offset encoding. This is all checked with assertions, so in the
worst case we will merely assert at startup. */
-extern Int evCheckSzB_X86 ( void );
+extern Int evCheckSzB_X86 ( VexEndness endness_host );
/* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_X86 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
+ void* place_to_chain,
void* disp_cp_chain_me_EXPECTED,
void* place_to_jump_to );
-extern VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
+ void* place_to_unchain,
void* place_to_jump_to_EXPECTED,
void* disp_cp_chain_me );
/* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_X86 ( void* place_to_patch,
+extern VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
+ void* place_to_patch,
ULong* location_of_counter );
vassert(sizeof(max_ga) == 8);
vassert((max_ga >> 32) == 0);
+ /* Check that the host's endianness is as expected. */
+ vassert(archinfo_host->endness == VexEndnessLE);
+
/* Make up an initial environment to use. */
env = LibVEX_Alloc(sizeof(ISelEnv));
env->vreg_ctr = 0;
HInstrArray* (*iselSB) ( IRSB*, VexArch, VexArchInfo*, VexAbiInfo*,
Int, Int, Bool, Bool, Addr64 );
Int (*emit) ( /*MB_MOD*/Bool*,
- UChar*, Int, HInstr*, Bool,
+ UChar*, Int, HInstr*, Bool, VexEndness,
void*, void*, void*, void* );
IRExpr* (*specHelper) ( const HChar*, IRExpr**, IRStmt**, Int );
Bool (*preciseMemExnsFn) ( Int, Int );
DisOneInstrFn disInstrFn;
VexGuestLayout* guest_layout;
- Bool host_is_bigendian = False;
IRSB* irsb;
HInstrArray* vcode;
HInstrArray* rcode;
ppInstr = (void(*)(HInstr*, Bool)) ppX86Instr;
ppReg = (void(*)(HReg)) ppHRegX86;
iselSB = iselSB_X86;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*))
emit_X86Instr;
- host_is_bigendian = False;
host_word_type = Ity_I32;
vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessLE);
break;
case VexArchAMD64:
ppInstr = (void(*)(HInstr*, Bool)) ppAMD64Instr;
ppReg = (void(*)(HReg)) ppHRegAMD64;
iselSB = iselSB_AMD64;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*))
emit_AMD64Instr;
- host_is_bigendian = False;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessLE);
break;
case VexArchPPC32:
ppInstr = (void(*)(HInstr*,Bool)) ppPPCInstr;
ppReg = (void(*)(HReg)) ppHRegPPC;
iselSB = iselSB_PPC;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*))
emit_PPCInstr;
- host_is_bigendian = True;
host_word_type = Ity_I32;
vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessBE);
break;
case VexArchPPC64:
ppInstr = (void(*)(HInstr*, Bool)) ppPPCInstr;
ppReg = (void(*)(HReg)) ppHRegPPC;
iselSB = iselSB_PPC;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*))
emit_PPCInstr;
- host_is_bigendian = True;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessBE
+ /* later: || vta->archinfo_host.endness == VexEndnessLE */);
break;
case VexArchS390X:
ppInstr = (void(*)(HInstr*, Bool)) ppS390Instr;
ppReg = (void(*)(HReg)) ppHRegS390;
iselSB = iselSB_S390;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*)) emit_S390Instr;
- host_is_bigendian = True;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessBE);
break;
case VexArchARM:
ppInstr = (void(*)(HInstr*, Bool)) ppARMInstr;
ppReg = (void(*)(HReg)) ppHRegARM;
iselSB = iselSB_ARM;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*))
emit_ARMInstr;
- host_is_bigendian = False;
host_word_type = Ity_I32;
vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessLE);
break;
case VexArchARM64:
ppInstr = (void(*)(HInstr*, Bool)) ppARM64Instr;
ppReg = (void(*)(HReg)) ppHRegARM64;
iselSB = iselSB_ARM64;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*))
emit_ARM64Instr;
- host_is_bigendian = False;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessLE);
break;
case VexArchMIPS32:
ppInstr = (void(*)(HInstr*, Bool)) ppMIPSInstr;
ppReg = (void(*)(HReg)) ppHRegMIPS;
iselSB = iselSB_MIPS;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*))
emit_MIPSInstr;
-# if defined(VKI_LITTLE_ENDIAN)
- host_is_bigendian = False;
-# elif defined(VKI_BIG_ENDIAN)
- host_is_bigendian = True;
-# endif
host_word_type = Ity_I32;
vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessLE
+ || vta->archinfo_host.endness == VexEndnessBE);
break;
case VexArchMIPS64:
ppInstr = (void(*)(HInstr*, Bool)) ppMIPSInstr;
ppReg = (void(*)(HReg)) ppHRegMIPS;
iselSB = iselSB_MIPS;
- emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+ emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
void*,void*,void*,void*))
emit_MIPSInstr;
-# if defined(VKI_LITTLE_ENDIAN)
- host_is_bigendian = False;
-# elif defined(VKI_BIG_ENDIAN)
- host_is_bigendian = True;
-# endif
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_host.hwcaps));
+ vassert(vta->archinfo_host.endness == VexEndnessLE
+ || vta->archinfo_host.endness == VexEndnessBE);
break;
default:
offB_HOST_EvC_COUNTER = offsetof(VexGuestX86State,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessLE);
vassert(0 == sizeof(VexGuestX86State) % 16);
vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN ) == 4);
offB_HOST_EvC_COUNTER = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessLE);
vassert(0 == sizeof(VexGuestAMD64State) % 16);
vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMLEN ) == 8);
offB_HOST_EvC_COUNTER = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessBE);
vassert(0 == sizeof(VexGuestPPC32State) % 16);
vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMLEN ) == 4);
offB_HOST_EvC_COUNTER = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessBE
+ /* later: || vta->archinfo_guest.endness == VexEndnessLE */);
vassert(0 == sizeof(VexGuestPPC64State) % 16);
vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMSTART ) == 8);
vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMLEN ) == 8);
offB_HOST_EvC_COUNTER = offsetof(VexGuestS390XState,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessBE);
vassert(0 == sizeof(VexGuestS390XState) % 16);
vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART ) == 8);
vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMLEN ) == 8);
offB_HOST_EvC_COUNTER = offsetof(VexGuestARMState,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessLE);
vassert(0 == sizeof(VexGuestARMState) % 16);
vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
vassert(sizeof( ((VexGuestARMState*)0)->guest_CMLEN ) == 4);
offB_HOST_EvC_COUNTER = offsetof(VexGuestARM64State,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessLE);
vassert(0 == sizeof(VexGuestARM64State) % 16);
vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMLEN ) == 8);
offB_HOST_EvC_COUNTER = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessLE
+ || vta->archinfo_guest.endness == VexEndnessBE);
vassert(0 == sizeof(VexGuestMIPS32State) % 16);
vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMSTART) == 4);
vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMLEN ) == 4);
offB_HOST_EvC_COUNTER = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_guest.hwcaps));
+ vassert(vta->archinfo_guest.endness == VexEndnessLE
+ || vta->archinfo_guest.endness == VexEndnessBE);
vassert(0 == sizeof(VexGuestMIPS64State) % 16);
vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMSTART) == 8);
vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMLEN ) == 8);
we are simulating one flavour of an architecture on a different
flavour of the same architecture, which is pretty strange. */
vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
+ /* ditto */
+ vassert(vta->archinfo_guest.endness == vta->archinfo_host.endness);
}
vexAllocSanityCheck();
vta->guest_bytes,
vta->guest_bytes_addr,
vta->chase_into_ok,
- host_is_bigendian,
+ vta->archinfo_host.endness,
vta->sigill_diag,
vta->arch_guest,
&vta->archinfo_guest,
vex_printf("\n");
}
j = emit( &hi_isProfInc,
- insn_bytes, sizeof insn_bytes, hi, mode64,
+ insn_bytes, sizeof insn_bytes, hi,
+ mode64, vta->archinfo_host.endness,
vta->disp_cp_chain_me_to_slowEP,
vta->disp_cp_chain_me_to_fastEP,
vta->disp_cp_xindir,
/* --------- Chain/Unchain XDirects. --------- */
-VexInvalRange LibVEX_Chain ( VexArch arch_host,
- void* place_to_chain,
- void* disp_cp_chain_me_EXPECTED,
- void* place_to_jump_to )
+VexInvalRange LibVEX_Chain ( VexArch arch_host,
+ VexEndness endness_host,
+ void* place_to_chain,
+ void* disp_cp_chain_me_EXPECTED,
+ void* place_to_jump_to )
{
- VexInvalRange (*chainXDirect)(void*, void*, void*) = NULL;
+ VexInvalRange (*chainXDirect)(VexEndness, void*, void*, void*) = NULL;
switch (arch_host) {
case VexArchX86:
chainXDirect = chainXDirect_X86; break;
case VexArchS390X:
chainXDirect = chainXDirect_S390; break;
case VexArchPPC32:
- return chainXDirect_PPC(place_to_chain,
+ return chainXDirect_PPC(endness_host,
+ place_to_chain,
disp_cp_chain_me_EXPECTED,
place_to_jump_to, False/*!mode64*/);
case VexArchPPC64:
- return chainXDirect_PPC(place_to_chain,
+ return chainXDirect_PPC(endness_host,
+ place_to_chain,
disp_cp_chain_me_EXPECTED,
place_to_jump_to, True/*mode64*/);
case VexArchMIPS32:
- return chainXDirect_MIPS(place_to_chain,
+ return chainXDirect_MIPS(endness_host,
+ place_to_chain,
disp_cp_chain_me_EXPECTED,
place_to_jump_to, False/*!mode64*/);
case VexArchMIPS64:
- return chainXDirect_MIPS(place_to_chain,
+ return chainXDirect_MIPS(endness_host,
+ place_to_chain,
disp_cp_chain_me_EXPECTED,
place_to_jump_to, True/*mode64*/);
default:
}
vassert(chainXDirect);
VexInvalRange vir
- = chainXDirect(place_to_chain, disp_cp_chain_me_EXPECTED,
- place_to_jump_to);
+ = chainXDirect(endness_host, place_to_chain,
+ disp_cp_chain_me_EXPECTED, place_to_jump_to);
return vir;
}
-VexInvalRange LibVEX_UnChain ( VexArch arch_host,
- void* place_to_unchain,
- void* place_to_jump_to_EXPECTED,
- void* disp_cp_chain_me )
+VexInvalRange LibVEX_UnChain ( VexArch arch_host,
+ VexEndness endness_host,
+ void* place_to_unchain,
+ void* place_to_jump_to_EXPECTED,
+ void* disp_cp_chain_me )
{
- VexInvalRange (*unchainXDirect)(void*, void*, void*) = NULL;
+ VexInvalRange (*unchainXDirect)(VexEndness, void*, void*, void*) = NULL;
switch (arch_host) {
case VexArchX86:
unchainXDirect = unchainXDirect_X86; break;
case VexArchS390X:
unchainXDirect = unchainXDirect_S390; break;
case VexArchPPC32:
- return unchainXDirect_PPC(place_to_unchain,
+ return unchainXDirect_PPC(endness_host,
+ place_to_unchain,
place_to_jump_to_EXPECTED,
disp_cp_chain_me, False/*!mode64*/);
case VexArchPPC64:
- return unchainXDirect_PPC(place_to_unchain,
+ return unchainXDirect_PPC(endness_host,
+ place_to_unchain,
place_to_jump_to_EXPECTED,
disp_cp_chain_me, True/*mode64*/);
case VexArchMIPS32:
- return unchainXDirect_MIPS(place_to_unchain,
+ return unchainXDirect_MIPS(endness_host,
+ place_to_unchain,
place_to_jump_to_EXPECTED,
disp_cp_chain_me, False/*!mode64*/);
case VexArchMIPS64:
- return unchainXDirect_MIPS(place_to_unchain,
+ return unchainXDirect_MIPS(endness_host,
+ place_to_unchain,
place_to_jump_to_EXPECTED,
disp_cp_chain_me, True/*mode64*/);
default:
}
vassert(unchainXDirect);
VexInvalRange vir
- = unchainXDirect(place_to_unchain, place_to_jump_to_EXPECTED,
- disp_cp_chain_me);
+ = unchainXDirect(endness_host, place_to_unchain,
+ place_to_jump_to_EXPECTED, disp_cp_chain_me);
return vir;
}
-Int LibVEX_evCheckSzB ( VexArch arch_host )
+Int LibVEX_evCheckSzB ( VexArch arch_host,
+ VexEndness endness_host )
{
static Int cached = 0; /* DO NOT MAKE NON-STATIC */
if (UNLIKELY(cached == 0)) {
switch (arch_host) {
case VexArchX86:
- cached = evCheckSzB_X86(); break;
+ cached = evCheckSzB_X86(endness_host); break;
case VexArchAMD64:
- cached = evCheckSzB_AMD64(); break;
+ cached = evCheckSzB_AMD64(endness_host); break;
case VexArchARM:
- cached = evCheckSzB_ARM(); break;
+ cached = evCheckSzB_ARM(endness_host); break;
case VexArchARM64:
- cached = evCheckSzB_ARM64(); break;
+ cached = evCheckSzB_ARM64(endness_host); break;
case VexArchS390X:
- cached = evCheckSzB_S390(); break;
+ cached = evCheckSzB_S390(endness_host); break;
case VexArchPPC32:
case VexArchPPC64:
- cached = evCheckSzB_PPC(); break;
+ cached = evCheckSzB_PPC(endness_host); break;
case VexArchMIPS32:
case VexArchMIPS64:
- cached = evCheckSzB_MIPS(); break;
+ cached = evCheckSzB_MIPS(endness_host); break;
default:
vassert(0);
}
return cached;
}
-VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
- void* place_to_patch,
- ULong* location_of_counter )
+VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
+ VexEndness endness_host,
+ void* place_to_patch,
+ ULong* location_of_counter )
{
- VexInvalRange (*patchProfInc)(void*,ULong*) = NULL;
+ VexInvalRange (*patchProfInc)(VexEndness,void*,ULong*) = NULL;
switch (arch_host) {
case VexArchX86:
patchProfInc = patchProfInc_X86; break;
case VexArchS390X:
patchProfInc = patchProfInc_S390; break;
case VexArchPPC32:
- return patchProfInc_PPC(place_to_patch,
+ return patchProfInc_PPC(endness_host, place_to_patch,
location_of_counter, False/*!mode64*/);
case VexArchPPC64:
- return patchProfInc_PPC(place_to_patch,
+ return patchProfInc_PPC(endness_host, place_to_patch,
location_of_counter, True/*mode64*/);
case VexArchMIPS32:
- return patchProfInc_MIPS(place_to_patch,
+ return patchProfInc_MIPS(endness_host, place_to_patch,
location_of_counter, False/*!mode64*/);
case VexArchMIPS64:
- return patchProfInc_MIPS(place_to_patch,
+ return patchProfInc_MIPS(endness_host, place_to_patch,
location_of_counter, True/*mode64*/);
default:
vassert(0);
}
vassert(patchProfInc);
VexInvalRange vir
- = patchProfInc(place_to_patch, location_of_counter);
+ = patchProfInc(endness_host, place_to_patch, location_of_counter);
return vir;
}
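Taken together, the three public entry points now thread endness_host through to the per-arch workers. A hypothetical caller-side sketch against the new signatures (the pointer arguments are placeholders supplied by the client's translation cache, not values defined here):

    #include "libvex.h"

    static void rechain_example(void* place_to_chain,
                                void* disp_cp_chain_me_EXPECTED,
                                void* place_to_jump_to)
    {
       VexInvalRange vir
          = LibVEX_Chain(VexArchAMD64, VexEndnessLE,
                         place_to_chain,
                         disp_cp_chain_me_EXPECTED,
                         place_to_jump_to);
       (void)vir; /* vir gives the byte range to flush from the I-cache */
    }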
}
}
+const HChar* LibVEX_ppVexEndness ( VexEndness endness )
+{
+ switch (endness) {
+ case VexEndness_INVALID: return "INVALID";
+ case VexEndnessLE: return "LittleEndian";
+ case VexEndnessBE: return "BigEndian";
+ default: return "VexEndness???";
+ }
+}
+
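A possible client-side diagnostic use of the new printer (sketch; assumes vex_printf, VEX's printf-alike, is visible):

    static void show_endness(const VexArchInfo* vai)
    {
       vex_printf("host endness: %s\n", LibVEX_ppVexEndness(vai->endness));
    }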
const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
const HChar* str = show_hwcaps(arch,hwcaps);
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
vex_bzero(vai, sizeof(*vai));
- vai->hwcaps = 0;
- vai->ppc_icache_line_szB = 0;
- vai->ppc_dcbz_szB = 0;
- vai->ppc_dcbzl_szB = 0;
+ vai->hwcaps = 0;
+ vai->endness = VexEndness_INVALID;
+ vai->ppc_icache_line_szB = 0;
+ vai->ppc_dcbz_szB = 0;
+ vai->ppc_dcbzl_szB = 0;
vai->arm64_dMinLine_lg2_szB = 0;
vai->arm64_iMinLine_lg2_szB = 0;
vai->hwcache_info.num_levels = 0;
typedef
enum {
- VexArch_INVALID,
+ VexArch_INVALID=0x400,
VexArchX86,
VexArchAMD64,
VexArchARM,
VexArch;
+/* Information about endianness. */
+typedef
+ enum {
+ VexEndness_INVALID=0x600, /* unknown endianness */
+ VexEndnessLE, /* little endian */
+ VexEndnessBE /* big endian */
+ }
+ VexEndness;
+
+
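VEX itself does not detect endianness; one way a client might compute a value for the new endness fields at startup (illustrative sketch only):

    static VexEndness detect_host_endness(void)
    {
       unsigned int probe = 1;
       return *(unsigned char*)&probe == 1 ? VexEndnessLE : VexEndnessBE;
    }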
/* For a given architecture, these specify extra capabilities beyond
the minimum supported (baseline) capabilities. They may be OR'd
together, although some combinations don't make sense. (eg, SSE2
/* These return statically allocated strings. */
extern const HChar* LibVEX_ppVexArch ( VexArch );
+extern const HChar* LibVEX_ppVexEndness ( VexEndness endness );
extern const HChar* LibVEX_ppVexHwCaps ( VexArch, UInt );
/* The various kinds of caches */
typedef enum {
- DATA_CACHE,
+ DATA_CACHE=0x500,
INSN_CACHE,
UNIFIED_CACHE
} VexCacheKind;
typedef
struct {
- /* The following two fields are mandatory. */
- UInt hwcaps;
+ /* The following three fields are mandatory. */
+ UInt hwcaps;
+ VexEndness endness;
VexCacheInfo hwcache_info;
/* PPC32/PPC64 only: size of instruction cache line */
Int ppc_icache_line_szB;
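Under the new scheme VexArchInfo gains a third mandatory field. A sketch of caller-side initialisation (the hwcaps value is a placeholder):

    #include "libvex.h"

    static void init_archinfo_example(VexArchInfo* vai)
    {
       LibVEX_default_VexArchInfo(vai); /* leaves endness INVALID */
       vai->hwcaps  = 0;                /* baseline capabilities */
       vai->endness = VexEndnessLE;     /* now mandatory to set */
    }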
points.
VexRegUpdAllregsAtEachInsn : all registers up to date at each instruction. */
-typedef enum { VexRegUpdSpAtMemAccess,
+typedef enum { VexRegUpdSpAtMemAccess=0x700,
VexRegUpdUnwindregsAtMemAccess,
VexRegUpdAllregsAtMemAccess,
VexRegUpdAllregsAtEachInsn } VexRegisterUpdates;
typedef
struct {
/* overall status */
- enum { VexTransOK,
+ enum { VexTransOK=0x800,
VexTransAccessFail, VexTransOutputFull } status;
/* The number of extents that have a self-check (0 to 3) */
UInt n_sc_extents;
currently contains a call to the dispatcher specified by
disp_cp_chain_me_EXPECTED. */
extern
-VexInvalRange LibVEX_Chain ( VexArch arch_host,
- void* place_to_chain,
- void* disp_cp_chain_me_EXPECTED,
- void* place_to_jump_to );
+VexInvalRange LibVEX_Chain ( VexArch arch_host,
+ VexEndness endness_host,
+ void* place_to_chain,
+ void* disp_cp_chain_me_EXPECTED,
+ void* place_to_jump_to );
/* Undo an XDirect jump located at place_to_unchain, so it is
converted back into a call to disp_cp_chain_me. It is expected
(and checked) that this site currently contains a jump directly to
the address specified by place_to_jump_to_EXPECTED. */
extern
-VexInvalRange LibVEX_UnChain ( VexArch arch_host,
- void* place_to_unchain,
- void* place_to_jump_to_EXPECTED,
- void* disp_cp_chain_me );
+VexInvalRange LibVEX_UnChain ( VexArch arch_host,
+ VexEndness endness_host,
+ void* place_to_unchain,
+ void* place_to_jump_to_EXPECTED,
+ void* disp_cp_chain_me );
/* Returns a constant -- the size of the event check that is put at
the start of every translation. This makes it possible to
calculate the fast entry point address if the slow entry point
address is known (the usual case), or vice versa. */
extern
-Int LibVEX_evCheckSzB ( VexArch arch_host );
+Int LibVEX_evCheckSzB ( VexArch arch_host,
+ VexEndness endness_host );
/* Patch the counter location into an existing ProfInc point. The
specified point is checked to make sure it is plausible. */
extern
-VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
- void* place_to_patch,
- ULong* location_of_counter );
+VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
+ VexEndness endness_host,
+ void* place_to_patch,
+ ULong* location_of_counter );
/*-------------------------------------------------------*/