have_mmxext = True;
va = VexArchX86;
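+ /* x86 hosts are always little-endian. */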
+ vai.endness = VexEndnessLE;
if (have_sse2 && have_sse1 && have_mmxext) {
vai.hwcaps = VEX_HWCAPS_X86_MMXEXT;
vai.hwcaps |= VEX_HWCAPS_X86_SSE1;
vai.hwcaps |= VEX_HWCAPS_X86_SSE2;
}
- va = VexArchAMD64;
- vai.hwcaps = (have_sse3 ? VEX_HWCAPS_AMD64_SSE3 : 0)
- | (have_cx16 ? VEX_HWCAPS_AMD64_CX16 : 0)
- | (have_lzcnt ? VEX_HWCAPS_AMD64_LZCNT : 0)
- | (have_avx ? VEX_HWCAPS_AMD64_AVX : 0)
- | (have_bmi ? VEX_HWCAPS_AMD64_BMI : 0)
- | (have_avx2 ? VEX_HWCAPS_AMD64_AVX2 : 0)
- | (have_rdtscp ? VEX_HWCAPS_AMD64_RDTSCP : 0);
+ va = VexArchAMD64;
+ vai.endness = VexEndnessLE;
+ vai.hwcaps = (have_sse3 ? VEX_HWCAPS_AMD64_SSE3 : 0)
+ | (have_cx16 ? VEX_HWCAPS_AMD64_CX16 : 0)
+ | (have_lzcnt ? VEX_HWCAPS_AMD64_LZCNT : 0)
+ | (have_avx ? VEX_HWCAPS_AMD64_AVX : 0)
+ | (have_bmi ? VEX_HWCAPS_AMD64_BMI : 0)
+ | (have_avx2 ? VEX_HWCAPS_AMD64_AVX2 : 0)
+ | (have_rdtscp ? VEX_HWCAPS_AMD64_RDTSCP : 0);
VG_(machine_get_cache_info)(&vai);
VG_(machine_ppc32_has_VMX) = have_V ? 1 : 0;
va = VexArchPPC32;
+ vai.endness = VexEndnessBE;
vai.hwcaps = 0;
if (have_F) vai.hwcaps |= VEX_HWCAPS_PPC32_F;
VG_(machine_ppc64_has_VMX) = have_V ? 1 : 0;
va = VexArchPPC64;
+ // CARLL fixme: when the time comes, copy .endness setting code
+ // from the VGA_mips32 case
+ vai.endness = VexEndnessBE;
vai.hwcaps = 0;
if (have_V) vai.hwcaps |= VEX_HWCAPS_PPC64_V;
r = VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
vg_assert(r == 0);
va = VexArchS390X;
+ vai.endness = VexEndnessBE;
vai.hwcaps = model;
if (have_STFLE) vai.hwcaps |= VEX_HWCAPS_S390X_STFLE;
VG_(machine_arm_archlevel) = archlevel;
va = VexArchARM;
+ vai.endness = VexEndnessLE;
vai.hwcaps = VEX_ARM_ARCHLEVEL(archlevel);
if (have_VFP3) vai.hwcaps |= VEX_HWCAPS_ARM_VFP3;
#elif defined(VGA_arm64)
{
va = VexArchARM64;
+ vai.endness = VexEndnessLE;
/* So far there are no variants. */
vai.hwcaps = 0;
vai.hwcaps = model;
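+ /* This target can be built either way round; take the endianness
+    the VKI (kernel interface) headers were configured for. */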
+# if defined(VKI_LITTLE_ENDIAN)
+ vai.endness = VexEndnessLE;
+# elif defined(VKI_BIG_ENDIAN)
+ vai.endness = VexEndnessBE;
+# else
+ vai.endness = VexEndness_INVALID;
+# endif
+
/* Same instruction set detection algorithm as for ppc32/arm... */
vki_sigset_t saved_set, tmp_set;
vki_sigaction_fromK_t saved_sigill_act;
{
va = VexArchMIPS64;
UInt model = VG_(get_machine_model)();
- if (model== -1)
+ if (model == -1)
return False;
vai.hwcaps = model;
+# if defined(VKI_LITTLE_ENDIAN)
+ vai.endness = VexEndnessLE;
+# elif defined(VKI_BIG_ENDIAN)
+ vai.endness = VexEndnessBE;
+# else
+ vai.endness = VexEndness_INVALID;
+# endif
+
VG_(machine_get_cache_info)(&vai);
return True;
Bool to_fastEP )
{
/* Get the CPU info established at startup. */
- VexArch vex_arch = VexArch_INVALID;
- VG_(machine_get_VexArchInfo)( &vex_arch, NULL );
+ VexArch arch_host = VexArch_INVALID;
+ VexArchInfo archinfo_host;
+ VG_(bzero_inline)(&archinfo_host, sizeof(archinfo_host));
+ VG_(machine_get_VexArchInfo)( &arch_host, &archinfo_host );
+ VexEndness endness_host = archinfo_host.endness;
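+ // endness_host is needed by evCheckSzB and the chaining entry
+ // points below, since they size, generate and patch host code.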
// host_code is where we're patching to. So it needs to
// take into account whether we're jumping to the slow (tcptr)
// or the fast (tcptr + evCheckSzB) entry point.
TTEntry* to_tte = index_tte(to_sNo, to_tteNo);
void* host_code = ((UChar*)to_tte->tcptr)
- + (to_fastEP ? LibVEX_evCheckSzB(vex_arch) : 0);
+ + (to_fastEP ? LibVEX_evCheckSzB(arch_host,
+ endness_host) : 0);
// stay sane -- the patch point (dst) is in this sector's code cache
vg_assert( (UChar*)host_code >= (UChar*)sectors[to_sNo].tc );
since it is host-dependent. */
VexInvalRange vir
= LibVEX_Chain(
- vex_arch,
+ arch_host, endness_host,
from__patch_addr,
VG_(fnptr_to_fnentry)(
to_fastEP ? &VG_(disp_cp_chain_me_to_fastEP)
addresses of the destination block (that is, the block that owns
this InEdge). */
__attribute__((noinline))
-static void unchain_one ( VexArch vex_arch,
+static void unchain_one ( VexArch arch_host, VexEndness endness_host,
InEdge* ie,
void* to_fastEPaddr, void* to_slowEPaddr )
{
// place_to_jump_to_EXPECTED really is the current dst, and
// asserts if it isn't.
VexInvalRange vir
- = LibVEX_UnChain( vex_arch, place_to_patch,
+ = LibVEX_UnChain( arch_host, endness_host, place_to_patch,
place_to_jump_to_EXPECTED, disp_cp_chain_me );
VG_(invalidate_icache)( (void*)vir.start, vir.len );
}
succs of its associated blocks accordingly. This includes undoing
any chained jumps to this block. */
static
-void unchain_in_preparation_for_deletion ( VexArch vex_arch,
+void unchain_in_preparation_for_deletion ( VexArch arch_host,
+ VexEndness endness_host,
UInt here_sNo, UInt here_tteNo )
{
if (DEBUG_TRANSTAB)
VG_(printf)("QQQ unchain_in_prep %u.%u...\n", here_sNo, here_tteNo);
UWord i, j, n, m;
- Int evCheckSzB = LibVEX_evCheckSzB(vex_arch);
+ Int evCheckSzB = LibVEX_evCheckSzB(arch_host, endness_host);
TTEntry* here_tte = index_tte(here_sNo, here_tteNo);
if (DEBUG_TRANSTAB)
VG_(printf)("... QQQ tt.entry 0x%llu tt.tcptr 0x%p\n",
// Undo the chaining.
UChar* here_slow_EP = (UChar*)here_tte->tcptr;
UChar* here_fast_EP = here_slow_EP + evCheckSzB;
- unchain_one(vex_arch, ie, here_fast_EP, here_slow_EP);
+ unchain_one(arch_host, endness_host, ie, here_fast_EP, here_slow_EP);
// Find the corresponding entry in the "from" node's out_edges,
// and remove it.
TTEntry* from_tte = index_tte(ie->from_sNo, ie->from_tteNo);
vg_assert(sec->tc_next != NULL);
n_dump_count += sec->tt_n_inuse;
- VexArch vex_arch = VexArch_INVALID;
- VG_(machine_get_VexArchInfo)( &vex_arch, NULL );
+ VexArch arch_host = VexArch_INVALID;
+ VexArchInfo archinfo_host;
+ VG_(bzero_inline)(&archinfo_host, sizeof(archinfo_host));
+ VG_(machine_get_VexArchInfo)( &arch_host, &archinfo_host );
+ VexEndness endness_host = archinfo_host.endness;
/* Visit each just-about-to-be-abandoned translation. */
if (DEBUG_TRANSTAB) VG_(printf)("QQQ unlink-entire-sector: %d START\n",
sec->tt[i].entry,
sec->tt[i].vge );
}
- unchain_in_preparation_for_deletion(vex_arch, sno, i);
+ unchain_in_preparation_for_deletion(arch_host,
+ endness_host, sno, i);
} else {
vg_assert(sec->tt[i].n_tte2ec == 0);
}
UInt code_len,
Bool is_self_checking,
Int offs_profInc,
- UInt n_guest_instrs,
- VexArch arch_host )
+ UInt n_guest_instrs )
{
Int tcAvailQ, reqdQ, y, i;
ULong *tcptr, *tcptr2;
/* Patch in the profile counter location, if necessary. */
if (offs_profInc != -1) {
vg_assert(offs_profInc >= 0 && offs_profInc < code_len);
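+ // Patching in the profile counter increment requires the host's
+ // arch and endianness, so look them up here.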
+ VexArch arch_host = VexArch_INVALID;
+ VexArchInfo archinfo_host;
+ VG_(bzero_inline)(&archinfo_host, sizeof(archinfo_host));
+ VG_(machine_get_VexArchInfo)( &arch_host, &archinfo_host );
+ VexEndness endness_host = archinfo_host.endness;
VexInvalRange vir
- = LibVEX_PatchProfInc( arch_host,
+ = LibVEX_PatchProfInc( arch_host, endness_host,
dstP + offs_profInc,
&sectors[y].tt[i].count );
VG_(invalidate_icache)( (void*)vir.start, vir.len );
/* Delete a tt entry, and update all the eclass data accordingly. */
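+ /* The host arch/endness are threaded through so that any chained
+    jumps to the entry can be undone before it goes away. */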
static void delete_tte ( /*MOD*/Sector* sec, UInt secNo, Int tteno,
- VexArch vex_arch )
+ VexArch arch_host, VexEndness endness_host )
{
Int i, ec_num, ec_idx;
TTEntry* tte;
vg_assert(tte->n_tte2ec >= 1 && tte->n_tte2ec <= 3);
/* Unchain .. */
- unchain_in_preparation_for_deletion(vex_arch, secNo, tteno);
+ unchain_in_preparation_for_deletion(arch_host, endness_host, secNo, tteno);
/* Deal with the ec-to-tte links first. */
for (i = 0; i < tte->n_tte2ec; i++) {
Bool delete_translations_in_sector_eclass ( /*MOD*/Sector* sec, UInt secNo,
Addr64 guest_start, ULong range,
Int ec,
- VexArch vex_arch )
+ VexArch arch_host,
+ VexEndness endness_host )
{
Int i;
UShort tteno;
if (overlaps( guest_start, range, &tte->vge )) {
anyDeld = True;
- delete_tte( sec, secNo, (Int)tteno, vex_arch );
+ delete_tte( sec, secNo, (Int)tteno, arch_host, endness_host );
}
}
static
Bool delete_translations_in_sector ( /*MOD*/Sector* sec, UInt secNo,
Addr64 guest_start, ULong range,
- VexArch vex_arch )
+ VexArch arch_host,
+ VexEndness endness_host )
{
Int i;
Bool anyDeld = False;
if (sec->tt[i].status == InUse
&& overlaps( guest_start, range, &sec->tt[i].vge )) {
anyDeld = True;
- delete_tte( sec, secNo, i, vex_arch );
+ delete_tte( sec, secNo, i, arch_host, endness_host );
}
}
if (range == 0)
return;
- VexArch vex_arch = VexArch_INVALID;
- VG_(machine_get_VexArchInfo)( &vex_arch, NULL );
+ VexArch arch_host = VexArch_INVALID;
+ VexArchInfo archinfo_host;
+ VG_(bzero_inline)(&archinfo_host, sizeof(archinfo_host));
+ VG_(machine_get_VexArchInfo)( &arch_host, &archinfo_host );
+ VexEndness endness_host = archinfo_host.endness;
/* There are two different ways to do this.
continue;
anyDeleted |= delete_translations_in_sector_eclass(
sec, sno, guest_start, range, ec,
- vex_arch
+ arch_host, endness_host
);
anyDeleted |= delete_translations_in_sector_eclass(
sec, sno, guest_start, range, ECLASS_MISC,
- vex_arch
+ arch_host, endness_host
);
}
if (sec->tc == NULL)
continue;
anyDeleted |= delete_translations_in_sector(
- sec, sno, guest_start, range, vex_arch );
+ sec, sno, guest_start, range,
+ arch_host, endness_host
+ );
}
}