This adds a fallback LL/SC implementation, as described in bug 344524.
Valgrind-side changes:
* Command-line plumbing for --sim-hints=fallback-llsc
* memcheck: handle the new arm64 guest state fields (LLSC_SIZE, LLSC_ADDR, LLSC_DATA) in memcheck/mc_machine.c
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@16309
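For orientation, here is a minimal sketch of what the fallback scheme does, in terms of the arm64 guest-state fields this patch introduces (LLSC_SIZE, LLSC_ADDR, LLSC_DATA): a load-exclusive records the address, access size and loaded value; a store-exclusive succeeds only if a matching transaction is still pending and the memory still holds the recorded value, and either way the transaction is consumed. This is illustrative C, not the VEX IR the patch actually generates, and every name below is made up for the sketch.

#include <stdint.h>

/* Illustrative stand-ins for the guest-state fields; size == 0 means
   "no transaction in flight", which is also how the scheduler hunk
   below cancels a pending transaction on arm64. */
typedef struct {
   uint64_t llsc_size;
   uint64_t llsc_addr;
   uint64_t llsc_data;
} FallbackLLSC;

/* LDXR, 64-bit case: load and remember what was seen. */
uint64_t fallback_ldxr64 ( FallbackLLSC* tx, const uint64_t* addr )
{
   uint64_t v = *addr;
   tx->llsc_size = 8;
   tx->llsc_addr = (uint64_t)(uintptr_t)addr;
   tx->llsc_data = v;
   return v;
}

/* STXR, 64-bit case: returns 0 on success, 1 on failure, mirroring
   the real instruction's status result. */
int fallback_stxr64 ( FallbackLLSC* tx, uint64_t* addr, uint64_t newval )
{
   int ok = tx->llsc_size == 8
            && tx->llsc_addr == (uint64_t)(uintptr_t)addr
            && tx->llsc_data == *addr;
   tx->llsc_size = 0;            /* the transaction is consumed either way */
   if (ok) *addr = newval;
   return ok ? 0 : 1;
}

Note that a value-comparison scheme like this one cannot detect a modify-then-restore of the location between the LL and the SC; it only checks that the value is unchanged.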
" --sim-hints=hint1,hint2,... activate unusual sim behaviours [none] \n"
" where hint is one of:\n"
" lax-ioctls lax-doors fuse-compatible enable-outer\n"
-" no-inner-prefix no-nptl-pthread-stackcache none\n"
+" no-inner-prefix no-nptl-pthread-stackcache fallback-llsc none\n"
" --fair-sched=no|yes|try schedule threads fairly on multicore systems [no]\n"
" --kernel-variant=variant1,variant2,...\n"
" handle non-standard kernel variants [none]\n"
else if VG_USETX_CLO (str, "--sim-hints",
"lax-ioctls,lax-doors,fuse-compatible,"
"enable-outer,no-inner-prefix,"
- "no-nptl-pthread-stackcache",
+ "no-nptl-pthread-stackcache,fallback-llsc",
VG_(clo_sim_hints)) {}
}
tst->arch.vex.host_EvC_FAILADDR
= (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail) );
+  /* Invalidate any in-flight LL/SC transactions, in case we're using
+     the fallback LL/SC implementation.  See bugs 344524 and 369459. */
+# if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+ tst->arch.vex.guest_LLaddr = (HWord)(-1);
+# elif defined(VGP_arm64_linux)
+ tst->arch.vex.guest_LLSC_SIZE = 0;
+# endif
+
if (0) {
vki_sigset_t m;
Int i, err = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m);
vg_assert(VG_(in_generated_code) == True);
VG_(in_generated_code) = False;
-#if defined(VGA_mips32) || defined(VGA_mips64)
- tst->arch.vex.guest_LLaddr = (HWord)(-1);
-#endif
-
if (jumped != (HWord)0) {
/* We get here if the client took a fault that caused our signal
handler to longjmp. */
vex_abiinfo.guest_amd64_assume_fs_is_const = True;
vex_abiinfo.guest_amd64_assume_gs_is_const = True;
# endif
+
# if defined(VGP_amd64_darwin)
vex_abiinfo.guest_amd64_assume_gs_is_const = True;
# endif
+
+# if defined(VGP_amd64_solaris)
+ vex_abiinfo.guest_amd64_assume_fs_is_const = True;
+# endif
+
# if defined(VGP_ppc32_linux)
vex_abiinfo.guest_ppc_zap_RZ_at_blr = False;
vex_abiinfo.guest_ppc_zap_RZ_at_bl = NULL;
# endif
+
# if defined(VGP_ppc64be_linux)
vex_abiinfo.guest_ppc_zap_RZ_at_blr = True;
vex_abiinfo.guest_ppc_zap_RZ_at_bl = const_True;
vex_abiinfo.host_ppc_calls_use_fndescrs = True;
# endif
+
# if defined(VGP_ppc64le_linux)
vex_abiinfo.guest_ppc_zap_RZ_at_blr = True;
vex_abiinfo.guest_ppc_zap_RZ_at_bl = const_True;
vex_abiinfo.host_ppc_calls_use_fndescrs = False;
# endif
-# if defined(VGP_amd64_solaris)
- vex_abiinfo.guest_amd64_assume_fs_is_const = True;
-# endif
+
# if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
ThreadArchState* arch = &VG_(threads)[tid].arch;
vex_abiinfo.guest_mips_fp_mode64 =
!!(arch->vex.guest_CP0_status & MIPS_CP0_STATUS_FR);
+   /* Compute guest__use_fallback_LLSC, overriding the
+      --sim-hints=fallback-llsc setting when we know the native
+      scheme would cause the guest to fail (loop). */
+ if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
+ /* We must use the fallback scheme. */
+ vex_abiinfo.guest__use_fallback_LLSC = True;
+ } else {
+ vex_abiinfo.guest__use_fallback_LLSC
+ = SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints));
+ }
+# endif
+
+# if defined(VGP_arm64_linux)
+ vex_abiinfo.guest__use_fallback_LLSC
+ = SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints));
# endif
/* Set up closure args. */
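Two things happen in the hunk above: on MIPS the fallback is forced for Cavium parts, where the comment notes the native scheme would make the guest fail (loop); everywhere else, including arm64, it is opt-in via --sim-hints=fallback-llsc (combinable with other hints, e.g. --sim-hints=fallback-llsc,lax-ioctls). A stand-alone restatement of that decision, using invented parameter names rather than the Valgrind internals:

#include <stdbool.h>

/* Mirrors the guest__use_fallback_LLSC logic above; names are
   stand-ins for this sketch only. */
bool use_fallback_llsc ( bool is_cavium_mips, bool hint_fallback_llsc )
{
   if (is_cavium_mips)
      return true;               /* forced: the native scheme is known to loop */
   return hint_fallback_llsc;    /* otherwise honour --sim-hints=fallback-llsc */
}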
SimHint_fuse_compatible,
SimHint_enable_outer,
SimHint_no_inner_prefix,
- SimHint_no_nptl_pthread_stackcache
+ SimHint_no_nptl_pthread_stackcache,
+ SimHint_fallback_llsc
}
SimHint;
// Build the mask used to check or set membership of SimHint a
#define SimHint2S(a) (1 << (a))
// Is SimHint h a member of the set s ?
-#define SimHintiS(h,s) ((s) & SimHint2S(h))
+#define SimHintiS(h,s) (((s) & SimHint2S(h)) != 0)
extern UInt VG_(clo_sim_hints);
/* Show symbols in the form 'name+offset' ? Default: NO */
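The SimHintiS change deserves a note: the old form evaluated to the raw masked bit, which is risky to store in a one-byte Bool (as VEX's Bool is) once hint bits reach position 8 or above, because the set bit would truncate to zero; the new form always yields 0 or 1, which is what a field like guest__use_fallback_LLSC expects. A minimal illustration, with an arbitrary bit position chosen to show the truncation:

#include <stdio.h>

/* Local copies of the two macro variants; bit 9 is arbitrary, picked
   only because (1 << 9) does not survive truncation to one byte. */
#define SimHint2S(a)        (1 << (a))
#define OLD_SimHintiS(h,s)  ((s) & SimHint2S(h))
#define NEW_SimHintiS(h,s)  (((s) & SimHint2S(h)) != 0)

int main ( void )
{
   unsigned int  hints = SimHint2S(9);
   unsigned char old_b = OLD_SimHintiS(9, hints);   /* 512 truncates to 0 */
   unsigned char new_b = NEW_SimHintiS(9, hints);   /* always 0 or 1: here 1 */
   printf("old=%d new=%d\n", old_b, new_b);         /* prints "old=0 new=1" */
   return 0;
}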
if (o == GOF(CMSTART) && sz == 8) return -1; // untracked
if (o == GOF(CMLEN) && sz == 8) return -1; // untracked
+ if (o == GOF(LLSC_SIZE) && sz == 8) return -1; // untracked
+ if (o == GOF(LLSC_ADDR) && sz == 8) return o;
+ if (o == GOF(LLSC_DATA) && sz == 8) return o;
+
VG_(printf)("MC_(get_otrack_shadow_offset)(arm64)(off=%d,sz=%d)\n",
offset,szB);
tl_assert(0);
--sim-hints=hint1,hint2,... activate unusual sim behaviours [none]
where hint is one of:
lax-ioctls lax-doors fuse-compatible enable-outer
- no-inner-prefix no-nptl-pthread-stackcache none
+ no-inner-prefix no-nptl-pthread-stackcache fallback-llsc none
--fair-sched=no|yes|try schedule threads fairly on multicore systems [no]
--kernel-variant=variant1,variant2,...
handle non-standard kernel variants [none]
--sim-hints=hint1,hint2,... activate unusual sim behaviours [none]
where hint is one of:
lax-ioctls lax-doors fuse-compatible enable-outer
- no-inner-prefix no-nptl-pthread-stackcache none
+ no-inner-prefix no-nptl-pthread-stackcache fallback-llsc none
--fair-sched=no|yes|try schedule threads fairly on multicore systems [no]
--kernel-variant=variant1,variant2,...
handle non-standard kernel variants [none]