#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
-#include "pub_core_execontext.h" // self
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h" // For VG_(message)()
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_stacktrace.h"
+#include "pub_core_machine.h" // VG_(get_IP)
+#include "pub_core_vki.h" // To keep pub_core_threadstate.h happy
+#include "pub_core_threadstate.h" // VG_(is_valid_tid)
+#include "pub_core_execontext.h" // self
/*------------------------------------------------------------*/
/*--- Low-level ExeContext storage. ---*/
ec_htab_size_idx++;
}
-ExeContext* VG_(record_ExeContext) ( ThreadId tid, Word first_ip_delta )
+static ExeContext* record_ExeContext_wrk ( ThreadId tid, Word first_ip_delta,
+ Bool first_ip_only )
{
Int i;
Addr ips[VG_DEEPEST_BACKTRACE];
vg_assert(VG_(clo_backtrace_size) >= 1 &&
VG_(clo_backtrace_size) <= VG_DEEPEST_BACKTRACE);
- n_ips = VG_(get_StackTrace)( tid, ips, VG_(clo_backtrace_size),
- first_ip_delta );
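+   /* With first_ip_only set, skip the stack unwinder entirely and
+      record only the thread's current IP.  Callers use this when the
+      stack might not be safely readable. */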
+ if (first_ip_only) {
+ vg_assert(VG_(is_valid_tid)(tid));
+ n_ips = 1;
+ ips[0] = VG_(get_IP)(tid);
+ } else {
+ n_ips = VG_(get_StackTrace)( tid, ips, VG_(clo_backtrace_size),
+ first_ip_delta );
+ }
+
tl_assert(n_ips >= 1 && n_ips <= VG_(clo_backtrace_size));
/* Now figure out if we've seen this one before. First hash it so
return new_ec;
}
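+
+/* Public entry points.  Both are thin wrappers around
+   record_ExeContext_wrk; see pub_core_execontext.h for their
+   descriptions. */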
+ExeContext* VG_(record_ExeContext)( ThreadId tid, Word first_ip_delta ) {
+ return record_ExeContext_wrk( tid, first_ip_delta,
+ False/*!first_ip_only*/ );
+}
+
+ExeContext* VG_(record_depth_1_ExeContext)( ThreadId tid ) {
+ return record_ExeContext_wrk( tid, 0/*first_ip_delta*/,
+ True/*first_ip_only*/ );
+}
+
+
StackTrace VG_(extract_StackTrace) ( ExeContext* e )
{
return e->ips;
case VKI_BUS_OBJERR: event = "Hardware error"; break;
}
break;
- }
+ } /* switch (sigNo) */
if (event != NULL) {
if (haveaddr)
VG_(message)(Vg_UserMsg, " %s", event);
}
}
-
- if (tid != VG_INVALID_THREADID) {
- VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
+   /* Print a stack trace.  Be careful: if the thread's SP points at
+      an obviously bogus place (not mapped readable), unwinding the
+      stack from there would likely segfault, so in that case settle
+      for a depth-1 trace instead. */
+ if (VG_(is_valid_tid)(tid)) {
+ ExeContext* ec = VG_(am_is_valid_for_client)
+ (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
+ ? VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ )
+ : VG_(record_depth_1_ExeContext)( tid );
+ vg_assert(ec);
+ VG_(pp_ExeContext)( ec );
}
-
if (sigNo == VKI_SIGSEGV
&& info && info->si_code > VKI_SI_USER
&& info->si_code == VKI_SEGV_MAPERR) {
extern
ExeContext* VG_(record_ExeContext) ( ThreadId tid, Word first_ip_delta );
+// Trivial version of VG_(record_ExeContext), which just records the
+// thread's current program counter but does not do any stack
+// unwinding. This is useful in some rare cases when we suspect the
+// stack might be outside mapped storage, and so unwinding
+// might cause a segfault. In this case we can at least safely
+// produce a one-element stack trace, which is better than nothing.
+extern
+ExeContext* VG_(record_depth_1_ExeContext)( ThreadId tid );
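+//
+// A minimal usage sketch (illustrative only; assumes 'tid' names a
+// live thread):
+//    ExeContext* ec = VG_(record_depth_1_ExeContext)( tid );
+//    VG_(pp_ExeContext)( ec );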
+
// Apply a function to every element in the ExeContext. The parameter 'n'
// gives the index of the passed ip. Doesn't go below main() unless
// --show-below-main=yes is set.