Note: the outer now unconditionally reports the inner guest stacktrace.
If that turns out to be a problem, we might add a sim-hint
no-inner-guest-stacktrace to optionally disable this behaviour in the outer.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@16139
* ==================== OTHER CHANGES ====================
+* For Valgrind developers: in an outer/inner setup, the outer Valgrind
+ will append the inner guest stacktrace to the inner host stacktrace.
+ This helps to investigate the errors reported by the outer when they
+ are caused by the inner guest program (such as an inner regtest).
+
* ==================== FIXED BUGS ====================
The following bugs have been fixed or resolved. Note that "n-i-bz"
The file tests/outer_inner.supp contains suppressions for
the irrelevant or benign errors found in the inner.
+A regression test running in the inner (e.g. memcheck/tests/badrw) will
+cause the inner to report an error, which is expected and checked
+as usual when running the regtests in an outer/inner setup.
+However, the outer will often also observe an error, e.g. a jump
+using uninitialised data, or a read/write outside the bounds of a heap
+block. When the outer reports such an error, it will output the
+inner host stacktrace. To this stacktrace, it will append the
+stacktrace of the inner guest program. For example, this is an error
+reported by the outer when the inner runs the badrw regtest:
+ ==8119== Invalid read of size 2
+ ==8119== at 0x7F2EFD7AF: ???
+ ==8119== by 0x7F2C82EAF: ???
+ ==8119== by 0x7F180867F: ???
+ ==8119== by 0x40051D: main (badrw.c:5)
+ ==8119== by 0x7F180867F: ???
+ ==8119== by 0x1BFF: ???
+ ==8119== by 0x3803B7F0: _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______ (m_execontext.c:332)
+ ==8119== by 0x40055C: main (badrw.c:22)
+ ==8119== Address 0x55cd03c is 4 bytes before a block of size 16 alloc'd
+ ==8119== at 0x2804E26D: vgPlain_arena_malloc (m_mallocfree.c:1914)
+ ==8119== by 0x2800BAB4: vgMemCheck_new_block (mc_malloc_wrappers.c:368)
+ ==8119== by 0x2800BC87: vgMemCheck_malloc (mc_malloc_wrappers.c:403)
+ ==8119== by 0x28097EAE: do_client_request (scheduler.c:1861)
+ ==8119== by 0x28097EAE: vgPlain_scheduler (scheduler.c:1425)
+ ==8119== by 0x280A7237: thread_wrapper (syswrap-linux.c:103)
+ ==8119== by 0x280A7237: run_a_thread_NORETURN (syswrap-linux.c:156)
+ ==8119== by 0x3803B7F0: _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______ (m_execontext.c:332)
+ ==8119== by 0x4C294C4: malloc (vg_replace_malloc.c:298)
+ ==8119== by 0x40051D: main (badrw.c:5)
+In the above, the first stacktrace starts with the inner host stacktrace,
+which in this case is some JITted code. Such code sometimes contains IPs
+that point into the inner guest code (0x40051D: main (badrw.c:5)).
+After the separator, we have the inner guest stacktrace.
+The second stacktrace shows where the heap block that was overrun was
+allocated. Its first part shows that the block was allocated by the
+inner valgrind in the client arena; the second part is the inner guest
+stacktrace that requested the allocation.
+
+
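+Internally, the separator is just one extra IP (the address of the
+marker function _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______)
+stored in the outer's stacktrace, so a consumer that knows this address
+can split the combined trace at it. The sketch below is purely
+illustrative and is not part of valgrind: the helper name is invented,
+and the sample addresses are copied from the example above.
+
+  #include <stddef.h>
+  #include <stdio.h>
+
+  /* Return the index of the first inner guest IP, i.e. the entry just
+     after the separator, or n if no separator frame is present. */
+  static size_t first_guest_frame(const void *ips[], size_t n,
+                                  const void *separator_ip)
+  {
+     size_t i;
+     for (i = 0; i < n; i++)
+        if (ips[i] == separator_ip)
+           return i + 1;
+     return n;
+  }
+
+  int main(void)
+  {
+     const void *sep   = (const void *)0x3803B7F0;     /* separator   */
+     const void *ips[] = { (const void *)0x7F2EFD7AF,  /* inner host  */
+                           (const void *)0x40051D,     /* inner host  */
+                           sep,
+                           (const void *)0x40055C };   /* inner guest */
+     printf("inner guest frames start at index %zu\n",
+            first_guest_frame(ips, 4, sep));
+     return 0;
+  }
+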
(C) Performance tests in an outer/inner setup:
To run all the performance tests with an outer cachegrind, do:
ec_htab_size_idx++;
}
+/* Used by the outer as a marker to separate the frames of the inner valgrind
+ from the frames of the inner guest. */
+static void _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______ (void)
+{
+}
+
/* Do the first part of getting a stack trace: actually unwind the
stack, and hand the results off to the duplicate-trace-finder
(_wrk2). */
NULL/*array to dump SP values in*/,
NULL/*array to dump FP values in*/,
first_ip_delta );
+ if (VG_(inner_threads) != NULL
+ && n_ips + 1 < VG_(clo_backtrace_size)) {
+ /* An inner V has informed us (the outer) of its thread array.
+ Append the inner guest stack trace, if we still have some
+ room in the ips array for the separator and (some) inner
+ guest IPs. */
+ UInt inner_tid;
+
+ for (inner_tid = 1; inner_tid < VG_N_THREADS; inner_tid++) {
+ if (VG_(threads)[tid].os_state.lwpid
+ == VG_(inner_threads)[inner_tid].os_state.lwpid) {
+ ThreadState* save_outer_vg_threads = VG_(threads);
+ UInt n_ips_inner_guest;
+
+ /* Append the separator + the inner guest stack trace. */
+ ips[n_ips] = (Addr)
+ _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______;
+ n_ips++;
+ VG_(threads) = VG_(inner_threads);
+ n_ips_inner_guest
+ = VG_(get_StackTrace)( inner_tid,
+ ips + n_ips,
+ VG_(clo_backtrace_size) - n_ips,
+ NULL/*array to dump SP values in*/,
+ NULL/*array to dump FP values in*/,
+ first_ip_delta );
+ n_ips += n_ips_inner_guest;
+ VG_(threads) = save_outer_vg_threads;
+ break;
+ }
+ }
+ }
}
return record_ExeContext_wrk2 ( ips, n_ips );
exit_wrk (status, False);
}
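+/* Print the state of thread i: scheduler status, lwpid, stacktrace and,
+   if stack_usage is True, the client stack range and the valgrind stack
+   usage.  Each line is prefixed with prefix, so this helper can print
+   both outer threads (prefix "") and inner guest threads ("INNER "). */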
+static void print_thread_state (Bool stack_usage,
+ const HChar* prefix, ThreadId i)
+{
+ VgStack *stack
+ = (VgStack*)VG_(threads)[i].os_state.valgrind_stack_base;
+
+ VG_(printf)("\n%sThread %d: status = %s (lwpid %d)\n", prefix, i,
+ VG_(name_of_ThreadStatus)(VG_(threads)[i].status),
+ VG_(threads)[i].os_state.lwpid);
+ if (VG_(threads)[i].status != VgTs_Empty)
+ VG_(get_and_pp_StackTrace)( i, BACKTRACE_DEPTH );
+ if (stack_usage && VG_(threads)[i].client_stack_highest_byte != 0 ) {
+ Addr start, end;
+
+ start = end = 0;
+ VG_(stack_limits)(VG_(get_SP)(i), &start, &end);
+ if (start != end)
+ VG_(printf)("%sclient stack range: [%p %p] client SP: %p\n",
+ prefix,
+ (void*)start, (void*)end, (void*)VG_(get_SP)(i));
+ else
+ VG_(printf)("%sclient stack range: ??????? client SP: %p\n",
+ prefix,
+ (void*)VG_(get_SP)(i));
+ }
+ if (stack_usage && stack != 0)
+ VG_(printf)
+ ("%svalgrind stack top usage: %lu of %lu\n",
+ prefix,
+ VG_(clo_valgrind_stacksize)
+ - VG_(am_get_VgStack_unused_szB) (stack,
+ VG_(clo_valgrind_stacksize)),
+ (SizeT) VG_(clo_valgrind_stacksize));
+}
// Print the scheduler status.
static void show_sched_status_wrk ( Bool host_stacktrace,
has exited, then valgrind_stack_base points to the stack base. */
if (VG_(threads)[i].status == VgTs_Empty
&& (!exited_threads || stack == 0)) continue;
- VG_(printf)("\nThread %d: status = %s (lwpid %d)\n", i,
- VG_(name_of_ThreadStatus)(VG_(threads)[i].status),
- VG_(threads)[i].os_state.lwpid);
- if (VG_(threads)[i].status != VgTs_Empty)
- VG_(get_and_pp_StackTrace)( i, BACKTRACE_DEPTH );
- if (stack_usage && VG_(threads)[i].client_stack_highest_byte != 0 ) {
- Addr start, end;
-
- start = end = 0;
- VG_(stack_limits)(VG_(threads)[i].client_stack_highest_byte,
- &start, &end);
- if (start != end)
- VG_(printf)("client stack range: [%p %p] client SP: %p\n",
- (void*)start, (void*)end, (void*)VG_(get_SP)(i));
- else
- VG_(printf)("client stack range: ???????\n");
+ print_thread_state(stack_usage, "", i);
+ if (VG_(inner_threads) != NULL) {
+ /* An inner V has informed us (the outer) of its thread array.
+ Report the inner guest stack trace. */
+ UInt inner_tid;
+
+ for (inner_tid = 1; inner_tid < VG_N_THREADS; inner_tid++) {
+ if (VG_(threads)[i].os_state.lwpid
+ == VG_(inner_threads)[inner_tid].os_state.lwpid) {
+ ThreadState* save_outer_vg_threads = VG_(threads);
+
+ VG_(threads) = VG_(inner_threads);
+ print_thread_state(stack_usage, "INNER ", inner_tid);
+ VG_(threads) = save_outer_vg_threads;
+ break;
+ }
+ }
}
- if (stack_usage && stack != 0)
- VG_(printf)("valgrind stack top usage: %lu of %lu\n",
- VG_(clo_valgrind_stacksize)
- - VG_(am_get_VgStack_unused_szB)
- (stack, VG_(clo_valgrind_stacksize)),
- (SizeT) VG_(clo_valgrind_stacksize));
}
}
VG_(printf)("\n");
SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
break;
+ case VG_USERREQ__INNER_THREADS:
+ if (VG_(clo_verbosity) > 2)
+ VG_(printf)( "client request: INNER_THREADS,"
+ " addr %p\n",
+ (void*)arg[1] );
+ VG_(inner_threads) = (ThreadState*)arg[1];
+ SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
+ break;
+
case VG_USERREQ__COUNT_ERRORS:
SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
break;
#include "pub_core_options.h"
#include "pub_core_stacks.h"
#include "pub_core_tooliface.h"
+#include "pub_core_inner.h"
+#if defined(ENABLE_INNER_CLIENT_REQUEST)
+#include "pub_core_clreq.h"
+#endif
// For expensive debugging
#define EDEBUG(fmt, args...) //VG_(debugLog)(2, "stacks", fmt, ## args)
Addr start; // Lowest stack byte, included.
Addr end; // Highest stack byte, included.
struct _Stack *next;
+ UWord outer_id; /* For an inner valgrind, stack id registered in outer
+ valgrind. */
} Stack;
static Stack *stacks;
VG_(debugLog)(2, "stacks", "register [start-end] [%p-%p] as stack %lu\n",
(void*)start, (void*)end, i->id);
-
+ INNER_REQUEST(i->outer_id = VALGRIND_STACK_REGISTER(start, end));
return i->id;
}
} else {
prev->next = i->next;
}
+ INNER_REQUEST(VALGRIND_STACK_DEREGISTER(i->outer_id));
VG_(free)(i);
return;
}
/* FIXME : swap start/end like VG_(register_stack) ??? */
i->start = start;
i->end = end;
+ INNER_REQUEST(VALGRIND_STACK_CHANGE(i->outer_id, start, end));
return;
}
i = i->next;
ThreadState *VG_(threads);
UInt VG_N_THREADS;
+ThreadState *VG_(inner_threads);
+
/*------------------------------------------------------------*/
/*--- Operations. ---*/
/*------------------------------------------------------------*/
sizeof(VG_(threads)[tid].os_state.exitcode),
""));
}
+ INNER_REQUEST(VALGRIND_INNER_THREADS(VG_(threads)));
}
const HChar* VG_(name_of_ThreadStatus) ( ThreadStatus status )
/*--- The thread table. ---*/
/*------------------------------------------------------------*/
-/* A statically allocated array of threads. NOTE: [0] is
- never used, to simplify the simulation of initialisers for
- LinuxThreads. */
+/* An array of threads, dynamically allocated by VG_(init_Threads).
+ NOTE: [0] is never used, to simplify the simulation of initialisers
+ for LinuxThreads. */
extern ThreadState *VG_(threads);
+/* In an outer valgrind, VG_(inner_threads) stores the address of
+ the inner VG_(threads) array, as reported by the inner using
+ the client request INNER_THREADS. */
+extern ThreadState *VG_(inner_threads);
+
// The running thread. m_scheduler should be the only other module
// to write to this.
extern ThreadId VG_(running_tid);
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
- which use client requests. DO NOT CHANGE THE ORDER OF THESE
- ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+ which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end of the most
+ relevant group. */
typedef
enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
Other values are not allowed. */
VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801,
+ /* Some requests used for Valgrind internal purposes, such as
+ self-test or self-hosting. */
/* Initialise IR injection */
- VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901
+ VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901,
+ /* Used by Inner Valgrind to inform Outer Valgrind where to
+ find the list of inner guest threads */
+ VG_USERREQ__INNER_THREADS = 0x1902
} Vg_ClientRequest;
#if !defined(__GNUC__)
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \
_qzz_addr, _qzz_len, 0, 0, 0)
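+/* Tell the outer valgrind (if any) where this inner valgrind's thread
+   array is, so that the outer can report inner guest stacktraces and
+   thread states.  Used by the core of an inner valgrind in an
+   outer/inner setup. */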
+#define VALGRIND_INNER_THREADS(_qzz_addr) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \
+ _qzz_addr, 0, 0, 0, 0)
+
/* These requests are for getting Valgrind itself to print something.
Possibly with a backtrace. This is a really ugly hack. The return value