// Keeps track of how many bytes of memory we've scanned, for printing.
// (Nb: We don't keep track of how many register bytes we've scanned.)
static SizeT lc_scanned_szB;
+// Keeps track of how many bytes we have not scanned due to read errors that
+// caused a signal such as SIGSEGV.
+static SizeT lc_sig_skipped_szB;
SizeT MC_(bytes_leaked) = 0;
static VG_MINIMAL_JMP_BUF(memscan_jmpbuf);
+static volatile Addr bad_scanned_addr;
static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
if (0)
VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
- if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
+ if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS) {
+ bad_scanned_addr = addr;
VG_MINIMAL_LONGJMP(memscan_jmpbuf);
+ }
}
// lc_scan_memory has 2 modes:
end portions of the block if they are not aligned on sizeof(Addr):
These cannot be a valid pointer, and calls to MC_(is_valid_aligned_word)
will assert for a non aligned address. */
- Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
- Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
+ Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
+ const Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
vki_sigset_t sigmask;
if (VG_DEBUG_LEAKCHECK)
// if not, skip onto the next page.
ptr = VG_PGROUNDUP(ptr+1); // First page is bad - skip it.
}
- /* This optimisation and below loop is based on some relationships between
- VKI_PAGE_SIZE, SM_SIZE and sizeof(Addr) which are asserted in
+   /* The above optimisation and the below loop are based on some
+      relationships between VKI_PAGE_SIZE, SM_SIZE and sizeof(Addr),
+      which are asserted in
MC_(detect_memory_leaks). */
+ // During scan, we check with aspacemgr that each page is readable and
+ // belongs to client.
+ // We still protect against SIGSEGV and SIGBUS e.g. in case aspacemgr is
+ // desynchronised with the real page mappings.
+ // Such a desynchronisation could happen due to an aspacemgr bug.
+ // Note that if the application is using mprotect(NONE), then
+ // a page can be unreadable but have addressable and defined
+ // VA bits (see mc_main.c function mc_new_mem_mprotect).
+ if (VG_MINIMAL_SETJMP(memscan_jmpbuf) != 0) {
+ // Catch read error ...
+ // We need to restore the signal mask, because we were
+ // longjmped out of a signal handler.
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
+ lc_sig_skipped_szB += sizeof(Addr);
+ tl_assert(bad_scanned_addr >= VG_ROUNDUP(start, sizeof(Addr)));
+ tl_assert(bad_scanned_addr < VG_ROUNDDN(start+len, sizeof(Addr)));
+      ptr = bad_scanned_addr + sizeof(Addr); // Unaddressable - skip it.
+ }
while (ptr < end) {
Addr addr;
ptr += VKI_PAGE_SIZE; // Bad page - skip it.
continue;
}
- // aspacemgr indicates the page is readable and belongs to client.
- // We still probe the page explicitely in case aspacemgr is
- // desynchronised with the real page mappings.
- // Such a desynchronisation can happen due to an aspacemgr bug.
- // Note that if the application is using mprotect(NONE), then
- // a page can be unreadable but have addressable and defined
- // VA bits (see mc_main.c function mc_new_mem_mprotect).
- if (VG_MINIMAL_SETJMP(memscan_jmpbuf) == 0) {
- // Try a read in the beginning of the page ...
- Addr test = *(volatile Addr *)ptr;
- __asm__ __volatile__("": :"r"(test) : "cc","memory");
- } else {
- // Catch read error ...
- // We need to restore the signal mask, because we were
- // longjmped out of a signal handler.
- VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
- ptr += VKI_PAGE_SIZE; // Bad page - skip it.
- continue;
- }
}
if ( MC_(is_valid_aligned_word)(ptr) ) {
lc_scanned_szB += sizeof(Addr);
+ // If the below read fails, we will longjmp to the loop begin.
addr = *(Addr *)ptr;
// If we get here, the scanned word is in valid memory. Now
// let's see if its contents point to a chunk.
ch->data, addr, pp_heuristic(h));
}
}
- // Verify the loop above has properly scanned all heuristics.
- // If the below fails, it probably means the LeakCheckHeuristic
- // enum is not in sync anymore with the above loop and/or
- // with N_LEAK_CHECK_HEURISTICS.
+ // Verify the loop above has properly scanned all
+ // heuristics. If the below fails, it probably means the
+ // LeakCheckHeuristic enum is not in sync anymore with the
+ // above loop and/or with N_LEAK_CHECK_HEURISTICS.
tl_assert (h == N_LEAK_CHECK_HEURISTICS);
}
}
tl_assert(seg_starts && n_seg_starts > 0);
lc_scanned_szB = 0;
+ lc_sig_skipped_szB = 0;
// VG_(am_show_nsegments)( 0, "leakcheck");
for (i = 0; i < n_seg_starts; i++) {
if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
+ if (lc_sig_skipped_szB > 0)
+ VG_(umsg)("Skipped %'lu bytes due to read errors\n",
+ lc_sig_skipped_szB);
VG_(umsg)( "\n" );
}
{
long pagesize;
#define RNDPAGEDOWN(a) ((long)a & ~(pagesize-1))
+ int i;
+ const int nr_ptr = (10000 * 4)/sizeof(char*);
- b10 = malloc ((10000 * 4)/sizeof(char*) * sizeof(char*));
-
+ b10 = calloc (nr_ptr * sizeof(char*), 1);
+ for (i = 0; i < nr_ptr; i++)
+ b10[i] = (char*)b10;
b10[4000] = malloc (1000);
fprintf(stderr, "expecting no leaks\n");
fflush(stderr);
VALGRIND_DO_LEAK_CHECK;
+ if (RUNNING_ON_VALGRIND)
+ (void) VALGRIND_NON_SIMD_CALL2(non_simd_mprotect,
+ RNDPAGEDOWN(&b10[0]),
+ RNDPAGEDOWN(&(b10[nr_ptr-1]))
+ - RNDPAGEDOWN(&(b10[0])));
+ else
+ mprotect_result = mprotect((void*) RNDPAGEDOWN(&b10[0]),
+ RNDPAGEDOWN(&(b10[nr_ptr-1]))
+ - RNDPAGEDOWN(&(b10[0])),
+ PROT_NONE);
+ fprintf(stderr, "full mprotect result %d\n", mprotect_result);
+
+ fprintf(stderr, "expecting a leak again after full mprotect\n");
+ fflush(stderr);
+ VALGRIND_DO_LEAK_CHECK;
+
fprintf(stderr, "finished\n");
}
expecting a leak
1,000 bytes in 1 blocks are definitely lost in loss record ... of ...
at 0x........: malloc (vg_replace_malloc.c:...)
- by 0x........: f (leak-segv-jmp.c:196)
- by 0x........: main (leak-segv-jmp.c:243)
+ by 0x........: f (leak-segv-jmp.c:199)
+ by 0x........: main (leak-segv-jmp.c:262)
LEAK SUMMARY:
definitely lost: 1,000 bytes in 1 blocks
expecting a leak again
1,000 bytes in 1 blocks are definitely lost in loss record ... of ...
at 0x........: malloc (vg_replace_malloc.c:...)
- by 0x........: f (leak-segv-jmp.c:196)
- by 0x........: main (leak-segv-jmp.c:243)
+ by 0x........: f (leak-segv-jmp.c:199)
+ by 0x........: main (leak-segv-jmp.c:262)
+
+LEAK SUMMARY:
+ definitely lost: 1,000 bytes in 1 blocks
+ indirectly lost: 0 bytes in 0 blocks
+ possibly lost: 0 bytes in 0 blocks
+ still reachable: 40,000 bytes in 1 blocks
+ suppressed: 0 bytes in 0 blocks
+Reachable blocks (those to which a pointer was found) are not shown.
+To see them, rerun with: --leak-check=full --show-leak-kinds=all
+
+full mprotect result 0
+expecting a leak again after full mprotect
+1,000 bytes in 1 blocks are definitely lost in loss record ... of ...
+ at 0x........: malloc (vg_replace_malloc.c:...)
+ by 0x........: f (leak-segv-jmp.c:199)
+ by 0x........: main (leak-segv-jmp.c:262)
LEAK SUMMARY:
definitely lost: 1,000 bytes in 1 blocks
For a detailed leak analysis, rerun with: --leak-check=full
For counts of detected and suppressed errors, rerun with: -v
-ERROR SUMMARY: 2 errors from 2 contexts (suppressed: 0 from 0)
+ERROR SUMMARY: 3 errors from 3 contexts (suppressed: 0 from 0)