1 diff --git a/malloc/Makefile b/malloc/Makefile
2 index e7ec1ab..5330a3b 100644
6 dist-headers := malloc.h
7 headers := $(dist-headers) obstack.h mcheck.h
8 tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
9 - tst-mallocstate tst-mcheck tst-mallocfork tst-trim1
10 + tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 \
11 + tst-malloc-backtrace
12 test-srcs = tst-mtrace
14 distribute = thread-m.h mtrace.pl mcheck-init.c stackinfo.h memusage.h \
15 @@ -49,6 +50,9 @@ extra-libs-others = $(extra-libs)
16 libmemusage-routines = memusage
17 libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes))
19 +$(objpfx)tst-malloc-backtrace: $(common-objpfx)nptl/libpthread.so \
20 + $(common-objpfx)nptl/libpthread_nonshared.a
22 # These should be removed by `make clean'.
23 extra-objs = mcheck-init.o libmcheck.a
25 diff --git a/malloc/arena.c b/malloc/arena.c
26 index 18bea2b..5180516 100644
29 @@ -123,7 +123,7 @@ int __malloc_initialized = -1;
32 #define arena_lock(ptr, size) do { \
34 + if(ptr && !arena_is_corrupt (ptr)) \
35 (void)mutex_lock(&ptr->mutex); \
37 ptr = arena_get2(ptr, (size), false); \
38 @@ -1011,7 +1011,21 @@ reused_arena (bool retrying)
39 if (retrying && result == &main_arena)
40 result = result->next;
42 - /* No arena available. Wait for the next in line. */
43 + /* Make sure that the arena we get is not corrupted. */
44 + mstate begin = result;
45 + while (arena_is_corrupt (result))
47 + result = result->next;
48 + if (result == begin)
52 + /* We could not find any arena that was either not corrupted or not the one
53 + we wanted to avoid. */
54 + if (result == begin)
57 + /* No arena available without contention. Wait for the next in line. */
58 (void)mutex_lock(&result->mutex);
61 diff --git a/malloc/hooks.c b/malloc/hooks.c
62 index cc83d21..38d2542 100644
65 @@ -220,7 +220,8 @@ top_check()
68 mutex_unlock(&main_arena);
69 - malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
70 + malloc_printerr (check_action, "malloc: top chunk is corrupt", t,
71 + &main_arena);
72 mutex_lock(&main_arena);
74 /* Try to set up a new top chunk. */
75 @@ -283,7 +284,7 @@ free_check(mem, caller) Void_t* mem; const Void_t *caller;
77 (void)mutex_unlock(&main_arena.mutex);
79 - malloc_printerr(check_action, "free(): invalid pointer", mem);
80 + malloc_printerr(check_action, "free(): invalid pointer", mem, &main_arena);
84 @@ -329,7 +330,8 @@ realloc_check(oldmem, bytes, caller)
85 const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
86 (void)mutex_unlock(&main_arena.mutex);
88 - malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
89 + malloc_printerr(check_action, "realloc(): invalid pointer", oldmem,
90 + &main_arena);
91 return malloc_check(bytes, NULL);
93 const INTERNAL_SIZE_T oldsize = chunksize(oldp);
94 diff --git a/malloc/malloc.c b/malloc/malloc.c
95 index 597c7b0..20ac534 100644
98 @@ -1633,7 +1633,7 @@ static size_t mUSABLe(Void_t*);
99 static void mSTATs(void);
100 static int mALLOPt(int, int);
101 static struct mallinfo mALLINFo(mstate);
102 -static void malloc_printerr(int action, const char *str, void *ptr);
103 +static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
105 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
106 static int internal_function top_check(void);
107 @@ -2114,7 +2114,8 @@ typedef struct malloc_chunk* mbinptr;
109 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) { \
110 mutex_unlock(&(AV)->mutex); \
111 - malloc_printerr (check_action, "corrupted double-linked list", P); \
112 + malloc_printerr (check_action, "corrupted double-linked list", P, \
113 + AV); \
114 mutex_lock(&(AV)->mutex); \
117 @@ -2344,6 +2345,15 @@ typedef struct malloc_chunk* mfastbinptr;
118 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
119 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
121 +/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
122 + arena. Such an arena is no longer used to allocate chunks. Chunks
123 + allocated in that arena before detecting corruption are not freed. */
125 +#define ARENA_CORRUPTION_BIT (4U)
127 +#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))
128 +#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)
131 Set value of max_fast.
132 Use impossibly small value if 0.
133 @@ -3002,8 +3012,9 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
134 rather than expanding top.
137 - if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
138 - (mp_.n_mmaps < mp_.n_mmaps_max)) {
140 + || ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
141 + (mp_.n_mmaps < mp_.n_mmaps_max))) {
143 char* mm; /* return value from mmap call*/
145 @@ -3079,6 +3090,10 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
149 + /* There are no usable arenas and mmap also failed. */
153 /* Record incoming configuration of top */
156 @@ -3260,7 +3275,7 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
157 else if (contiguous(av) && old_size && brk < old_end) {
158 /* Oops! Someone else killed our space.. Can't touch anything. */
159 mutex_unlock(&av->mutex);
160 - malloc_printerr (3, "break adjusted to free malloc space", brk);
161 + malloc_printerr (3, "break adjusted to free malloc space", brk, av);
162 mutex_lock(&av->mutex);
165 @@ -3542,7 +3557,7 @@ munmap_chunk(p) mchunkptr p;
166 if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
168 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
169 - chunk2mem (p));
170 + chunk2mem (p), NULL);
174 @@ -3625,65 +3640,31 @@ public_mALLOc(size_t bytes)
175 if (__builtin_expect (hook != NULL, 0))
176 return (*hook)(bytes, RETURN_ADDRESS (0));
178 - arena_lookup(ar_ptr);
180 - // XXX We need double-word CAS and fastbins must be extended to also
181 - // XXX hold a generation counter for each entry.
183 - INTERNAL_SIZE_T nb; /* normalized request size */
184 - checked_request2size(bytes, nb);
185 - if (nb <= get_max_fast ()) {
186 - long int idx = fastbin_index(nb);
187 - mfastbinptr* fb = &fastbin (ar_ptr, idx);
188 - mchunkptr pp = *fb;
196 - while ((pp = catomic_compare_and_exchange_val_acq (fb, v->fd, v)) != v);
198 - if (__builtin_expect (fastbin_index (chunksize (v)) != idx, 0))
199 - malloc_printerr (check_action, "malloc(): memory corruption (fast)",
201 - check_remalloced_chunk(ar_ptr, v, nb);
202 - void *p = chunk2mem(v);
203 - if (__builtin_expect (perturb_byte, 0))
204 - alloc_perturb (p, bytes);
210 + arena_get(ar_ptr, bytes);
212 - arena_lock(ar_ptr, bytes);
215 victim = _int_malloc(ar_ptr, bytes);
217 + if(!victim && ar_ptr != NULL) {
218 /* Maybe the failure is due to running out of mmapped areas. */
219 if(ar_ptr != &main_arena) {
220 (void)mutex_unlock(&ar_ptr->mutex);
221 ar_ptr = &main_arena;
222 (void)mutex_lock(&ar_ptr->mutex);
223 victim = _int_malloc(ar_ptr, bytes);
224 - (void)mutex_unlock(&ar_ptr->mutex);
227 /* ... or sbrk() has failed and there is still a chance to mmap() */
228 mstate prev = ar_ptr->next ? ar_ptr : 0;
229 (void)mutex_unlock(&ar_ptr->mutex);
230 ar_ptr = arena_get2(prev, bytes, true);
233 victim = _int_malloc(ar_ptr, bytes);
234 - (void)mutex_unlock(&ar_ptr->mutex);
241 + if (ar_ptr != NULL)
242 (void)mutex_unlock(&ar_ptr->mutex);
244 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
245 ar_ptr == arena_for_chunk(mem2chunk(victim)));
247 @@ -3773,6 +3754,11 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
249 const INTERNAL_SIZE_T oldsize = chunksize(oldp);
251 + if (chunk_is_mmapped (oldp))
254 + ar_ptr = arena_for_chunk (oldp);
256 /* Little security check which won't hurt performance: the
257 allocator never wrapps around at the end of the address space.
258 Therefore we can exclude some size values which might appear
259 @@ -3780,7 +3766,8 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
260 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
261 || __builtin_expect (misaligned_chunk (oldp), 0))
263 - malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
264 + malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
265 + ar_ptr);
269 @@ -3806,7 +3793,6 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
273 - ar_ptr = arena_for_chunk(oldp);
275 if(!mutex_trylock(&ar_ptr->mutex))
276 ++(ar_ptr->stat_lock_direct);
277 @@ -3887,31 +3873,29 @@ public_mEMALIGn(size_t alignment, size_t bytes)
280 arena_get(ar_ptr, bytes + alignment + MINSIZE);
283 p = _int_memalign(ar_ptr, alignment, bytes);
285 + if(!p && ar_ptr != NULL) {
286 /* Maybe the failure is due to running out of mmapped areas. */
287 if(ar_ptr != &main_arena) {
288 (void)mutex_unlock(&ar_ptr->mutex);
289 ar_ptr = &main_arena;
290 (void)mutex_lock(&ar_ptr->mutex);
291 p = _int_memalign(ar_ptr, alignment, bytes);
292 - (void)mutex_unlock(&ar_ptr->mutex);
295 /* ... or sbrk() has failed and there is still a chance to mmap() */
296 mstate prev = ar_ptr->next ? ar_ptr : 0;
297 (void)mutex_unlock(&ar_ptr->mutex);
298 ar_ptr = arena_get2(prev, bytes, true);
301 p = _int_memalign(ar_ptr, alignment, bytes);
302 - (void)mutex_unlock(&ar_ptr->mutex);
309 + if (ar_ptr != NULL)
310 (void)mutex_unlock(&ar_ptr->mutex);
312 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
313 ar_ptr == arena_for_chunk(mem2chunk(p)));
315 @@ -3945,31 +3929,29 @@ public_vALLOc(size_t bytes)
316 return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
318 arena_get(ar_ptr, bytes + pagesz + MINSIZE);
321 p = _int_valloc(ar_ptr, bytes);
323 + if(!p && ar_ptr != NULL) {
324 /* Maybe the failure is due to running out of mmapped areas. */
325 if(ar_ptr != &main_arena) {
326 (void)mutex_unlock(&ar_ptr->mutex);
327 ar_ptr = &main_arena;
328 (void)mutex_lock(&ar_ptr->mutex);
329 p = _int_memalign(ar_ptr, pagesz, bytes);
330 - (void)mutex_unlock(&ar_ptr->mutex);
333 /* ... or sbrk() has failed and there is still a chance to mmap() */
334 mstate prev = ar_ptr->next ? ar_ptr : 0;
335 (void)mutex_unlock(&ar_ptr->mutex);
336 ar_ptr = arena_get2(prev, bytes, true);
339 p = _int_memalign(ar_ptr, pagesz, bytes);
340 - (void)mutex_unlock(&ar_ptr->mutex);
347 + if (ar_ptr != NULL)
348 (void)mutex_unlock(&ar_ptr->mutex);
350 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
351 ar_ptr == arena_for_chunk(mem2chunk(p)));
353 @@ -4004,28 +3986,28 @@ public_pVALLOc(size_t bytes)
355 arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
356 p = _int_pvalloc(ar_ptr, bytes);
358 + if(!p && ar_ptr != NULL) {
359 /* Maybe the failure is due to running out of mmapped areas. */
360 if(ar_ptr != &main_arena) {
361 (void)mutex_unlock(&ar_ptr->mutex);
362 ar_ptr = &main_arena;
363 (void)mutex_lock(&ar_ptr->mutex);
364 p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
365 - (void)mutex_unlock(&ar_ptr->mutex);
368 /* ... or sbrk() has failed and there is still a chance to mmap() */
369 mstate prev = ar_ptr->next ? ar_ptr : 0;
370 (void)mutex_unlock(&ar_ptr->mutex);
371 ar_ptr = arena_get2(prev, bytes + 2*pagesz + MINSIZE, true);
374 p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
375 - (void)mutex_unlock(&ar_ptr->mutex);
382 + if (ar_ptr != NULL)
383 (void)mutex_unlock(&ar_ptr->mutex);
385 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
386 ar_ptr == arena_for_chunk(mem2chunk(p)));
388 @@ -4072,55 +4054,65 @@ public_cALLOc(size_t n, size_t elem_size)
395 - /* Check if we hand out the top chunk, in which case there may be no
399 + /* Check if we hand out the top chunk, in which case there may be no
403 - oldtopsize = chunksize(top(av));
405 + oldtopsize = chunksize(top(av));
406 #if MORECORE_CLEARS < 2
407 - /* Only newly allocated memory is guaranteed to be cleared. */
408 - if (av == &main_arena &&
409 - oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
410 - oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
411 + /* Only newly allocated memory is guaranteed to be cleared. */
412 + if (av == &main_arena &&
413 + oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
414 + oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
416 - if (av != &main_arena)
417 + if (av != &main_arena)
419 + heap_info *heap = heap_for_ptr (oldtop);
420 + if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
421 + oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
427 - heap_info *heap = heap_for_ptr (oldtop);
428 - if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
429 - oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
430 + /* No usable arenas. */
435 mem = _int_malloc(av, sz);
438 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
439 av == arena_for_chunk(mem2chunk(mem)));
442 + if (mem == 0 && av != NULL) {
443 /* Maybe the failure is due to running out of mmapped areas. */
444 if(av != &main_arena) {
445 (void)mutex_unlock(&av->mutex);
446 (void)mutex_lock(&main_arena.mutex);
447 mem = _int_malloc(&main_arena, sz);
448 - (void)mutex_unlock(&main_arena.mutex);
451 /* ... or sbrk() has failed and there is still a chance to mmap() */
452 mstate prev = av->next ? av : 0;
453 (void)mutex_unlock(&av->mutex);
454 av = arena_get2(prev, sz, true);
457 mem = _int_malloc(av, sz);
458 - (void)mutex_unlock(&av->mutex);
462 if (mem == 0) return 0;
467 (void)mutex_unlock(&av->mutex);
469 + /* Allocation failed even after a retry. */
475 /* Two optional cases in which clearing not necessary */
476 @@ -4175,6 +4167,8 @@ public_cALLOc(size_t n, size_t elem_size)
480 +/* XXX These functions are not patched to detect arena corruption because they
481 + are not built in glibc. */
484 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
485 @@ -4309,6 +4303,16 @@ _int_malloc(mstate av, size_t bytes)
487 checked_request2size(bytes, nb);
489 + /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
491 + if (__glibc_unlikely (av == NULL))
493 + void *p = sYSMALLOc (nb, av);
495 + alloc_perturb (p, bytes);
500 If the size qualifies as a fastbin, first check corresponding bin.
501 This code is safe to execute even if av is not yet initialized, so we
502 @@ -4337,7 +4341,7 @@ _int_malloc(mstate av, size_t bytes)
503 errstr = "malloc(): memory corruption (fast)";
505 mutex_unlock(&av->mutex);
506 - malloc_printerr (check_action, errstr, chunk2mem (victim));
507 + malloc_printerr (check_action, errstr, chunk2mem (victim), av);
508 mutex_lock(&av->mutex);
511 @@ -4429,7 +4433,7 @@ _int_malloc(mstate av, size_t bytes)
513 void *p = chunk2mem(victim);
514 mutex_unlock(&av->mutex);
515 - malloc_printerr (check_action, "malloc(): memory corruption", p);
516 + malloc_printerr (check_action, "malloc(): memory corruption", p, av);
517 mutex_lock(&av->mutex);
519 size = chunksize(victim);
520 @@ -4829,7 +4833,7 @@ _int_free(mstate av, mchunkptr p)
521 if (have_lock || locked)
522 (void)mutex_unlock(&av->mutex);
524 - malloc_printerr (check_action, errstr, chunk2mem(p));
525 + malloc_printerr (check_action, errstr, chunk2mem(p), av);
526 #ifdef ATOMIC_FASTBINS
528 mutex_lock(&av->mutex);
529 @@ -5281,7 +5285,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
530 errstr = "realloc(): invalid old size";
532 mutex_unlock(&av->mutex);
533 - malloc_printerr (check_action, errstr, chunk2mem(oldp));
534 + malloc_printerr (check_action, errstr, chunk2mem(oldp), av);
535 mutex_lock(&av->mutex);
538 @@ -5881,6 +5885,10 @@ static int mTRIm(mstate av, size_t pad)
539 static int mTRIm(av, pad) mstate av; size_t pad;
542 + /* Don't touch corrupt arenas. */
543 + if (arena_is_corrupt (av))
546 /* Ensure initialization/consolidation */
547 malloc_consolidate (av);
549 @@ -6320,8 +6328,14 @@ int mALLOPt(param_number, value) int param_number; int value;
550 extern char **__libc_argv attribute_hidden;
553 -malloc_printerr(int action, const char *str, void *ptr)
554 +malloc_printerr(int action, const char *str, void *ptr, mstate ar_ptr)
556 + /* Avoid using this arena in future. We do not attempt to synchronize this
557 + with anything else because we minimally want to ensure that __libc_message
558 + gets its resources safely without stumbling on the current corruption. */
559 + if (ar_ptr)
560 + set_arena_corrupt (ar_ptr);
562 if ((action & 5) == 5)
563 __libc_message (action & 2, "%s\n", str);
565 diff --git a/malloc/tst-malloc-backtrace.c b/malloc/tst-malloc-backtrace.c
567 index 0000000..796a42f
569 +++ b/malloc/tst-malloc-backtrace.c
571 +/* Verify that backtrace does not deadlock on itself on memory corruption.
572 + Copyright (C) 2015 Free Software Foundation, Inc.
573 + This file is part of the GNU C Library.
575 + The GNU C Library is free software; you can redistribute it and/or
576 + modify it under the terms of the GNU Lesser General Public
577 + License as published by the Free Software Foundation; either
578 + version 2.1 of the License, or (at your option) any later version.
580 + The GNU C Library is distributed in the hope that it will be useful,
581 + but WITHOUT ANY WARRANTY; without even the implied warranty of
582 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
583 + Lesser General Public License for more details.
585 + You should have received a copy of the GNU Lesser General Public
586 + License along with the GNU C Library; if not, see
587 + <http://www.gnu.org/licenses/>. */
594 +/* Wrap free with a function to prevent gcc from optimizing it out. */
596 +__attribute__((noinline))
597 +call_free (void *ptr)
600 + *(size_t *)(ptr - sizeof (size_t)) = 1;
606 + void *ptr1 = malloc (SIZE);
607 + void *ptr2 = malloc (SIZE);
609 + call_free ((void *) ptr1);
610 + ptr1 = malloc (SIZE);
612 + /* Not reached. The return statement is to put ptr2 into use so that gcc
613 + doesn't optimize out that malloc call. */
614 + return (ptr1 == ptr2);
617 +#define TEST_FUNCTION do_test ()
618 +#define EXPECTED_SIGNAL SIGABRT
620 +#include "../test-skeleton.c"