[ipfire-2.x.git] src/patches/glibc/glibc-rh1066724.patch
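This patch teaches glibc's malloc to flag an arena as corrupt when heap corruption is detected, so that the error-reporting path (which may itself allocate, e.g. via backtrace in __libc_message) does not re-enter the damaged arena: malloc_printerr gains an mstate argument and sets a new ARENA_CORRUPTION_BIT on the affected arena; arena_lock and reused_arena skip corrupt arenas; when no usable arena is left (av == NULL), _int_malloc falls back to sYSMALLOc, which then serves the request from mmap; the public entry points (malloc, realloc, memalign, valloc, pvalloc, calloc) are adjusted to tolerate a NULL arena; and mTRIm refuses to touch corrupt arenas. A new test, tst-malloc-backtrace, clobbers a chunk header after free and expects the process to die with SIGABRT rather than deadlock. This appears to be the RHEL backport tracked as Red Hat bug 1066724 of the upstream fix for that malloc/backtrace deadlock.

As a quick illustration of the mechanism, the following standalone C program is a minimal sketch and not part of the patch: the three macros are copied verbatim from the malloc.c hunk below, while struct arena_like, pick_arena and main are invented here to mimic the ring walk that the patched reused_arena performs.

/* Minimal sketch (not part of the patch): the corruption flag and the
   ring walk from the patched reused_arena.  The macros are verbatim
   from the malloc.c hunk below; everything else is illustrative.  */
#include <stdio.h>

#define ARENA_CORRUPTION_BIT (4U)
#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))
#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)

struct arena_like
{
  unsigned int flags;
  struct arena_like *next;	/* arenas are kept on a circular list */
};

/* Same shape as the loop added to reused_arena: advance past corrupt
   arenas; if the walk wraps around to the start, give up and return
   NULL so the caller falls back to mmap.  */
static struct arena_like *
pick_arena (struct arena_like *result)
{
  struct arena_like *begin = result;
  while (arena_is_corrupt (result))
    {
      result = result->next;
      if (result == begin)
        break;
    }
  if (result == begin)
    return NULL;
  return result;
}

int
main (void)
{
  struct arena_like a[3] = { { 0, &a[1] }, { 0, &a[2] }, { 0, &a[0] } };

  set_arena_corrupt (&a[0]);	/* what malloc_printerr now does */
  struct arena_like *r = pick_arena (&a[0]);
  printf ("picked arena %d\n", r ? (int) (r - a) : -1);	/* 1 */

  set_arena_corrupt (&a[1]);
  set_arena_corrupt (&a[2]);
  r = pick_arena (&a[0]);	/* every arena corrupt -> NULL */
  printf ("picked arena %d\n", r ? (int) (r - a) : -1);	/* -1 */
  return 0;
}

Built with e.g. "gcc -o sketch sketch.c", it prints "picked arena 1" and then "picked arena -1", mirroring how the patched reused_arena hands back the next healthy arena and returns NULL once every arena in the ring is corrupt.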
diff --git a/malloc/Makefile b/malloc/Makefile
index e7ec1ab..5330a3b 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -27,7 +27,8 @@ all:
 dist-headers := malloc.h
 headers := $(dist-headers) obstack.h mcheck.h
 tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
-         tst-mallocstate tst-mcheck tst-mallocfork tst-trim1
+         tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 \
+         tst-malloc-backtrace
 test-srcs = tst-mtrace

 distribute = thread-m.h mtrace.pl mcheck-init.c stackinfo.h memusage.h \
@@ -49,6 +50,9 @@ extra-libs-others = $(extra-libs)
 libmemusage-routines = memusage
 libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes))

+$(objpfx)tst-malloc-backtrace: $(common-objpfx)nptl/libpthread.so \
+                               $(common-objpfx)nptl/libpthread_nonshared.a
+
 # These should be removed by `make clean'.
 extra-objs = mcheck-init.o libmcheck.a

diff --git a/malloc/arena.c b/malloc/arena.c
index 18bea2b..5180516 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -123,7 +123,7 @@ int __malloc_initialized = -1;

 #ifdef PER_THREAD
 #define arena_lock(ptr, size) do { \
-  if(ptr) \
+  if(ptr && !arena_is_corrupt (ptr)) \
     (void)mutex_lock(&ptr->mutex); \
   else \
     ptr = arena_get2(ptr, (size), false); \
@@ -1011,7 +1011,21 @@ reused_arena (bool retrying)
   if (retrying && result == &main_arena)
     result = result->next;

-  /* No arena available.  Wait for the next in line.  */
+  /* Make sure that the arena we get is not corrupted.  */
+  mstate begin = result;
+  while (arena_is_corrupt (result))
+    {
+      result = result->next;
+      if (result == begin)
+        break;
+    }
+
+  /* We could not find any arena that was either not corrupted or not the one
+     we wanted to avoid.  */
+  if (result == begin)
+    return NULL;
+
+  /* No arena available without contention.  Wait for the next in line.  */
   (void)mutex_lock(&result->mutex);

 out:
diff --git a/malloc/hooks.c b/malloc/hooks.c
index cc83d21..38d2542 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -220,7 +220,8 @@ top_check()
     return 0;

   mutex_unlock(&main_arena);
-  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
+  malloc_printerr (check_action, "malloc: top chunk is corrupt", t,
+                   &main_arena);
   mutex_lock(&main_arena);

   /* Try to set up a new top chunk. */
@@ -283,7 +284,7 @@ free_check(mem, caller) Void_t* mem; const Void_t *caller;
   if(!p) {
     (void)mutex_unlock(&main_arena.mutex);

-    malloc_printerr(check_action, "free(): invalid pointer", mem);
+    malloc_printerr(check_action, "free(): invalid pointer", mem, &main_arena);
     return;
   }
 #if HAVE_MMAP
@@ -329,7 +330,8 @@ realloc_check(oldmem, bytes, caller)
   const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
   (void)mutex_unlock(&main_arena.mutex);
   if(!oldp) {
-    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
+    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem,
+                    &main_arena);
     return malloc_check(bytes, NULL);
   }
   const INTERNAL_SIZE_T oldsize = chunksize(oldp);
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 597c7b0..20ac534 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1633,7 +1633,7 @@ static size_t mUSABLe(Void_t*);
 static void     mSTATs(void);
 static int      mALLOPt(int, int);
 static struct mallinfo mALLINFo(mstate);
-static void malloc_printerr(int action, const char *str, void *ptr);
+static void malloc_printerr(int action, const char *str, void *ptr, mstate av);

 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
 static int internal_function top_check(void);
@@ -2114,7 +2114,8 @@ typedef struct malloc_chunk* mbinptr;
     BK = P->bk; \
     if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) { \
       mutex_unlock(&(AV)->mutex); \
-      malloc_printerr (check_action, "corrupted double-linked list", P); \
+      malloc_printerr (check_action, "corrupted double-linked list", P, \
+                       AV); \
       mutex_lock(&(AV)->mutex); \
     } else { \
       FD->bk = BK; \
@@ -2344,6 +2345,15 @@ typedef struct malloc_chunk* mfastbinptr;
 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)

+/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
+   arena.  Such an arena is no longer used to allocate chunks.  Chunks
+   allocated in that arena before detecting corruption are not freed.  */
+
+#define ARENA_CORRUPTION_BIT (4U)
+
+#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))
+#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)
+
 /*
    Set value of max_fast.
    Use impossibly small value if 0.
@@ -3002,8 +3012,9 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
     rather than expanding top.
   */

-  if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
-      (mp_.n_mmaps < mp_.n_mmaps_max)) {
+  if (av == NULL
+      || ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
+          (mp_.n_mmaps < mp_.n_mmaps_max))) {

     char* mm;             /* return value from mmap call*/

@@ -3079,6 +3090,10 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
   }
 #endif

+  /* There are no usable arenas and mmap also failed.  */
+  if (av == NULL)
+    return 0;
+
   /* Record incoming configuration of top */

   old_top = av->top;
@@ -3260,7 +3275,7 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
   else if (contiguous(av) && old_size && brk < old_end) {
     /* Oops!  Someone else killed our space..  Can't touch anything.  */
     mutex_unlock(&av->mutex);
-    malloc_printerr (3, "break adjusted to free malloc space", brk);
+    malloc_printerr (3, "break adjusted to free malloc space", brk, av);
     mutex_lock(&av->mutex);
   }

@@ -3542,7 +3557,7 @@ munmap_chunk(p) mchunkptr p;
   if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
     {
       malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
-                       chunk2mem (p));
+                       chunk2mem (p), NULL);
       return;
     }

@@ -3625,65 +3640,31 @@ public_mALLOc(size_t bytes)
   if (__builtin_expect (hook != NULL, 0))
     return (*hook)(bytes, RETURN_ADDRESS (0));

-  arena_lookup(ar_ptr);
-#if 0
-  // XXX We need double-word CAS and fastbins must be extended to also
-  // XXX hold a generation counter for each entry.
-  if (ar_ptr) {
-    INTERNAL_SIZE_T nb;               /* normalized request size */
-    checked_request2size(bytes, nb);
-    if (nb <= get_max_fast ()) {
-      long int idx = fastbin_index(nb);
-      mfastbinptr* fb = &fastbin (ar_ptr, idx);
-      mchunkptr pp = *fb;
-      mchunkptr v;
-      do
-        {
-          v = pp;
-          if (v == NULL)
-            break;
-        }
-      while ((pp = catomic_compare_and_exchange_val_acq (fb, v->fd, v)) != v);
-      if (v != 0) {
-        if (__builtin_expect (fastbin_index (chunksize (v)) != idx, 0))
-          malloc_printerr (check_action, "malloc(): memory corruption (fast)",
-                           chunk2mem (v));
-        check_remalloced_chunk(ar_ptr, v, nb);
-        void *p = chunk2mem(v);
-        if (__builtin_expect (perturb_byte, 0))
-          alloc_perturb (p, bytes);
-        return p;
-      }
-    }
-  }
-#endif
+  arena_get(ar_ptr, bytes);

-  arena_lock(ar_ptr, bytes);
-  if(!ar_ptr)
-    return 0;
   victim = _int_malloc(ar_ptr, bytes);
-  if(!victim) {
+  if(!victim && ar_ptr != NULL) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
       (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = &main_arena;
       (void)mutex_lock(&ar_ptr->mutex);
       victim = _int_malloc(ar_ptr, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       mstate prev = ar_ptr->next ? ar_ptr : 0;
       (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = arena_get2(prev, bytes, true);
-      if(ar_ptr) {
+      if(ar_ptr)
         victim = _int_malloc(ar_ptr, bytes);
-        (void)mutex_unlock(&ar_ptr->mutex);
-      }
 #endif
     }
-  } else
+  }
+
+  if (ar_ptr != NULL)
     (void)mutex_unlock(&ar_ptr->mutex);
+
   assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
          ar_ptr == arena_for_chunk(mem2chunk(victim)));
   return victim;
@@ -3773,6 +3754,11 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
   /* its size */
   const INTERNAL_SIZE_T oldsize = chunksize(oldp);

+  if (chunk_is_mmapped (oldp))
+    ar_ptr = NULL;
+  else
+    ar_ptr = arena_for_chunk (oldp);
+
   /* Little security check which won't hurt performance: the
      allocator never wrapps around at the end of the address space.
      Therefore we can exclude some size values which might appear
@@ -3780,7 +3766,8 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
   if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
       || __builtin_expect (misaligned_chunk (oldp), 0))
     {
-      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
+      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
+                       ar_ptr);
       return NULL;
     }

@@ -3806,7 +3793,6 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
   }
 #endif

-  ar_ptr = arena_for_chunk(oldp);
 #if THREAD_STATS
   if(!mutex_trylock(&ar_ptr->mutex))
     ++(ar_ptr->stat_lock_direct);
@@ -3887,31 +3873,29 @@ public_mEMALIGn(size_t alignment, size_t bytes)
   }

   arena_get(ar_ptr, bytes + alignment + MINSIZE);
-  if(!ar_ptr)
-    return 0;
   p = _int_memalign(ar_ptr, alignment, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
       (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = &main_arena;
       (void)mutex_lock(&ar_ptr->mutex);
       p = _int_memalign(ar_ptr, alignment, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       mstate prev = ar_ptr->next ? ar_ptr : 0;
       (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = arena_get2(prev, bytes, true);
-      if(ar_ptr) {
+      if(ar_ptr)
         p = _int_memalign(ar_ptr, alignment, bytes);
-        (void)mutex_unlock(&ar_ptr->mutex);
-      }
 #endif
     }
-  } else
+  }
+
+  if (ar_ptr != NULL)
     (void)mutex_unlock(&ar_ptr->mutex);
+
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
          ar_ptr == arena_for_chunk(mem2chunk(p)));
   return p;
@@ -3945,31 +3929,29 @@ public_vALLOc(size_t bytes)
     return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));

   arena_get(ar_ptr, bytes + pagesz + MINSIZE);
-  if(!ar_ptr)
-    return 0;
   p = _int_valloc(ar_ptr, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
       (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = &main_arena;
       (void)mutex_lock(&ar_ptr->mutex);
       p = _int_memalign(ar_ptr, pagesz, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       mstate prev = ar_ptr->next ? ar_ptr : 0;
       (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = arena_get2(prev, bytes, true);
-      if(ar_ptr) {
+      if(ar_ptr)
         p = _int_memalign(ar_ptr, pagesz, bytes);
-        (void)mutex_unlock(&ar_ptr->mutex);
-      }
 #endif
     }
-  } else
+  }
+
+  if (ar_ptr != NULL)
     (void)mutex_unlock(&ar_ptr->mutex);
+
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
          ar_ptr == arena_for_chunk(mem2chunk(p)));

@@ -4004,28 +3986,28 @@ public_pVALLOc(size_t bytes)

   arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
   p = _int_pvalloc(ar_ptr, bytes);
-  if(!p) {
+  if(!p && ar_ptr != NULL) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
       (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = &main_arena;
       (void)mutex_lock(&ar_ptr->mutex);
       p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       mstate prev = ar_ptr->next ? ar_ptr : 0;
       (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = arena_get2(prev, bytes + 2*pagesz + MINSIZE, true);
-      if(ar_ptr) {
+      if(ar_ptr)
         p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-        (void)mutex_unlock(&ar_ptr->mutex);
-      }
 #endif
     }
-  } else
+  }
+
+  if (ar_ptr != NULL)
     (void)mutex_unlock(&ar_ptr->mutex);
+
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
          ar_ptr == arena_for_chunk(mem2chunk(p)));

@@ -4072,55 +4054,65 @@ public_cALLOc(size_t n, size_t elem_size)
   sz = bytes;

   arena_get(av, sz);
-  if(!av)
-    return 0;

-  /* Check if we hand out the top chunk, in which case there may be no
-     need to clear. */
+  if (av)
+    {
+      /* Check if we hand out the top chunk, in which case there may be no
+         need to clear. */
 #if MORECORE_CLEARS
-  oldtop = top(av);
-  oldtopsize = chunksize(top(av));
+      oldtop = top(av);
+      oldtopsize = chunksize(top(av));
 #if MORECORE_CLEARS < 2
-  /* Only newly allocated memory is guaranteed to be cleared.  */
-  if (av == &main_arena &&
-      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
-    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
+      /* Only newly allocated memory is guaranteed to be cleared.  */
+      if (av == &main_arena &&
+          oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
+        oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
 #endif
-  if (av != &main_arena)
+      if (av != &main_arena)
+        {
+          heap_info *heap = heap_for_ptr (oldtop);
+          if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
+            oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+        }
+#endif
+    }
+  else
     {
-      heap_info *heap = heap_for_ptr (oldtop);
-      if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
-        oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+      /* No usable arenas.  */
+      oldtop = 0;
+      oldtopsize = 0;
     }
-#endif
   mem = _int_malloc(av, sz);

-
   assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
          av == arena_for_chunk(mem2chunk(mem)));

-  if (mem == 0) {
+  if (mem == 0 && av != NULL) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(av != &main_arena) {
       (void)mutex_unlock(&av->mutex);
       (void)mutex_lock(&main_arena.mutex);
       mem = _int_malloc(&main_arena, sz);
-      (void)mutex_unlock(&main_arena.mutex);
     } else {
 #if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       mstate prev = av->next ? av : 0;
       (void)mutex_unlock(&av->mutex);
       av = arena_get2(prev, sz, true);
-      if(av) {
+      if(av)
         mem = _int_malloc(av, sz);
-        (void)mutex_unlock(&av->mutex);
-      }
 #endif
     }
     if (mem == 0) return 0;
-  } else
+  }
+
+  if (av != NULL)
     (void)mutex_unlock(&av->mutex);
+
+  /* Allocation failed even after a retry.  */
+  if (mem == 0)
+    return 0;
+
   p = mem2chunk(mem);

   /* Two optional cases in which clearing not necessary */
@@ -4175,6 +4167,8 @@ public_cALLOc(size_t n, size_t elem_size)
 }

 #ifndef _LIBC
+/* XXX These functions are not patched to detect arena corruption because they
+   are not built in glibc.  */

 Void_t**
 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
@@ -4309,6 +4303,16 @@ _int_malloc(mstate av, size_t bytes)

   checked_request2size(bytes, nb);

+  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
+     mmap.  */
+  if (__glibc_unlikely (av == NULL))
+    {
+      void *p = sYSMALLOc (nb, av);
+      if (p != NULL)
+        alloc_perturb (p, bytes);
+      return p;
+    }
+
   /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
@@ -4337,7 +4341,7 @@ _int_malloc(mstate av, size_t bytes)
           errstr = "malloc(): memory corruption (fast)";
         errout:
           mutex_unlock(&av->mutex);
-          malloc_printerr (check_action, errstr, chunk2mem (victim));
+          malloc_printerr (check_action, errstr, chunk2mem (victim), av);
           mutex_lock(&av->mutex);
           return NULL;
         }
@@ -4429,7 +4433,7 @@ _int_malloc(mstate av, size_t bytes)
         {
           void *p = chunk2mem(victim);
           mutex_unlock(&av->mutex);
-          malloc_printerr (check_action, "malloc(): memory corruption", p);
+          malloc_printerr (check_action, "malloc(): memory corruption", p, av);
           mutex_lock(&av->mutex);
         }
       size = chunksize(victim);
@@ -4829,7 +4833,7 @@ _int_free(mstate av, mchunkptr p)
     if (have_lock || locked)
       (void)mutex_unlock(&av->mutex);
 #endif
-    malloc_printerr (check_action, errstr, chunk2mem(p));
+    malloc_printerr (check_action, errstr, chunk2mem(p), av);
 #ifdef ATOMIC_FASTBINS
     if (have_lock)
       mutex_lock(&av->mutex);
@@ -5281,7 +5285,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
       errstr = "realloc(): invalid old size";
     errout:
       mutex_unlock(&av->mutex);
-      malloc_printerr (check_action, errstr, chunk2mem(oldp));
+      malloc_printerr (check_action, errstr, chunk2mem(oldp), av);
       mutex_lock(&av->mutex);
       return NULL;
     }
@@ -5881,6 +5885,10 @@ static int mTRIm(mstate av, size_t pad)
 static int mTRIm(av, pad) mstate av; size_t pad;
 #endif
 {
+  /* Don't touch corrupt arenas.  */
+  if (arena_is_corrupt (av))
+    return 0;
+
   /* Ensure initialization/consolidation */
   malloc_consolidate (av);

@@ -6320,8 +6328,14 @@ int mALLOPt(param_number, value) int param_number; int value;
 extern char **__libc_argv attribute_hidden;

 static void
-malloc_printerr(int action, const char *str, void *ptr)
+malloc_printerr(int action, const char *str, void *ptr, mstate ar_ptr)
 {
+  /* Avoid using this arena in future.  We do not attempt to synchronize this
+     with anything else because we minimally want to ensure that __libc_message
+     gets its resources safely without stumbling on the current corruption.  */
+  if (ar_ptr)
+    set_arena_corrupt (ar_ptr);
+
   if ((action & 5) == 5)
     __libc_message (action & 2, "%s\n", str);
   else if (action & 1)
diff --git a/malloc/tst-malloc-backtrace.c b/malloc/tst-malloc-backtrace.c
new file mode 100644
index 0000000..796a42f
--- /dev/null
+++ b/malloc/tst-malloc-backtrace.c
@@ -0,0 +1,50 @@
+/* Verify that backtrace does not deadlock on itself on memory corruption.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+
+#include <stdlib.h>
+
+#define SIZE 4096
+
+/* Wrap free with a function to prevent gcc from optimizing it out.  */
+static void
+__attribute__((noinline))
+call_free (void *ptr)
+{
+  free (ptr);
+  *(size_t *)(ptr - sizeof (size_t)) = 1;
+}
+
+int
+do_test (void)
+{
+  void *ptr1 = malloc (SIZE);
+  void *ptr2 = malloc (SIZE);
+
+  call_free ((void *) ptr1);
+  ptr1 = malloc (SIZE);
+
+  /* Not reached.  The return statement is to put ptr2 into use so that gcc
+     doesn't optimize out that malloc call.  */
+  return (ptr1 == ptr2);
+}
+
+#define TEST_FUNCTION do_test ()
+#define EXPECTED_SIGNAL SIGABRT
+
+#include "../test-skeleton.c"
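
The test above relies on ../test-skeleton.c: because EXPECTED_SIGNAL is defined as SIGABRT, the skeleton runs the TEST_FUNCTION in a child process and counts the test as passing only if the child is killed by SIGABRT, the abort the patched malloc raises when it detects the clobbered chunk header. The following is a rough, self-contained stand-in for that mechanism, assuming only POSIX fork/waitpid; the real skeleton additionally handles timeouts, temporary files, and command-line options, and the do_test body here is a stub that simply aborts.

/* Sketch of how test-skeleton.c evaluates EXPECTED_SIGNAL.  Illustrative
   only: do_test is a stub standing in for the real test above.  */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int
do_test (void)
{
  abort ();			/* stands in for the corruption-triggered abort */
  return 1;			/* not reached */
}

int
main (void)
{
  pid_t pid = fork ();
  if (pid == -1)
    return 1;
  if (pid == 0)
    _exit (do_test ());		/* child: run the test body */

  int status;
  if (waitpid (pid, &status, 0) != pid)
    return 1;

  /* Pass only if the child died with the expected signal.  */
  if (WIFSIGNALED (status) && WTERMSIG (status) == SIGABRT)
    {
      puts ("PASS: child terminated with SIGABRT as expected");
      return 0;
    }
  puts ("FAIL");
  return 1;
}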