/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
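/* Rough orientation only (the authoritative values live in malloc.c):
   with the DEFAULT_MMAP_THRESHOLD_MAX defaults at the time of this
   copy (512 KiB on 32-bit targets, 4 * 1024 * 1024 * sizeof (long) on
   64-bit targets), HEAP_MAX_SIZE works out to 1 MiB and 64 MiB
   respectively.  */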

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
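/* Worked example of the pad computation above, for illustration only
   (assumes a hypothetical LP64 configuration where pointers and
   INTERNAL_SIZE_T are both 8 bytes, so SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16): the four members before pad occupy 32
   bytes, 32 + 2 * SIZE_SZ == 48 is already a multiple of 16, and the
   pad array gets size -48 & 15 == 0.  In general, the expression
   -6 * SIZE_SZ & MALLOC_ALIGN_MASK yields exactly the number of bytes
   needed to make sizeof (heap_info) + 2 * SIZE_SZ a multiple of
   MALLOC_ALIGNMENT, which the extern declaration above checks at
   compile time.  */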

/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

static mutex_t free_list_lock = _LIBC_LOCK_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
static mutex_t list_lock = _LIBC_LOCK_INITIALIZER;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr && !arena_is_corrupt (ptr)) \
        (void) mutex_lock (&ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
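/* A rough sketch of how a caller in malloc.c is expected to use these
   macros, for illustration only (the authoritative callers are
   __libc_malloc and friends; details may differ):

     arena_get (ar_ptr, bytes);
     victim = _int_malloc (ar_ptr, bytes);
     if (victim == NULL && ar_ptr != NULL)
       {
         ar_ptr = arena_get_retry (ar_ptr, bytes);
         victim = _int_malloc (ar_ptr, bytes);
       }
     if (ar_ptr != NULL)
       (void) mutex_unlock (&ar_ptr->mutex);
*/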

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
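/* Worked example, with illustrative values only: every non-main heap
   is mmap()ed at an address that is a multiple of HEAP_MAX_SIZE (see
   new_heap below), so clearing the low log2 (HEAP_MAX_SIZE) bits of
   any chunk address inside it recovers the heap_info header at the
   start of the heap.  With a hypothetical HEAP_MAX_SIZE of 64 MiB, a
   chunk at 0x7f12345a1010 maps to the heap at 0x7f1234000000.  */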

/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
internal_function
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  (void) mutex_lock (&list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      (void) mutex_lock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}

void
internal_function
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  (void) mutex_unlock (&list_lock);
}

void
internal_function
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  mutex_init (&free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      mutex_init (&ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  mutex_init (&list_lock);
}

/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}

#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
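/* Usage illustration (values are arbitrary examples, not
   recommendations): the loop above recognizes environment variables
   whose names start with "MALLOC_", e.g.

     MALLOC_ARENA_MAX=2 MALLOC_MMAP_THRESHOLD_=131072 ./a.out
     MALLOC_CHECK_=3 ./a.out

   As the __libc_enable_secure tests above show, the size-tuning
   settings are ignored for secure (e.g. setuid) programs.  */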

/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
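/* Why mapping HEAP_MAX_SIZE << 1 bytes guarantees an aligned heap
   (illustrative reasoning, not additional code): any span of
   2 * HEAP_MAX_SIZE bytes returned by mmap contains at least one
   address that is a multiple of HEAP_MAX_SIZE and is followed by
   HEAP_MAX_SIZE usable bytes.  new_heap below unmaps the leading and
   trailing excess; when the mapping happens to be aligned already, it
   remembers the address of the second aligned half in
   aligned_heap_area as a hint for the next allocation.  */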

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL; \
      __munmap ((char *) (heap), HEAP_MAX_SIZE); \
    } while (0)

static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  mutex_init (&a->mutex);

  (void) mutex_lock (&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  (void) mutex_unlock (&list_lock);

  (void) mutex_lock (&free_list_lock);
  detach_arena (replaced_arena);
  (void) mutex_unlock (&free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  (void) mutex_lock (&a->mutex);

  return a;
}


/* Remove an arena from free_list.  The arena may be in use because it
   was attached concurrently to a thread by reused_arena below.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          ++result->attached_threads;

          detach_arena (replaced_arena);
        }
      (void) mutex_unlock (&free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void) mutex_lock (&result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        break;
    }

  /* We could not find any arena that was neither corrupted nor the
     one we wanted to avoid.  */
  if (result == begin || result == avoid_arena)
    return NULL;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void) mutex_lock (&result->mutex);

out:
  /* Attach the arena to the current thread.  Note that we may have
     selected an arena which was on free_list.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    (void) mutex_lock (&free_list_lock);
    detach_arena (replaced_arena);
    ++result->attached_threads;
    (void) mutex_unlock (&free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
425ce2ed 802
fa8d436c
UD
803static mstate
804internal_function
92a9b22d 805arena_get2 (size_t size, mstate avoid_arena)
fa8d436c
UD
806{
807 mstate a;
fa8d436c 808
77cdc054
AS
809 static size_t narenas_limit;
810
811 a = get_free_list ();
812 if (a == NULL)
813 {
814 /* Nothing immediately available, so generate a new arena. */
815 if (narenas_limit == 0)
6c8dbf00
OB
816 {
817 if (mp_.arena_max != 0)
818 narenas_limit = mp_.arena_max;
819 else if (narenas > mp_.arena_test)
820 {
821 int n = __get_nprocs ();
822
823 if (n >= 1)
824 narenas_limit = NARENAS_FROM_NCORES (n);
825 else
826 /* We have no information about the system. Assume two
827 cores. */
828 narenas_limit = NARENAS_FROM_NCORES (2);
829 }
830 }
77cdc054
AS
831 repeat:;
832 size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set, the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger than or equal to
         arena_test, narenas_limit is 0.  There is no possibility for
         narenas to be too big for the test to always fail, since there
         is not enough address space to create that many arenas.  */
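      /* Illustration of the underflow trick described above (no
         additional logic): while narenas_limit is still 0,
         narenas_limit - 1 wraps around to SIZE_MAX, so the test below
         always succeeds and a new arena is created; once narenas_limit
         holds a real limit, the test is the usual n < narenas_limit
         check.  */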
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;

      ar_ptr = &main_arena;
      (void) mutex_lock (&ar_ptr->mutex);
    }
  else
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      (void) mutex_lock (&free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      (void) mutex_unlock (&free_list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */