/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#include <stdbool.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE malloc
#endif
#include <elf/dl-tunables.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];

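/* A worked example of the padding arithmetic above (illustrative
   numbers, assuming a 64-bit target with SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16): the four members before the pad occupy 32
   bytes, the pad is sized (-6 * 8) & 15 == 0, and (32 + 2 * 8) % 16
   == 0, so the check passes.  The extern above is the classic
   negative-array-size compile-time assertion; a minimal sketch of the
   same idiom, with a hypothetical name:

     #define COMPILE_TIME_ASSERT(name, expr) extern int name[(expr) ? 1 : -1]
     COMPILE_TIME_ASSERT (size_t_is_pointer_sized,
                          sizeof (size_t) == sizeof (void *));

   A false expression declares an array of negative size, which any C
   compiler rejects.  */
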
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
static size_t narenas = 1;
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr) \
        __libc_lock_lock (ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
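
/* A hedged sketch of how the allocator entry points use arena_get
   (simplified from the callers in malloc.c; error handling and the
   retry path via arena_get_retry are omitted):

     mstate ar_ptr;
     void *victim;

     arena_get (ar_ptr, bytes);             // pick and lock an arena
     victim = _int_malloc (ar_ptr, bytes);  // allocate from it
     if (ar_ptr != NULL)
       __libc_lock_unlock (ar_ptr->mutex);  // release the arena lock
*/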

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
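
/* Worked example (illustrative addresses; assumes the common 64-bit
   default HEAP_MAX_SIZE of 64 MiB == 0x4000000): a non-main-arena
   chunk at 0x7f6524567890 lies in the heap mapped at

     0x7f6524567890 & ~(0x4000000 - 1) == 0x7f6524000000

   i.e. masking off the low 26 bits recovers the heap_info header.
   This is why HEAP_MAX_SIZE must be a power of two and every heap
   must start at a HEAP_MAX_SIZE-aligned address.  */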

/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}

void
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}

void
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}

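/* A hedged sketch of the call protocol the three handlers above
   implement; the real call sites live in glibc's fork implementation,
   not here, and `sys_fork' is a hypothetical stand-in for the actual
   low-level fork/clone primitive.  */
#if 0
  __malloc_fork_lock_parent ();
  pid_t pid = sys_fork ();               /* hypothetical primitive */
  if (pid == 0)
    __malloc_fork_unlock_child ();       /* child: reinit locks, rebuild free_list */
  else
    __malloc_fork_unlock_parent ();      /* parent: just drop the locks */
#endif
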
#if HAVE_TUNABLES
void
TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
{
  int32_t value = (int32_t) valp->numval;
  if (value != 0)
    __malloc_check_init ();
}

# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
  __type value = (__type) (valp)->numval; \
  do_ ## __name (value); \
}

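/* For reference, the first instantiation below expands (mechanically,
   modulo the exact symbol produced by TUNABLE_CALLBACK) to:

     static inline int do_set_mmap_threshold (size_t value);
     void
     TUNABLE_CALLBACK (set_mmap_threshold) (tunable_val_t *valp)
     {
       size_t value = (size_t) (valp)->numval;
       do_set_mmap_threshold (value);
     }

   The do_* worker functions themselves are defined elsewhere (in
   malloc.c in this tree).  */
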
TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
#if USE_TCACHE
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
#else
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif

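/* A hedged sketch of how next_env_entry is consumed; this mirrors the
   loop in ptmalloc_init below.  On each hit, envline points just past
   the "MALLOC_" prefix, e.g. "TOP_PAD_=4096".  */
#if 0
  char **runp = _environ;
  char *envline;

  while ((envline = next_env_entry (&runp)) != NULL)
    {
      /* Reject entries with no '=' before acting on them, as
         ptmalloc_init does below.  */
    }
#endif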

#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;

  malloc_init_state (&main_arena);

#if HAVE_TUNABLES
  TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
# if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
# endif
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
#else
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0] != '\0' && s[0] != '0')
    __malloc_check_init ();
#endif

#if HAVE_MALLOC_INIT_HOOK
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}

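/* Usage note (illustrative): with HAVE_TUNABLES, the knobs read above
   come from the GLIBC_TUNABLES environment variable, colon-separated,
   e.g.

     GLIBC_TUNABLES=glibc.malloc.mmap_threshold=65536:glibc.malloc.arena_max=2

   while the legacy branch reacts to MALLOC_MMAP_THRESHOLD_,
   MALLOC_ARENA_MAX and friends.  Both paths end up in the same
   mallopt-style setters.  */
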
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) chunksize_nomask (p));
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (chunksize_nomask (p) == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

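/* A minimal standalone sketch of the reserve-then-trim alignment
   technique new_heap uses: map PROT_NONE twice the alignment, unmap
   the slack, and commit pages with mprotect only as needed.  Assumes
   POSIX mmap/munmap/mprotect; ALIGNMENT stands in for HEAP_MAX_SIZE.  */
#if 0
#include <stdint.h>
#include <sys/mman.h>

#define ALIGNMENT (1UL << 26)   /* power of two, like HEAP_MAX_SIZE */

static void *
reserve_aligned (size_t commit)
{
  char *p1 = mmap (NULL, ALIGNMENT << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;
  /* Round up to the next ALIGNMENT boundary inside the reservation.  */
  char *p2 = (char *) (((uintptr_t) p1 + (ALIGNMENT - 1)) & ~(ALIGNMENT - 1));
  if (p2 != p1)
    munmap (p1, p2 - p1);                           /* leading slack */
  munmap (p2 + ALIGNMENT, ALIGNMENT - (p2 - p1));   /* trailing slack */
  /* Commit only what is needed now; growth happens later, as in
     grow_heap below.  */
  if (mprotect (p2, commit, PROT_READ | PROT_WRITE) != 0)
    {
      munmap (p2, ALIGNMENT);
      return NULL;
    }
  return p2;
}
#endif
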
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap. */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL; \
      __munmap ((char *) (heap), HEAP_MAX_SIZE); \
    } while (0)

static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size". */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}

/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

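/* Note the double-checked pattern above: free_list is tested once
   without the lock (a cheap fast path for the common empty case) and
   re-read under free_list_lock before being modified.  A condensed
   sketch of the shape:

     if (free_list != NULL)            // unlocked hint, may be stale
       {
         lock (free_list_lock);
         result = free_list;           // authoritative re-read
         if (result != NULL)
           free_list = result->next_free;
         unlock (free_list_lock);
       }
*/
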
/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

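/* Worked example of the (size_t)0 - 1 trick above: while narenas_limit
   is still 0 (arena_max unset and narenas not yet past arena_test),
   narenas_limit - 1 wraps to SIZE_MAX, so "n <= narenas_limit - 1" is
   always true and callers create fresh arenas via _int_new_arena.
   Once narenas_limit has been computed as some positive N, the test
   becomes n <= N - 1 and later callers fall through to reused_arena.  */
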
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */