1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 2001-2021 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; see the file COPYING.LIB. If
18 not, see <https://www.gnu.org/licenses/>. */
19
20 #include <stdbool.h>
21
22 #if HAVE_TUNABLES
23 # define TUNABLE_NAMESPACE malloc
24 #endif
25 #include <elf/dl-tunables.h>
26
27 /* Compile-time constants. */
28
29 #define HEAP_MIN_SIZE (32 * 1024)
30 #ifndef HEAP_MAX_SIZE
31 # ifdef DEFAULT_MMAP_THRESHOLD_MAX
32 # define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
33 # else
34 # define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
35 # endif
36 #endif
37
38 /* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
39 that are dynamically created for multi-threaded programs. The
40 maximum size must be a power of two, for fast determination of
41      which heap a chunk belongs to.  It should be much larger than the
42 mmap threshold, so that requests with a size just below that
43 threshold can be fulfilled without creating too many heaps. */
44
45 /***************************************************************************/
46
47 #define top(ar_ptr) ((ar_ptr)->top)
48
49 /* A heap is a single contiguous memory region holding (coalesceable)
50 malloc_chunks. It is allocated with mmap() and always starts at an
51 address aligned to HEAP_MAX_SIZE. */
52
53 typedef struct _heap_info
54 {
55 mstate ar_ptr; /* Arena for this heap. */
56 struct _heap_info *prev; /* Previous heap. */
57 size_t size; /* Current size in bytes. */
58 size_t mprotect_size; /* Size in bytes that has been mprotected
59 PROT_READ|PROT_WRITE. */
60 /* Make sure the following data is properly aligned, particularly
61 that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
62 MALLOC_ALIGNMENT. */
63 char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
64 } heap_info;
65
66 /* Get a compile-time error if the heap_info padding is not correct
67 to make alignment work as expected in sYSMALLOc. */
68 extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
69 + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
70 ? -1 : 1];
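
/* Editor's illustration (not part of upstream arena.c): the pad formula
   above works because, on typical configurations where pointers and
   INTERNAL_SIZE_T have the same width, heap_info has four SIZE_SZ-sized
   members; 6 * SIZE_SZ therefore covers those fields plus the 2 * SIZE_SZ
   chunk header that immediately follows the struct, and negating then
   masking rounds that total up to the next multiple of MALLOC_ALIGNMENT.
   A minimal sketch with hypothetical values SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 32 (chosen so the pad is non-zero):  */
#if 0 /* illustrative sketch only, not compiled */
#include <stddef.h>

enum { EX_SIZE_SZ = 8, EX_MALLOC_ALIGNMENT = 32 };

struct ex_heap_info
{
  void *ar_ptr;
  struct ex_heap_info *prev;
  size_t size;
  size_t mprotect_size;
  /* -6 * 8 & 31 == 16 bytes of padding.  */
  char pad[-6 * EX_SIZE_SZ & (EX_MALLOC_ALIGNMENT - 1)];
};

/* sizeof == 32 + 16 == 48, and 48 + 2 * 8 == 64 is a multiple of 32,
   so the first chunk placed after the header is properly aligned.  */
_Static_assert ((sizeof (struct ex_heap_info) + 2 * EX_SIZE_SZ)
                % EX_MALLOC_ALIGNMENT == 0,
                "pad keeps the following chunk MALLOC_ALIGNMENT-aligned");
#endif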
71
72 /* Thread specific data. */
73
74 static __thread mstate thread_arena attribute_tls_model_ie;
75
76 /* Arena free list. free_list_lock synchronizes access to the
77 free_list variable below, and the next_free and attached_threads
78 members of struct malloc_state objects. No other locks must be
79 acquired after free_list_lock has been acquired. */
80
81 __libc_lock_define_initialized (static, free_list_lock);
82 static size_t narenas = 1;
83 static mstate free_list;
84
85 /* list_lock prevents concurrent writes to the next member of struct
86 malloc_state objects.
87
88 Read access to the next member is supposed to synchronize with the
89 atomic_write_barrier and the write to the next member in
90 _int_new_arena. This suffers from data races; see the FIXME
91 comments in _int_new_arena and reused_arena.
92
93 list_lock also prevents concurrent forks. At the time list_lock is
94 acquired, no arena lock must have been acquired, but it is
95 permitted to acquire arena locks subsequently, while list_lock is
96 acquired. */
97 __libc_lock_define_initialized (static, list_lock);
98
99 /* Already initialized? */
100 int __malloc_initialized = -1;
101
102 /**************************************************************************/
103
104
105 /* arena_get() acquires an arena and locks the corresponding mutex.
106 First, try the one last locked successfully by this thread. (This
107 is the common case and handled with a macro for speed.) Then, loop
108 once over the circularly linked list of arenas. If no arena is
109 readily available, create a new one. In this latter case, `size'
110 is just a hint as to how much memory will be required immediately
111 in the new arena. */
112
113 #define arena_get(ptr, size) do { \
114 ptr = thread_arena; \
115 arena_lock (ptr, size); \
116 } while (0)
117
118 #define arena_lock(ptr, size) do { \
119 if (ptr) \
120 __libc_lock_lock (ptr->mutex); \
121 else \
122 ptr = arena_get2 ((size), NULL); \
123 } while (0)
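
/* Editor's illustration (not upstream code): a condensed sketch, modeled
   on __libc_malloc in malloc.c, of how these macros are typically used --
   lock the thread's arena (or create one), try the allocation, and fall
   back through arena_get_retry (defined later in this file) if it fails.
   example_malloc is a hypothetical name.  */
#if 0 /* sketch only, not compiled */
static void *
example_malloc (size_t bytes)
{
  mstate ar_ptr;
  void *victim;

  arena_get (ar_ptr, bytes);          /* pick and lock an arena */
  victim = _int_malloc (ar_ptr, bytes);
  if (victim == NULL && ar_ptr != NULL)
    {
      /* Retry on another arena; see arena_get_retry below.  */
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }
  if (ar_ptr != NULL)
    __libc_lock_unlock (ar_ptr->mutex);
  return victim;
}
#endif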
124
125 /* find the heap and corresponding arena for a given ptr */
126
127 #define heap_for_ptr(ptr) \
128 ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
129 #define arena_for_chunk(ptr) \
130 (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
131
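/* Editor's illustration: heap_for_ptr works because every non-main heap
   is mmapped at an address aligned to HEAP_MAX_SIZE (see new_heap below)
   and never exceeds HEAP_MAX_SIZE bytes, so clearing the low bits of any
   chunk address inside the heap lands exactly on the heap_info header.
   Worked example with a hypothetical HEAP_MAX_SIZE of 0x4000000 (64 MiB):

     heap mapped at               0x7f3a4c000000   (multiple of 0x4000000)
     chunk inside it at           0x7f3a4d2a3f40
     0x7f3a4d2a3f40 & ~0x3ffffff == 0x7f3a4c000000  -> the heap_info.

   Main-arena chunks come from sbrk/mmap outside any such heap, which is
   why arena_for_chunk checks chunk_main_arena first.  */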
132
133 /**************************************************************************/
134
135 /* atfork support. */
136
137 /* The following three functions are called around fork from a
138 multi-threaded process. We do not use the general fork handler
139 mechanism to make sure that our handlers are the last ones being
140 called, so that other fork handlers can use the malloc
141 subsystem. */
142
143 void
144 __malloc_fork_lock_parent (void)
145 {
146 if (__malloc_initialized < 1)
147 return;
148
149 /* We do not acquire free_list_lock here because we completely
150 reconstruct free_list in __malloc_fork_unlock_child. */
151
152 __libc_lock_lock (list_lock);
153
154 for (mstate ar_ptr = &main_arena;; )
155 {
156 __libc_lock_lock (ar_ptr->mutex);
157 ar_ptr = ar_ptr->next;
158 if (ar_ptr == &main_arena)
159 break;
160 }
161 }
162
163 void
164 __malloc_fork_unlock_parent (void)
165 {
166 if (__malloc_initialized < 1)
167 return;
168
169 for (mstate ar_ptr = &main_arena;; )
170 {
171 __libc_lock_unlock (ar_ptr->mutex);
172 ar_ptr = ar_ptr->next;
173 if (ar_ptr == &main_arena)
174 break;
175 }
176 __libc_lock_unlock (list_lock);
177 }
178
179 void
180 __malloc_fork_unlock_child (void)
181 {
182 if (__malloc_initialized < 1)
183 return;
184
185 /* Push all arenas to the free list, except thread_arena, which is
186 attached to the current thread. */
187 __libc_lock_init (free_list_lock);
188 if (thread_arena != NULL)
189 thread_arena->attached_threads = 1;
190 free_list = NULL;
191 for (mstate ar_ptr = &main_arena;; )
192 {
193 __libc_lock_init (ar_ptr->mutex);
194 if (ar_ptr != thread_arena)
195 {
196 /* This arena is no longer attached to any thread. */
197 ar_ptr->attached_threads = 0;
198 ar_ptr->next_free = free_list;
199 free_list = ar_ptr;
200 }
201 ar_ptr = ar_ptr->next;
202 if (ar_ptr == &main_arena)
203 break;
204 }
205
206 __libc_lock_init (list_lock);
207 }
208
209 #if HAVE_TUNABLES
210 void
211 TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
212 {
213 int32_t value = (int32_t) valp->numval;
214 if (value != 0)
215 __malloc_check_init ();
216 }
217
218 # define TUNABLE_CALLBACK_FNDECL(__name, __type) \
219 static inline int do_ ## __name (__type value); \
220 void \
221 TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
222 { \
223 __type value = (__type) (valp)->numval; \
224 do_ ## __name (value); \
225 }
226
227 TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
228 TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
229 TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
230 TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
231 TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
232 TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
233 TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
234 #if USE_TCACHE
235 TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
236 TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
237 TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
238 #endif
239 TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
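
/* Editor's illustration: each TUNABLE_CALLBACK_FNDECL line above expands
   to a small adapter that converts the tunable value and forwards it to a
   do_* helper implemented in malloc.c.  For example, the
   set_mmap_threshold instance expands to roughly (leaving the
   TUNABLE_CALLBACK name mangling from dl-tunables.h unexpanded):  */
#if 0 /* approximate expansion, for illustration only */
static inline int do_set_mmap_threshold (size_t value);
void
TUNABLE_CALLBACK (set_mmap_threshold) (tunable_val_t *valp)
{
  size_t value = (size_t) (valp)->numval;
  do_set_mmap_threshold (value);
}
#endif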
240 #else
241 /* Initialization routine. */
242 #include <string.h>
243 extern char **_environ;
244
245 static char *
246 next_env_entry (char ***position)
247 {
248 char **current = *position;
249 char *result = NULL;
250
251 while (*current != NULL)
252 {
253 if (__builtin_expect ((*current)[0] == 'M', 0)
254 && (*current)[1] == 'A'
255 && (*current)[2] == 'L'
256 && (*current)[3] == 'L'
257 && (*current)[4] == 'O'
258 && (*current)[5] == 'C'
259 && (*current)[6] == '_')
260 {
261 result = &(*current)[7];
262
263 /* Save current position for next visit. */
264 *position = ++current;
265
266 break;
267 }
268
269 ++current;
270 }
271
272 return result;
273 }
274 #endif
275
276
277 #if defined(SHARED) || defined(USE_MTAG)
278 static void *
279 __failing_morecore (ptrdiff_t d)
280 {
281 return (void *) MORECORE_FAILURE;
282 }
283 #endif
284
285 #ifdef SHARED
286 extern struct dl_open_hook *_dl_open_hook;
287 libc_hidden_proto (_dl_open_hook);
288 #endif
289
290 #ifdef USE_MTAG
291
292 /* Generate a new (random) tag value for PTR and tag the memory it
293    points to, up to the end of the usable size for the chunk containing
294 it. Return the newly tagged pointer. */
295 static void *
296 __mtag_tag_new_usable (void *ptr)
297 {
298 if (ptr)
299 {
300 mchunkptr cp = mem2chunk(ptr);
301 ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr),
302 CHUNK_AVAILABLE_SIZE (cp) - CHUNK_HDR_SZ);
303 }
304 return ptr;
305 }
306
307 /* Generate a new (random) tag value for PTR, set the tags for the
308 memory to the new tag and initialize the memory contents to VAL.
309 In practice this function will only be called with VAL=0, but we
310 keep this parameter to maintain the same prototype as memset. */
311 static void *
312 __mtag_tag_new_memset (void *ptr, int val, size_t size)
313 {
314 return __libc_mtag_memset_with_tag (__libc_mtag_new_tag (ptr), val, size);
315 }
316 #endif
317
318 static void
319 ptmalloc_init (void)
320 {
321 if (__malloc_initialized >= 0)
322 return;
323
324 __malloc_initialized = 0;
325
326 #ifdef USE_MTAG
327 if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0)
328 {
329 /* If the tunable says that we should be using tagged memory
330 and that morecore does not support tagged regions, then
331 disable it. */
332 if (__MTAG_SBRK_UNTAGGED)
333 __morecore = __failing_morecore;
334
335 mtag_mmap_flags = __MTAG_MMAP_FLAGS;
336 tag_new_memset = __mtag_tag_new_memset;
337 tag_region = __libc_mtag_tag_region;
338 tag_new_usable = __mtag_tag_new_usable;
339 tag_at = __libc_mtag_address_get_tag;
340 mtag_granule_mask = ~(size_t)(__MTAG_GRANULE_SIZE - 1);
341 }
342 #endif
343
344 #ifdef SHARED
345 /* In case this libc copy is in a non-default namespace, never use
346 brk. Likewise if dlopened from statically linked program. The
347 generic sbrk implementation also enforces this, but it is not
348 used on Hurd. */
349 if (!__libc_initial)
350 __morecore = __failing_morecore;
351 #endif
352
353 thread_arena = &main_arena;
354
355 malloc_init_state (&main_arena);
356
357 #if HAVE_TUNABLES
358 TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
359 TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
360 TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
361 TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
362 TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
363 TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
364 TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
365 TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
366 # if USE_TCACHE
367 TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
368 TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
369 TUNABLE_GET (tcache_unsorted_limit, size_t,
370 TUNABLE_CALLBACK (set_tcache_unsorted_limit));
371 # endif
372 TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
373 #else
374 const char *s = NULL;
375 if (__glibc_likely (_environ != NULL))
376 {
377 char **runp = _environ;
378 char *envline;
379
380 while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
381 0))
382 {
383 size_t len = strcspn (envline, "=");
384
385 if (envline[len] != '=')
386 /* This is a "MALLOC_" variable at the end of the string
387 without a '=' character. Ignore it since otherwise we
388 will access invalid memory below. */
389 continue;
390
391 switch (len)
392 {
393 case 6:
394 if (memcmp (envline, "CHECK_", 6) == 0)
395 s = &envline[7];
396 break;
397 case 8:
398 if (!__builtin_expect (__libc_enable_secure, 0))
399 {
400 if (memcmp (envline, "TOP_PAD_", 8) == 0)
401 __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
402 else if (memcmp (envline, "PERTURB_", 8) == 0)
403 __libc_mallopt (M_PERTURB, atoi (&envline[9]));
404 }
405 break;
406 case 9:
407 if (!__builtin_expect (__libc_enable_secure, 0))
408 {
409 if (memcmp (envline, "MMAP_MAX_", 9) == 0)
410 __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
411 else if (memcmp (envline, "ARENA_MAX", 9) == 0)
412 __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
413 }
414 break;
415 case 10:
416 if (!__builtin_expect (__libc_enable_secure, 0))
417 {
418 if (memcmp (envline, "ARENA_TEST", 10) == 0)
419 __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
420 }
421 break;
422 case 15:
423 if (!__builtin_expect (__libc_enable_secure, 0))
424 {
425 if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
426 __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
427 else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
428 __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
429 }
430 break;
431 default:
432 break;
433 }
434 }
435 }
436 if (s && s[0] != '\0' && s[0] != '0')
437 __malloc_check_init ();
438 #endif
439
440 #if HAVE_MALLOC_INIT_HOOK
441 void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
442 if (hook != NULL)
443 (*hook)();
444 #endif
445 __malloc_initialized = 1;
446 }
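
/* Editor's note (illustration, not upstream): the two configuration paths
   in ptmalloc_init correspond to different user-facing knobs.  With
   tunables enabled the values are usually supplied as, for example,

       GLIBC_TUNABLES=glibc.malloc.mmap_threshold=65536:glibc.malloc.arena_max=2

   while the fallback path parses the traditional environment variables
   such as MALLOC_MMAP_THRESHOLD_=65536 and MALLOC_ARENA_MAX=2, skipping
   them for setuid/secure programs (the __libc_enable_secure checks).  */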
447
448 /* Managing heaps and arenas (for concurrent threads) */
449
450 #if MALLOC_DEBUG > 1
451
452 /* Print the complete contents of a single heap to stderr. */
453
454 static void
455 dump_heap (heap_info *heap)
456 {
457 char *ptr;
458 mchunkptr p;
459
460 fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
461 ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
462 (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
463 p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
464 ~MALLOC_ALIGN_MASK);
465 for (;; )
466 {
467 fprintf (stderr, "chunk %p size %10lx", p, (long) chunksize_nomask(p));
468 if (p == top (heap->ar_ptr))
469 {
470 fprintf (stderr, " (top)\n");
471 break;
472 }
473 else if (chunksize_nomask(p) == (0 | PREV_INUSE))
474 {
475 fprintf (stderr, " (fence)\n");
476 break;
477 }
478 fprintf (stderr, "\n");
479 p = next_chunk (p);
480 }
481 }
482 #endif /* MALLOC_DEBUG > 1 */
483
484 /* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
485 addresses as opposed to increasing, new_heap would badly fragment the
486 address space. In that case remember the second HEAP_MAX_SIZE part
487 aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
488 call (if it is already aligned) and try to reuse it next time. We need
489    no locking for it, as the kernel ensures atomicity for us - worst case
490 we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
491 multiple threads, but only one will succeed. */
492 static char *aligned_heap_area;
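
/* Editor's illustration (not upstream code): a self-contained sketch of
   the alignment technique new_heap uses below -- reserve twice the needed
   size with PROT_NONE, keep the part that starts at an aligned address,
   and unmap the rest.  Names here are hypothetical and error handling is
   minimal.  */
#if 0 /* standalone sketch, not part of the build */
#include <stdint.h>
#include <sys/mman.h>

/* Reserve SIZE bytes of address space aligned to SIZE (a power of two). */
static void *
ex_reserve_aligned (size_t size)
{
  char *p1 = mmap (NULL, size << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;

  /* First address within the mapping that is a multiple of SIZE.  */
  char *p2 = (char *) (((uintptr_t) p1 + (size - 1)) & ~(size - 1));
  size_t head = p2 - p1;               /* unaligned prefix, < SIZE */

  if (head != 0)
    munmap (p1, head);                 /* drop the prefix */
  munmap (p2 + size, size - head);     /* drop the excess tail */
  return p2;
}
#endif
/* new_heap improves on this sketch by remembering the leftover aligned
   half in aligned_heap_area so a later call can try that address first.  */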
493
494 /* Create a new heap. size is automatically rounded up to a multiple
495 of the page size. */
496
497 static heap_info *
498 new_heap (size_t size, size_t top_pad)
499 {
500 size_t pagesize = GLRO (dl_pagesize);
501 char *p1, *p2;
502 unsigned long ul;
503 heap_info *h;
504
505 if (size + top_pad < HEAP_MIN_SIZE)
506 size = HEAP_MIN_SIZE;
507 else if (size + top_pad <= HEAP_MAX_SIZE)
508 size += top_pad;
509 else if (size > HEAP_MAX_SIZE)
510 return 0;
511 else
512 size = HEAP_MAX_SIZE;
513 size = ALIGN_UP (size, pagesize);
514
515 /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
516 No swap space needs to be reserved for the following large
517 mapping (on Linux, this is the case for all non-writable mappings
518 anyway). */
519 p2 = MAP_FAILED;
520 if (aligned_heap_area)
521 {
522 p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
523 MAP_NORESERVE);
524 aligned_heap_area = NULL;
525 if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
526 {
527 __munmap (p2, HEAP_MAX_SIZE);
528 p2 = MAP_FAILED;
529 }
530 }
531 if (p2 == MAP_FAILED)
532 {
533 p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
534 if (p1 != MAP_FAILED)
535 {
536 p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
537 & ~(HEAP_MAX_SIZE - 1));
538 ul = p2 - p1;
539 if (ul)
540 __munmap (p1, ul);
541 else
542 aligned_heap_area = p2 + HEAP_MAX_SIZE;
543 __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
544 }
545 else
546 {
547 /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
548 is already aligned. */
549 p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
550 if (p2 == MAP_FAILED)
551 return 0;
552
553 if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
554 {
555 __munmap (p2, HEAP_MAX_SIZE);
556 return 0;
557 }
558 }
559 }
560 if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
561 {
562 __munmap (p2, HEAP_MAX_SIZE);
563 return 0;
564 }
565 h = (heap_info *) p2;
566 h->size = size;
567 h->mprotect_size = size;
568 LIBC_PROBE (memory_heap_new, 2, h, h->size);
569 return h;
570 }
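
/* Editor's note: new_heap thus reserves a full HEAP_MAX_SIZE of address
   space but makes only the initial `size' bytes accessible; grow_heap
   below extends the readable/writable region on demand, shrink_heap gives
   pages back, and h->mprotect_size records how much of the reservation is
   currently mapped readable/writable.  */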
571
572 /* Grow a heap. size is automatically rounded up to a
573 multiple of the page size. */
574
575 static int
576 grow_heap (heap_info *h, long diff)
577 {
578 size_t pagesize = GLRO (dl_pagesize);
579 long new_size;
580
581 diff = ALIGN_UP (diff, pagesize);
582 new_size = (long) h->size + diff;
583 if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
584 return -1;
585
586 if ((unsigned long) new_size > h->mprotect_size)
587 {
588 if (__mprotect ((char *) h + h->mprotect_size,
589 (unsigned long) new_size - h->mprotect_size,
590 mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
591 return -2;
592
593 h->mprotect_size = new_size;
594 }
595
596 h->size = new_size;
597 LIBC_PROBE (memory_heap_more, 2, h, h->size);
598 return 0;
599 }
600
601 /* Shrink a heap. */
602
603 static int
604 shrink_heap (heap_info *h, long diff)
605 {
606 long new_size;
607
608 new_size = (long) h->size - diff;
609 if (new_size < (long) sizeof (*h))
610 return -1;
611
612 /* Try to re-map the extra heap space freshly to save memory, and make it
613 inaccessible. See malloc-sysdep.h to know when this is true. */
614 if (__glibc_unlikely (check_may_shrink_heap ()))
615 {
616 if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
617 MAP_FIXED) == (char *) MAP_FAILED)
618 return -2;
619
620 h->mprotect_size = new_size;
621 }
622 else
623 __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
624 /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
625
626 h->size = new_size;
627 LIBC_PROBE (memory_heap_less, 2, h, h->size);
628 return 0;
629 }
630
631 /* Delete a heap. */
632
633 #define delete_heap(heap) \
634 do { \
635 if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
636 aligned_heap_area = NULL; \
637 __munmap ((char *) (heap), HEAP_MAX_SIZE); \
638 } while (0)
639
640 static int
641 heap_trim (heap_info *heap, size_t pad)
642 {
643 mstate ar_ptr = heap->ar_ptr;
644 unsigned long pagesz = GLRO (dl_pagesize);
645 mchunkptr top_chunk = top (ar_ptr), p;
646 heap_info *prev_heap;
647 long new_size, top_size, top_area, extra, prev_size, misalign;
648
649 /* Can this heap go away completely? */
650 while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
651 {
652 prev_heap = heap->prev;
653 prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
654 p = chunk_at_offset (prev_heap, prev_size);
655 /* fencepost must be properly aligned. */
656 misalign = ((long) p) & MALLOC_ALIGN_MASK;
657 p = chunk_at_offset (prev_heap, prev_size - misalign);
658 assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
659 p = prev_chunk (p);
660 new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
661 assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
662 if (!prev_inuse (p))
663 new_size += prev_size (p);
664 assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
665 if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
666 break;
667 ar_ptr->system_mem -= heap->size;
668 LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
669 delete_heap (heap);
670 heap = prev_heap;
671 if (!prev_inuse (p)) /* consolidate backward */
672 {
673 p = prev_chunk (p);
674 unlink_chunk (ar_ptr, p);
675 }
676 assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
677 assert (((char *) p + new_size) == ((char *) heap + heap->size));
678 top (ar_ptr) = top_chunk = p;
679 set_head (top_chunk, new_size | PREV_INUSE);
680 /*check_chunk(ar_ptr, top_chunk);*/
681 }
682
683 /* Uses similar logic for per-thread arenas as the main arena with systrim
684 and _int_free by preserving the top pad and rounding down to the nearest
685 page. */
686 top_size = chunksize (top_chunk);
687 if ((unsigned long)(top_size) <
688 (unsigned long)(mp_.trim_threshold))
689 return 0;
690
691 top_area = top_size - MINSIZE - 1;
692 if (top_area < 0 || (size_t) top_area <= pad)
693 return 0;
694
695 /* Release in pagesize units and round down to the nearest page. */
696 extra = ALIGN_DOWN(top_area - pad, pagesz);
697 if (extra == 0)
698 return 0;
699
700 /* Try to shrink. */
701 if (shrink_heap (heap, extra) != 0)
702 return 0;
703
704 ar_ptr->system_mem -= extra;
705
706 /* Success. Adjust top accordingly. */
707 set_head (top_chunk, (top_size - extra) | PREV_INUSE);
708 /*check_chunk(ar_ptr, top_chunk);*/
709 return 1;
710 }
711
712 /* Create a new arena with initial size "size". */
713
714 /* If REPLACED_ARENA is not NULL, detach it from this thread. Must be
715 called while free_list_lock is held. */
716 static void
717 detach_arena (mstate replaced_arena)
718 {
719 if (replaced_arena != NULL)
720 {
721 assert (replaced_arena->attached_threads > 0);
722 /* The current implementation only detaches from main_arena in
723 case of allocation failure. This means that it is likely not
724 beneficial to put the arena on free_list even if the
725 reference count reaches zero. */
726 --replaced_arena->attached_threads;
727 }
728 }
729
730 static mstate
731 _int_new_arena (size_t size)
732 {
733 mstate a;
734 heap_info *h;
735 char *ptr;
736 unsigned long misalign;
737
738 h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
739 mp_.top_pad);
740 if (!h)
741 {
742 /* Maybe size is too large to fit in a single heap. So, just try
743 to create a minimally-sized arena and let _int_malloc() attempt
744 to deal with the large request via mmap_chunk(). */
745 h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
746 if (!h)
747 return 0;
748 }
749 a = h->ar_ptr = (mstate) (h + 1);
750 malloc_init_state (a);
751 a->attached_threads = 1;
752 /*a->next = NULL;*/
753 a->system_mem = a->max_system_mem = h->size;
754
755 /* Set up the top chunk, with proper alignment. */
756 ptr = (char *) (a + 1);
757 misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
758 if (misalign > 0)
759 ptr += MALLOC_ALIGNMENT - misalign;
760 top (a) = (mchunkptr) ptr;
761 set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
762
763 LIBC_PROBE (memory_arena_new, 2, a, size);
764 mstate replaced_arena = thread_arena;
765 thread_arena = a;
766 __libc_lock_init (a->mutex);
767
768 __libc_lock_lock (list_lock);
769
770 /* Add the new arena to the global list. */
771 a->next = main_arena.next;
772 /* FIXME: The barrier is an attempt to synchronize with read access
773 in reused_arena, which does not acquire list_lock while
774 traversing the list. */
775 atomic_write_barrier ();
776 main_arena.next = a;
777
778 __libc_lock_unlock (list_lock);
779
780 __libc_lock_lock (free_list_lock);
781 detach_arena (replaced_arena);
782 __libc_lock_unlock (free_list_lock);
783
784 /* Lock this arena. NB: Another thread may have been attached to
785 this arena because the arena is now accessible from the
786 main_arena.next list and could have been picked by reused_arena.
787 This can only happen for the last arena created (before the arena
788 limit is reached). At this point, some arena has to be attached
789 to two threads. We could acquire the arena lock before list_lock
790 to make it less likely that reused_arena picks this new arena,
791 but this could result in a deadlock with
792 __malloc_fork_lock_parent. */
793
794 __libc_lock_lock (a->mutex);
795
796 return a;
797 }
798
799
800 /* Remove an arena from free_list. */
801 static mstate
802 get_free_list (void)
803 {
804 mstate replaced_arena = thread_arena;
805 mstate result = free_list;
806 if (result != NULL)
807 {
808 __libc_lock_lock (free_list_lock);
809 result = free_list;
810 if (result != NULL)
811 {
812 free_list = result->next_free;
813
814 /* The arena will be attached to this thread. */
815 assert (result->attached_threads == 0);
816 result->attached_threads = 1;
817
818 detach_arena (replaced_arena);
819 }
820 __libc_lock_unlock (free_list_lock);
821
822 if (result != NULL)
823 {
824 LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
825 __libc_lock_lock (result->mutex);
826 thread_arena = result;
827 }
828 }
829
830 return result;
831 }
832
833 /* Remove the arena from the free list (if it is present).
834 free_list_lock must have been acquired by the caller. */
835 static void
836 remove_from_free_list (mstate arena)
837 {
838 mstate *previous = &free_list;
839 for (mstate p = free_list; p != NULL; p = p->next_free)
840 {
841 assert (p->attached_threads == 0);
842 if (p == arena)
843 {
844 /* Remove the requested arena from the list. */
845 *previous = p->next_free;
846 break;
847 }
848 else
849 previous = &p->next_free;
850 }
851 }
852
853 /* Lock and return an arena that can be reused for memory allocation.
854 Avoid AVOID_ARENA as we have already failed to allocate memory in
855 it and it is currently locked. */
856 static mstate
857 reused_arena (mstate avoid_arena)
858 {
859 mstate result;
860 /* FIXME: Access to next_to_use suffers from data races. */
861 static mstate next_to_use;
862 if (next_to_use == NULL)
863 next_to_use = &main_arena;
864
865 /* Iterate over all arenas (including those linked from
866 free_list). */
867 result = next_to_use;
868 do
869 {
870 if (!__libc_lock_trylock (result->mutex))
871 goto out;
872
873 /* FIXME: This is a data race, see _int_new_arena. */
874 result = result->next;
875 }
876 while (result != next_to_use);
877
878 /* Avoid AVOID_ARENA as we have already failed to allocate memory
879 in that arena and it is currently locked. */
880 if (result == avoid_arena)
881 result = result->next;
882
883 /* No arena available without contention. Wait for the next in line. */
884 LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
885 __libc_lock_lock (result->mutex);
886
887 out:
888 /* Attach the arena to the current thread. */
889 {
890 /* Update the arena thread attachment counters. */
891 mstate replaced_arena = thread_arena;
892 __libc_lock_lock (free_list_lock);
893 detach_arena (replaced_arena);
894
895 /* We may have picked up an arena on the free list. We need to
896 preserve the invariant that no arena on the free list has a
897 positive attached_threads counter (otherwise,
898 arena_thread_freeres cannot use the counter to determine if the
899 arena needs to be put on the free list). We unconditionally
900 remove the selected arena from the free list. The caller of
901 reused_arena checked the free list and observed it to be empty,
902 so the list is very short. */
903 remove_from_free_list (result);
904
905 ++result->attached_threads;
906
907 __libc_lock_unlock (free_list_lock);
908 }
909
910 LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
911 thread_arena = result;
912 next_to_use = result->next;
913
914 return result;
915 }
916
917 static mstate
918 arena_get2 (size_t size, mstate avoid_arena)
919 {
920 mstate a;
921
922 static size_t narenas_limit;
923
924 a = get_free_list ();
925 if (a == NULL)
926 {
927 /* Nothing immediately available, so generate a new arena. */
928 if (narenas_limit == 0)
929 {
930 if (mp_.arena_max != 0)
931 narenas_limit = mp_.arena_max;
932 else if (narenas > mp_.arena_test)
933 {
934 int n = __get_nprocs ();
935
936 if (n >= 1)
937 narenas_limit = NARENAS_FROM_NCORES (n);
938 else
939 /* We have no information about the system. Assume two
940 cores. */
941 narenas_limit = NARENAS_FROM_NCORES (2);
942 }
943 }
944 repeat:;
945 size_t n = narenas;
946 /* NB: the following depends on the fact that (size_t)0 - 1 is a
947 very large number and that the underflow is OK. If arena_max
948 is set the value of arena_test is irrelevant. If arena_test
949 is set but narenas is not yet larger or equal to arena_test
950 narenas_limit is 0. There is no possibility for narenas to
951 be too big for the test to always fail since there is not
952 enough address space to create that many arenas. */
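      /* Editor's note (worked example): while narenas_limit is still 0,
         narenas_limit - 1 wraps around to SIZE_MAX, so the test below
         always succeeds and a new arena may be created.  Once the limit
         is computed as, say, 8, the test becomes n <= 7: the eighth arena
         can still be created, after which allocation falls back to
         reused_arena.  */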
953 if (__glibc_unlikely (n <= narenas_limit - 1))
954 {
955 if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
956 goto repeat;
957 a = _int_new_arena (size);
958 if (__glibc_unlikely (a == NULL))
959 catomic_decrement (&narenas);
960 }
961 else
962 a = reused_arena (avoid_arena);
963 }
964 return a;
965 }
966
967 /* If we don't have the main arena, then maybe the failure is due to running
968 out of mmapped areas, so we can try allocating on the main arena.
969 Otherwise, it is likely that sbrk() has failed and there is still a chance
970 to mmap(), so try one of the other arenas. */
971 static mstate
972 arena_get_retry (mstate ar_ptr, size_t bytes)
973 {
974 LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
975 if (ar_ptr != &main_arena)
976 {
977 __libc_lock_unlock (ar_ptr->mutex);
978 ar_ptr = &main_arena;
979 __libc_lock_lock (ar_ptr->mutex);
980 }
981 else
982 {
983 __libc_lock_unlock (ar_ptr->mutex);
984 ar_ptr = arena_get2 (bytes, ar_ptr);
985 }
986
987 return ar_ptr;
988 }
989
990 void
991 __malloc_arena_thread_freeres (void)
992 {
993 /* Shut down the thread cache first. This could deallocate data for
994 the thread arena, so do this before we put the arena on the free
995 list. */
996 tcache_thread_shutdown ();
997
998 mstate a = thread_arena;
999 thread_arena = NULL;
1000
1001 if (a != NULL)
1002 {
1003 __libc_lock_lock (free_list_lock);
1004 /* If this was the last attached thread for this arena, put the
1005 arena on the free list. */
1006 assert (a->attached_threads > 0);
1007 if (--a->attached_threads == 0)
1008 {
1009 a->next_free = free_list;
1010 free_list = a;
1011 }
1012 __libc_lock_unlock (free_list_lock);
1013 }
1014 }
1015
1016 /*
1017 * Local variables:
1018 * c-basic-offset: 2
1019 * End:
1020 */