/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */
#include <stdbool.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE malloc
#endif
#include <elf/dl-tunables.h>
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
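/* Editorial addition (not in the original source): the power-of-two
   requirement stated above can be checked at compile time.  */
_Static_assert ((HEAP_MAX_SIZE & (HEAP_MAX_SIZE - 1)) == 0,
                "HEAP_MAX_SIZE must be a power of two");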
/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */
typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
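/* Illustrative arithmetic (editorial addition, not in the original
   source): on a 32-bit target with SIZE_SZ == 4 and
   MALLOC_ALIGNMENT == 16, the four fields above occupy 16 bytes and
   pad has -24 & 15 == 8 bytes, so sizeof (heap_info) + 2 * SIZE_SZ
   == 24 + 8 == 32, a multiple of 16.  On a typical 64-bit target
   (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16) the fields occupy 32 bytes
   and pad is empty: 32 + 16 == 48 is already a multiple of 16.  */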
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;
/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
static size_t narenas = 1;
static mstate free_list;
/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);
/* Already initialized? */
static bool __malloc_initialized = false;

/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */
#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr) \
        __libc_lock_lock (ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
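/* Usage sketch (editorial addition, not in the original source),
   modelled on the call sequence in __libc_malloc:

     mstate ar_ptr;
     void *victim;

     arena_get (ar_ptr, bytes);   // lock this thread's arena, or create one
     victim = _int_malloc (ar_ptr, bytes);
     if (ar_ptr != NULL)
       __libc_lock_unlock (ar_ptr->mutex);
*/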
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
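/* Illustrative example (editorial addition, not in the original
   source): with HEAP_MAX_SIZE == 0x4000000 (64 MiB), a chunk at
   0x7f5a34567890 in a non-main arena lies in the heap whose heap_info
   header starts at 0x7f5a34567890 & ~0x3ffffff == 0x7f5a34000000.  */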
/**************************************************************************/

/* atfork support.  */
/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */
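/* Illustrative note (editorial addition, not in the original source):
   fork runs __malloc_fork_lock_parent before the actual fork; the
   parent then runs __malloc_fork_unlock_parent and the new child runs
   __malloc_fork_unlock_child.  */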
void
__malloc_fork_lock_parent (void)
{
  if (!__malloc_initialized)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}
void
__malloc_fork_unlock_parent (void)
{
  if (!__malloc_initialized)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}
void
__malloc_fork_unlock_child (void)
{
  if (!__malloc_initialized)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}
#if HAVE_TUNABLES
# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
static void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
  __type value = (__type) (valp)->numval; \
  do_ ## __name (value); \
}
TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
#if USE_TCACHE
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
TUNABLE_CALLBACK_FNDECL (set_hugetlb, int32_t)
#endif
/* Initialization routine.  */
#include <string.h>
extern char **_environ;
static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#ifdef SHARED
extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
#if USE_TCACHE
static void tcache_key_initialize (void);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized)
    return;

  __malloc_initialized = true;

#if USE_TCACHE
  tcache_key_initialize ();
#endif
#ifdef USE_MTAG
  if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0)
    {
      /* If the tunable says that we should be using tagged memory
         and that morecore does not support tagged regions, then
         disable it.  */
      if (__MTAG_SBRK_UNTAGGED)
        __always_fail_morecore = true;

      mtag_enabled = true;
      mtag_mmap_flags = __MTAG_MMAP_FLAGS;
    }
#endif
#if defined SHARED && IS_IN (libc)
  /* In case this libc copy is in a non-default namespace, never use
     brk.  Likewise if dlopened from statically linked program.  The
     generic sbrk implementation also enforces this, but it is not
     used on Hurd.  */
  if (!__libc_initial)
    __always_fail_morecore = true;
#endif
  thread_arena = &main_arena;

  malloc_init_state (&main_arena);
#if HAVE_TUNABLES
  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
# if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
# endif
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
  TUNABLE_GET (hugetlb, int32_t, TUNABLE_CALLBACK (set_hugetlb));
#else
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            }
        }
    }
#endif
}
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */
static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) chunksize_nomask (p));
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (chunksize_nomask (p) == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }

  madvise_thp (p2, size);

  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
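/* Worked example for the alignment dance above (editorial addition,
   not in the original source): assume HEAP_MAX_SIZE == 0x4000000 and
   the double-sized MMAP returns p1 == 0x7f0001000000.  Then
   p2 == 0x7f0004000000 and ul == 0x3000000; the ul leading bytes and
   the HEAP_MAX_SIZE - ul trailing bytes are unmapped, leaving exactly
   one aligned HEAP_MAX_SIZE region.  If p1 had been aligned already
   (ul == 0), the second half is unmapped as well, but its address is
   remembered in aligned_heap_area for the next call.  */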
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                            \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)   \
        aligned_heap_area = NULL;                                 \
      __munmap ((char *) (heap), HEAP_MAX_SIZE);                  \
    } while (0)
static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink.  */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success.  Adjust top accordingly.  */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}
static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB:  Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}
/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}
/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))

static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs_sched ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
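      /* Illustrative note (editorial addition, not in the original
         source): when narenas_limit is still 0 here, narenas_limit - 1
         wraps around to SIZE_MAX, so the test below succeeds for any n
         and a new arena may be created.  */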
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
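/* Worked example (editorial addition, not in the original source):
   with neither glibc.malloc.arena_max nor glibc.malloc.arena_test
   capping the count, NARENAS_FROM_NCORES as defined above yields
   8 * ncores arenas on 64-bit targets (2 * ncores on 32-bit), e.g. a
   limit of 64 arenas on an 8-core 64-bit machine.  */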
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}
void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}