/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */
#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE malloc
#endif
#include <elf/dl-tunables.h>
/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */
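
/* Illustrative layout of such a heap (a sketch, not to scale):

     heap start, aligned to HEAP_MAX_SIZE
     +------------------+----------------------------+------------------+
     | heap_info header | malloc_state, only present | malloc_chunks,   |
     | (struct below)   | in the heap that carries   | ending with the  |
     |                  | its arena's mstate         | arena's top      |
     +------------------+----------------------------+------------------+

   Only the first heap created for an arena (see _int_new_arena below)
   carries the malloc_state; additional heaps obtained later for the
   same arena start with chunks directly after their heap_info.  */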
typedef struct _heap_info
{
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
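
/* The declaration above relies on a negative array size being a
   compile-time error: the conditional evaluates to -1 whenever
   (sizeof (heap_info) + 2 * SIZE_SZ) is not a multiple of
   MALLOC_ALIGNMENT, and to 1 otherwise, so a bad pad computation is
   rejected by the compiler instead of breaking alignment at run
   time.  */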
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;
/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
static size_t narenas = 1;
static mstate free_list;
/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);
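
/* Taken together with the free_list_lock comment above, the implied
   acquisition order is: list_lock first (with no arena lock held),
   then individual arena mutexes, and free_list_lock last; nothing
   else may be acquired while free_list_lock is held.  */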
/* Already initialized? */
int __malloc_initialized = -1;
/**************************************************************************/
/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr) \
        __libc_lock_lock (ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
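
/* Illustrative use (a sketch of what callers in malloc.c typically do,
   not code from this file): arena_get leaves the chosen arena locked,
   and the caller must unlock it when done:

     mstate ar_ptr;
     arena_get (ar_ptr, bytes);
     void *victim = _int_malloc (ar_ptr, bytes);
     if (ar_ptr != NULL)
       __libc_lock_unlock (ar_ptr->mutex);
*/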
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
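
/* Worked example (hypothetical addresses): with HEAP_MAX_SIZE equal to
   64 MiB, heap_for_ptr simply clears the low 26 bits of the chunk
   address, e.g. 0x7f1234567890 & ~(HEAP_MAX_SIZE - 1) == 0x7f1234000000,
   which is the mmap()ed start of the heap and hence its heap_info.
   This constant-time lookup is why HEAP_MAX_SIZE must be a power of
   two and why heaps must be mapped at HEAP_MAX_SIZE-aligned
   addresses.  */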
/**************************************************************************/

/* atfork support.  */
/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}
void
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}
void
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}
#if HAVE_TUNABLES
static void
TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
{
  int32_t value = (int32_t) valp->numval;
  if (value != 0)
    __malloc_check_init ();
}
# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
static void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
  __type value = (__type) (valp)->numval; \
  do_ ## __name (value); \
}
TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
#if USE_TCACHE
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
#endif

/* Initialization routine.  */
#include <string.h>
extern char **_environ;
static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#if defined(SHARED) || defined(USE_MTAG)
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}
#endif

#ifdef SHARED
extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
#ifdef USE_MTAG

/* Generate a new (random) tag value for PTR and tag the memory it
   points to upto the end of the usable size for the chunk containing
   it.  Return the newly tagged pointer.  */
static void *
__mtag_tag_new_usable (void *ptr)
{
  if (ptr)
    {
      mchunkptr cp = mem2chunk(ptr);
      /* This likely will never happen, but we can't handle retagging
         chunks from the dumped main arena.  So just return the
         existing pointer.  */
      if (DUMPED_MAIN_ARENA_CHUNK (cp))
        return ptr;
      ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr),
                                    CHUNK_AVAILABLE_SIZE (cp) - CHUNK_HDR_SZ);
    }
  return ptr;
}
/* Generate a new (random) tag value for PTR, set the tags for the
   memory to the new tag and initialize the memory contents to VAL.
   In practice this function will only be called with VAL=0, but we
   keep this parameter to maintain the same prototype as memset.  */
static void *
__mtag_tag_new_memset (void *ptr, int val, size_t size)
{
  return __libc_mtag_memset_with_tag (__libc_mtag_new_tag (ptr), val, size);
}
#endif
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef USE_MTAG
  if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0)
    {
      /* If the tunable says that we should be using tagged memory
         and that morecore does not support tagged regions, then
         disable it.  */
      if (__MTAG_SBRK_UNTAGGED)
        __morecore = __failing_morecore;

      __mtag_mmap_flags = __MTAG_MMAP_FLAGS;
      __tag_new_memset = __mtag_tag_new_memset;
      __tag_region = __libc_mtag_tag_region;
      __tag_new_usable = __mtag_tag_new_usable;
      __tag_at = __libc_mtag_address_get_tag;
      __mtag_granule_mask = ~(size_t)(__MTAG_GRANULE_SIZE - 1);
    }
#endif
#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use
     brk.  Likewise if dlopened from statically linked program.  The
     generic sbrk implementation also enforces this, but it is not
     used on Hurd.  */
  if (_dl_open_hook != NULL)
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;

  malloc_init_state (&main_arena);
#if HAVE_TUNABLES
  TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
# if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
# endif
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
#else
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0] != '\0' && s[0] != '0')
    __malloc_check_init ();
#endif
#if HAVE_MALLOC_INIT_HOOK
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) chunksize_nomask(p));
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (chunksize_nomask(p) == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
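
/* Illustrative sketch of the alignment scheme used by new_heap below
   (pseudocode; p1, p2 and ul mirror the local variables there):
   mapping twice the required size guarantees that one aligned block of
   HEAP_MAX_SIZE bytes lies inside the mapping, so the unaligned head
   and the tail can simply be unmapped again:

     p1 = mmap (0, HEAP_MAX_SIZE << 1, ...);
     p2 = (p1 + HEAP_MAX_SIZE - 1) & ~(HEAP_MAX_SIZE - 1);    aligned start
     ul = p2 - p1;                                            unaligned head
     munmap (p1, ul);                                  drop the head, if any
     munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);         drop the tail
*/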
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);
  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }

  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
/* Delete a heap. */
#define delete_heap(heap) \
  do { \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL; \
      __munmap ((char *) (heap), HEAP_MAX_SIZE); \
    } while (0)
static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;
  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }
  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long)(top_size) <
      (unsigned long)(mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN(top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}
static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}
/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}
/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
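      /* Concretely: with no limit configured, arena_max is 0 and
         narenas has not yet exceeded arena_test, so narenas_limit is
         still 0; narenas_limit - 1 then wraps around to SIZE_MAX and
         the test below always succeeds, allowing another arena to be
         created.  Once narenas_limit has been fixed (from arena_max or
         the core count), the test fails as soon as narenas reaches the
         limit and an existing arena is reused instead.  */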
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}
void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}