/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#include <stdbool.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE malloc
#endif
#include <elf/dl-tunables.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

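/* Illustrative note (not part of the original file): on a typical
   64-bit configuration with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16,
   the four members above occupy 32 bytes and -6 * SIZE_SZ &
   MALLOC_ALIGN_MASK evaluates to -48 & 15 == 0, so no padding is
   emitted; sizeof (heap_info) + 2 * SIZE_SZ is then 48, a multiple of
   MALLOC_ALIGNMENT.  On configurations where the numbers differ, the
   pad array grows just enough to restore that property.  */
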
/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];

/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
static size_t narenas = 1;
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);

/* Already initialized? */
int __malloc_initialized = -1;

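/* Added note (not part of the original file): the value is used as a
   tri-state.  -1 means malloc has not been initialized, 0 means
   ptmalloc_init below is currently running, and 1 means
   initialization has completed.  */
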
/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
      ptr = thread_arena;         \
      arena_lock (ptr, size);     \
  } while (0)

#define arena_lock(ptr, size) do {          \
      if (ptr)                              \
        __libc_lock_lock (ptr->mutex);      \
      else                                  \
        ptr = arena_get2 ((size), NULL);    \
  } while (0)
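
/* Illustrative sketch (not part of the original file): the allocation
   paths in malloc.c use these macros roughly as follows, falling back
   to arena_get_retry (defined below) when the chosen arena cannot
   satisfy the request:

     mstate ar_ptr;
     void *victim;

     arena_get (ar_ptr, bytes);            // lock thread_arena or create one
     victim = _int_malloc (ar_ptr, bytes);
     if (victim == NULL && ar_ptr != NULL)
       {
         ar_ptr = arena_get_retry (ar_ptr, bytes);
         victim = _int_malloc (ar_ptr, bytes);
       }
     if (ar_ptr != NULL)
       __libc_lock_unlock (ar_ptr->mutex);
*/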

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)

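/* Illustrative note (not part of the original file): because a heap is
   HEAP_MAX_SIZE-aligned and at most HEAP_MAX_SIZE bytes long, masking
   off the low bits of any chunk address yields its heap_info header.
   For example, with the usual 64-bit HEAP_MAX_SIZE of 0x4000000
   (64 MiB), a chunk at 0x7f5a2468abcd belongs to the heap starting at
   0x7f5a24000000.  This applies only to non-main arenas; chunks whose
   size field has the NON_MAIN_ARENA bit clear are dispatched to
   &main_arena by arena_for_chunk instead.  */
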

/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

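/* Illustrative sketch (not part of the original file): fork brackets
   the actual clone roughly like

     __malloc_fork_lock_parent ();      // parent: list_lock + every arena lock
     pid = <perform the fork>;
     if (pid == 0)
       __malloc_fork_unlock_child ();   // child: re-init locks, rebuild free_list
     else
       __malloc_fork_unlock_parent ();  // parent: release the locks again

   so that neither process observes an arena mutex in an inconsistent
   state after the clone.  */
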
void
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}

void
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}

void
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}

#if HAVE_TUNABLES
void
TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
{
  int32_t value = (int32_t) valp->numval;
  if (value != 0)
    __malloc_check_init ();
}

# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value);                              \
void                                                                          \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp)                               \
{                                                                             \
  __type value = (__type) (valp)->numval;                                     \
  do_ ## __name (value);                                                      \
}

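/* Illustrative note (not part of the original file): each invocation
   below generates a thin callback that forwards the raw tunable value
   to the matching do_* helper in malloc.c.  For instance,
   TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t) expands roughly to

     static inline int do_set_top_pad (size_t value);
     void
     TUNABLE_CALLBACK (set_top_pad) (tunable_val_t *valp)
     {
       size_t value = (size_t) (valp)->numval;
       do_set_top_pad (value);
     }
*/
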
TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
#if USE_TCACHE
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
#else
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif


#if defined(SHARED) || defined(USE_MTAG)
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}
#endif

#ifdef SHARED
extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

#ifdef USE_MTAG

/* Generate a new (random) tag value for PTR and tag the memory it
   points to up to the end of the usable size for the chunk containing
   it.  Return the newly tagged pointer.  */
static void *
__mtag_tag_new_usable (void *ptr)
{
  if (ptr)
    {
      mchunkptr cp = mem2chunk(ptr);
      ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr),
                                    CHUNK_AVAILABLE_SIZE (cp) - CHUNK_HDR_SZ);
    }
  return ptr;
}

/* Generate a new (random) tag value for PTR, set the tags for the
   memory to the new tag and initialize the memory contents to VAL.
   In practice this function will only be called with VAL=0, but we
   keep this parameter to maintain the same prototype as memset.  */
static void *
__mtag_tag_new_memset (void *ptr, int val, size_t size)
{
  return __libc_mtag_memset_with_tag (__libc_mtag_new_tag (ptr), val, size);
}
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef USE_MTAG
  if ((TUNABLE_GET_FULL (glibc, mem, tagging, int32_t, NULL) & 1) != 0)
    {
      /* If the tunable says that we should be using tagged memory
         and that morecore does not support tagged regions, then
         disable it.  */
      if (__MTAG_SBRK_UNTAGGED)
        __morecore = __failing_morecore;

      mtag_mmap_flags = __MTAG_MMAP_FLAGS;
      tag_new_memset = __mtag_tag_new_memset;
      tag_region = __libc_mtag_tag_region;
      tag_new_usable = __mtag_tag_new_usable;
      tag_at = __libc_mtag_address_get_tag;
      mtag_granule_mask = ~(size_t)(__MTAG_GRANULE_SIZE - 1);
    }
#endif

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use
     brk.  Likewise if dlopened from statically linked program.  The
     generic sbrk implementation also enforces this, but it is not
     used on Hurd.  */
  if (!__libc_initial)
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;

  malloc_init_state (&main_arena);

#if HAVE_TUNABLES
  TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
# if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
# endif
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
#else
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0] != '\0' && s[0] != '0')
    __malloc_check_init ();
#endif

#if HAVE_MALLOC_INIT_HOOK
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}

/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) chunksize_nomask(p));
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (chunksize_nomask(p) == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

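/* Illustrative note (not part of the original file): reserving
   HEAP_MAX_SIZE << 1 bytes below guarantees that an aligned window of
   HEAP_MAX_SIZE bytes lies entirely inside the reservation, whatever
   address the kernel returns.  E.g. with HEAP_MAX_SIZE == 64 MiB, a
   128 MiB mapping starting at 0x7f5a23400000 contains the aligned
   window [0x7f5a24000000, 0x7f5a28000000); new_heap keeps that window
   and unmaps the excess on either side.  */
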
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                              \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)     \
        aligned_heap_area = NULL;                                   \
      __munmap ((char *) (heap), HEAP_MAX_SIZE);                    \
    } while (0)

static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long)(top_size) <
      (unsigned long)(mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN(top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}


/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set, the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger than or equal to
         arena_test, narenas_limit is 0.  There is no possibility for
         narenas to be too big for the test to always fail since there
         is not enough address space to create that many arenas.  */
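      /* Added note (not part of the original file): while narenas_limit
         is still 0, narenas_limit - 1 wraps around to SIZE_MAX, so the
         test below always succeeds and a fresh arena is created.  Once
         the limit is in force (by default roughly eight arenas per core
         on 64-bit and two per core on 32-bit via NARENAS_FROM_NCORES),
         allocation falls back to reused_arena instead.  */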
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */