/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
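/* For instance (assuming DEFAULT_MMAP_THRESHOLD_MAX keeps its usual
   definition of 4 * 1024 * 1024 * sizeof (long) from malloc.c), each
   non-main heap occupies an aligned window of 64 MiB of address space
   on 64-bit targets and 32 MiB on 32-bit ones.  */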

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
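/* A worked example of the pad computation: on LP64 the four members
   above each occupy SIZE_SZ bytes, so the struct body is 4 * SIZE_SZ
   bytes and the total to align is body + 2 * SIZE_SZ = 6 * SIZE_SZ.
   (-6 * SIZE_SZ) & MALLOC_ALIGN_MASK is then the smallest pad that
   rounds 6 * SIZE_SZ up to a multiple of MALLOC_ALIGNMENT: with
   SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16 it is 0, whereas with
   SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16 it would be 8.  */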

/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  */

static mutex_t list_lock = MUTEX_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr && !arena_is_corrupt (ptr)) \
        (void) mutex_lock (&ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
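/* Illustration of heap_for_ptr: since every non-main heap starts at an
   address aligned to HEAP_MAX_SIZE, masking off the low bits of any
   chunk address within the heap recovers the heap_info header.  With
   hypothetical addresses and HEAP_MAX_SIZE == 0x4000000, a chunk at
   0x7f3a5c3f2010 maps to the heap header at 0x7f3a5c000000.  */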

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static void *(*save_malloc_hook)(size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;

# ifdef ATFORK_MEM
ATFORK_MEM;
# endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

# define ATFORK_ARENA_PTR ((void *) -1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void *
malloc_atfork (size_t sz, const void *caller)
{
  void *victim;

  if (thread_arena == ATFORK_ARENA_PTR)
    {
      /* We are the only thread that may allocate at all.  */
      if (save_malloc_hook != malloc_check)
        {
          return _int_malloc (&main_arena, sz);
        }
      else
        {
          if (top_check () < 0)
            return 0;

          victim = _int_malloc (&main_arena, sz + 1);
          return mem2mem_check (victim, sz);
        }
    }
  else
    {
      /* Suspend the thread until the `atfork' handlers have completed.
         By that time, the hooks will have been reset as well, so that
         mALLOc() can be used again. */
      (void) mutex_lock (&list_lock);
      (void) mutex_unlock (&list_lock);
      return __libc_malloc (sz);
    }
}
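/* Note that the list_lock lock/unlock pair above protects no data by
   itself; it merely blocks the calling thread for as long as
   ptmalloc_lock_all holds list_lock across the fork, i.e. it is used
   purely as a barrier.  */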

static void
free_atfork (void *mem, const void *caller)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk (mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped (p))             /* release mmapped memory. */
    {
      munmap_chunk (p);
      return;
    }

  ar_ptr = arena_for_chunk (p);
  _int_free (ar_ptr, p, thread_arena == ATFORK_ARENA_PTR);
}

/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  if (mutex_trylock (&list_lock))
    {
      if (thread_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter. */
        goto out;

      /* This thread has to wait its turn.  */
      (void) mutex_lock (&list_lock);
    }
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_lock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  save_arena = thread_arena;
  thread_arena = ATFORK_ARENA_PTR;
out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  if (--atfork_recursive_cntr != 0)
    return;

  thread_arena = save_arena;
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for (ar_ptr = &main_arena;; )
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  (void) mutex_unlock (&list_lock);
}

# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if (__malloc_initialized < 1)
    return;

  thread_arena = save_arena;
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  free_list = NULL;
  for (ar_ptr = &main_arena;; )
    {
      mutex_init (&ar_ptr->mutex);
      if (ar_ptr != save_arena)
        {
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  mutex_init (&list_lock);
  atfork_recursive_cntr = 0;
}

# else

#  define ptmalloc_unlock_all2 ptmalloc_unlock_all
# endif
#endif  /* !NO_THREADS */

/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
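/* For example, given an environment entry "MALLOC_ARENA_MAX=2",
   next_env_entry returns a pointer to "ARENA_MAX=2" and advances
   *position past the entry; ptmalloc_init below then dispatches on the
   length of the name in front of the '='.  */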

#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;
  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
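/* A typical (illustrative) invocation exercising the environment
   handling above:

     MALLOC_CHECK_=3 MALLOC_ARENA_MAX=2 ./app

   This selects check_action 3 for malloc checking and caps the number
   of arenas at two.  Note that the numeric knobs are ignored whenever
   __libc_enable_secure is set (e.g. for set-uid programs).  */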

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all, \
                      ptmalloc_unlock_all2)
#endif


/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
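/* Why mapping HEAP_MAX_SIZE << 1 bytes guarantees an aligned window:
   if mmap returns p1, then p2 = (p1 + HEAP_MAX_SIZE - 1)
   & ~(HEAP_MAX_SIZE - 1) satisfies p1 <= p2 and p2 + HEAP_MAX_SIZE <=
   p1 + 2 * HEAP_MAX_SIZE, so an aligned HEAP_MAX_SIZE region always
   fits inside the double-sized mapping; the leading ul = p2 - p1 bytes
   and the trailing HEAP_MAX_SIZE - ul bytes are then unmapped again.  */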

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap. */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}
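/* On Linux, check_may_shrink_heap (see the sysdeps malloc-sysdep.h) is
   expected to return true only under strict overcommit accounting
   (/proc/sys/vm/overcommit_memory == 2), where the PROT_NONE remapping
   actually returns commit charge to the system; otherwise the cheaper
   MADV_DONTNEED path keeps the mapping intact.  */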

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL; \
      __munmap ((char *) (heap), HEAP_MAX_SIZE); \
    } while (0)

static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      arena_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  thread_arena = a;
  mutex_init (&a->mutex);
  (void) mutex_lock (&a->mutex);

  (void) mutex_lock (&list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

  (void) mutex_unlock (&list_lock);

  return a;
}
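/* Note the locking order above: the new arena's mutex is acquired
   before list_lock, so by the time the arena becomes reachable through
   main_arena.next it is already locked and owned by the creating
   thread.  The atomic_write_barrier ensures the arena is fully
   initialized before other threads walking the list can observe the
   new next pointer.  */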

static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void) mutex_lock (&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void) mutex_unlock (&list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void) mutex_lock (&result->mutex);
          thread_arena = result;
        }
    }

  return result;
}
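/* The initial unlocked read of free_list above is a fast path: it may
   race with other threads, but the value is re-read under list_lock
   before an arena is detached, so a stale non-NULL read costs at most
   one unnecessary lock round trip.  */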

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        break;
    }

  /* We could not find any arena that was either not corrupted or not the one
     we wanted to avoid.  */
  if (result == begin || result == avoid_arena)
    return NULL;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void) mutex_lock (&result->mutex);

out:
  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
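/* next_to_use is deliberately a bare static variable updated without
   synchronization: concurrent writers can only perturb the round-robin
   starting point, which is harmless for correctness.  */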

static mstate
internal_function
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}
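/* NARENAS_FROM_NCORES (defined in malloc.c) is, at the time of
   writing, (n) * (sizeof (long) == 4 ? 2 : 8), i.e. two arenas per
   core on 32-bit and eight per core on 64-bit; an 8-core 64-bit
   machine would thus allow up to 64 arenas before reused_arena starts
   recycling existing ones.  */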

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;

      ar_ptr = &main_arena;
      (void) mutex_lock (&ar_ptr->mutex);
    }
  else
    {
      (void) mutex_unlock (&ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      (void) mutex_lock (&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void) mutex_unlock (&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */