/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif
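
/* For illustration (values not fixed by this file): with the usual
   definition of DEFAULT_MMAP_THRESHOLD_MAX in malloc.c,
   (4 * 1024 * 1024 * sizeof(long)), this works out to a HEAP_MAX_SIZE
   of 64MB on 64-bit and 32MB on 32-bit targets; the 1MB fallback
   applies only when that macro is not defined.  */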

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */


#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info {
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
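
/* For illustration, the padding arithmetic above: on typical
   configurations where pointers and INTERNAL_SIZE_T have the same
   size, the four members before `pad' occupy 4 * SIZE_SZ bytes, and
   the alignment requirement adds another 2 * SIZE_SZ of chunk
   bookkeeping, so 6 * SIZE_SZ bytes in total must be rounded up to a
   multiple of MALLOC_ALIGNMENT.  E.g. with SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16, (-6 * SIZE_SZ) & MALLOC_ALIGN_MASK
   evaluates to (-48) & 15 == 0, so the pad array is empty on the
   common LP64 ABI; likewise (-24) & 7 == 0 for ILP32.  */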

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
#ifdef PER_THREAD
static size_t narenas = 1;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#endif
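
/* Typical use, as in __libc_malloc (a sketch, not a verbatim quote of
   that function):

     mstate ar_ptr;
     void *victim;
     arena_get(ar_ptr, bytes);       // lock this thread's arena, or
                                     // find/create an available one
     victim = ar_ptr ? _int_malloc(ar_ptr, bytes) : NULL;
     if (ar_ptr)
       (void)mutex_unlock(&ar_ptr->mutex);
*/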

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)

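/* For illustration: heap_for_ptr relies on every non-main heap
   starting at a HEAP_MAX_SIZE-aligned address, so masking off the low
   bits of any chunk address inside it recovers the heap_info header.
   E.g. with HEAP_MAX_SIZE == 0x100000, a chunk at 0x7f0000654321
   masks down to the heap at 0x7f0000600000.  Main-arena chunks never
   have the NON_MAIN_ARENA bit set and never take this path.  */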

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           const __malloc_ptr_t);
static void (*save_free_hook) (__malloc_ptr_t __ptr,
                               const __malloc_ptr_t);
static void* save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}

static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                        /* chunk corresponding to mem */

  if (mem == 0)                       /* free(0) has no effect */
    return;

  p = mem2chunk(mem);    /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))            /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}


/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

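/* For reference: thread_atfork() in ptmalloc_init below registers
   ptmalloc_lock_all as the fork prepare handler, ptmalloc_unlock_all
   as the handler run in the parent afterwards, and
   ptmalloc_unlock_all2 as the one run in the child, so every arena
   mutex is held across the actual fork().  */
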
static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

# else

#  define ptmalloc_unlock_all2 ptmalloc_unlock_all

# endif

#endif  /* !NO_THREADS */

/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}


#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from a statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0]) {
    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
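
/* For illustration: the loop above means that setting, e.g.,
   MALLOC_TRIM_THRESHOLD_=131072 in the environment of a non-set*id
   process has the same effect as calling
   __libc_mallopt(M_TRIM_THRESHOLD, 131072) during startup, while
   MALLOC_CHECK_=3 sets check_action to 3 and enables the
   consistency-checking hooks via __malloc_check_init().  */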

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif


/* Managing heaps and arenas (for concurrent threads) */


#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;


/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
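
/* For illustration, the alignment dance above: with HEAP_MAX_SIZE ==
   0x100000, an mmap of 0x200000 bytes returning p1 == 0x7f0000234000
   yields p2 == 0x7f0000300000; the 0xcc000 bytes below p2 and the
   0x34000 bytes from p2 + HEAP_MAX_SIZE upward are unmapped again,
   leaving exactly one HEAP_MAX_SIZE region aligned to HEAP_MAX_SIZE.  */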

/* Grow a heap.  diff is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}

/* Shrink a heap. */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
  if (__builtin_expect (__libc_enable_secure, 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                          \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)    \
      aligned_heap_area = NULL;                                 \
    __munmap((char*)(heap), HEAP_MAX_SIZE);                     \
  } while (0)

static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
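
/* Sketch of what heap_trim does (hedged summary): while the arena's
   top chunk spans an entire heap other than the heap holding the
   arena itself, that heap is unmapped and top migrates back into the
   previous heap, just before the fencepost chunk that marks its end;
   once a heap that is still partially in use is reached, any whole
   pages of top beyond `pad' are released via shrink_heap.  */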

/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}


#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif

static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set, the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger than or equal to
         arena_test, narenas_limit is 0.  There is no possibility for
         narenas to be too big for the test to always fail, since there
         is not enough address space to create that many arenas.  */
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
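
/* For reference (quoted from malloc.c of this era, as an aid):
   NARENAS_FROM_NCORES(n) is defined as
   ((n) * (sizeof(long) == 4 ? 2 : 8)), so the default arena limit is
   two arenas per core on 32-bit and eight per core on 64-bit
   systems.  */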

#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */