/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */


#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info {
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
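
/* A worked example of the padding computation above (an editorial note, not
   from the original source): on the usual configurations where pointers and
   INTERNAL_SIZE_T have the same size, the four members occupy 4 * SIZE_SZ
   bytes, so the quantity that must be a multiple of MALLOC_ALIGNMENT,
   sizeof (heap_info) + 2 * SIZE_SZ, equals 6 * SIZE_SZ + pad.  Hence
   pad = (-6 * SIZE_SZ) mod MALLOC_ALIGNMENT, which is exactly what
   -6 * SIZE_SZ & MALLOC_ALIGN_MASK computes: e.g. 8 bytes of padding when
   SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16, and 0 when SIZE_SZ == 8.  */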

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
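
/* (The declaration above is the negative-array-size trick: if the padding
   is wrong, the array gets size -1 and the compiler rejects the file;
   otherwise it has size 1 and the unused declaration is harmless.) */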

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
#ifdef PER_THREAD
static size_t narenas = 1;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#endif
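
/* A rough usage sketch of the macros above, as a caller such as
   __libc_malloc is expected to drive them (illustrative only; the real
   callers live in malloc.c):

     mstate ar_ptr;
     void *victim;

     arena_get(ar_ptr, bytes);   -- look up and lock, or create an arena
     if (!ar_ptr)
       return 0;                 -- no arena could be obtained
     victim = _int_malloc(ar_ptr, bytes);
     (void)mutex_unlock(&ar_ptr->mutex);
*/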

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
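
/* Because every non-main heap is mmap()ed at an address that is a multiple
   of HEAP_MAX_SIZE, heap_for_ptr() only needs to clear the low bits of a
   chunk address.  For example (illustrative addresses, using the 1 MB
   fallback value of HEAP_MAX_SIZE): a chunk at 0x2aaaab7f3010 belongs to
   the heap_info at 0x2aaaab700000, whose ar_ptr member then yields the
   owning arena.  */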


/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           const __malloc_ptr_t);
static void (*save_free_hook) (__malloc_ptr_t __ptr,
                               const __malloc_ptr_t);
static void* save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}

static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);       /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))              /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}


/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

# else

#  define ptmalloc_unlock_all2 ptmalloc_unlock_all

# endif

#endif  /* !NO_THREADS */

/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}


#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
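  /* At this point every recognized MALLOC_* tunable found in the
     environment (MALLOC_TOP_PAD_, MALLOC_PERTURB_, MALLOC_MMAP_MAX_,
     MALLOC_TRIM_THRESHOLD_, MALLOC_MMAP_THRESHOLD_ and, under PER_THREAD,
     MALLOC_ARENA_MAX / MALLOC_ARENA_TEST) has already been applied via
     __libc_mallopt; only MALLOC_CHECK_ is deferred, with S pointing at its
     value.  */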
  if(s && s[0]) {
    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif



/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing ones, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
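
/* In outline, new_heap below obtains an aligned region as follows (an
   editorial sketch of the common path, not additional code):

     p1 = MMAP(0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
     p2 = p1 rounded up to the next multiple of HEAP_MAX_SIZE;
     unmap [p1, p2) and everything past p2 + HEAP_MAX_SIZE, keeping a
     single aligned HEAP_MAX_SIZE region at p2;
     if p1 was already aligned, record p2 + HEAP_MAX_SIZE in
     aligned_heap_area as a hint for the next call (that second half is
     unmapped like the rest).  */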

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
  if (__builtin_expect (__libc_enable_secure, 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do {                                                          \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)    \
      aligned_heap_area = NULL;                                 \
    __munmap((char*)(heap), HEAP_MAX_SIZE);                     \
  } while (0)

static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}


#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif

static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
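      /* Concretely: while narenas_limit is still 0, narenas_limit - 1
         wraps around to SIZE_MAX, so the test below always succeeds and a
         new arena is created; once narenas_limit has been set from
         mp_.arena_max or NARENAS_FROM_NCORES, the test fails as soon as
         narenas reaches the limit and reused_arena() is used instead.  */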
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}

#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */