/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* Get the implementation for check_may_shrink_heap.  */
#include <malloc-sysdep.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
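
/* Illustration (an editorial sketch, not additional logic): because every
   heap is mmap()ed at an address that is a multiple of HEAP_MAX_SIZE and
   is at most HEAP_MAX_SIZE bytes long, the heap_info header that owns a
   chunk pointer p in a non-main arena can be recovered by masking off the
   low bits of the address, e.g.

     heap_info *h = (heap_info *) ((unsigned long) p & ~(HEAP_MAX_SIZE - 1));

   which is exactly what the heap_for_ptr() macro below does.  */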


#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info {
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                             ? -1 : 1];
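
/* Worked example of the padding arithmetic above (an illustration; the
   concrete numbers depend on the ABI and assume pointers and size_t are
   both SIZE_SZ bytes wide): with SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16,
   the four members of heap_info occupy 4 * SIZE_SZ == 16 bytes, so
   pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK] == pad[-24 & 15] == pad[8],
   giving sizeof (heap_info) == 24 and sizeof (heap_info) + 2 * SIZE_SZ
   == 32, a multiple of MALLOC_ALIGNMENT.  With SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16 the pad size works out to 0, since
   32 + 2 * SIZE_SZ == 48 is already a multiple of 16.  */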

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
#ifdef PER_THREAD
static size_t narenas = 1;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#endif

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)


/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           const __malloc_ptr_t);
static void (*save_free_hook) (__malloc_ptr_t __ptr,
                               const __malloc_ptr_t);
static void* save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}

static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
    {
      munmap_chunk(p);
      return;
    }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}


/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

# else

#  define ptmalloc_unlock_all2 ptmalloc_unlock_all

# endif

#endif  /* !NO_THREADS */

/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}


#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0]) {
    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif



/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
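
/* Sketch of the alignment strategy used by new_heap() below (an
   illustration, not additional logic): to obtain a HEAP_MAX_SIZE-aligned
   region, twice that amount of PROT_NONE address space is reserved and the
   surplus is returned, roughly

     p1 = mmap (0, HEAP_MAX_SIZE << 1, PROT_NONE, ...);
     p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                    & ~(HEAP_MAX_SIZE - 1));    /* round up to alignment */
     munmap (p1, p2 - p1);                      /* unused head, if any */
     munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - (p2 - p1));  /* unused tail */

   If p1 happened to be aligned already (p2 == p1), the tail is still
   unmapped, but its address is remembered in aligned_heap_area so the next
   call can try a single MAP there instead of the double-sized
   reservation.  */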

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__builtin_expect (check_may_shrink_heap (), 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
      aligned_heap_area = NULL; \
    __munmap((char*)(heap), HEAP_MAX_SIZE); \
  } while (0)

static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}


#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif

static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
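
/* Note on the arena limit test above (an illustration; the default values
   live in malloc.c): as long as mp_.arena_max is unset and narenas has not
   yet exceeded mp_.arena_test, narenas_limit stays 0, so narenas_limit - 1
   wraps around to SIZE_MAX and the test always permits creating a fresh
   arena.  Once the limit has been computed (roughly 2 arenas per core on
   32-bit and 8 per core on 64-bit), threads that would exceed it fall back
   to reused_arena() instead.  */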

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  if(ar_ptr != &main_arena) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = &main_arena;
    (void)mutex_lock(&ar_ptr->mutex);
  } else {
    /* Grab ar_ptr->next prior to releasing its lock.  */
    mstate prev = ar_ptr->next ? ar_ptr : 0;
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = arena_get2(prev, bytes, ar_ptr);
  }

  return ar_ptr;
}

#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */