/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */


#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info {
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];
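/* Worked example of the padding math, for illustration only (the concrete
   values are assumptions about a typical configuration, not taken from this
   file): with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16 (MALLOC_ALIGN_MASK ==
   15), -6 * SIZE_SZ == -48 and -48 & 15 == 0, so pad[] is empty;
   sizeof (heap_info) == 32 and 32 + 2 * SIZE_SZ == 48, a multiple of 16.
   If a port used MALLOC_ALIGNMENT == 32 instead, the expression would yield
   16 bytes of padding and the invariant would still hold.  */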

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
#ifdef PER_THREAD
static size_t narenas = 1;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#endif
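/* For orientation only -- a condensed, hypothetical sketch (not code from
   this file, and with the retry/error handling of the real entry points
   omitted) of how an allocation function in malloc.c typically uses the
   macros above.  */
#if 0
static void *
example_malloc (size_t bytes)
{
  mstate ar_ptr;
  void *victim;

  arena_get (ar_ptr, bytes);   /* cached arena if possible, else arena_get2 */
  if (!ar_ptr)
    return 0;
  victim = _int_malloc (ar_ptr, bytes);
  (void) mutex_unlock (&ar_ptr->mutex);
  return victim;
}
#endif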

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
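/* Worked example (illustrative values): with HEAP_MAX_SIZE == 0x4000000
   (64 MiB), a non-main-arena chunk at 0x7f1234567890 masked with ~0x3ffffff
   gives 0x7f1234000000 -- the heap_info header at the start of the heap that
   contains the chunk, whose ->ar_ptr is the owning arena.  This is why
   HEAP_MAX_SIZE must be a power of two and every heap must start on a
   HEAP_MAX_SIZE boundary.  */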


/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static void *(*save_malloc_hook) (size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}

static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))              /* release mmapped memory. */
    {
      munmap_chunk(p);
      return;
    }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}


/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter. */
        goto out;

      /* This thread has to wait its turn. */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

# ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

# else

# define ptmalloc_unlock_all2 ptmalloc_unlock_all

# endif

#endif /* !NO_THREADS */
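/* Net effect of the handlers above, which ptmalloc_init registers via
   thread_atfork(): ptmalloc_lock_all runs before fork() and takes list_lock
   plus every arena mutex while swapping in the atfork hooks;
   ptmalloc_unlock_all runs in the parent afterwards and releases them and
   restores the hooks; the child runs ptmalloc_unlock_all2, which
   re-initializes the mutexes instead of unlocking them, since only the
   forking thread survives in the child.  */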

/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
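/* Example (hypothetical environment contents): given the slot
   "MALLOC_TRIM_THRESHOLD_=65536" in *position, next_env_entry advances
   *position past that slot and returns a pointer to
   "TRIM_THRESHOLD_=65536", i.e. the text after the "MALLOC_" prefix,
   which ptmalloc_init below then dispatches on by name length.  */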


#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0]) {
    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
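/* Summary of the environment variables recognized above (values parsed with
   atoi; all of them except MALLOC_CHECK_ are ignored for secure/setuid
   binaries): MALLOC_CHECK_, MALLOC_TOP_PAD_, MALLOC_PERTURB_,
   MALLOC_MMAP_MAX_, MALLOC_TRIM_THRESHOLD_, MALLOC_MMAP_THRESHOLD_ and,
   when PER_THREAD is defined, MALLOC_ARENA_MAX and MALLOC_ARENA_TEST.  */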

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif



/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}
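/* Worked example for the alignment dance above (illustrative addresses,
   assuming HEAP_MAX_SIZE == 0x4000000): if the double-sized mapping returns
   p1 = 0x7f1235234000, then p2 = 0x7f1238000000, ul = p2 - p1 = 0x2dcc000 is
   unmapped from the front, and HEAP_MAX_SIZE - ul = 0x1234000 is unmapped
   from past p2 + HEAP_MAX_SIZE, leaving exactly one HEAP_MAX_SIZE-aligned
   region of HEAP_MAX_SIZE bytes at p2.  Had p1 come back already aligned
   (ul == 0), the second half would instead be remembered in
   aligned_heap_area for the next call.  */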

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap. */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__builtin_expect (check_may_shrink_heap (), 0))
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
      aligned_heap_area = NULL; \
    __munmap((char*)(heap), HEAP_MAX_SIZE); \
  } while (0)

static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size". */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}


#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void)mutex_lock(&result->mutex);

 out:
  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif

static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
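      /* Illustrative reading of that NB (the configuration is assumed, not
         stated here): with arena_max unset and narenas still at or below
         arena_test, narenas_limit stays 0, so narenas_limit - 1 wraps to
         SIZE_MAX and the test below always succeeds -- a fresh arena is
         created until narenas exceeds arena_test, after which the
         NARENAS_FROM_NCORES limit computed above starts to apply.  */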
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      LIBC_PROBE (memory_arena_reuse, 2, a, a_tsd);
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    LIBC_PROBE (memory_arena_reuse_wait, 3, &list_lock, NULL, a_tsd);
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if(ar_ptr != &main_arena) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = &main_arena;
    (void)mutex_lock(&ar_ptr->mutex);
  } else {
    /* Grab ar_ptr->next prior to releasing its lock.  */
    mstate prev = ar_ptr->next ? ar_ptr : 0;
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = arena_get2(prev, bytes, ar_ptr);
  }

  return ar_ptr;
}

#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
977 */