/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001,2002,2003,2004,2005,2006,2007,2009
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */


#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS.  */

typedef struct _heap_info {
  mstate ar_ptr;            /* Arena for this heap. */
  struct _heap_info *prev;  /* Previous heap. */
  size_t size;              /* Current size in bytes. */
  size_t mprotect_size;     /* Size in bytes that has been mprotected
                               PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
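
/* Worked example for the pad expression (assuming pointers and size_t
   are both SIZE_SZ bytes wide): the four members above occupy
   4 * SIZE_SZ bytes and the first chunk contributes another
   2 * SIZE_SZ, so the pad must be (-6 * SIZE_SZ) mod MALLOC_ALIGNMENT.
   With SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16, -24 & 15 == 8, giving
   sizeof (heap_info) + 2 * SIZE_SZ == 24 + 8 == 32, a multiple of 16;
   with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16 the pad has length
   zero.  */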

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;
#ifdef PER_THREAD
static size_t narenas;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

#if USE_ARENAS

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
#define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
#else
#define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
#endif
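
/* Illustrative fast-path sketch (roughly what an allocator entry
   point in malloc.c does; `nb' and `victim' are placeholders here):

     mstate ar_ptr;
     arena_get(ar_ptr, nb);                   // TSD lookup + lock
     victim = ar_ptr ? _int_malloc(ar_ptr, nb) : 0;
     if(ar_ptr)
       (void)mutex_unlock(&ar_ptr->mutex);

   In the common case this costs one tsd_getspecific() plus one mutex
   operation on the thread's previously used arena.  */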

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
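
/* Since every non-main heap starts at a HEAP_MAX_SIZE-aligned address,
   the owning heap is found by masking rather than searching: e.g. with
   the fallback HEAP_MAX_SIZE of 0x100000, a chunk at (illustrative)
   address 0x4035a8f0 masks to the heap_info at 0x40300000, whose
   ar_ptr field names the arena.  */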

#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           __const __malloc_ptr_t);
# if !defined _LIBC || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
                                             __const __malloc_ptr_t);
# endif
static void (*save_free_hook) (__malloc_ptr_t __ptr,
                               __const __malloc_ptr_t);
static Void_t* save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((Void_t*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}

static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);     /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))              /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

#ifdef ATOMIC_FASTBINS
  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
#else
  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, p);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
#endif
}


/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */
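
/* Illustrative sequence around fork() (the handlers are registered
   via thread_atfork() in ptmalloc_init below):

     parent: ptmalloc_lock_all()    - take list_lock and every arena mutex
     fork()
     parent: ptmalloc_unlock_all()  - release them in the parent
     child:  ptmalloc_unlock_all2() - re-initialize them in the child

   While the locks are held, other threads entering malloc/free are
   diverted to malloc_atfork/free_atfork and block on list_lock.  */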

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      Void_t *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */

/* Initialization routine. */
#ifdef _LIBC
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
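
/* For example, given the environment entry "MALLOC_TOP_PAD_=131072",
   the scan above matches the "MALLOC_" prefix and returns a pointer
   to "TOP_PAD_=131072"; ptmalloc_init then dispatches on the length
   of the part before the '='.  */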
#endif /* _LIBC */

/* Set up basic state so that _int_malloc et al can work.  */
static void
ptmalloc_init_minimal (void)
{
#if DEFAULT_TOP_PAD != 0
  mp_.top_pad = DEFAULT_TOP_PAD;
#endif
  mp_.n_mmaps_max = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize = malloc_getpagesize;
#ifdef PER_THREAD
# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
  mp_.arena_test = NARENAS_FROM_NCORES (1);
  narenas = 1;
#endif
}
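
/* NARENAS_FROM_NCORES gives 2 arenas per core on 32-bit targets
   (sizeof(long) == 4) and 8 per core on 64-bit ones, so arena_test
   starts out as 2 or 8; reused_arena() below only starts recycling
   existing arenas once more than arena_test arenas exist.  */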

#ifdef _LIBC
# ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
# endif

# if defined SHARED && !USE___THREAD
/* This is called by __pthread_initialize_minimal when it needs to use
   malloc to set up the TLS state.  We cannot do the full work of
   ptmalloc_init (below) until __pthread_initialize_minimal has finished,
   so it has to switch to using the special startup-time hooks while doing
   those allocations.  */
void
__libc_malloc_pthread_startup (bool first_time)
{
  if (first_time)
    {
      ptmalloc_init_minimal ();
      save_malloc_hook = __malloc_hook;
      save_memalign_hook = __memalign_hook;
      save_free_hook = __free_hook;
      __malloc_hook = malloc_starter;
      __memalign_hook = memalign_starter;
      __free_hook = free_starter;
    }
  else
    {
      __malloc_hook = save_malloc_hook;
      __memalign_hook = save_memalign_hook;
      __free_hook = save_free_hook;
    }
}
# endif
#endif

static void
ptmalloc_init (void)
{
#if __STD_C
  const char* s;
#else
  char* s;
#endif
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef _LIBC
# if defined SHARED && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
#endif
    ptmalloc_init_minimal();

#ifndef NO_THREADS
# if defined _LIBC
  /* We know __pthread_initialize_minimal has already been called,
     and that is enough.  */
#  define NO_STARTER
# endif
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* !defined _LIBC */
# endif /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

#if defined _LIBC && defined SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# ifndef NO_STARTER
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# else
#  undef NO_STARTER
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! secure)
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    mALLOPt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    mALLOPt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! secure)
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    mALLOPt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! secure)
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! secure)
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_PERTURB_")))
        mALLOPt(M_PERTURB, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif
  if(s && s[0]) {
    mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif


/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;
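
/* Worked example of the alignment trick in new_heap below (addresses
   illustrative, HEAP_MAX_SIZE == 0x100000): a 2 * HEAP_MAX_SIZE
   mapping at p1 == 0x40c30000 contains exactly one aligned start,
   p2 == 0x40d00000.  The leading 0xd0000 bytes and the trailing
   0x30000 bytes are unmapped again, leaving one aligned HEAP_MAX_SIZE
   region.  If p1 happens to be aligned already (ul == 0), the second
   half is unmapped as well, but its address is remembered in
   aligned_heap_area for the next call.  */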

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
                      MAP_PRIVATE|MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}

/* Grow a heap.  diff is automatically rounded up to a
   multiple of the page size. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (mprotect((char *)h + h->mprotect_size,
                 (unsigned long) new_size - h->mprotect_size,
                 PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}

/* Shrink a heap. */

static int
#if __STD_C
shrink_heap(heap_info *h, long diff)
#else
shrink_heap(h, diff) heap_info *h; long diff;
#endif
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
#ifdef _LIBC
  if (__builtin_expect (__libc_enable_secure, 0))
#else
  if (1)
#endif
    {
      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
                      MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
        return -2;
      h->mprotect_size = new_size;
    }
#ifdef _LIBC
  else
    madvise ((char *)h + new_size, diff, MADV_DONTNEED);
#endif
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do {								\
    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
      aligned_heap_area = NULL;					\
    munmap((char*)(heap), HEAP_MAX_SIZE);			\
  } while (0)

static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size". */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list. */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  ++narenas;

  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
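
/* Note the publication order in _int_new_arena: the arena is fully
   initialized and its mutex already held before atomic_write_barrier()
   makes it reachable through main_arena.next, so the lock-free list
   traversal in arena_get2 never observes a half-constructed arena.  */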

#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (Void_t *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}
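
/* The unlocked read of free_list at the top of get_free_list is only
   a fast-path hint; it is re-read under list_lock before being
   trusted, so a stale NULL merely sends the caller on to
   reused_arena() or _int_new_arena().  */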

static mstate
reused_arena (void)
{
  if (narenas <= mp_.arena_test)
    return NULL;

  static int narenas_limit;
  if (narenas_limit == 0)
    {
      if (mp_.arena_max != 0)
        narenas_limit = mp_.arena_max;
      else
        {
          int n = __get_nprocs ();

          if (n >= 1)
            narenas_limit = NARENAS_FROM_NCORES (n);
          else
            /* We have no information about the system.  Assume two
               cores.  */
            narenas_limit = NARENAS_FROM_NCORES (2);
        }
    }

  if (narenas < narenas_limit)
    return NULL;

  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (Void_t *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif

static mstate
internal_function
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;

#ifdef PER_THREAD
  if ((a = get_free_list ()) == NULL
      && (a = reused_arena ()) == NULL)
    /* Nothing immediately available, so generate a new arena.  */
    a = _int_new_arena(size);
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}

#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  Void_t *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif

#endif /* USE_ARENAS */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */