/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* $Id$ */

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */


#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS.  */

typedef struct _heap_info {
  mstate ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t pad;              /* Make sure the following data is properly aligned. */
} heap_info;

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

#if USE_ARENAS

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)
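
/* A sketch (not part of this file) of how a caller such as
   public_mALLOc() typically pairs arena_get() with an explicit
   unlock around the actual allocation:

     mstate ar_ptr;
     Void_t *victim;

     arena_get(ar_ptr, sz);
     if(!ar_ptr)
       return 0;
     victim = _int_malloc(ar_ptr, sz);
     (void)mutex_unlock(&ar_ptr->mutex);
     return victim;
*/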

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
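
/* Because each non-main heap is HEAP_MAX_SIZE-aligned and at most
   HEAP_MAX_SIZE bytes long, masking off the low address bits of any
   chunk recovers its heap_info header in a single AND.  A worked
   example, assuming the default HEAP_MAX_SIZE of 1024*1024 (0x100000):

     chunk at     0x40b13450
     & ~0xfffff   ----------
     heap_info at 0x40b00000

   Main-arena chunks come from sbrk() and have no such alignment,
   which is why arena_for_chunk() first tests chunk_non_main_arena(). */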

#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
                                                       __const __malloc_ptr_t));
static void           (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                                     __const __malloc_ptr_t));
static Void_t*        save_arena;

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((Void_t*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}

static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);   /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))              /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}

static void
ptmalloc_unlock_all __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In LinuxThreads, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_init(&list_lock);
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */

/* Initialization routine. */
#ifdef _LIBC
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif /* _LIBC */

static void
ptmalloc_init __MALLOC_P((void))
{
#if __STD_C
  const char* s;
#else
  char* s;
#endif
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

  mp_.top_pad        = DEFAULT_TOP_PAD;
  mp_.n_mmaps_max    = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize       = malloc_getpagesize;

#ifndef NO_THREADS
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __free_hook = free_starter;
#ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#endif
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
  s = NULL;
  {
    char **runp = _environ;
    char *envline;

    while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                             0))
      {
        size_t len = strcspn (envline, "=");

        if (envline[len] != '=')
          /* This is a "MALLOC_" variable at the end of the string
             without a '=' character.  Ignore it since otherwise we
             will access invalid memory below.  */
          continue;

        switch (len)
          {
          case 6:
            if (memcmp (envline, "CHECK_", 6) == 0)
              s = &envline[7];
            break;
          case 8:
            if (! secure && memcmp (envline, "TOP_PAD_", 8) == 0)
              mALLOPt(M_TOP_PAD, atoi(&envline[9]));
            break;
          case 9:
            if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
              mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
            break;
          case 15:
            if (! secure)
              {
                if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                  mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                  mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
              }
            break;
          default:
            break;
          }
      }
  }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif
  if(s) {
    if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}
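
/* Example: the environment variables recognized above tune the
   allocator at program start, e.g. (a sketch):

     MALLOC_CHECK_=1 MALLOC_TRIM_THRESHOLD_=65536 MALLOC_MMAP_MAX_=0 ./app

   MALLOC_CHECK_ selects the consistency-check action; the others feed
   mALLOPt() as shown above.  All but MALLOC_CHECK_ are ignored when
   __libc_enable_secure is set (e.g. for setuid binaries). */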

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
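
/* Where no static mechanism exists, the same registration happens at
   run time from ptmalloc_init() via thread_atfork(), which on POSIX
   threads corresponds to (a sketch):

     pthread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all,
                    ptmalloc_unlock_all2);

   i.e. lock everything before fork(), unlock in the parent, and
   re-initialize the mutexes (on Linux) in the child. */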


/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
  if(p1 != MAP_FAILED) {
    p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
    ul = p2 - p1;
    munmap(p1, ul);
    munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
  } else {
    /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
       is already aligned. */
    p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
    if(p2 == MAP_FAILED)
      return 0;
    if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
      munmap(p2, HEAP_MAX_SIZE);
      return 0;
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
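
/* A worked example of the alignment trick above, assuming the default
   HEAP_MAX_SIZE of 1 MiB (0x100000): if the 2 MiB probe mapping comes
   back at p1 = 0x40b40000, then

     p2 = (0x40b40000 + 0xfffff) & ~0xfffff = 0x40c00000
     ul = p2 - p1                           = 0xc0000

   so the leading 0xc0000 bytes and the trailing 0x100000 - 0xc0000 =
   0x40000 bytes are unmapped again, leaving exactly one aligned
   HEAP_MAX_SIZE region. */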

/* Grow or shrink a heap.  diff is automatically rounded up to a
   multiple of the page size if it is positive. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  if(diff >= 0) {
    diff = (diff + page_mask) & ~page_mask;
    new_size = (long)h->size + diff;
    if(new_size > HEAP_MAX_SIZE)
      return -1;
    if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
      return -2;
  } else {
    new_size = (long)h->size + diff;
    if(new_size < (long)sizeof(*h))
      return -1;
    /* Try to re-map the extra heap space freshly to save memory, and
       make it inaccessible. */
    if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
                    MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
      return -2;
    /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
  }
  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)

static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(grow_heap(heap, -extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
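
/* A worked example of the "extra" computation above, assuming a
   4096-byte page, pad == 0 and MINSIZE == 16: with top_size == 50000,

     extra = ((50000 - 0 - 16 + 4095)/4096 - 1) * 4096
           = (13 - 1) * 4096 = 49152

   so the heap shrinks by 49152 bytes and the top chunk keeps
   50000 - 49152 = 848 bytes, i.e. always at least pad + MINSIZE
   and never less than one page is given back. */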

static mstate
internal_function
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;
  int err;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(mutex_trylock(&list_lock)) {
    a = a_tsd;
    goto repeat;
  }
  (void)mutex_unlock(&list_lock);

  /* Nothing immediately available, so generate a new arena. */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  err = mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list. */
  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(err) /* locking failed; keep arena for further attempts later */
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}

/* Create a new arena with initial size "size". */

mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  return a;
}
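
/* The resulting layout of a fresh non-main heap (a sketch):

     +--------------------+  <- HEAP_MAX_SIZE-aligned, from new_heap()
     | heap_info (h)      |
     +--------------------+
     | malloc_state (a)   |  <- h->ar_ptr == (mstate)(h+1)
     +--------------------+
     | alignment padding  |  <- at most MALLOC_ALIGNMENT-1 bytes
     +--------------------+
     | top chunk          |  <- top(a), extends to (char*)h + h->size
     +--------------------+
*/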

#endif /* USE_ARENAS */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */