/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE malloc
#endif
#include <elf/dl-tunables.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];

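/* Added note (not in the original source): the pad size is the
   smallest value making 6 * SIZE_SZ + pad a multiple of
   MALLOC_ALIGNMENT, i.e. (-6 * SIZE_SZ) mod MALLOC_ALIGNMENT.  The
   four fields above occupy 4 * SIZE_SZ bytes, so with SIZE_SZ == 4
   and MALLOC_ALIGNMENT == 16:

     -6 * SIZE_SZ & MALLOC_ALIGN_MASK  ==  -24 & 15  ==  8

   and sizeof (heap_info) + 2 * SIZE_SZ == 16 + 8 + 8 == 32, a
   multiple of 16.  The extern array declaration is a pre-C11 static
   assertion: a failed check gives the array the invalid size -1,
   which the compiler rejects.  */
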
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
static size_t narenas = 1;
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);

/* Already initialized? */
int __malloc_initialized = -1;

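/* Added note (not in the original source): taken together, the two
   comments above imply a single safe lock-acquisition order:

     __libc_lock_lock (list_lock);          -- first; no arena lock held yet
     __libc_lock_lock (some_arena->mutex);  -- arena locks may follow
     __libc_lock_lock (free_list_lock);     -- last; no further locks after it

   Each level is optional, but taking them in any other relative order
   risks a deadlock against fork or arena creation.  */
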
/**************************************************************************/

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */

#define arena_get(ptr, size) do { \
      ptr = thread_arena; \
      arena_lock (ptr, size); \
  } while (0)

#define arena_lock(ptr, size) do { \
      if (ptr && !arena_is_corrupt (ptr)) \
        __libc_lock_lock (ptr->mutex); \
      else \
        ptr = arena_get2 ((size), NULL); \
  } while (0)
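
/* Added usage sketch (not in the original source), simplified from
   the allocation entry points: callers pair arena_get with an
   explicit unlock, roughly

     mstate ar_ptr;
     void *victim;

     arena_get (ar_ptr, bytes);   -- lock an arena, creating one if needed
     victim = _int_malloc (ar_ptr, bytes);
     if (ar_ptr != NULL)
       __libc_lock_unlock (ar_ptr->mutex);

   with a retry through arena_get_retry when _int_malloc fails.  */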

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)

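/* Added note (not in the original source): because every heap is
   mmap()ed at an address aligned to the power-of-two HEAP_MAX_SIZE,
   clearing the low bits of any chunk address recovers its heap
   header.  E.g. with HEAP_MAX_SIZE == 0x100000, a chunk at

     0x7f3a00042010 & ~(0x100000 - 1)  ==  0x7f3a00000000

   which is the start of the containing heap_info.  */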

/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

void
internal_function
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}

void
internal_function
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}

void
internal_function
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}

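/* Added sketch (not the actual fork code): fork is expected to call
   these handlers around the process clone, roughly

     __malloc_fork_lock_parent ();
     pid = <create the child process>;
     if (pid == 0)
       __malloc_fork_unlock_child ();   -- child: reinit locks, rebuild free_list
     else
       __malloc_fork_unlock_parent ();  -- parent: release in reverse order

   so that no arena lock stays held by a thread that does not exist in
   the child.  */
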
#if HAVE_TUNABLES
static inline int do_set_mallopt_check (int32_t value);
void
DL_TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
{
  int32_t value = (int32_t) valp->numval;
  do_set_mallopt_check (value);
  if (check_action != 0)
    __malloc_check_init ();
}

# define DL_TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
void \
DL_TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
{ \
  __type value = (__type) (valp)->numval; \
  do_ ## __name (value); \
}

DL_TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
DL_TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
DL_TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
DL_TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
DL_TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
DL_TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
DL_TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
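
/* For reference (added note, not in the original source), the first
   instantiation above expands to roughly

     static inline int do_set_mmap_threshold (size_t value);
     void
     DL_TUNABLE_CALLBACK (set_mmap_threshold) (tunable_val_t *valp)
     {
       size_t value = (size_t) (valp)->numval;
       do_set_mmap_threshold (value);
     }

   i.e. each callback merely narrows the tunable value to the right
   type and forwards it to the corresponding do_* helper.  */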
#else
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif


#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;

#if HAVE_TUNABLES
  /* Ensure initialization/consolidation and do it under a lock so that a
     thread attempting to use the arena in parallel waits on us till we
     finish.  */
  __libc_lock_lock (main_arena.mutex);
  malloc_consolidate (&main_arena);

  TUNABLE_SET_VAL_WITH_CALLBACK (check, NULL, set_mallopt_check);
  TUNABLE_SET_VAL_WITH_CALLBACK (top_pad, NULL, set_top_pad);
  TUNABLE_SET_VAL_WITH_CALLBACK (perturb, NULL, set_perturb_byte);
  TUNABLE_SET_VAL_WITH_CALLBACK (mmap_threshold, NULL, set_mmap_threshold);
  TUNABLE_SET_VAL_WITH_CALLBACK (trim_threshold, NULL, set_trim_threshold);
  TUNABLE_SET_VAL_WITH_CALLBACK (mmap_max, NULL, set_mmaps_max);
  TUNABLE_SET_VAL_WITH_CALLBACK (arena_max, NULL, set_arena_max);
  TUNABLE_SET_VAL_WITH_CALLBACK (arena_test, NULL, set_arena_test);
  __libc_lock_unlock (main_arena.mutex);
#else
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
#endif

#if HAVE_MALLOC_INIT_HOOK
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}

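/* Added usage note (not in the original source): the two
   configuration paths above correspond to, for example,

     GLIBC_TUNABLES=glibc.malloc.arena_max=2:glibc.malloc.mmap_threshold=65536 ./app
         (build with HAVE_TUNABLES)

     MALLOC_ARENA_MAX=2 MALLOC_MMAP_THRESHOLD_=65536 ./app
         (environment-variable build)

   Most MALLOC_* variables are ignored for secure setuid/setgid
   binaries, per the __libc_enable_secure checks above.  */
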
/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as the kernel ensures atomicity for us - in the worst
   case we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr
   in multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

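/* Added note (not in the original source): mapping twice
   HEAP_MAX_SIZE guarantees an aligned sub-range.  E.g. with
   HEAP_MAX_SIZE == 0x100000, if mmap returns p1 == 0x7f0000084000:

     p2 = (p1 + 0xfffff) & ~0xfffff  ==  0x7f0000100000
     ul = p2 - p1                    ==  0x7c000   (unmapped prefix)
     HEAP_MAX_SIZE - ul              ==  0x84000   (unmapped tail)

   leaving exactly one aligned HEAP_MAX_SIZE region mapped.  */
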
/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

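/* Added note (not in the original source): the whole HEAP_MAX_SIZE
   region is reserved PROT_NONE up front by new_heap and committed
   lazily here.  Growing a 64 KiB heap by 100 bytes on a 4 KiB-page
   system does

     diff     = ALIGN_UP (100, 4096)  ==  4096
     new_size = 0x10000 + 0x1000      ==  0x11000

   and mprotects only the single new page read/write; mprotect_size is
   never lowered unless shrink_heap can actually discard the pages.  */
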
/* Shrink a heap. */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do { \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
        aligned_heap_area = NULL; \
      __munmap ((char *) (heap), HEAP_MAX_SIZE); \
    } while (0)

static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long) (top_size) <
      (unsigned long) (mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
         case of allocation failure.  This means that it is likely not
         beneficial to put the arena on free_list even if the
         reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}

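/* Added note (not in the original source): a freshly created
   non-main-arena heap is laid out as

     +-----------+---------------------+-----+-------------------+
     | heap_info | struct malloc_state | pad | top chunk ...     |
     +-----------+---------------------+-----+-------------------+
     ^ aligned to HEAP_MAX_SIZE               h->size bytes total

   with a == (mstate) (h + 1), and top placed at the first
   MALLOC_ALIGNMENT-aligned user address after the arena state.  */
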
/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;

          /* The arena will be attached to this thread.  */
          assert (result->attached_threads == 0);
          result->attached_threads = 1;

          detach_arena (replaced_arena);
        }
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
          thread_arena = result;
        }
    }

  return result;
}

/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
        {
          /* Remove the requested arena from the list.  */
          *previous = p->next_free;
          break;
        }
      else
        previous = &p->next_free;
    }
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        /* We looped around the arena list.  We could not find any
           arena that was either not corrupted or not the one we
           wanted to avoid.  */
        return NULL;
    }

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.  */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
internal_function
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

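/* Added note (not in the original source): with the defaults,
   NARENAS_FROM_NCORES (defined in malloc.c) caps the arena count at
   2 * ncores on 32-bit and 8 * ncores on 64-bit targets, so an
   8-core 64-bit machine allows up to 64 arenas before threads start
   sharing via reused_arena; MALLOC_ARENA_MAX or the arena_max
   tunable overrides this limit.  */
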
/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      /* Don't touch the main arena if it is corrupt.  */
      if (arena_is_corrupt (&main_arena))
        return NULL;

      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */