/* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 2001-2016 Free Software Foundation, Inc.
+ Copyright (C) 2001-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
- not, see <http://www.gnu.org/licenses/>. */
+ not, see <https://www.gnu.org/licenses/>. */
#include <stdbool.h>
+#if HAVE_TUNABLES
+# define TUNABLE_NAMESPACE malloc
+#endif
+#include <elf/dl-tunables.h>
+
/* Compile-time constants. */
#define HEAP_MIN_SIZE (32 * 1024)
/* Arena free list. free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects. No other locks must be
acquired after free_list_lock has been acquired. */
-static mutex_t free_list_lock = _LIBC_LOCK_INITIALIZER;
+__libc_lock_define_initialized (static, free_list_lock);
static size_t narenas = 1;
static mstate free_list;
   While list_lock is acquired, no arena lock must have been acquired, but it is
permitted to acquire arena locks subsequently, while list_lock is
acquired. */
-static mutex_t list_lock = _LIBC_LOCK_INITIALIZER;
+__libc_lock_define_initialized (static, list_lock);
/* Already initialized? */
int __malloc_initialized = -1;
} while (0)
#define arena_lock(ptr, size) do { \
- if (ptr && !arena_is_corrupt (ptr)) \
- (void) mutex_lock (&ptr->mutex); \
+ if (ptr) \
+ __libc_lock_lock (ptr->mutex); \
else \
ptr = arena_get2 ((size), NULL); \
} while (0)
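
The do { ... } while (0) wrapper seen here is the standard idiom for making a multi-statement macro behave as a single statement, so it composes safely with if/else. A minimal illustration with a hypothetical SWAP macro (not from this file):

/* Unsafe: under `if (c) SWAP_UNSAFE (a, b); else ...` only the first
   statement is guarded by the condition.  */
#define SWAP_UNSAFE(a, b)  tmp = (a); (a) = (b); (b) = tmp

/* Safe: the expansion is exactly one statement and still consumes the
   trailing ';' the way a function call would.  */
#define SWAP(a, b)  do { int tmp = (a); (a) = (b); (b) = tmp; } while (0)
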
#define heap_for_ptr(ptr) \
((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
- (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
+ (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
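
The reordered arena_for_chunk test still relies on the invariant behind heap_for_ptr: non-main heaps are mmapped at HEAP_MAX_SIZE-aligned addresses, so clearing the low bits of any chunk address inside such a heap yields the heap_info header at the heap's start. A standalone sketch of the masking technique, with hypothetical names:

#include <stdint.h>

#define REGION_SIZE (1UL << 26)  /* stand-in for HEAP_MAX_SIZE; power of two */

struct region_header { int owner_id; };

/* Valid for any ptr inside a region allocated at a REGION_SIZE-aligned
   address: masking off the low bits recovers the region header.  */
static struct region_header *
region_for (const void *ptr)
{
  return (struct region_header *) ((uintptr_t) ptr & ~(REGION_SIZE - 1));
}
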
/**************************************************************************/
/* atfork support. */
-static void *(*save_malloc_hook)(size_t __size, const void *);
-static void (*save_free_hook) (void *__ptr, const void *);
-static void *save_arena;
-
-/* Magic value for the thread-specific arena pointer when
- malloc_atfork() is in use. */
-
-# define ATFORK_ARENA_PTR ((void *) -1)
-
-/* The following hooks are used while the `atfork' handling mechanism
- is active. */
-
-static void *
-malloc_atfork (size_t sz, const void *caller)
-{
- void *victim;
-
- if (thread_arena == ATFORK_ARENA_PTR)
- {
- /* We are the only thread that may allocate at all. */
- if (save_malloc_hook != malloc_check)
- {
- return _int_malloc (&main_arena, sz);
- }
- else
- {
- if (top_check () < 0)
- return 0;
-
- victim = _int_malloc (&main_arena, sz + 1);
- return mem2mem_check (victim, sz);
- }
- }
- else
- {
- /* Suspend the thread until the `atfork' handlers have completed.
- By that time, the hooks will have been reset as well, so that
- mALLOc() can be used again. */
- (void) mutex_lock (&list_lock);
- (void) mutex_unlock (&list_lock);
- return __libc_malloc (sz);
- }
-}
-
-static void
-free_atfork (void *mem, const void *caller)
-{
- mstate ar_ptr;
- mchunkptr p; /* chunk corresponding to mem */
-
- if (mem == 0) /* free(0) has no effect */
- return;
-
- p = mem2chunk (mem); /* do not bother to replicate free_check here */
-
- if (chunk_is_mmapped (p)) /* release mmapped memory. */
- {
- munmap_chunk (p);
- return;
- }
-
- ar_ptr = arena_for_chunk (p);
- _int_free (ar_ptr, p, thread_arena == ATFORK_ARENA_PTR);
-}
-
-
-/* Counter for number of times the list is locked by the same thread. */
-static unsigned int atfork_recursive_cntr;
-
/* The following three functions are called around fork from a
multi-threaded process. We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use malloc. */
void
__malloc_fork_lock_parent (void)
{
- mstate ar_ptr;
-
if (__malloc_initialized < 1)
return;
/* We do not acquire free_list_lock here because we completely
reconstruct free_list in __malloc_fork_unlock_child. */
-  if (mutex_trylock (&list_lock))
-    {
-      if (thread_arena == ATFORK_ARENA_PTR)
-        /* This is the same thread which already locks the global list.
-           Just bump the counter. */
-        goto out;
-
-      /* This thread has to wait its turn. */
-      (void) mutex_lock (&list_lock);
-    }
+  __libc_lock_lock (list_lock);
- for (ar_ptr = &main_arena;; )
+ for (mstate ar_ptr = &main_arena;; )
{
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if (ar_ptr == &main_arena)
break;
}
- save_malloc_hook = __malloc_hook;
- save_free_hook = __free_hook;
- __malloc_hook = malloc_atfork;
- __free_hook = free_atfork;
- /* Only the current thread may perform malloc/free calls now.
- save_arena will be reattached to the current thread, in
- __malloc_fork_lock_parent, so save_arena->attached_threads is not
- updated. */
- save_arena = thread_arena;
- thread_arena = ATFORK_ARENA_PTR;
-out:
- ++atfork_recursive_cntr;
}
void
__malloc_fork_unlock_parent (void)
{
- mstate ar_ptr;
-
if (__malloc_initialized < 1)
return;
- if (--atfork_recursive_cntr != 0)
- return;
-
- /* Replace ATFORK_ARENA_PTR with save_arena.
- save_arena->attached_threads was not changed in
- __malloc_fork_lock_parent and is still correct. */
- thread_arena = save_arena;
- __malloc_hook = save_malloc_hook;
- __free_hook = save_free_hook;
- for (ar_ptr = &main_arena;; )
+ for (mstate ar_ptr = &main_arena;; )
{
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if (ar_ptr == &main_arena)
break;
}
- (void) mutex_unlock (&list_lock);
+ __libc_lock_unlock (list_lock);
}
void
__malloc_fork_unlock_child (void)
{
- mstate ar_ptr;
-
if (__malloc_initialized < 1)
return;
- thread_arena = save_arena;
- __malloc_hook = save_malloc_hook;
- __free_hook = save_free_hook;
-
- /* Push all arenas to the free list, except save_arena, which is
+ /* Push all arenas to the free list, except thread_arena, which is
attached to the current thread. */
- mutex_init (&free_list_lock);
- if (save_arena != NULL)
- ((mstate) save_arena)->attached_threads = 1;
+ __libc_lock_init (free_list_lock);
+ if (thread_arena != NULL)
+ thread_arena->attached_threads = 1;
free_list = NULL;
- for (ar_ptr = &main_arena;; )
+ for (mstate ar_ptr = &main_arena;; )
{
- mutex_init (&ar_ptr->mutex);
- if (ar_ptr != save_arena)
+ __libc_lock_init (ar_ptr->mutex);
+ if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread. */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
- mutex_init (&list_lock);
- atfork_recursive_cntr = 0;
+ __libc_lock_init (list_lock);
+}
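
Taken together, the three handlers implement the usual lock-around-fork discipline: acquire every malloc lock before the fork so none is held mid-operation at the moment the child's address space is snapshotted, release them in the parent, and re-initialize them in the now single-threaded child. A hedged sketch of the same pattern for one lock (illustrative only; as the comment above notes, glibc calls its handlers directly from fork rather than registering them with pthread_atfork):

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare (void) { pthread_mutex_lock (&state_lock); }
static void parent (void)  { pthread_mutex_unlock (&state_lock); }

static void
child (void)
{
  /* The child starts with a single thread; rebuilding the lock from
     scratch is simpler and safer than unlocking state inherited from a
     multi-threaded parent.  */
  pthread_mutex_init (&state_lock, NULL);
}

static void
register_fork_handlers (void)
{
  pthread_atfork (prepare, parent, child);
}
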
+
+#if HAVE_TUNABLES
+void
+TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
+{
+ int32_t value = (int32_t) valp->numval;
+ if (value != 0)
+ __malloc_check_init ();
+}
+
+# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
+static inline int do_ ## __name (__type value); \
+void \
+TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
+{ \
+ __type value = (__type) (valp)->numval; \
+ do_ ## __name (value); \
+}
+TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
+TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
+TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
+TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
+TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
+TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
+TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
+#if USE_TCACHE
+TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
+TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
+TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
+#endif
+TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
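
For reference, TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t) expands to a forward declaration of the do_* worker plus a thin wrapper that unpacks the tunable value; the wrapper's external name is produced by the TUNABLE_CALLBACK macro from elf/dl-tunables.h:

static inline int do_set_mmap_threshold (size_t value);
void
TUNABLE_CALLBACK (set_mmap_threshold) (tunable_val_t *valp)
{
  size_t value = (size_t) (valp)->numval;
  do_set_mmap_threshold (value);
}
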
+#else
/* Initialization routine. */
#include <string.h>
extern char **_environ;
static char *
-internal_function
next_env_entry (char ***position)
{
char **current = *position;
return result;
}
+#endif
#ifdef SHARED
#endif
thread_arena = &main_arena;
+
+ malloc_init_state (&main_arena);
+
+#if HAVE_TUNABLES
+ TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
+ TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
+ TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
+ TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
+ TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
+ TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
+ TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
+ TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
+# if USE_TCACHE
+ TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
+ TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
+ TUNABLE_GET (tcache_unsorted_limit, size_t,
+ TUNABLE_CALLBACK (set_tcache_unsorted_limit));
+# endif
+ TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
+#else
const char *s = NULL;
if (__glibc_likely (_environ != NULL))
{
}
}
}
- if (s && s[0])
- {
- __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
- if (check_action != 0)
- __malloc_check_init ();
- }
+ if (s && s[0] != '\0' && s[0] != '0')
+ __malloc_check_init ();
+#endif
+
+#if HAVE_MALLOC_INIT_HOOK
void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
if (hook != NULL)
(*hook)();
+#endif
__malloc_initialized = 1;
}
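
In a HAVE_TUNABLES build, each TUNABLE_GET call reads the stored setting and invokes the named callback when the user set that tunable, so all of these knobs become reachable through the GLIBC_TUNABLES environment variable as colon-separated glibc.malloc.* pairs, for example:

GLIBC_TUNABLES=glibc.malloc.mmap_threshold=131072:glibc.malloc.arena_max=2 ./app

The #else path keeps the legacy behavior of scanning _environ for the MALLOC_* variables in non-tunables builds.
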
/* Create a new heap. size is automatically rounded up to a multiple
   of the page size. */
static heap_info *
-internal_function
new_heap (size_t size, size_t top_pad)
{
size_t pagesize = GLRO (dl_pagesize);
} while (0)
static int
-internal_function
heap_trim (heap_info *heap, size_t pad)
{
mstate ar_ptr = heap->ar_ptr;
unsigned long pagesz = GLRO (dl_pagesize);
- mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
+ mchunkptr top_chunk = top (ar_ptr), p;
heap_info *prev_heap;
long new_size, top_size, top_area, extra, prev_size, misalign;
/* fencepost must be properly aligned. */
misalign = ((long) p) & MALLOC_ALIGN_MASK;
p = chunk_at_offset (prev_heap, prev_size - misalign);
- assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
+ assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
p = prev_chunk (p);
new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
if (!prev_inuse (p))
- new_size += p->prev_size;
+ new_size += prev_size (p);
assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
break;
if (!prev_inuse (p)) /* consolidate backward */
{
p = prev_chunk (p);
- unlink (ar_ptr, p, bck, fwd);
+ unlink_chunk (ar_ptr, p);
}
assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
assert (((char *) p + new_size) == ((char *) heap + heap->size));
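
The substitutions above (chunksize_nomask for p->size, prev_size (p) for p->prev_size) follow the move to accessor macros for chunk headers, where the low bits of the size word carry per-chunk flags. A reduced sketch of that encoding, with simplified names and types rather than the real malloc internals:

#include <stddef.h>

#define PREV_INUSE      0x1
#define IS_MMAPPED      0x2
#define NON_MAIN_ARENA  0x4
#define SIZE_BITS       (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

struct chunk { size_t prev_size_field; size_t size_field; };

/* Raw size word, flag bits included (what the fencepost check above
   compares against 0 | PREV_INUSE).  */
static size_t chunk_size_nomask (const struct chunk *p)
{ return p->size_field; }

/* Chunk size with the flag bits masked off.  */
static size_t chunk_size (const struct chunk *p)
{ return p->size_field & ~SIZE_BITS; }
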
LIBC_PROBE (memory_arena_new, 2, a, size);
mstate replaced_arena = thread_arena;
thread_arena = a;
- mutex_init (&a->mutex);
+ __libc_lock_init (a->mutex);
- (void) mutex_lock (&list_lock);
+ __libc_lock_lock (list_lock);
/* Add the new arena to the global list. */
a->next = main_arena.next;
atomic_write_barrier ();
main_arena.next = a;
- (void) mutex_unlock (&list_lock);
+ __libc_lock_unlock (list_lock);
- (void) mutex_lock (&free_list_lock);
+ __libc_lock_lock (free_list_lock);
detach_arena (replaced_arena);
- (void) mutex_unlock (&free_list_lock);
+ __libc_lock_unlock (free_list_lock);
/* Lock this arena. NB: Another thread may have been attached to
this arena because the arena is now accessible from the
but this could result in a deadlock with
__malloc_fork_lock_parent. */
- (void) mutex_lock (&a->mutex);
+ __libc_lock_lock (a->mutex);
return a;
}
-/* Remove an arena from free_list. The arena may be in use because it
- was attached concurrently to a thread by reused_arena below. */
+/* Remove an arena from free_list. */
static mstate
get_free_list (void)
{
mstate result = free_list;
if (result != NULL)
{
- (void) mutex_lock (&free_list_lock);
+ __libc_lock_lock (free_list_lock);
result = free_list;
if (result != NULL)
{
free_list = result->next_free;
/* The arena will be attached to this thread. */
- ++result->attached_threads;
+ assert (result->attached_threads == 0);
+ result->attached_threads = 1;
detach_arena (replaced_arena);
}
- (void) mutex_unlock (&free_list_lock);
+ __libc_lock_unlock (free_list_lock);
if (result != NULL)
{
LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
- (void) mutex_lock (&result->mutex);
+ __libc_lock_lock (result->mutex);
thread_arena = result;
}
}
return result;
}
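
get_free_list peeks at free_list without holding free_list_lock and only takes the lock when the peek suggests there is something to pop, re-reading the pointer under the lock before trusting it. A generic sketch of that check/lock/recheck pop with hypothetical types (the unlocked peek is formally a data race, tolerated for the same reason as in the code above: a stale answer only causes a harmless fallback, never a wrong unlink):

#include <pthread.h>
#include <stddef.h>

typedef struct node { struct node *next; } node;

static node *head;                /* protected by head_lock */
static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;

static node *
pop (void)
{
  node *result = head;            /* unlocked peek; may be stale */
  if (result != NULL)
    {
      pthread_mutex_lock (&head_lock);
      result = head;              /* authoritative re-read */
      if (result != NULL)
        head = result->next;
      pthread_mutex_unlock (&head_lock);
    }
  return result;
}
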
+/* Remove the arena from the free list (if it is present).
+ free_list_lock must have been acquired by the caller. */
+static void
+remove_from_free_list (mstate arena)
+{
+ mstate *previous = &free_list;
+ for (mstate p = free_list; p != NULL; p = p->next_free)
+ {
+ assert (p->attached_threads == 0);
+ if (p == arena)
+ {
+ /* Remove the requested arena from the list. */
+ *previous = p->next_free;
+ break;
+ }
+ else
+ previous = &p->next_free;
+ }
+}
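
The previous pointer-to-pointer in remove_from_free_list (starting at &free_list and advancing to &p->next_free) lets the head-of-list and interior cases share the single unlink statement *previous = p->next_free, avoiding a special case for removing the first element.
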
+
/* Lock and return an arena that can be reused for memory allocation.
Avoid AVOID_ARENA as we have already failed to allocate memory in
it and it is currently locked. */
result = next_to_use;
do
{
- if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
+ if (!__libc_lock_trylock (result->mutex))
goto out;
      /* FIXME: This is a data race, see _int_new_arena. */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked. */
  if (result == avoid_arena)
    result = result->next;
- /* Make sure that the arena we get is not corrupted. */
- mstate begin = result;
- while (arena_is_corrupt (result) || result == avoid_arena)
- {
- result = result->next;
- if (result == begin)
- break;
- }
-
- /* We could not find any arena that was either not corrupted or not the one
- we wanted to avoid. */
- if (result == begin || result == avoid_arena)
- return NULL;
-
/* No arena available without contention. Wait for the next in line. */
LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
- (void) mutex_lock (&result->mutex);
+ __libc_lock_lock (result->mutex);
out:
- /* Attach the arena to the current thread. Note that we may have
- selected an arena which was on free_list. */
+ /* Attach the arena to the current thread. */
{
/* Update the arena thread attachment counters. */
mstate replaced_arena = thread_arena;
- (void) mutex_lock (&free_list_lock);
+ __libc_lock_lock (free_list_lock);
detach_arena (replaced_arena);
+
+ /* We may have picked up an arena on the free list. We need to
+ preserve the invariant that no arena on the free list has a
+ positive attached_threads counter (otherwise,
+ arena_thread_freeres cannot use the counter to determine if the
+ arena needs to be put on the free list). We unconditionally
+ remove the selected arena from the free list. The caller of
+ reused_arena checked the free list and observed it to be empty,
+ so the list is very short. */
+ remove_from_free_list (result);
+
++result->attached_threads;
- (void) mutex_unlock (&free_list_lock);
+
+ __libc_lock_unlock (free_list_lock);
}
LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}
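
The selection loop in reused_arena is a non-blocking round-robin: sweep the ring of arenas once with trylock, and only if every arena is busy, block on the next candidate instead of spinning. The same shape in isolation, with hypothetical types:

#include <pthread.h>

typedef struct ring_node
{
  pthread_mutex_t lock;
  struct ring_node *next;   /* circularly linked */
} ring_node;

static ring_node *
acquire_any (ring_node *start)
{
  ring_node *r = start;
  do
    {
      if (pthread_mutex_trylock (&r->lock) == 0)
        return r;               /* acquired without blocking */
      r = r->next;
    }
  while (r != start);

  /* Every node is busy: wait on one rather than burning CPU.  */
  pthread_mutex_lock (&r->lock);
  return r;
}
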
static mstate
-internal_function
arena_get2 (size_t size, mstate avoid_arena)
{
mstate a;
LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
if (ar_ptr != &main_arena)
{
- (void) mutex_unlock (&ar_ptr->mutex);
- /* Don't touch the main arena if it is corrupt. */
- if (arena_is_corrupt (&main_arena))
- return NULL;
-
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = &main_arena;
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
}
else
{
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = arena_get2 (bytes, ar_ptr);
}
return ar_ptr;
}
-static void __attribute__ ((section ("__libc_thread_freeres_fn")))
-arena_thread_freeres (void)
+void
+__malloc_arena_thread_freeres (void)
{
+ /* Shut down the thread cache first. This could deallocate data for
+ the thread arena, so do this before we put the arena on the free
+ list. */
+ tcache_thread_shutdown ();
+
mstate a = thread_arena;
thread_arena = NULL;
if (a != NULL)
{
- (void) mutex_lock (&free_list_lock);
+ __libc_lock_lock (free_list_lock);
/* If this was the last attached thread for this arena, put the
arena on the free list. */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
- (void) mutex_unlock (&free_list_lock);
+ __libc_lock_unlock (free_list_lock);
}
}
-text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
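
With this change the cleanup function is no longer registered in the __libc_thread_subfreeres section-based hook list; it is exported as __malloc_arena_thread_freeres and called explicitly during thread shutdown, which is also what makes the tcache-before-arena ordering documented above enforceable.
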
/*
* Local variables: