acquired. */
__libc_lock_define_initialized (static, list_lock);
-/* Already initialized? */
-static bool __malloc_initialized = false;
-
/**************************************************************************/
void
__malloc_fork_lock_parent (void)
{
- if (!__malloc_initialized)
- return;
-
/* We do not acquire free_list_lock here because we completely
reconstruct free_list in __malloc_fork_unlock_child. */
void
__malloc_fork_unlock_parent (void)
{
- if (!__malloc_initialized)
- return;
-
for (mstate ar_ptr = &main_arena;; )
{
__libc_lock_unlock (ar_ptr->mutex);
void
__malloc_fork_unlock_child (void)
{
- if (!__malloc_initialized)
- return;
-
/* Push all arenas to the free list, except thread_arena, which is
attached to the current thread. */
__libc_lock_init (free_list_lock);
static void tcache_key_initialize (void);
#endif
-static void
-ptmalloc_init (void)
+void
+__ptmalloc_init (void)
{
- if (__malloc_initialized)
- return;
-
- __malloc_initialized = true;
-
#if USE_TCACHE
tcache_key_initialize ();
#endif
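
[Note on this hunk: the patch replaces lazy, check-on-every-call initialization with one explicit routine. The static ptmalloc_init becomes the internal symbol __ptmalloc_init, and the __malloc_initialized flag that every allocator entry point tested is deleted. Below is a minimal sketch of the before/after shape, not glibc's actual code; the names initialized, lazy_init, old_malloc, and explicit_init are hypothetical, and the excerpt does not show where __ptmalloc_init is now called from, so that caller is assumed to run before the first allocation.]

#include <stdbool.h>
#include <stddef.h>

/* Old shape: every entry point paid for a flag check.  */
static bool initialized;            /* like __malloc_initialized */

static void
lazy_init (void)
{
  if (initialized)
    return;
  initialized = true;
  /* ... arena, tcache, and tunable setup ... */
}

void *
old_malloc (size_t n)
{
  if (!initialized)                 /* repeated in malloc, realloc, ... */
    lazy_init ();
  /* ... allocate ... */
  return NULL;
}

/* New shape: initialization is guaranteed to run exactly once, before
   any allocation, so the entry points drop the check entirely.  */
void
explicit_init (void)                /* like __ptmalloc_init */
{
  /* ... arena, tcache, and tunable setup ... */
}

[The trade-off: one fewer branch on every hot allocation path, in exchange for requiring that the startup code provably runs explicit_init before the first allocation.]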
/*
Initialize a malloc_state struct.
- This is called from ptmalloc_init () or from _int_new_arena ()
+ This is called from __ptmalloc_init () or from _int_new_arena ()
when creating a new arena.
*/
mstate ar_ptr;
void *victim;
- if (!__malloc_initialized)
- ptmalloc_init ();
-
MAYBE_INIT_TCACHE ();
if (SINGLE_THREAD_P)
void *newp; /* chunk to return */
- if (!__malloc_initialized)
- ptmalloc_init ();
-
#if REALLOC_ZERO_BYTES_FREES
if (bytes == 0 && oldmem != NULL)
{
void *
__libc_memalign (size_t alignment, size_t bytes)
{
- if (!__malloc_initialized)
- ptmalloc_init ();
-
void *address = RETURN_ADDRESS (0);
return _mid_memalign (alignment, bytes, address);
}
weak_function
aligned_alloc (size_t alignment, size_t bytes)
{
- if (!__malloc_initialized)
- ptmalloc_init ();
-
/* Similar to memalign, but starting with ISO C17 the standard
requires an error for alignments that are not supported by the
implementation. Valid alignments for the current implementation
void *
__libc_valloc (size_t bytes)
{
- if (!__malloc_initialized)
- ptmalloc_init ();
-
void *address = RETURN_ADDRESS (0);
size_t pagesize = GLRO (dl_pagesize);
return _mid_memalign (pagesize, bytes, address);
void *
__libc_pvalloc (size_t bytes)
{
- if (!__malloc_initialized)
- ptmalloc_init ();
-
void *address = RETURN_ADDRESS (0);
size_t pagesize = GLRO (dl_pagesize);
size_t rounded_bytes;
sz = bytes;
- if (!__malloc_initialized)
- ptmalloc_init ();
-
#if USE_TCACHE
size_t tc_idx = usize2tidx (bytes);
if (tcache_available (tc_idx))
{
int result = 0;
- if (!__malloc_initialized)
- ptmalloc_init ();
-
mstate ar_ptr = &main_arena;
do
{
struct mallinfo2 m;
mstate ar_ptr;
- if (!__malloc_initialized)
- ptmalloc_init ();
-
memset (&m, 0, sizeof (m));
ar_ptr = &main_arena;
do
mstate ar_ptr;
unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
- if (!__malloc_initialized)
- ptmalloc_init ();
_IO_flockfile (stderr);
int old_flags2 = stderr->_flags2;
stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
mstate av = &main_arena;
int res = 1;
- if (!__malloc_initialized)
- ptmalloc_init ();
__libc_lock_lock (av->mutex);
LIBC_PROBE (memory_mallopt, 2, param_number, value);
}
#if USE_TCACHE
+
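+/* Always zero.  The volatile read in malloc_printerr_tail below
+   gives that function a path that returns, so the compiler cannot
+   treat it as a no-return function.  */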
+static volatile int dummy_var;
+
static __attribute_noinline__ void
malloc_printerr_tail (const char *str)
{
/* Ensure this cannot be a no-return function. */
- if (!__malloc_initialized)
+ if (dummy_var)
return;
malloc_printerr (str);
}
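
[Note on this hunk: malloc_printerr does not return (it reports the error and aborts), so if the call to it were the only statement here, the compiler could infer that malloc_printerr_tail is itself noreturn, which is exactly what the existing comment says must not happen. The old code borrowed the __malloc_initialized load to keep a returning path alive; with the flag gone, the patch substitutes a volatile dummy. A standalone sketch of the trick follows; fatal, keep_return, and wrapper are hypothetical names standing in for malloc_printerr, dummy_var, and malloc_printerr_tail.]

/* fatal stands in for an abort-style reporting function.  */
extern void fatal (const char *msg) __attribute__ ((noreturn));

/* Always zero at run time, but never provably zero at compile time,
   because the load below is volatile.  */
static volatile int keep_return;

static __attribute__ ((noinline)) void
wrapper (const char *msg)
{
  if (keep_return)   /* branch can never be proven dead */
    return;          /* so wrapper demonstrably can return */
  fatal (msg);
}

[Without such a guard, the compiler could mark wrapper noreturn and propagate that to its call sites, changing the generated code there.]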
{
void *mem;
- if (!__malloc_initialized)
- ptmalloc_init ();
-
/* Test whether the SIZE argument is valid. It must be a power of
two multiple of sizeof (void *). */
if (alignment % sizeof (void *) != 0
size_t total_aspace = 0;
size_t total_aspace_mprotect = 0;
-
-
- if (!__malloc_initialized)
- ptmalloc_init ();
-
fputs ("<malloc version=\"1\">\n", fp);
/* Iterate over all arenas currently in use. */