/* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 1996-2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Copyright (C) 1996-2013 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>
and Doug Lea <dl@cs.oswego.edu>, 2001.
Compilation Environment options:
- HAVE_MREMAP 0 unless linux defined
+ HAVE_MREMAP 0
Changing default word sizes:
#include <stddef.h> /* for size_t */
#include <stdlib.h> /* for getenv(), abort() */
+#include <unistd.h> /* for __libc_enable_secure */
#include <malloc-machine.h>
+#include <malloc-sysdep.h>
#include <atomic.h>
#include <_itoa.h>
#include <stdio.h> /* needed for malloc_stats */
#include <errno.h>
+#include <shlib-compat.h>
+
/* For uintptr_t. */
#include <stdint.h>
#ifndef MALLOC_ALIGNMENT
-/* XXX This is the correct definition. It differs from 2*SIZE_SZ only on
- powerpc32. For the time being, changing this is causing more
- compatibility problems due to malloc_get_state/malloc_set_state than
- will returning blocks not adequately aligned for long double objects
- under -mlong-double-128.
-
-#define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
- ? __alignof__ (long double) : 2 * SIZE_SZ)
-*/
-#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
+# if !SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_16)
+/* This is the correct definition when there is no past ABI to constrain it.
+
+ Among configurations with a past ABI constraint, it differs from
+ 2*SIZE_SZ only on powerpc32. For the time being, changing this is
+ causing more compatibility problems due to malloc_get_state and
+ malloc_set_state than will returning blocks not adequately aligned for
+ long double objects under -mlong-double-128. */
+
+# define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
+ ? __alignof__ (long double) : 2 * SIZE_SZ)
+# else
+# define MALLOC_ALIGNMENT (2 * SIZE_SZ)
+# endif
#endif
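
   [Editor's note: a quick sanity check of the two branches, as a standalone
   illustration only.  The values below are assumed for a hypothetical 32-bit
   configuration where SIZE_SZ is 4 and long double wants 16-byte alignment
   (as on powerpc32); they are not taken from this patch.

   #include <stdio.h>

   int
   main (void)
   {
     size_t size_sz = 4;       /* assumed sizeof (INTERNAL_SIZE_T) */
     size_t ld_align = 16;     /* assumed __alignof__ (long double) */

     /* Branch with no old ABI to honour: take the larger value.  */
     size_t new_align = 2 * size_sz < ld_align ? ld_align : 2 * size_sz;
     /* Compatibility branch: keep the historical 2 * SIZE_SZ.  */
     size_t old_align = 2 * size_sz;

     printf ("unconstrained: %zu, compatible: %zu\n", new_align, old_align);
     return 0;
   }

   This prints "unconstrained: 16, compatible: 8", which is the difference the
   SHLIB_COMPAT test is guarding.]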
/* The corresponding bit mask value */
/*
Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
- large blocks. This is currently only possible on Linux with
- kernel versions newer than 1.3.77.
+ large blocks.
*/
#ifndef HAVE_MREMAP
-#ifdef linux
-#define HAVE_MREMAP 1
-#else
#define HAVE_MREMAP 0
#endif
-#endif /* HAVE_MREMAP */
-
/*
This version of malloc supports the standard SVID/XPG mallinfo
const void *caller);
static void* memalign_check(size_t alignment, size_t bytes,
const void *caller);
-/* These routines are never needed in this configuration. */
+#ifndef NO_THREADS
static void* malloc_atfork(size_t sz, const void *caller);
static void free_atfork(void* mem, const void *caller);
+#endif
/* ------------- Optional versions of memcopy ---------------- */
the malloc code, but "mem" is the pointer that is returned to the
user. "Nextchunk" is the beginning of the next contiguous chunk.
- Chunks always begin on even word boundries, so the mem portion
+ Chunks always begin on even word boundaries, so the mem portion
(which is returned to the user) is also on an even word boundary, and
thus at least double-word aligned.
The bins top out around 1MB because we expect to service large
requests via mmap.
+
+ Bin 0 does not exist. Bin 1 is the unordered list; if that would be
+ a valid chunk size, the small bins are bumped up one.
*/
#define NBINS 128
#define NSMALLBINS 64
#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
-#define MIN_LARGE_SIZE (NSMALLBINS * SMALLBIN_WIDTH)
+#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
+#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
#define in_smallbin_range(sz) \
((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
#define smallbin_index(sz) \
- (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))
+ ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
+ + SMALLBIN_CORRECTION)
#define largebin_index_32(sz) \
(((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \
((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
126)
+#define largebin_index_32_big(sz) \
+(((((unsigned long)(sz)) >> 6) <= 45)? 49 + (((unsigned long)(sz)) >> 6): \
+ ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
+ ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
+ ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
+ ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
+ 126)
+
// XXX It remains to be seen whether it is good to keep the widths of
// XXX the buckets the same or whether it should be scaled by a factor
// XXX of two as well.
126)
#define largebin_index(sz) \
- (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
+ (SIZE_SZ == 8 ? largebin_index_64 (sz) \
+ : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
+ : largebin_index_32 (sz))
#define bin_index(sz) \
((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
#endif
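
   [Editor's note: a worked example of the SMALLBIN_CORRECTION bump, assuming
   an illustrative 32-bit build with SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16:

     SMALLBIN_WIDTH      = 16
     SMALLBIN_CORRECTION = (16 > 2 * 4)        = 1
     MIN_LARGE_SIZE      = (64 - 1) * 16       = 1008
     smallbin_index (16) = (16 >> 4) + 1       = 2
     smallbin_index (32) = (32 >> 4) + 1       = 3

   Without the correction a 16-byte chunk would index bin 1, the unsorted
   list; the +1 keeps the small bins clear of it, and MIN_LARGE_SIZE shrinks
   by one bin width to compensate.  With the default MALLOC_ALIGNMENT of
   2 * SIZE_SZ the correction is 0 and nothing changes.]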
/* Forward declarations. */
-static void* malloc_hook_ini __MALLOC_P ((size_t sz,
- const __malloc_ptr_t caller));
-static void* realloc_hook_ini __MALLOC_P ((void* ptr, size_t sz,
- const __malloc_ptr_t caller));
-static void* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
- const __malloc_ptr_t caller));
+static void* malloc_hook_ini (size_t sz,
+ const void *caller) __THROW;
+static void* realloc_hook_ini (void* ptr, size_t sz,
+ const void *caller) __THROW;
+static void* memalign_hook_ini (size_t alignment, size_t sz,
+ const void *caller) __THROW;
void weak_variable (*__malloc_initialize_hook) (void) = NULL;
-void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
- const __malloc_ptr_t) = NULL;
-__malloc_ptr_t weak_variable (*__malloc_hook)
- (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
-__malloc_ptr_t weak_variable (*__realloc_hook)
- (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
+void weak_variable (*__free_hook) (void *__ptr,
+ const void *) = NULL;
+void *weak_variable (*__malloc_hook)
+ (size_t __size, const void *) = malloc_hook_ini;
+void *weak_variable (*__realloc_hook)
+ (void *__ptr, size_t __size, const void *)
= realloc_hook_ini;
-__malloc_ptr_t weak_variable (*__memalign_hook)
- (size_t __alignment, size_t __size, const __malloc_ptr_t)
+void *weak_variable (*__memalign_hook)
+ (size_t __alignment, size_t __size, const void *)
= memalign_hook_ini;
void weak_variable (*__after_morecore_hook) (void) = NULL;
is no following chunk whose prev_size field could be used.
See the front_misalign handling below, for glibc there is no
- need for further alignments. */
- size = (nb + SIZE_SZ + pagemask) & ~pagemask;
+ need for further alignments unless we have high alignment.
+ */
+ if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+ size = (nb + SIZE_SZ + pagemask) & ~pagemask;
+ else
+ size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
tried_mmap = true;
/* Don't try if size wraps around 0 */
returned start address to meet alignment requirements here
and in memalign(), and still be able to compute proper
address argument for later munmap in free() and realloc().
+ */
- For glibc, chunk2mem increases the address by 2*SIZE_SZ and
- MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
- aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
- assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
-
- p = (mchunkptr)mm;
- set_head(p, size|IS_MMAPPED);
+ if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+ {
+ /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
+ MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
+ aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
+ assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
+ front_misalign = 0;
+ }
+ else
+ front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
+ if (front_misalign > 0) {
+ correction = MALLOC_ALIGNMENT - front_misalign;
+ p = (mchunkptr)(mm + correction);
+ p->prev_size = correction;
+ set_head(p, (size - correction) |IS_MMAPPED);
+ }
+ else
+ {
+ p = (mchunkptr)mm;
+ set_head(p, size|IS_MMAPPED);
+ }
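
   [Editor's note: for a concrete feel of the new correction path, assume the
   illustrative values SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16, with mm page
   aligned:

     chunk2mem (mm)  = mm + 2 * SIZE_SZ = mm + 8   /* 8 bytes off a 16-byte boundary */
     front_misalign  = 8
     correction      = 16 - 8 = 8
     p               = (mchunkptr) (mm + 8)
     p->prev_size    = 8          /* lets free()/munmap recover the mapping start */
     chunk size      = size - 8, still flagged IS_MMAPPED

   When MALLOC_ALIGNMENT == 2 * SIZE_SZ the misalignment is always zero,
   which is exactly what the assert in the first branch verifies.]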
/* update statistics */
top(av) = chunk_at_offset(heap, sizeof(*heap));
set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
- /* Setup fencepost and free the old top chunk. */
+ /* Set up fencepost and free the old top chunk with a multiple of
+ MALLOC_ALIGNMENT in size. */
/* The fencepost takes at least MINSIZE bytes, because it might
become the top chunk again later. Note that a footer is set
up, too, although the chunk is marked in use. */
- old_size -= MINSIZE;
+ old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
if (old_size >= MINSIZE) {
set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
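
   [Editor's note: the masking only matters once MALLOC_ALIGNMENT exceeds
   2 * SIZE_SZ.  With illustrative values MALLOC_ALIGN_MASK == 15,
   MINSIZE == 16 and an old top of 100 bytes, the old code left old_size at
   84, while the new code rounds it down to (100 - 16) & ~15 == 80, so the
   chunk freed here has a size that is a multiple of MALLOC_ALIGNMENT and
   passes the stricter check added to free() below.]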
/* handle non-contiguous cases */
else {
- /* MORECORE/mmap must correctly align */
- assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
+ if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+ /* MORECORE/mmap must correctly align */
+ assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
+ else {
+ front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+ if (front_misalign > 0) {
+
+ /*
+ Skip over some bytes to arrive at an aligned position.
+ We don't need to specially mark these wasted front bytes.
+ They will never be accessed anyway because
+ prev_inuse of av->top (and any chunk created from its start)
+ is always true after initialization.
+ */
+
+ aligned_brk += MALLOC_ALIGNMENT - front_misalign;
+ }
+ }
/* Find out current end of memory */
if (snd_brk == (char*)(MORECORE_FAILURE)) {
mstate ar_ptr;
void *victim;
- __malloc_ptr_t (*hook) (size_t, const __malloc_ptr_t)
+ void *(*hook) (size_t, const void *)
= force_reg (__malloc_hook);
if (__builtin_expect (hook != NULL, 0))
return (*hook)(bytes, RETURN_ADDRESS (0));
return 0;
victim = _int_malloc(ar_ptr, bytes);
if(!victim) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = arena_get_retry(ar_ptr, bytes);
+ if (__builtin_expect(ar_ptr != NULL, 1)) {
victim = _int_malloc(ar_ptr, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
- (void)mutex_unlock(&main_arena.mutex);
- if(ar_ptr) {
- victim = _int_malloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
}
} else
(void)mutex_unlock(&ar_ptr->mutex);
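
   [Editor's note: arena_get_retry itself is not part of this hunk.  A minimal
   sketch of what it is assumed to do, reconstructed from the inline code it
   replaces above (the real helper presumably lives next to arena_get2 in
   arena.c):

   static mstate
   arena_get_retry (mstate ar_ptr, size_t bytes)
   {
     if (ar_ptr != &main_arena)
       {
         /* The failure was in a non-main arena; the main arena's sbrk heap
            may still have room, so retry there.  */
         (void) mutex_unlock (&ar_ptr->mutex);
         ar_ptr = &main_arena;
         (void) mutex_lock (&ar_ptr->mutex);
       }
     else
       {
         /* sbrk() has failed; there is still a chance to mmap() from
            another arena.  Grab ar_ptr->next before dropping the lock.  */
         mstate prev = ar_ptr->next ? ar_ptr : 0;
         (void) mutex_unlock (&ar_ptr->mutex);
         ar_ptr = arena_get2 (prev, bytes);
       }
     return ar_ptr;
   }

   Whatever arena comes back is still locked, which is why each calling site
   keeps its own mutex_unlock after the retry.]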
mstate ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
- void (*hook) (__malloc_ptr_t, const __malloc_ptr_t)
+ void (*hook) (void *, const void *)
= force_reg (__free_hook);
if (__builtin_expect (hook != NULL, 0)) {
(*hook)(mem, RETURN_ADDRESS (0));
void* newp; /* chunk to return */
- __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, const __malloc_ptr_t) =
+ void *(*hook) (void *, size_t, const void *) =
force_reg (__realloc_hook);
if (__builtin_expect (hook != NULL, 0))
return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
mstate ar_ptr;
void *p;
- __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
- const __malloc_ptr_t)) =
+ void *(*hook) (size_t, size_t, const void *) =
force_reg (__memalign_hook);
if (__builtin_expect (hook != NULL, 0))
return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
return 0;
p = _int_memalign(ar_ptr, alignment, bytes);
if(!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = arena_get_retry (ar_ptr, bytes);
+ if (__builtin_expect(ar_ptr != NULL, 1)) {
p = _int_memalign(ar_ptr, alignment, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- mstate prev = ar_ptr->next ? ar_ptr : 0;
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = arena_get2(prev, bytes);
- if(ar_ptr) {
- p = _int_memalign(ar_ptr, alignment, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
}
} else
(void)mutex_unlock(&ar_ptr->mutex);
size_t pagesz = GLRO(dl_pagesize);
- __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
- const __malloc_ptr_t)) =
+ void *(*hook) (size_t, size_t, const void *) =
force_reg (__memalign_hook);
if (__builtin_expect (hook != NULL, 0))
return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
if(!ar_ptr)
return 0;
p = _int_valloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
if(!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = arena_get_retry (ar_ptr, bytes);
+ if (__builtin_expect(ar_ptr != NULL, 1)) {
p = _int_memalign(ar_ptr, pagesz, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
- if(ar_ptr) {
- p = _int_memalign(ar_ptr, pagesz, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
}
- }
+ } else
+ (void)mutex_unlock (&ar_ptr->mutex);
assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ar_ptr == arena_for_chunk(mem2chunk(p)));
size_t page_mask = GLRO(dl_pagesize) - 1;
size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
- __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
- const __malloc_ptr_t)) =
+ void *(*hook) (size_t, size_t, const void *) =
force_reg (__memalign_hook);
if (__builtin_expect (hook != NULL, 0))
return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
p = _int_pvalloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
if(!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE);
+ if (__builtin_expect(ar_ptr != NULL, 1)) {
p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
(void)mutex_unlock(&ar_ptr->mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
- bytes + 2*pagesz + MINSIZE);
- if(ar_ptr) {
- p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
}
- }
+ } else
+ (void)mutex_unlock(&ar_ptr->mutex);
assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
ar_ptr == arena_for_chunk(mem2chunk(p)));
}
}
- __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, const __malloc_ptr_t)) =
+ void *(*hook) (size_t, const void *) =
force_reg (__malloc_hook);
if (__builtin_expect (hook != NULL, 0)) {
sz = bytes;
#endif
mem = _int_malloc(av, sz);
- /* Only clearing follows, so we can unlock early. */
- (void)mutex_unlock(&av->mutex);
assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
av == arena_for_chunk(mem2chunk(mem)));
if (mem == 0) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(av != &main_arena) {
- (void)mutex_lock(&main_arena.mutex);
- mem = _int_malloc(&main_arena, sz);
- (void)mutex_unlock(&main_arena.mutex);
- } else {
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- (void)mutex_lock(&main_arena.mutex);
- av = arena_get2(av->next ? av : 0, sz);
- (void)mutex_unlock(&main_arena.mutex);
- if(av) {
- mem = _int_malloc(av, sz);
- (void)mutex_unlock(&av->mutex);
- }
+ av = arena_get_retry (av, sz);
+ if (__builtin_expect(av != NULL, 1)) {
+ mem = _int_malloc(av, sz);
+ (void)mutex_unlock(&av->mutex);
}
if (mem == 0) return 0;
- }
+ } else
+ (void)mutex_unlock(&av->mutex);
p = mem2chunk(mem);
/* Two optional cases in which clearing not necessary */
malloc_printerr (check_action, errstr, chunk2mem(p));
return;
}
- /* We know that each chunk is at least MINSIZE bytes in size. */
- if (__builtin_expect (size < MINSIZE, 0))
+ /* We know that each chunk is at least MINSIZE bytes in size and a
+ multiple of MALLOC_ALIGNMENT. */
+ if (__builtin_expect (size < MINSIZE || !aligned_OK (size), 0))
{
errstr = "free(): invalid size";
goto errout;
content. */
memset (paligned_mem, 0x89, size & ~psm1);
#endif
- madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
+ __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
result = 1;
}
mchunkptr p;
if (mem != 0) {
p = mem2chunk(mem);
+
+ if (__builtin_expect(using_malloc_checking == 1, 0))
+ return malloc_check_get_size(p);
if (chunk_is_mmapped(p))
return chunksize(p) - 2*SIZE_SZ;
else if (inuse(p))
/*
------------------------------ mallinfo ------------------------------
+ Accumulate malloc statistics for arena AV into M.
*/
-static struct mallinfo
-int_mallinfo(mstate av)
+static void
+int_mallinfo(mstate av, struct mallinfo *m)
{
- struct mallinfo mi;
size_t i;
mbinptr b;
mchunkptr p;
}
}
- mi.smblks = nfastblocks;
- mi.ordblks = nblocks;
- mi.fordblks = avail;
- mi.uordblks = av->system_mem - avail;
- mi.arena = av->system_mem;
- mi.hblks = mp_.n_mmaps;
- mi.hblkhd = mp_.mmapped_mem;
- mi.fsmblks = fastavail;
- mi.keepcost = chunksize(av->top);
- mi.usmblks = mp_.max_total_mem;
- return mi;
+ m->smblks += nfastblocks;
+ m->ordblks += nblocks;
+ m->fordblks += avail;
+ m->uordblks += av->system_mem - avail;
+ m->arena += av->system_mem;
+ m->fsmblks += fastavail;
+ if (av == &main_arena)
+ {
+ m->hblks = mp_.n_mmaps;
+ m->hblkhd = mp_.mmapped_mem;
+ m->usmblks = mp_.max_total_mem;
+ m->keepcost = chunksize(av->top);
+ }
}
struct mallinfo __libc_mallinfo()
{
struct mallinfo m;
+ mstate ar_ptr;
if(__malloc_initialized < 0)
ptmalloc_init ();
- (void)mutex_lock(&main_arena.mutex);
- m = int_mallinfo(&main_arena);
- (void)mutex_unlock(&main_arena.mutex);
+
+ memset(&m, 0, sizeof (m));
+ ar_ptr = &main_arena;
+ do {
+ (void)mutex_lock(&ar_ptr->mutex);
+ int_mallinfo(ar_ptr, &m);
+ (void)mutex_unlock(&ar_ptr->mutex);
+
+ ar_ptr = ar_ptr->next;
+ } while (ar_ptr != &main_arena);
+
return m;
}
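
   [Editor's note: a small standalone usage illustration, not part of the
   patch, showing the exported entry point that now aggregates every arena
   rather than reporting main_arena alone (__libc_mallinfo is the
   implementation behind the exported mallinfo):

   #include <malloc.h>
   #include <stdio.h>

   int
   main (void)
   {
     void *p = malloc (1000);           /* make sure the allocator has state */
     struct mallinfo m = mallinfo ();
     printf ("arena=%d uordblks=%d fordblks=%d hblks=%d hblkhd=%d\n",
             m.arena, m.uordblks, m.fordblks, m.hblks, m.hblkhd);
     free (p);
     return 0;
   }]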
*/
void
-__malloc_stats()
+__malloc_stats (void)
{
int i;
mstate ar_ptr;
- struct mallinfo mi;
unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
#if THREAD_STATS
long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
for (i=0, ar_ptr = &main_arena;; i++) {
+ struct mallinfo mi;
+
+ memset(&mi, 0, sizeof(mi));
(void)mutex_lock(&ar_ptr->mutex);
- mi = int_mallinfo(ar_ptr);
+ int_mallinfo(ar_ptr, &mi);
fprintf(stderr, "Arena %d:\n", i);
fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
if((unsigned long)value > HEAP_MAX_SIZE/2)
res = 0;
else
- mp_.mmap_threshold = value;
- mp_.no_dyn_threshold = 1;
+ {
+ mp_.mmap_threshold = value;
+ mp_.no_dyn_threshold = 1;
+ }
break;
case M_MMAP_MAX:
- mp_.n_mmaps_max = value;
- mp_.no_dyn_threshold = 1;
+ mp_.n_mmaps_max = value;
+ mp_.no_dyn_threshold = 1;
break;
case M_CHECK_ACTION:
while (cp > buf)
*--cp = '0';
- __libc_message (action & 2,
- "*** glibc detected *** %s: %s: 0x%s ***\n",
+ __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n",
__libc_argv[0] ?: "<unknown>", str, cp);
}
else if (action & 2)
/* Call the hook here, so that caller is posix_memalign's caller
and not posix_memalign itself. */
- __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
- const __malloc_ptr_t)) =
+ void *(*hook) (size_t, size_t, const void *) =
force_reg (__memalign_hook);
if (__builtin_expect (hook != NULL, 0))
mem = (*hook)(alignment, size, RETURN_ADDRESS (0));