void *newp; /* chunk to return */
- /* realloc of null is supposed to be same as malloc */
- if (oldmem == NULL)
- return __libc_malloc (bytes);
-
#if REALLOC_ZERO_BYTES_FREES
- if (bytes == 0)
+ if (bytes == 0 && oldmem != NULL)
{
__libc_free (oldmem); return NULL;
}
#endif
+ /* realloc of null is supposed to be same as malloc */
+ if (oldmem == NULL)
+ return __libc_malloc (bytes);
+
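/* Handling the zero-size case first relies on the oldmem != NULL guard:
   realloc (NULL, 0), like any realloc (NULL, n), is specified to behave
   like malloc (n), so it has to reach the __libc_malloc call rather than
   the free path. */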
/* Perform a quick check to ensure that the pointer's tag matches the
memory's tag. */
if (__glibc_unlikely (mtag_enabled))
  *(volatile char*) oldmem;

/* chunk corresponding to oldmem */
const mchunkptr oldp = mem2chunk (oldmem);

/* Return the chunk as is if the request grows within usable bytes, typically
   into the alignment padding. */
size_t usable = musable (oldmem);
if (bytes <= usable)
{
size_t difference = usable - bytes;
- if ((unsigned long) difference < 2 * sizeof (INTERNAL_SIZE_T))
+ if ((unsigned long) difference < 2 * sizeof (INTERNAL_SIZE_T)
+ || (chunk_is_mmapped (oldp) && difference <= GLRO (dl_pagesize)))
return oldmem;
}
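/* The request is answered in place only when the unused tail would be
   tiny: less than two size words for an ordinary chunk, or at most a
   page for an mmapped one. Larger shrink requests fall through so the
   excess can be split off or the mapping replaced further down. */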
/* its size */
const INTERNAL_SIZE_T oldsize = chunksize (oldp);
+ if (chunk_is_mmapped (oldp))
+ ar_ptr = NULL;
+ else
+ ar_ptr = arena_for_chunk (oldp);
+
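/* mmapped chunks are not owned by any arena, so ar_ptr stays NULL for
   them; for ordinary chunks the owning arena is looked up here and later
   passed to _int_realloc. */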
/* Little security check which won't hurt performance: the allocator
never wraps around at the end of the address space. Therefore
we can exclude some size values which might appear here by
accident or by "design" from some intruder. */
return tag_new_usable (newmem);
}
#endif
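/* (When memory tagging is enabled, tag_new_usable above colors the
   remapped block with a fresh tag, which helps ensure stale references
   to the old allocation are not reused.) */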
- /* Return if shrinking and mremap was unsuccessful. */
- if (bytes <= usable)
- return oldmem;
+ /* Note the extra SIZE_SZ overhead. */
+ if (oldsize - SIZE_SZ >= nb)
+ return oldmem; /* do nothing */
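/* Without mremap, or after it has failed, the old mapping is kept as
   long as it still covers the padded request nb; only requests that
   outgrow it take the alloc-copy-free path below. */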
/* Must alloc, copy, free. */
newmem = __libc_malloc (bytes);
if (newmem == NULL)
  return NULL; /* propagate failure */

memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
munmap_chunk (oldp);
return newmem;
}
- ar_ptr = arena_for_chunk (oldp);
-
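/* In a single-threaded process the arena lock can be skipped and
   _int_realloc called directly. */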
if (SINGLE_THREAD_P)
{
newp = _int_realloc (ar_ptr, oldp, oldsize, nb);