1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2021 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <https://www.gnu.org/licenses/>. */
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
29 * Version ptmalloc2-20011215
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
44 * Why use this malloc?
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
50 allocator for malloc-intensive programs.
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
71 * Contents, described in more detail in "description of public routines" below.
73 Standard (ANSI/SVID/...) functions:
75 calloc(size_t n_elements, size_t element_size);
77 realloc(void* p, size_t n);
78 memalign(size_t alignment, size_t n);
81 mallopt(int parameter_number, int parameter_value)
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
87 malloc_trim(size_t pad);
88 malloc_usable_size(void* p);
93 Supported pointer representation: 4 or 8 bytes
94 Supported size_t representation: 4 or 8 bytes
95 Note that size_t is allowed to be 4 bytes even if pointers are 8.
96 You can adjust this by defining INTERNAL_SIZE_T
98 Alignment: 2 * sizeof(size_t) (default)
99 (i.e., 8 byte alignment with 4byte size_t). This suffices for
100 nearly all current machines and C compilers. However, you can
101 define MALLOC_ALIGNMENT to be wider than this if necessary.
103 Minimum overhead per allocated chunk: 4 or 8 bytes
104 Each malloced chunk has a hidden word of overhead holding size
105 and status information.
107 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
108 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
110 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
111 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
112 needed; 4 (8) for a trailing size field and 8 (16) bytes for
113 free list pointers. Thus, the minimum allocatable size is
116 Even a request for zero bytes (i.e., malloc(0)) returns a
117 pointer to something of the minimum allocatable size.
119 The maximum overhead wastage (i.e., number of extra bytes
120 allocated than were requested in malloc) is less than or equal
121 to the minimum size, except for requests >= mmap_threshold that
122 are serviced via mmap(), where the worst case wastage is 2 *
123 sizeof(size_t) bytes plus the remainder from a system page (the
124 minimal mmap unit); typically 4096 or 8192 bytes.
126 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
127 8-byte size_t: 2^64 minus about two pages
129 It is assumed that (possibly signed) size_t values suffice to
130 represent chunk sizes. `Possibly signed' is due to the fact
131 that `size_t' may be defined on a system as either a signed or
132 an unsigned type. The ISO C standard says that it must be
133 unsigned, but a few systems are known not to adhere to this.
134 Additionally, even when size_t is unsigned, sbrk (which is by
135 default used to obtain memory from system) accepts signed
136 arguments, and may not be able to handle size_t-wide arguments
137 with negative sign bit. Generally, values that would
138 appear as negative after accounting for overhead and alignment
139 are supported only via mmap(), which does not have this
142 Requests for sizes outside the allowed range will perform an optional
143 failure action and then return null. (Requests may also
144 also fail because a system is out of memory.)
146 Thread-safety: thread-safe
148 Compliance: I believe it is compliant with the 1997 Single Unix Specification
149 Also SVID/XPG, ANSI C, and probably others as well.
151 * Synopsis of compile-time options:
153 People have reported using previous versions of this malloc on all
154 versions of Unix, sometimes by tweaking some of the defines
155 below. It has been tested most extensively on Solaris and Linux.
156 People also report using it in stand-alone embedded systems.
158 The implementation is in straight, hand-tuned ANSI C. It is not
159 at all modular. (Sorry!) It uses a lot of macros. To be at all
160 usable, this code should be compiled using an optimizing compiler
161 (for example gcc -O3) that can simplify expressions and control
162 paths. (FAQ: some macros import variables as arguments rather than
163 declare locals because people reported that some debuggers
164 otherwise get confused.)
168 Compilation Environment options:
172 Changing default word sizes:
174 INTERNAL_SIZE_T size_t
176 Configuration and functionality options:
178 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
179 USE_MALLOC_LOCK NOT defined
180 MALLOC_DEBUG NOT defined
181 REALLOC_ZERO_BYTES_FREES 1
184 Options for customizing MORECORE:
188 MORECORE_CONTIGUOUS 1
189 MORECORE_CANNOT_TRIM NOT defined
191 MMAP_AS_MORECORE_SIZE (1024 * 1024)
193 Tuning options that are also dynamically changeable via mallopt:
195 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
196 DEFAULT_TRIM_THRESHOLD 128 * 1024
198 DEFAULT_MMAP_THRESHOLD 128 * 1024
199 DEFAULT_MMAP_MAX 65536
201 There are several other #defined constants and macros that you
202 probably don't want to touch unless you are extending or adapting malloc. */
205 void* is the pointer type that malloc should say it returns
212 #include <stddef.h> /* for size_t */
213 #include <stdlib.h> /* for getenv(), abort() */
214 #include <unistd.h> /* for __libc_enable_secure */
218 #include <bits/wordsize.h>
219 #include <sys/sysinfo.h>
221 #include <ldsodefs.h>
224 #include <stdio.h> /* needed for malloc_stats */
228 #include <shlib-compat.h>
233 /* For va_arg, va_start, va_end. */
236 /* For MIN, MAX, powerof2. */
237 #include <sys/param.h>
239 /* For ALIGN_UP et. al. */
240 #include <libc-pointer-arith.h>
242 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
243 #include <libc-diag.h>
245 /* For memory tagging. */
246 #include <libc-mtag.h>
248 #include <malloc/malloc-internal.h>
250 /* For SINGLE_THREAD_P. */
251 #include <sysdep-cancel.h>
253 #include <libc-internal.h>
258 Because freed chunks may be overwritten with bookkeeping fields, this
259 malloc will often die when freed memory is overwritten by user
260 programs. This can be very effective (albeit in an annoying way)
261 in helping track down dangling pointers.
263 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
264 enabled that will catch more memory errors. You probably won't be
265 able to make much sense of the actual assertion errors, but they
266 should help you locate incorrectly overwritten memory. The checking
267 is fairly extensive, and will slow down execution
268 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
269 will attempt to check every non-mmapped allocated and free chunk in
270 the course of computing the summmaries. (By nature, mmapped regions
271 cannot be checked very much automatically.)
273 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
274 this code. The assertions in the check routines spell out in more
275 detail the assumptions and invariants underlying the algorithms.
277 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
278 checking that all accesses to malloced memory stay within their
279 bounds. However, there are several add-ons and adaptations of this
280 or other mallocs available that do this.
284 #define MALLOC_DEBUG 0
288 # define __assert_fail(assertion, file, line, function) \
289 __malloc_assert(assertion, file, line, function)
291 extern const char *__progname
;
294 __malloc_assert (const char *assertion
, const char *file
, unsigned int line
,
295 const char *function
)
297 (void) __fxprintf (NULL
, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
298 __progname
, __progname
[0] ? ": " : "",
300 function
? function
: "", function
? ": " : "",
308 /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
309 # define TCACHE_MAX_BINS 64
310 # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
312 /* Only used to pre-fill the tunables. */
313 # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
315 /* When "x" is from chunksize(). */
316 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
317 /* When "x" is a user-provided size. */
318 # define usize2tidx(x) csize2tidx (request2size (x))
320 /* With rounding and alignment, the bins are...
321 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
322 idx 1 bytes 25..40 or 13..20
323 idx 2 bytes 41..56 or 21..28
326 /* This is another arbitrary limit, which tunables can change. Each
327 tcache bin will hold at most this number of chunks. */
328 # define TCACHE_FILL_COUNT 7
330 /* Maximum chunks in tcache bins for tunables. This value must fit the range
331 of tcache->counts[] entries, else they may overflow. */
332 # define MAX_TCACHE_COUNT UINT16_MAX
336 Use randomness from ASLR (mmap_base) to protect single-linked lists
337 of Fast-Bins and TCache. That is, mask the "next" pointers of the
338 lists' chunks, and also perform allocation alignment checks on them.
339 This mechanism reduces the risk of pointer hijacking, as was done with
340 Safe-Unlinking in the double-linked lists of Small-Bins.
341 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
342 larger pages provide less entropy, although the pointer mangling
344 #define PROTECT_PTR(pos, ptr) \
345 ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
346 #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
349 REALLOC_ZERO_BYTES_FREES controls the behavior of realloc (p, 0)
350 when p is nonnull. If nonzero, realloc (p, 0) should free p and
351 return NULL. Otherwise, realloc (p, 0) should do the equivalent
352 of freeing p and returning what malloc (0) would return.
354 ISO C17 says the behavior is implementation-defined here; glibc
355 follows historical practice and defines it to be nonzero.
358 #ifndef REALLOC_ZERO_BYTES_FREES
359 #define REALLOC_ZERO_BYTES_FREES 1
363 TRIM_FASTBINS controls whether free() of a very small chunk can
364 immediately lead to trimming. Setting to true (1) can reduce memory
365 footprint, but will almost always slow down programs that use a lot
368 Define this only if you are willing to give up some speed to more
369 aggressively reduce system-level memory footprint when releasing
370 memory in programs that use many small chunks. You can get
371 essentially the same effect by setting MXFAST to 0, but this can
372 lead to even greater slowdowns in programs using many small chunks.
373 TRIM_FASTBINS is an in-between compile-time option, that disables
374 only those chunks bordering topmost memory from being placed in
378 #ifndef TRIM_FASTBINS
379 #define TRIM_FASTBINS 0
383 /* Definition for getting more memory from the OS. */
384 #define MORECORE (*__morecore)
385 #define MORECORE_FAILURE 0
386 void * __default_morecore (ptrdiff_t);
387 void *(*__morecore
)(ptrdiff_t) = __default_morecore
;
389 /* Memory tagging. */
391 /* Some systems support the concept of tagging (sometimes known as
392 coloring) memory locations on a fine grained basis. Each memory
393 location is given a color (normally allocated randomly) and
394 pointers are also colored. When the pointer is dereferenced, the
395 pointer's color is checked against the memory's color and if they
396 differ the access is faulted (sometimes lazily).
398 We use this in glibc by maintaining a single color for the malloc
399 data structures that are interleaved with the user data and then
400 assigning separate colors for each block allocation handed out. In
401 this way simple buffer overruns will be rapidly detected. When
402 memory is freed, the memory is recolored back to the glibc default
403 so that simple use-after-free errors can also be detected.
405 If memory is reallocated the buffer is recolored even if the
406 address remains the same. This has a performance impact, but
407 guarantees that the old pointer cannot mistakenly be reused (code
408 that compares old against new will see a mismatch and will then
409 need to behave as though realloc moved the data to a new location).
411 Internal API for memory tagging support.
413 The aim is to keep the code for memory tagging support as close to
414 the normal APIs in glibc as possible, so that if tagging is not
415 enabled in the library, or is disabled at runtime then standard
416 operations can continue to be used. Support macros are used to do
419 void *tag_new_zero_region (void *ptr, size_t size)
421 Allocates a new tag, colors the memory with that tag, zeros the
422 memory and returns a pointer that is correctly colored for that
423 location. The non-tagging version will simply call memset with 0.
425 void *tag_region (void *ptr, size_t size)
427 Color the region of memory pointed to by PTR and size SIZE with
428 the color of PTR. Returns the original pointer.
430 void *tag_new_usable (void *ptr)
432 Allocate a new random color and use it to color the user region of
433 a chunk; this may include data from the subsequent chunk's header
434 if tagging is sufficiently fine grained. Returns PTR suitably
435 recolored for accessing the memory there.
437 void *tag_at (void *ptr)
439 Read the current color of the memory at the address pointed to by
440 PTR (ignoring it's current color) and return PTR recolored to that
441 color. PTR must be valid address in all other respects. When
442 tagging is not enabled, it simply returns the original pointer.
446 static bool mtag_enabled
= false;
447 static int mtag_mmap_flags
= 0;
449 # define mtag_enabled false
450 # define mtag_mmap_flags 0
453 static __always_inline
void *
454 tag_region (void *ptr
, size_t size
)
456 if (__glibc_unlikely (mtag_enabled
))
457 return __libc_mtag_tag_region (ptr
, size
);
461 static __always_inline
void *
462 tag_new_zero_region (void *ptr
, size_t size
)
464 if (__glibc_unlikely (mtag_enabled
))
465 return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr
), size
);
466 return memset (ptr
, 0, size
);
471 tag_new_usable (void *ptr
);
473 static __always_inline
void *
476 if (__glibc_unlikely (mtag_enabled
))
477 return __libc_mtag_address_get_tag (ptr
);
484 MORECORE-related declarations. By default, rely on sbrk
489 MORECORE is the name of the routine to call to obtain more memory
490 from the system. See below for general guidance on writing
491 alternative MORECORE functions, as well as a version for WIN32 and a
492 sample version for pre-OSX macos.
496 #define MORECORE sbrk
500 MORECORE_FAILURE is the value returned upon failure of MORECORE
501 as well as mmap. Since it cannot be an otherwise valid memory address,
502 and must reflect values of standard sys calls, you probably ought not
506 #ifndef MORECORE_FAILURE
507 #define MORECORE_FAILURE (-1)
511 If MORECORE_CONTIGUOUS is true, take advantage of fact that
512 consecutive calls to MORECORE with positive arguments always return
513 contiguous increasing addresses. This is true of unix sbrk. Even
514 if not defined, when regions happen to be contiguous, malloc will
515 permit allocations spanning regions obtained from different
516 calls. But defining this when applicable enables some stronger
517 consistency checks and space efficiencies.
520 #ifndef MORECORE_CONTIGUOUS
521 #define MORECORE_CONTIGUOUS 1
525 Define MORECORE_CANNOT_TRIM if your version of MORECORE
526 cannot release space back to the system when given negative
527 arguments. This is generally necessary only if you are using
528 a hand-crafted MORECORE function that cannot handle negative arguments.
531 /* #define MORECORE_CANNOT_TRIM */
533 /* MORECORE_CLEARS (default 1)
534 The degree to which the routine mapped to MORECORE zeroes out
535 memory: never (0), only for newly allocated space (1) or always
536 (2). The distinction between (1) and (2) is necessary because on
537 some systems, if the application first decrements and then
538 increments the break value, the contents of the reallocated space
542 #ifndef MORECORE_CLEARS
543 # define MORECORE_CLEARS 1
548 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
549 sbrk fails, and mmap is used as a backup. The value must be a
550 multiple of page size. This backup strategy generally applies only
551 when systems have "holes" in address space, so sbrk cannot perform
552 contiguous expansion, but there is still space available on system.
553 On systems for which this is known to be useful (i.e. most linux
554 kernels), this occurs only when programs allocate huge amounts of
555 memory. Between this, and the fact that mmap regions tend to be
556 limited, the size should be large, to avoid too many mmap calls and
557 thus avoid running out of kernel resources. */
559 #ifndef MMAP_AS_MORECORE_SIZE
560 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
564 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
569 #define HAVE_MREMAP 0
572 /* We may need to support __malloc_initialize_hook for backwards
575 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
576 # define HAVE_MALLOC_INIT_HOOK 1
578 # define HAVE_MALLOC_INIT_HOOK 0
583 This version of malloc supports the standard SVID/XPG mallinfo
584 routine that returns a struct containing usage properties and
585 statistics. It should work on any SVID/XPG compliant system that has
586 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
587 install such a thing yourself, cut out the preliminary declarations
588 as described above and below and save them in a malloc.h file. But
589 there's no compelling reason to bother to do this.)
591 The main declaration needed is the mallinfo struct that is returned
592 (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a
593 bunch of fields that are not even meaningful in this version of
594 malloc. These fields are are instead filled by mallinfo() with
595 other numbers that might be of interest.
599 /* ---------- description of public routines ------------ */
603 Returns a pointer to a newly allocated chunk of at least n bytes, or null
604 if no space is available. Additionally, on failure, errno is
605 set to ENOMEM on ANSI C systems.
607 If n is zero, malloc returns a minimum-sized chunk. (The minimum
608 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
609 systems.) On most systems, size_t is an unsigned type, so calls
610 with negative arguments are interpreted as requests for huge amounts
611 of space, which will often fail. The maximum supported value of n
612 differs across systems, but is in all cases less than the maximum
613 representable value of a size_t.
615 void* __libc_malloc(size_t);
616 libc_hidden_proto (__libc_malloc
)
620 Releases the chunk of memory pointed to by p, that had been previously
621 allocated using malloc or a related routine such as realloc.
622 It has no effect if p is null. It can have arbitrary (i.e., bad!)
623 effects if p has already been freed.
625 Unless disabled (using mallopt), freeing very large spaces will
626 when possible, automatically trigger operations that give
627 back unused memory to the system, thus reducing program footprint.
629 void __libc_free(void*);
630 libc_hidden_proto (__libc_free
)
633 calloc(size_t n_elements, size_t element_size);
634 Returns a pointer to n_elements * element_size bytes, with all locations
637 void* __libc_calloc(size_t, size_t);
640 realloc(void* p, size_t n)
641 Returns a pointer to a chunk of size n that contains the same data
642 as does chunk p up to the minimum of (n, p's size) bytes, or null
643 if no space is available.
645 The returned pointer may or may not be the same as p. The algorithm
646 prefers extending p when possible, otherwise it employs the
647 equivalent of a malloc-copy-free sequence.
649 If p is null, realloc is equivalent to malloc.
651 If space is not available, realloc returns null, errno is set (if on
652 ANSI) and p is NOT freed.
654 if n is for fewer bytes than already held by p, the newly unused
655 space is lopped off and freed if possible. Unless the #define
656 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
657 zero (re)allocates a minimum-sized chunk.
659 Large chunks that were internally obtained via mmap will always be
660 grown using malloc-copy-free sequences unless the system supports
661 MREMAP (currently only linux).
663 The old unix realloc convention of allowing the last-free'd chunk
664 to be used as an argument to realloc is not supported.
666 void* __libc_realloc(void*, size_t);
667 libc_hidden_proto (__libc_realloc
)
670 memalign(size_t alignment, size_t n);
671 Returns a pointer to a newly allocated chunk of n bytes, aligned
672 in accord with the alignment argument.
674 The alignment argument should be a power of two. If the argument is
675 not a power of two, the nearest greater power is used.
676 8-byte alignment is guaranteed by normal malloc calls, so don't
677 bother calling memalign with an argument of 8 or less.
679 Overreliance on memalign is a sure way to fragment space.
681 void* __libc_memalign(size_t, size_t);
682 libc_hidden_proto (__libc_memalign
)
686 Equivalent to memalign(pagesize, n), where pagesize is the page
687 size of the system. If the pagesize is unknown, 4096 is used.
689 void* __libc_valloc(size_t);
694 mallopt(int parameter_number, int parameter_value)
695 Sets tunable parameters The format is to provide a
696 (parameter-number, parameter-value) pair. mallopt then sets the
697 corresponding parameter to the argument value if it can (i.e., so
698 long as the value is meaningful), and returns 1 if successful else
699 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
700 normally defined in malloc.h. Only one of these (M_MXFAST) is used
701 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
702 so setting them has no effect. But this malloc also supports four
703 other options in mallopt. See below for details. Briefly, supported
704 parameters are as follows (listed defaults are for "typical"
707 Symbol param # default allowed param values
708 M_MXFAST 1 64 0-80 (0 disables fastbins)
709 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
711 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
712 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
714 int __libc_mallopt(int, int);
715 libc_hidden_proto (__libc_mallopt
)
720 Returns (by copy) a struct containing various summary statistics:
722 arena: current total non-mmapped bytes allocated from system
723 ordblks: the number of free chunks
724 smblks: the number of fastbin blocks (i.e., small chunks that
725 have been freed but not use resused or consolidated)
726 hblks: current number of mmapped regions
727 hblkhd: total bytes held in mmapped regions
729 fsmblks: total bytes held in fastbin blocks
730 uordblks: current total allocated space (normal or mmapped)
731 fordblks: total free space
732 keepcost: the maximum number of bytes that could ideally be released
733 back to system via malloc_trim. ("ideally" means that
734 it ignores page restrictions etc.)
736 Because these fields are ints, but internal bookkeeping may
737 be kept as longs, the reported values may wrap around zero and
740 struct mallinfo2
__libc_mallinfo2(void);
741 libc_hidden_proto (__libc_mallinfo2
)
743 struct mallinfo
__libc_mallinfo(void);
748 Equivalent to valloc(minimum-page-that-holds(n)), that is,
749 round up n to nearest pagesize.
751 void* __libc_pvalloc(size_t);
754 malloc_trim(size_t pad);
756 If possible, gives memory back to the system (via negative
757 arguments to sbrk) if there is unused memory at the `high' end of
758 the malloc pool. You can call this after freeing large blocks of
759 memory to potentially reduce the system-level memory requirements
760 of a program. However, it cannot guarantee to reduce memory. Under
761 some allocation patterns, some large free blocks of memory will be
762 locked between two used chunks, so they cannot be given back to
765 The `pad' argument to malloc_trim represents the amount of free
766 trailing space to leave untrimmed. If this argument is zero,
767 only the minimum amount of memory to maintain internal data
768 structures will be left (one page or less). Non-zero arguments
769 can be supplied to maintain enough trailing space to service
770 future expected allocations without having to re-obtain memory
773 Malloc_trim returns 1 if it actually released any memory, else 0.
774 On systems that do not support "negative sbrks", it will always
777 int __malloc_trim(size_t);
780 malloc_usable_size(void* p);
782 Returns the number of bytes you can actually use in
783 an allocated chunk, which may be more than you requested (although
784 often not) due to alignment and minimum size constraints.
785 You can use this many bytes without worrying about
786 overwriting other allocated objects. This is not a particularly great
787 programming practice. malloc_usable_size can be more useful in
788 debugging and assertions, for example:
791 assert(malloc_usable_size(p) >= 256);
794 size_t __malloc_usable_size(void*);
798 Prints on stderr the amount of space obtained from the system (both
799 via sbrk and mmap), the maximum amount (which may be more than
800 current if malloc_trim and/or munmap got called), and the current
801 number of bytes allocated via malloc (or realloc, etc) but not yet
802 freed. Note that this is the number of bytes allocated, not the
803 number requested. It will be larger than the number requested
804 because of alignment and bookkeeping overhead. Because it includes
805 alignment wastage as being in use, this figure may be greater than
806 zero even when no user-level chunks are allocated.
808 The reported current and maximum system memory can be inaccurate if
809 a program makes other calls to system memory allocation functions
810 (normally sbrk) outside of malloc.
812 malloc_stats prints only the most commonly interesting statistics.
813 More information can be obtained by calling mallinfo.
816 void __malloc_stats(void);
819 posix_memalign(void **memptr, size_t alignment, size_t size);
821 POSIX wrapper like memalign(), checking for validity of size.
823 int __posix_memalign(void **, size_t, size_t);
825 /* mallopt tuning options */
828 M_MXFAST is the maximum request size used for "fastbins", special bins
829 that hold returned chunks without consolidating their spaces. This
830 enables future requests for chunks of the same size to be handled
831 very quickly, but can increase fragmentation, and thus increase the
832 overall memory footprint of a program.
834 This malloc manages fastbins very conservatively yet still
835 efficiently, so fragmentation is rarely a problem for values less
836 than or equal to the default. The maximum supported value of MXFAST
837 is 80. You wouldn't want it any higher than this anyway. Fastbins
838 are designed especially for use with many small structs, objects or
839 strings -- the default handles structs/objects/arrays with sizes up
840 to 8 4byte fields, or small strings representing words, tokens,
841 etc. Using fastbins for larger objects normally worsens
842 fragmentation without improving speed.
844 M_MXFAST is set in REQUEST size units. It is internally used in
845 chunksize units, which adds padding and alignment. You can reduce
846 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
847 algorithm to be a closer approximation of fifo-best-fit in all cases,
848 not just for larger requests, but will generally cause it to be
853 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
858 #ifndef DEFAULT_MXFAST
859 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
864 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
865 to keep before releasing via malloc_trim in free().
867 Automatic trimming is mainly useful in long-lived programs.
868 Because trimming via sbrk can be slow on some systems, and can
869 sometimes be wasteful (in cases where programs immediately
870 afterward allocate more large chunks) the value should be high
871 enough so that your overall system performance would improve by
872 releasing this much memory.
874 The trim threshold and the mmap control parameters (see below)
875 can be traded off with one another. Trimming and mmapping are
876 two different ways of releasing unused memory back to the
877 system. Between these two, it is often possible to keep
878 system-level demands of a long-lived program down to a bare
879 minimum. For example, in one test suite of sessions measuring
880 the XF86 X server on Linux, using a trim threshold of 128K and a
881 mmap threshold of 192K led to near-minimal long term resource
884 If you are using this malloc in a long-lived program, it should
885 pay to experiment with these values. As a rough guide, you
886 might set to a value close to the average size of a process
887 (program) running on your system. Releasing this much memory
888 would allow such a process to run in memory. Generally, it's
889 worth it to tune for trimming rather tham memory mapping when a
890 program undergoes phases where several large chunks are
891 allocated and released in ways that can reuse each other's
892 storage, perhaps mixed with phases where there are no such
893 chunks at all. And in well-behaved long-lived programs,
894 controlling release of large blocks via trimming versus mapping
897 However, in most programs, these parameters serve mainly as
898 protection against the system-level effects of carrying around
899 massive amounts of unneeded memory. Since frequent calls to
900 sbrk, mmap, and munmap otherwise degrade performance, the default
901 parameters are set to relatively high values that serve only as
904 The trim value It must be greater than page size to have any useful
905 effect. To disable trimming completely, you can set to
908 Trim settings interact with fastbin (MXFAST) settings: Unless
909 TRIM_FASTBINS is defined, automatic trimming never takes place upon
910 freeing a chunk with size less than or equal to MXFAST. Trimming is
911 instead delayed until subsequent freeing of larger chunks. However,
912 you can still force an attempted trim by calling malloc_trim.
914 Also, trimming is not generally possible in cases where
915 the main arena is obtained via mmap.
917 Note that the trick some people use of mallocing a huge space and
918 then freeing it at program startup, in an attempt to reserve system
919 memory, doesn't have the intended effect under automatic trimming,
920 since that memory will immediately be returned to the system.
923 #define M_TRIM_THRESHOLD -1
925 #ifndef DEFAULT_TRIM_THRESHOLD
926 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
930 M_TOP_PAD is the amount of extra `padding' space to allocate or
931 retain whenever sbrk is called. It is used in two ways internally:
933 * When sbrk is called to extend the top of the arena to satisfy
934 a new malloc request, this much padding is added to the sbrk
937 * When malloc_trim is called automatically from free(),
938 it is used as the `pad' argument.
940 In both cases, the actual amount of padding is rounded
941 so that the end of the arena is always a system page boundary.
943 The main reason for using padding is to avoid calling sbrk so
944 often. Having even a small pad greatly reduces the likelihood
945 that nearly every malloc request during program start-up (or
946 after trimming) will invoke sbrk, which needlessly wastes
949 Automatic rounding-up to page-size units is normally sufficient
950 to avoid measurable overhead, so the default is 0. However, in
951 systems where sbrk is relatively slow, it can pay to increase
952 this value, at the expense of carrying around more memory than
958 #ifndef DEFAULT_TOP_PAD
959 #define DEFAULT_TOP_PAD (0)
963 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
964 adjusted MMAP_THRESHOLD.
967 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
968 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
971 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
972 /* For 32-bit platforms we cannot increase the maximum mmap
973 threshold much because it is also the minimum value for the
974 maximum heap size and its alignment. Going above 512k (i.e., 1M
975 for new heaps) wastes too much address space. */
976 # if __WORDSIZE == 32
977 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
979 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
984 M_MMAP_THRESHOLD is the request size threshold for using mmap()
985 to service a request. Requests of at least this size that cannot
986 be allocated using already-existing space will be serviced via mmap.
987 (If enough normal freed space already exists it is used instead.)
989 Using mmap segregates relatively large chunks of memory so that
990 they can be individually obtained and released from the host
991 system. A request serviced through mmap is never reused by any
992 other request (at least not directly; the system may just so
993 happen to remap successive requests to the same locations).
995 Segregating space in this way has the benefits that:
997 1. Mmapped space can ALWAYS be individually released back
998 to the system, which helps keep the system level memory
999 demands of a long-lived program low.
1000 2. Mapped memory can never become `locked' between
1001 other chunks, as can happen with normally allocated chunks, which
1002 means that even trimming via malloc_trim would not release them.
1003 3. On some systems with "holes" in address spaces, mmap can obtain
1004 memory that sbrk cannot.
1006 However, it has the disadvantages that:
1008 1. The space cannot be reclaimed, consolidated, and then
1009 used to service later requests, as happens with normal chunks.
1010 2. It can lead to more wastage because of mmap page alignment
1012 3. It causes malloc performance to be more dependent on host
1013 system memory management support routines which may vary in
1014 implementation quality and may impose arbitrary
1015 limitations. Generally, servicing a request via normal
1016 malloc steps is faster than going through a system's mmap.
1018 The advantages of mmap nearly always outweigh disadvantages for
1019 "large" chunks, but the value of "large" varies across systems. The
1020 default is an empirically derived value that works well in most
1025 The above was written in 2001. Since then the world has changed a lot.
1026 Memory got bigger. Applications got bigger. The virtual address space
1027 layout in 32 bit linux changed.
1029 In the new situation, brk() and mmap space is shared and there are no
1030 artificial limits on brk size imposed by the kernel. What is more,
1031 applications have started using transient allocations larger than the
1032 128Kb as was imagined in 2001.
1034 The price for mmap is also high now; each time glibc mmaps from the
1035 kernel, the kernel is forced to zero out the memory it gives to the
1036 application. Zeroing memory is expensive and eats a lot of cache and
1037 memory bandwidth. This has nothing to do with the efficiency of the
1038 virtual memory system, by doing mmap the kernel just has no choice but
1041 In 2001, the kernel had a maximum size for brk() which was about 800
1042 megabytes on 32 bit x86, at that point brk() would hit the first
1043 mmaped shared libaries and couldn't expand anymore. With current 2.6
1044 kernels, the VA space layout is different and brk() and mmap
1045 both can span the entire heap at will.
1047 Rather than using a static threshold for the brk/mmap tradeoff,
1048 we are now using a simple dynamic one. The goal is still to avoid
1049 fragmentation. The old goals we kept are
1050 1) try to get the long lived large allocations to use mmap()
1051 2) really large allocations should always use mmap()
1052 and we're adding now:
1053 3) transient allocations should use brk() to avoid forcing the kernel
1054 having to zero memory over and over again
1056 The implementation works with a sliding threshold, which is by default
1057 limited to go between 128Kb and 32Mb (64Mb for 64 bitmachines) and starts
1058 out at 128Kb as per the 2001 default.
1060 This allows us to satisfy requirement 1) under the assumption that long
1061 lived allocations are made early in the process' lifespan, before it has
1062 started doing dynamic allocations of the same size (which will
1063 increase the threshold).
1065 The upperbound on the threshold satisfies requirement 2)
1067 The threshold goes up in value when the application frees memory that was
1068 allocated with the mmap allocator. The idea is that once the application
1069 starts freeing memory of a certain size, it's highly probable that this is
1070 a size the application uses for transient allocations. This estimator
1071 is there to satisfy the new third requirement.
1075 #define M_MMAP_THRESHOLD -3
1077 #ifndef DEFAULT_MMAP_THRESHOLD
1078 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1082 M_MMAP_MAX is the maximum number of requests to simultaneously
1083 service using mmap. This parameter exists because
1084 some systems have a limited number of internal tables for
1085 use by mmap, and using more than a few of them may degrade
1088 The default is set to a value that serves only as a safeguard.
1089 Setting to 0 disables use of mmap for servicing large requests.
1092 #define M_MMAP_MAX -4
1094 #ifndef DEFAULT_MMAP_MAX
1095 #define DEFAULT_MMAP_MAX (65536)
1100 #ifndef RETURN_ADDRESS
1101 #define RETURN_ADDRESS(X_) (NULL)
1104 /* Forward declarations. */
1105 struct malloc_chunk
;
1106 typedef struct malloc_chunk
* mchunkptr
;
1108 /* Internal routines. */
1110 static void* _int_malloc(mstate
, size_t);
1111 static void _int_free(mstate
, mchunkptr
, int);
1112 static void* _int_realloc(mstate
, mchunkptr
, INTERNAL_SIZE_T
,
1114 static void* _int_memalign(mstate
, size_t, size_t);
1115 static void* _mid_memalign(size_t, size_t, void *);
1117 static void malloc_printerr(const char *str
) __attribute__ ((noreturn
));
1119 static void* mem2mem_check(void *p
, size_t sz
);
1120 static void top_check(void);
1121 static void munmap_chunk(mchunkptr p
);
1123 static mchunkptr
mremap_chunk(mchunkptr p
, size_t new_size
);
1126 static void* malloc_check(size_t sz
, const void *caller
);
1127 static void free_check(void* mem
, const void *caller
);
1128 static void* realloc_check(void* oldmem
, size_t bytes
,
1129 const void *caller
);
1130 static void* memalign_check(size_t alignment
, size_t bytes
,
1131 const void *caller
);
1133 /* ------------------ MMAP support ------------------ */
1137 #include <sys/mman.h>
1139 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1140 # define MAP_ANONYMOUS MAP_ANON
1143 #ifndef MAP_NORESERVE
1144 # define MAP_NORESERVE 0
1147 #define MMAP(addr, size, prot, flags) \
1148 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1152 ----------------------- Chunk representations -----------------------
1157 This struct declaration is misleading (but accurate and necessary).
1158 It declares a "view" into memory allowing access to necessary
1159 fields at known offsets from a given base. See explanation below.
1162 struct malloc_chunk
{
1164 INTERNAL_SIZE_T mchunk_prev_size
; /* Size of previous chunk (if free). */
1165 INTERNAL_SIZE_T mchunk_size
; /* Size in bytes, including overhead. */
1167 struct malloc_chunk
* fd
; /* double links -- used only if free. */
1168 struct malloc_chunk
* bk
;
1170 /* Only used for large blocks: pointer to next larger size. */
1171 struct malloc_chunk
* fd_nextsize
; /* double links -- used only if free. */
1172 struct malloc_chunk
* bk_nextsize
;
1177 malloc_chunk details:
1179 (The following includes lightly edited explanations by Colin Plumb.)
1181 Chunks of memory are maintained using a `boundary tag' method as
1182 described in e.g., Knuth or Standish. (See the paper by Paul
1183 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1184 survey of such techniques.) Sizes of free chunks are stored both
1185 in the front of each chunk and at the end. This makes
1186 consolidating fragmented chunks into bigger chunks very fast. The
1187 size fields also hold bits representing whether chunks are free or
1190 An allocated chunk looks like this:
1193 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1194 | Size of previous chunk, if unallocated (P clear) |
1195 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1196 | Size of chunk, in bytes |A|M|P|
1197 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1198 | User data starts here... .
1200 . (malloc_usable_size() bytes) .
1202 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1203 | (size of chunk, but used for application data) |
1204 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1205 | Size of next chunk, in bytes |A|0|1|
1206 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1208 Where "chunk" is the front of the chunk for the purpose of most of
1209 the malloc code, but "mem" is the pointer that is returned to the
1210 user. "Nextchunk" is the beginning of the next contiguous chunk.
1212 Chunks always begin on even word boundaries, so the mem portion
1213 (which is returned to the user) is also on an even word boundary, and
1214 thus at least double-word aligned.
1216 Free chunks are stored in circular doubly-linked lists, and look like this:
1218 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1219 | Size of previous chunk, if unallocated (P clear) |
1220 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1221 `head:' | Size of chunk, in bytes |A|0|P|
1222 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1223 | Forward pointer to next chunk in list |
1224 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1225 | Back pointer to previous chunk in list |
1226 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1227 | Unused space (may be 0 bytes long) .
1230 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1231 `foot:' | Size of chunk, in bytes |
1232 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1233 | Size of next chunk, in bytes |A|0|0|
1234 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1236 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1237 chunk size (which is always a multiple of two words), is an in-use
1238 bit for the *previous* chunk. If that bit is *clear*, then the
1239 word before the current chunk size contains the previous chunk
1240 size, and can be used to find the front of the previous chunk.
1241 The very first chunk allocated always has this bit set,
1242 preventing access to non-existent (or non-owned) memory. If
1243 prev_inuse is set for any given chunk, then you CANNOT determine
1244 the size of the previous chunk, and might even get a memory
1245 addressing fault when trying to do so.
1247 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1248 main arena, described by the main_arena variable. When additional
1249 threads are spawned, each thread receives its own arena (up to a
1250 configurable limit, after which arenas are reused for multiple
1251 threads), and the chunks in these arenas have the A bit set. To
1252 find the arena for a chunk on such a non-main arena, heap_for_ptr
1253 performs a bit mask operation and indirection through the ar_ptr
1254 member of the per-heap header heap_info (see arena.c).
1256 Note that the `foot' of the current chunk is actually represented
1257 as the prev_size of the NEXT chunk. This makes it easier to
1258 deal with alignments etc but can be very confusing when trying
1259 to extend or adapt this code.
1261 The three exceptions to all this are:
1263 1. The special chunk `top' doesn't bother using the
1264 trailing size field since there is no next contiguous chunk
1265 that would have to index off it. After initialization, `top'
1266 is forced to always exist. If it would become less than
1267 MINSIZE bytes long, it is replenished.
1269 2. Chunks allocated via mmap, which have the second-lowest-order
1270 bit M (IS_MMAPPED) set in their size fields. Because they are
1271 allocated one-by-one, each must contain its own trailing size
1272 field. If the M bit is set, the other bits are ignored
1273 (because mmapped chunks are neither in an arena, nor adjacent
1274 to a freed chunk). The M bit is also used for chunks which
1275 originally came from a dumped heap via malloc_set_state in
1278 3. Chunks in fastbins are treated as allocated chunks from the
1279 point of view of the chunk allocator. They are consolidated
1280 with their neighbors only in bulk, in malloc_consolidate.
1284 ---------- Size and alignment checks and conversions ----------
1287 /* Conversion from malloc headers to user pointers, and back. When
1288 using memory tagging the user data and the malloc data structure
1289 headers have distinct tags. Converting fully from one to the other
1290 involves extracting the tag at the other address and creating a
1291 suitable pointer using it. That can be quite expensive. There are
1292 cases when the pointers are not dereferenced (for example only used
1293 for alignment check) so the tags are not relevant, and there are
1294 cases when user data is not tagged distinctly from malloc headers
1295 (user data is untagged because tagging is done late in malloc and
1296 early in free). User memory tagging across internal interfaces:
1298 sysmalloc: Returns untagged memory.
1299 _int_malloc: Returns untagged memory.
1300 _int_free: Takes untagged memory.
1301 _int_memalign: Returns untagged memory.
1302 _int_memalign: Returns untagged memory.
1303 _mid_memalign: Returns tagged memory.
1304 _int_realloc: Takes and returns tagged memory.
1307 /* The chunk header is two SIZE_SZ elements, but this is used widely, so
1308 we define it here for clarity later. */
1309 #define CHUNK_HDR_SZ (2 * SIZE_SZ)
1311 /* Convert a chunk address to a user mem pointer without correcting
1313 #define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
1315 /* Convert a chunk address to a user mem pointer and extract the right tag. */
1316 #define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ))
1318 /* Convert a user mem pointer to a chunk address and extract the right tag. */
1319 #define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
1321 /* The smallest possible chunk */
1322 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1324 /* The smallest size we can malloc is an aligned minimal chunk */
1327 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1329 /* Check if m has acceptable alignment */
1331 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1333 #define misaligned_chunk(p) \
1334 ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
1335 & MALLOC_ALIGN_MASK)
1337 /* pad request bytes into a usable size -- internal version */
1338 /* Note: This must be a macro that evaluates to a compile time constant
1339 if passed a literal constant. */
1340 #define request2size(req) \
1341 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1343 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1345 /* Check if REQ overflows when padded and aligned and if the resulting value
1346 is less than PTRDIFF_T. Returns TRUE and the requested size or MINSIZE in
1347 case the value is less than MINSIZE on SZ or false if any of the previous
1350 checked_request2size (size_t req
, size_t *sz
) __nonnull (1)
1352 if (__glibc_unlikely (req
> PTRDIFF_MAX
))
1355 /* When using tagged memory, we cannot share the end of the user
1356 block with the header for the next chunk, so ensure that we
1357 allocate blocks that are rounded up to the granule size. Take
1358 care not to overflow from close to MAX_SIZE_T to a small
1359 number. Ideally, this would be part of request2size(), but that
1360 must be a macro that produces a compile time constant if passed
1361 a constant literal. */
1362 if (__glibc_unlikely (mtag_enabled
))
1364 /* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */
1367 req
= (req
+ (__MTAG_GRANULE_SIZE
- 1)) &
1368 ~(size_t)(__MTAG_GRANULE_SIZE
- 1);
1371 *sz
= request2size (req
);
1376 --------------- Physical chunk operations ---------------
1380 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1381 #define PREV_INUSE 0x1
1383 /* extract inuse bit of previous chunk */
1384 #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1387 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1388 #define IS_MMAPPED 0x2
1390 /* check for mmap()'ed chunk */
1391 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1394 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1395 from a non-main arena. This is only set immediately before handing
1396 the chunk to the user, if necessary. */
1397 #define NON_MAIN_ARENA 0x4
1399 /* Check for chunk from main arena. */
1400 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1402 /* Mark a chunk as not being on the main arena. */
1403 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1407 Bits to mask off when extracting size
1409 Note: IS_MMAPPED is intentionally not masked off from size field in
1410 macros for which mmapped chunks should never be seen. This should
1411 cause helpful core dumps to occur if it is tried by accident by
1412 people extending or adapting this malloc.
1414 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1416 /* Get size, ignoring use bits */
1417 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1419 /* Like chunksize, but do not mask SIZE_BITS. */
1420 #define chunksize_nomask(p) ((p)->mchunk_size)
1422 /* Ptr to next physical malloc_chunk. */
1423 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1425 /* Size of the chunk below P. Only valid if !prev_inuse (P). */
1426 #define prev_size(p) ((p)->mchunk_prev_size)
1428 /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1429 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1431 /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
1432 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1434 /* Treat space at ptr + offset as a chunk */
1435 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1437 /* extract p's inuse bit */
1439 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1441 /* set/clear chunk as being inuse without otherwise disturbing */
1442 #define set_inuse(p) \
1443 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1445 #define clear_inuse(p) \
1446 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1449 /* check/set/clear inuse bits in known places */
1450 #define inuse_bit_at_offset(p, s) \
1451 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1453 #define set_inuse_bit_at_offset(p, s) \
1454 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1456 #define clear_inuse_bit_at_offset(p, s) \
1457 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1460 /* Set size at head, without disturbing its use bit */
1461 #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1463 /* Set size/use field */
1464 #define set_head(p, s) ((p)->mchunk_size = (s))
1466 /* Set size at footer (only when chunk is not in use) */
1467 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
1469 #pragma GCC poison mchunk_size
1470 #pragma GCC poison mchunk_prev_size
1472 /* This is the size of the real usable data in the chunk. Not valid for
1473 dumped heap chunks. */
1474 #define memsize(p) \
1475 (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
1476 chunksize (p) - CHUNK_HDR_SZ : \
1477 chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
1479 /* If memory tagging is enabled the layout changes to accomodate the granule
1480 size, this is wasteful for small allocations so not done by default.
1481 Both the chunk header and user data has to be granule aligned. */
1482 _Static_assert (__MTAG_GRANULE_SIZE
<= CHUNK_HDR_SZ
,
1483 "memory tagging is not supported with large granule.");
1485 static __always_inline
void *
1486 tag_new_usable (void *ptr
)
1488 if (__glibc_unlikely (mtag_enabled
) && ptr
)
1490 mchunkptr cp
= mem2chunk(ptr
);
1491 ptr
= __libc_mtag_tag_region (__libc_mtag_new_tag (ptr
), memsize (cp
));
1497 -------------------- Internal data structures --------------------
1499 All internal state is held in an instance of malloc_state defined
1500 below. There are no other static variables, except in two optional
1502 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1503 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1506 Beware of lots of tricks that minimize the total bookkeeping space
1507 requirements. The result is a little over 1K bytes (for 4byte
1508 pointers and size_t.)
1514 An array of bin headers for free chunks. Each bin is doubly
1515 linked. The bins are approximately proportionally (log) spaced.
1516 There are a lot of these bins (128). This may look excessive, but
1517 works very well in practice. Most bins hold sizes that are
1518 unusual as malloc request sizes, but are more usual for fragments
1519 and consolidated sets of chunks, which is what these bins hold, so
1520 they can be found quickly. All procedures maintain the invariant
1521 that no consolidated chunk physically borders another one, so each
1522 chunk in a list is known to be preceeded and followed by either
1523 inuse chunks or the ends of memory.
1525 Chunks in bins are kept in size order, with ties going to the
1526 approximately least recently used chunk. Ordering isn't needed
1527 for the small bins, which all contain the same-sized chunks, but
1528 facilitates best-fit allocation for larger chunks. These lists
1529 are just sequential. Keeping them in order almost never requires
1530 enough traversal to warrant using fancier ordered data
1533 Chunks of the same size are linked with the most
1534 recently freed at the front, and allocations are taken from the
1535 back. This results in LRU (FIFO) allocation order, which tends
1536 to give each chunk an equal opportunity to be consolidated with
1537 adjacent freed chunks, resulting in larger free chunks and less
1540 To simplify use in double-linked lists, each bin header acts
1541 as a malloc_chunk. This avoids special-casing for headers.
1542 But to conserve space and improve locality, we allocate
1543 only the fd/bk pointers of bins, and then use repositioning tricks
1544 to treat these as the fields of a malloc_chunk*.
1547 typedef struct malloc_chunk
*mbinptr
;
1549 /* addressing -- note that bin_at(0) does not exist */
1550 #define bin_at(m, i) \
1551 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1552 - offsetof (struct malloc_chunk, fd))
1554 /* analog of ++bin */
1555 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1557 /* Reminders about list directionality within bins */
1558 #define first(b) ((b)->fd)
1559 #define last(b) ((b)->bk)
1564 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1565 8 bytes apart. Larger bins are approximately logarithmically spaced:
1571 4 bins of size 32768
1572 2 bins of size 262144
1573 1 bin of size what's left
1575 There is actually a little bit of slop in the numbers in bin_index
1576 for the sake of speed. This makes no difference elsewhere.
1578 The bins top out around 1MB because we expect to service large
1581 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1582 a valid chunk size the small bins are bumped up one.
1586 #define NSMALLBINS 64
1587 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1588 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
1589 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1591 #define in_smallbin_range(sz) \
1592 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1594 #define smallbin_index(sz) \
1595 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1596 + SMALLBIN_CORRECTION)
1598 #define largebin_index_32(sz) \
1599 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1600 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1601 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1602 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1603 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1606 #define largebin_index_32_big(sz) \
1607 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1608 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1609 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1610 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1611 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1614 // XXX It remains to be seen whether it is good to keep the widths of
1615 // XXX the buckets the same or whether it should be scaled by a factor
1616 // XXX of two as well.
1617 #define largebin_index_64(sz) \
1618 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1619 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1620 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1621 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1622 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1625 #define largebin_index(sz) \
1626 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1627 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1628 : largebin_index_32 (sz))
1630 #define bin_index(sz) \
1631 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
1633 /* Take a chunk off a bin list. */
1635 unlink_chunk (mstate av
, mchunkptr p
)
1637 if (chunksize (p
) != prev_size (next_chunk (p
)))
1638 malloc_printerr ("corrupted size vs. prev_size");
1640 mchunkptr fd
= p
->fd
;
1641 mchunkptr bk
= p
->bk
;
1643 if (__builtin_expect (fd
->bk
!= p
|| bk
->fd
!= p
, 0))
1644 malloc_printerr ("corrupted double-linked list");
1648 if (!in_smallbin_range (chunksize_nomask (p
)) && p
->fd_nextsize
!= NULL
)
1650 if (p
->fd_nextsize
->bk_nextsize
!= p
1651 || p
->bk_nextsize
->fd_nextsize
!= p
)
1652 malloc_printerr ("corrupted double-linked list (not small)");
1654 if (fd
->fd_nextsize
== NULL
)
1656 if (p
->fd_nextsize
== p
)
1657 fd
->fd_nextsize
= fd
->bk_nextsize
= fd
;
1660 fd
->fd_nextsize
= p
->fd_nextsize
;
1661 fd
->bk_nextsize
= p
->bk_nextsize
;
1662 p
->fd_nextsize
->bk_nextsize
= fd
;
1663 p
->bk_nextsize
->fd_nextsize
= fd
;
1668 p
->fd_nextsize
->bk_nextsize
= p
->bk_nextsize
;
1669 p
->bk_nextsize
->fd_nextsize
= p
->fd_nextsize
;
1677 All remainders from chunk splits, as well as all returned chunks,
1678 are first placed in the "unsorted" bin. They are then placed
1679 in regular bins after malloc gives them ONE chance to be used before
1680 binning. So, basically, the unsorted_chunks list acts as a queue,
1681 with chunks being placed on it in free (and malloc_consolidate),
1682 and taken off (to be either used or placed in bins) in malloc.
1684 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1685 does not have to be taken into account in size comparisons.
1688 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1689 #define unsorted_chunks(M) (bin_at (M, 1))
1694 The top-most available chunk (i.e., the one bordering the end of
1695 available memory) is treated specially. It is never included in
1696 any bin, is used only if no other chunk is available, and is
1697 released back to the system if it is very large (see
1698 M_TRIM_THRESHOLD). Because top initially
1699 points to its own bin with initial zero size, thus forcing
1700 extension on the first malloc request, we avoid having any special
1701 code in malloc to check whether it even exists yet. But we still
1702 need to do so when getting memory from system, so we make
1703 initial_top treat the bin as a legal but unusable chunk during the
1704 interval between initialization and the first call to
1705 sysmalloc. (This is somewhat delicate, since it relies on
1706 the 2 preceding words to be zero during this interval as well.)
1709 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1710 #define initial_top(M) (unsorted_chunks (M))
1715 To help compensate for the large number of bins, a one-level index
1716 structure is used for bin-by-bin searching. `binmap' is a
1717 bitvector recording whether bins are definitely empty so they can
1718 be skipped over during during traversals. The bits are NOT always
1719 cleared as soon as bins are empty, but instead only
1720 when they are noticed to be empty during traversal in malloc.
1723 /* Conservatively use 32 bits per map word, even if on 64bit system */
1724 #define BINMAPSHIFT 5
1725 #define BITSPERMAP (1U << BINMAPSHIFT)
1726 #define BINMAPSIZE (NBINS / BITSPERMAP)
1728 #define idx2block(i) ((i) >> BINMAPSHIFT)
1729 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1731 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1732 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1733 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
1738 An array of lists holding recently freed small chunks. Fastbins
1739 are not doubly linked. It is faster to single-link them, and
1740 since chunks are never removed from the middles of these lists,
1741 double linking is not necessary. Also, unlike regular bins, they
1742 are not even processed in FIFO order (they use faster LIFO) since
1743 ordering doesn't much matter in the transient contexts in which
1744 fastbins are normally used.
1746 Chunks in fastbins keep their inuse bit set, so they cannot
1747 be consolidated with other free chunks. malloc_consolidate
1748 releases all chunks in fastbins and consolidates them with
1752 typedef struct malloc_chunk
*mfastbinptr
;
1753 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1755 /* offset 2 to use otherwise unindexable first 2 bins */
1756 #define fastbin_index(sz) \
1757 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1760 /* The maximum fastbin request size we support */
1761 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1763 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
1766 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1767 that triggers automatic consolidation of possibly-surrounding
1768 fastbin chunks. This is a heuristic, so the exact value should not
1769 matter too much. It is defined at half the default trim threshold as a
1770 compromise heuristic to only attempt consolidation if it is likely
1771 to lead to trimming. However, it is not dynamically tunable, since
1772 consolidation reduces fragmentation surrounding large chunks even
1773 if trimming is not used.
1776 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1779 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1780 regions. Otherwise, contiguity is exploited in merging together,
1781 when possible, results from consecutive MORECORE calls.
1783 The initial value comes from MORECORE_CONTIGUOUS, but is
1784 changed dynamically if mmap is ever used as an sbrk substitute.
1787 #define NONCONTIGUOUS_BIT (2U)
1789 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1790 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1791 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1792 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1794 /* Maximum size of memory handled in fastbins. */
1795 static INTERNAL_SIZE_T global_max_fast
;
1798 Set value of max_fast.
1799 Use impossibly small value if 0.
1800 Precondition: there are no existing fastbin chunks in the main arena.
1801 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1802 before changing max_fast. Note other arenas will leak their fast bin
1803 entries if max_fast is reduced.
1806 #define set_max_fast(s) \
1807 global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
1808 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1810 static inline INTERNAL_SIZE_T
1813 /* Tell the GCC optimizers that global_max_fast is never larger
1814 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1815 _int_malloc after constant propagation of the size parameter.
1816 (The code never executes because malloc preserves the
1817 global_max_fast invariant, but the optimizers may not recognize
1819 if (global_max_fast
> MAX_FAST_SIZE
)
1820 __builtin_unreachable ();
1821 return global_max_fast
;
1825 ----------- Internal state representation and initialization -----------
1829 have_fastchunks indicates that there are probably some fastbin chunks.
1830 It is set true on entering a chunk into any fastbin, and cleared early in
1831 malloc_consolidate. The value is approximate since it may be set when there
1832 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1833 available. Given it's sole purpose is to reduce number of redundant calls to
1834 malloc_consolidate, it does not affect correctness. As a result we can safely
1835 use relaxed atomic accesses.
1841 /* Serialize access. */
1842 __libc_lock_define (, mutex
);
1844 /* Flags (formerly in max_fast). */
1847 /* Set if the fastbin chunks contain recently inserted free blocks. */
1848 /* Note this is a bool but not all targets support atomics on booleans. */
1849 int have_fastchunks
;
1852 mfastbinptr fastbinsY
[NFASTBINS
];
1854 /* Base of the topmost chunk -- not otherwise kept in a bin */
1857 /* The remainder from the most recent split of a small request */
1858 mchunkptr last_remainder
;
1860 /* Normal bins packed as described above */
1861 mchunkptr bins
[NBINS
* 2 - 2];
1863 /* Bitmap of bins */
1864 unsigned int binmap
[BINMAPSIZE
];
1867 struct malloc_state
*next
;
1869 /* Linked list for free arenas. Access to this field is serialized
1870 by free_list_lock in arena.c. */
1871 struct malloc_state
*next_free
;
1873 /* Number of threads attached to this arena. 0 if the arena is on
1874 the free list. Access to this field is serialized by
1875 free_list_lock in arena.c. */
1876 INTERNAL_SIZE_T attached_threads
;
1878 /* Memory allocated from the system in this arena. */
1879 INTERNAL_SIZE_T system_mem
;
1880 INTERNAL_SIZE_T max_system_mem
;
1885 /* Tunable parameters */
1886 unsigned long trim_threshold
;
1887 INTERNAL_SIZE_T top_pad
;
1888 INTERNAL_SIZE_T mmap_threshold
;
1889 INTERNAL_SIZE_T arena_test
;
1890 INTERNAL_SIZE_T arena_max
;
1892 /* Memory map support */
1896 /* the mmap_threshold is dynamic, until the user sets
1897 it manually, at which point we need to disable any
1898 dynamic behavior. */
1899 int no_dyn_threshold
;
1902 INTERNAL_SIZE_T mmapped_mem
;
1903 INTERNAL_SIZE_T max_mmapped_mem
;
1905 /* First address handed out by MORECORE/sbrk. */
1909 /* Maximum number of buckets to use. */
1911 size_t tcache_max_bytes
;
1912 /* Maximum number of chunks in each bucket. */
1913 size_t tcache_count
;
1914 /* Maximum number of chunks to remove from the unsorted list, which
1915 aren't used to prefill the cache. */
1916 size_t tcache_unsorted_limit
;
1920 /* There are several instances of this struct ("arenas") in this
1921 malloc. If you are adapting this malloc in a way that does NOT use
1922 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1923 before using. This malloc relies on the property that malloc_state
1924 is initialized to all zeroes (as is true of C statics). */
1926 static struct malloc_state main_arena
=
1928 .mutex
= _LIBC_LOCK_INITIALIZER
,
1929 .next
= &main_arena
,
1930 .attached_threads
= 1
1933 /* These variables are used for undumping support. Chunked are marked
1934 as using mmap, but we leave them alone if they fall into this
1935 range. NB: The chunk size for these chunks only includes the
1936 initial size field (of SIZE_SZ bytes), there is no trailing size
1937 field (unlike with regular mmapped chunks). */
1938 static mchunkptr dumped_main_arena_start
; /* Inclusive. */
1939 static mchunkptr dumped_main_arena_end
; /* Exclusive. */
1941 /* True if the pointer falls into the dumped arena. Use this after
1942 chunk_is_mmapped indicates a chunk is mmapped. */
1943 #define DUMPED_MAIN_ARENA_CHUNK(p) \
1944 ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
1946 /* There is only one instance of the malloc parameters. */
1948 static struct malloc_par mp_
=
1950 .top_pad
= DEFAULT_TOP_PAD
,
1951 .n_mmaps_max
= DEFAULT_MMAP_MAX
,
1952 .mmap_threshold
= DEFAULT_MMAP_THRESHOLD
,
1953 .trim_threshold
= DEFAULT_TRIM_THRESHOLD
,
1954 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1955 .arena_test
= NARENAS_FROM_NCORES (1)
1958 .tcache_count
= TCACHE_FILL_COUNT
,
1959 .tcache_bins
= TCACHE_MAX_BINS
,
1960 .tcache_max_bytes
= tidx2usize (TCACHE_MAX_BINS
-1),
1961 .tcache_unsorted_limit
= 0 /* No limit. */
1966 Initialize a malloc_state struct.
1968 This is called from ptmalloc_init () or from _int_new_arena ()
1969 when creating a new arena.
1973 malloc_init_state (mstate av
)
1978 /* Establish circular links for normal bins */
1979 for (i
= 1; i
< NBINS
; ++i
)
1981 bin
= bin_at (av
, i
);
1982 bin
->fd
= bin
->bk
= bin
;
1985 #if MORECORE_CONTIGUOUS
1986 if (av
!= &main_arena
)
1988 set_noncontiguous (av
);
1989 if (av
== &main_arena
)
1990 set_max_fast (DEFAULT_MXFAST
);
1991 atomic_store_relaxed (&av
->have_fastchunks
, false);
1993 av
->top
= initial_top (av
);
1997 Other internal utilities operating on mstates
2000 static void *sysmalloc (INTERNAL_SIZE_T
, mstate
);
2001 static int systrim (size_t, mstate
);
2002 static void malloc_consolidate (mstate
);
2005 /* -------------- Early definitions for debugging hooks ---------------- */
2007 /* Define and initialize the hook variables. These weak definitions must
2008 appear before any use of the variables in a function (arena.c uses one). */
2009 #ifndef weak_variable
2010 /* In GNU libc we want the hook variables to be weak definitions to
2011 avoid a problem with Emacs. */
2012 # define weak_variable weak_function
2015 /* Forward declarations. */
2016 static void *malloc_hook_ini (size_t sz
,
2017 const void *caller
) __THROW
;
2018 static void *realloc_hook_ini (void *ptr
, size_t sz
,
2019 const void *caller
) __THROW
;
2020 static void *memalign_hook_ini (size_t alignment
, size_t sz
,
2021 const void *caller
) __THROW
;
2023 #if HAVE_MALLOC_INIT_HOOK
2024 void (*__malloc_initialize_hook
) (void) __attribute__ ((nocommon
));
2025 compat_symbol (libc
, __malloc_initialize_hook
,
2026 __malloc_initialize_hook
, GLIBC_2_0
);
2029 void weak_variable (*__free_hook
) (void *__ptr
,
2030 const void *) = NULL
;
2031 void *weak_variable (*__malloc_hook
)
2032 (size_t __size
, const void *) = malloc_hook_ini
;
2033 void *weak_variable (*__realloc_hook
)
2034 (void *__ptr
, size_t __size
, const void *)
2036 void *weak_variable (*__memalign_hook
)
2037 (size_t __alignment
, size_t __size
, const void *)
2038 = memalign_hook_ini
;
2039 void weak_variable (*__after_morecore_hook
) (void) = NULL
;
2041 /* This function is called from the arena shutdown hook, to free the
2042 thread cache (if it exists). */
2043 static void tcache_thread_shutdown (void);
2045 /* ------------------ Testing support ----------------------------------*/
2047 static int perturb_byte
;
2050 alloc_perturb (char *p
, size_t n
)
2052 if (__glibc_unlikely (perturb_byte
))
2053 memset (p
, perturb_byte
^ 0xff, n
);
2057 free_perturb (char *p
, size_t n
)
2059 if (__glibc_unlikely (perturb_byte
))
2060 memset (p
, perturb_byte
, n
);
2065 #include <stap-probe.h>
2067 /* ------------------- Support for multiple arenas -------------------- */
2073 These routines make a number of assertions about the states
2074 of data structures that should be true at all times. If any
2075 are not true, it's very likely that a user program has somehow
2076 trashed memory. (It's also possible that there is a coding error
2077 in malloc. In which case, please report it!)
2082 # define check_chunk(A, P)
2083 # define check_free_chunk(A, P)
2084 # define check_inuse_chunk(A, P)
2085 # define check_remalloced_chunk(A, P, N)
2086 # define check_malloced_chunk(A, P, N)
2087 # define check_malloc_state(A)
2091 # define check_chunk(A, P) do_check_chunk (A, P)
2092 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
2093 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
2094 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
2095 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
2096 # define check_malloc_state(A) do_check_malloc_state (A)
2099 Properties of all chunks
2103 do_check_chunk (mstate av
, mchunkptr p
)
2105 unsigned long sz
= chunksize (p
);
2106 /* min and max possible addresses assuming contiguous allocation */
2107 char *max_address
= (char *) (av
->top
) + chunksize (av
->top
);
2108 char *min_address
= max_address
- av
->system_mem
;
2110 if (!chunk_is_mmapped (p
))
2112 /* Has legal address ... */
2115 if (contiguous (av
))
2117 assert (((char *) p
) >= min_address
);
2118 assert (((char *) p
+ sz
) <= ((char *) (av
->top
)));
2123 /* top size is always at least MINSIZE */
2124 assert ((unsigned long) (sz
) >= MINSIZE
);
2125 /* top predecessor always marked inuse */
2126 assert (prev_inuse (p
));
2129 else if (!DUMPED_MAIN_ARENA_CHUNK (p
))
2131 /* address is outside main heap */
2132 if (contiguous (av
) && av
->top
!= initial_top (av
))
2134 assert (((char *) p
) < min_address
|| ((char *) p
) >= max_address
);
2136 /* chunk is page-aligned */
2137 assert (((prev_size (p
) + sz
) & (GLRO (dl_pagesize
) - 1)) == 0);
2138 /* mem is aligned */
2139 assert (aligned_OK (chunk2mem (p
)));
2144 Properties of free chunks
2148 do_check_free_chunk (mstate av
, mchunkptr p
)
2150 INTERNAL_SIZE_T sz
= chunksize_nomask (p
) & ~(PREV_INUSE
| NON_MAIN_ARENA
);
2151 mchunkptr next
= chunk_at_offset (p
, sz
);
2153 do_check_chunk (av
, p
);
2155 /* Chunk must claim to be free ... */
2156 assert (!inuse (p
));
2157 assert (!chunk_is_mmapped (p
));
2159 /* Unless a special marker, must have OK fields */
2160 if ((unsigned long) (sz
) >= MINSIZE
)
2162 assert ((sz
& MALLOC_ALIGN_MASK
) == 0);
2163 assert (aligned_OK (chunk2mem (p
)));
2164 /* ... matching footer field */
2165 assert (prev_size (next_chunk (p
)) == sz
);
2166 /* ... and is fully consolidated */
2167 assert (prev_inuse (p
));
2168 assert (next
== av
->top
|| inuse (next
));
2170 /* ... and has minimally sane links */
2171 assert (p
->fd
->bk
== p
);
2172 assert (p
->bk
->fd
== p
);
2174 else /* markers are always of size SIZE_SZ */
2175 assert (sz
== SIZE_SZ
);
2179 Properties of inuse chunks
2183 do_check_inuse_chunk (mstate av
, mchunkptr p
)
2187 do_check_chunk (av
, p
);
2189 if (chunk_is_mmapped (p
))
2190 return; /* mmapped chunks have no next/prev */
2192 /* Check whether it claims to be in use ... */
2195 next
= next_chunk (p
);
2197 /* ... and is surrounded by OK chunks.
2198 Since more things can be checked with free chunks than inuse ones,
2199 if an inuse chunk borders them and debug is on, it's worth doing them.
2201 if (!prev_inuse (p
))
2203 /* Note that we cannot even look at prev unless it is not inuse */
2204 mchunkptr prv
= prev_chunk (p
);
2205 assert (next_chunk (prv
) == p
);
2206 do_check_free_chunk (av
, prv
);
2209 if (next
== av
->top
)
2211 assert (prev_inuse (next
));
2212 assert (chunksize (next
) >= MINSIZE
);
2214 else if (!inuse (next
))
2215 do_check_free_chunk (av
, next
);
2219 Properties of chunks recycled from fastbins
2223 do_check_remalloced_chunk (mstate av
, mchunkptr p
, INTERNAL_SIZE_T s
)
2225 INTERNAL_SIZE_T sz
= chunksize_nomask (p
) & ~(PREV_INUSE
| NON_MAIN_ARENA
);
2227 if (!chunk_is_mmapped (p
))
2229 assert (av
== arena_for_chunk (p
));
2230 if (chunk_main_arena (p
))
2231 assert (av
== &main_arena
);
2233 assert (av
!= &main_arena
);
2236 do_check_inuse_chunk (av
, p
);
2238 /* Legal size ... */
2239 assert ((sz
& MALLOC_ALIGN_MASK
) == 0);
2240 assert ((unsigned long) (sz
) >= MINSIZE
);
2241 /* ... and alignment */
2242 assert (aligned_OK (chunk2mem (p
)));
2243 /* chunk is less than MINSIZE more than request */
2244 assert ((long) (sz
) - (long) (s
) >= 0);
2245 assert ((long) (sz
) - (long) (s
+ MINSIZE
) < 0);
2249 Properties of nonrecycled chunks at the point they are malloced
2253 do_check_malloced_chunk (mstate av
, mchunkptr p
, INTERNAL_SIZE_T s
)
2255 /* same as recycled case ... */
2256 do_check_remalloced_chunk (av
, p
, s
);
2259 ... plus, must obey implementation invariant that prev_inuse is
2260 always true of any allocated chunk; i.e., that each allocated
2261 chunk borders either a previously allocated and still in-use
2262 chunk, or the base of its memory arena. This is ensured
2263 by making all allocations from the `lowest' part of any found
2264 chunk. This does not necessarily hold however for chunks
2265 recycled via fastbins.
2268 assert (prev_inuse (p
));
2273 Properties of malloc_state.
2275 This may be useful for debugging malloc, as well as detecting user
2276 programmer errors that somehow write into malloc_state.
2278 If you are extending or experimenting with this malloc, you can
2279 probably figure out how to hack this routine to print out or
2280 display chunk addresses, sizes, bins, and other instrumentation.
2284 do_check_malloc_state (mstate av
)
2291 INTERNAL_SIZE_T size
;
2292 unsigned long total
= 0;
2295 /* internal size_t must be no wider than pointer type */
2296 assert (sizeof (INTERNAL_SIZE_T
) <= sizeof (char *));
2298 /* alignment is a power of 2 */
2299 assert ((MALLOC_ALIGNMENT
& (MALLOC_ALIGNMENT
- 1)) == 0);
2301 /* Check the arena is initialized. */
2302 assert (av
->top
!= 0);
2304 /* No memory has been allocated yet, so doing more tests is not possible. */
2305 if (av
->top
== initial_top (av
))
2308 /* pagesize is a power of 2 */
2309 assert (powerof2(GLRO (dl_pagesize
)));
2311 /* A contiguous main_arena is consistent with sbrk_base. */
2312 if (av
== &main_arena
&& contiguous (av
))
2313 assert ((char *) mp_
.sbrk_base
+ av
->system_mem
==
2314 (char *) av
->top
+ chunksize (av
->top
));
2316 /* properties of fastbins */
2318 /* max_fast is in allowed range */
2319 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE
));
2321 max_fast_bin
= fastbin_index (get_max_fast ());
2323 for (i
= 0; i
< NFASTBINS
; ++i
)
2325 p
= fastbin (av
, i
);
2327 /* The following test can only be performed for the main arena.
2328 While mallopt calls malloc_consolidate to get rid of all fast
2329 bins (especially those larger than the new maximum) this does
2330 only happen for the main arena. Trying to do this for any
2331 other arena would mean those arenas have to be locked and
2332 malloc_consolidate be called for them. This is excessive. And
2333 even if this is acceptable to somebody it still cannot solve
2334 the problem completely since if the arena is locked a
2335 concurrent malloc call might create a new arena which then
2336 could use the newly invalid fast bins. */
2338 /* all bins past max_fast are empty */
2339 if (av
== &main_arena
&& i
> max_fast_bin
)
2344 if (__glibc_unlikely (misaligned_chunk (p
)))
2345 malloc_printerr ("do_check_malloc_state(): "
2346 "unaligned fastbin chunk detected");
2347 /* each chunk claims to be inuse */
2348 do_check_inuse_chunk (av
, p
);
2349 total
+= chunksize (p
);
2350 /* chunk belongs in this bin */
2351 assert (fastbin_index (chunksize (p
)) == i
);
2352 p
= REVEAL_PTR (p
->fd
);
2356 /* check normal bins */
2357 for (i
= 1; i
< NBINS
; ++i
)
2361 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2364 unsigned int binbit
= get_binmap (av
, i
);
2365 int empty
= last (b
) == b
;
2372 for (p
= last (b
); p
!= b
; p
= p
->bk
)
2374 /* each chunk claims to be free */
2375 do_check_free_chunk (av
, p
);
2376 size
= chunksize (p
);
2380 /* chunk belongs in bin */
2381 idx
= bin_index (size
);
2383 /* lists are sorted */
2384 assert (p
->bk
== b
||
2385 (unsigned long) chunksize (p
->bk
) >= (unsigned long) chunksize (p
));
2387 if (!in_smallbin_range (size
))
2389 if (p
->fd_nextsize
!= NULL
)
2391 if (p
->fd_nextsize
== p
)
2392 assert (p
->bk_nextsize
== p
);
2395 if (p
->fd_nextsize
== first (b
))
2396 assert (chunksize (p
) < chunksize (p
->fd_nextsize
));
2398 assert (chunksize (p
) > chunksize (p
->fd_nextsize
));
2401 assert (chunksize (p
) > chunksize (p
->bk_nextsize
));
2403 assert (chunksize (p
) < chunksize (p
->bk_nextsize
));
2407 assert (p
->bk_nextsize
== NULL
);
2410 else if (!in_smallbin_range (size
))
2411 assert (p
->fd_nextsize
== NULL
&& p
->bk_nextsize
== NULL
);
2412 /* chunk is followed by a legal chain of inuse chunks */
2413 for (q
= next_chunk (p
);
2414 (q
!= av
->top
&& inuse (q
) &&
2415 (unsigned long) (chunksize (q
)) >= MINSIZE
);
2417 do_check_inuse_chunk (av
, q
);
2421 /* top chunk is OK */
2422 check_chunk (av
, av
->top
);
2427 /* ----------------- Support for debugging hooks -------------------- */
2431 /* ----------- Routines dealing with system allocation -------------- */
2434 sysmalloc handles malloc cases requiring more memory from the system.
2435 On entry, it is assumed that av->top does not have enough
2436 space to service request for nb bytes, thus requiring that av->top
2437 be extended or replaced.
2441 sysmalloc (INTERNAL_SIZE_T nb
, mstate av
)
2443 mchunkptr old_top
; /* incoming value of av->top */
2444 INTERNAL_SIZE_T old_size
; /* its size */
2445 char *old_end
; /* its end address */
2447 long size
; /* arg to first MORECORE or mmap call */
2448 char *brk
; /* return value from MORECORE */
2450 long correction
; /* arg to 2nd MORECORE call */
2451 char *snd_brk
; /* 2nd return val */
2453 INTERNAL_SIZE_T front_misalign
; /* unusable bytes at front of new space */
2454 INTERNAL_SIZE_T end_misalign
; /* partial page left at end of new space */
2455 char *aligned_brk
; /* aligned offset into brk */
2457 mchunkptr p
; /* the allocated/returned chunk */
2458 mchunkptr remainder
; /* remainder from allocation */
2459 unsigned long remainder_size
; /* its size */
2462 size_t pagesize
= GLRO (dl_pagesize
);
2463 bool tried_mmap
= false;
2467 If have mmap, and the request size meets the mmap threshold, and
2468 the system supports mmap, and there are few enough currently
2469 allocated mmapped regions, try to directly map this request
2470 rather than expanding top.
2474 || ((unsigned long) (nb
) >= (unsigned long) (mp_
.mmap_threshold
)
2475 && (mp_
.n_mmaps
< mp_
.n_mmaps_max
)))
2477 char *mm
; /* return value from mmap call*/
2481 Round up size to nearest page. For mmapped chunks, the overhead
2482 is one SIZE_SZ unit larger than for normal chunks, because there
2483 is no following chunk whose prev_size field could be used.
2485 See the front_misalign handling below, for glibc there is no
2486 need for further alignments unless we have have high alignment.
2488 if (MALLOC_ALIGNMENT
== CHUNK_HDR_SZ
)
2489 size
= ALIGN_UP (nb
+ SIZE_SZ
, pagesize
);
2491 size
= ALIGN_UP (nb
+ SIZE_SZ
+ MALLOC_ALIGN_MASK
, pagesize
);
2494 /* Don't try if size wraps around 0 */
2495 if ((unsigned long) (size
) > (unsigned long) (nb
))
2497 mm
= (char *) (MMAP (0, size
,
2498 mtag_mmap_flags
| PROT_READ
| PROT_WRITE
, 0));
2500 if (mm
!= MAP_FAILED
)
2503 The offset to the start of the mmapped region is stored
2504 in the prev_size field of the chunk. This allows us to adjust
2505 returned start address to meet alignment requirements here
2506 and in memalign(), and still be able to compute proper
2507 address argument for later munmap in free() and realloc().
2510 if (MALLOC_ALIGNMENT
== CHUNK_HDR_SZ
)
2512 /* For glibc, chunk2mem increases the address by
2513 CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
2514 CHUNK_HDR_SZ-1. Each mmap'ed area is page
2515 aligned and therefore definitely
2516 MALLOC_ALIGN_MASK-aligned. */
2517 assert (((INTERNAL_SIZE_T
) chunk2mem (mm
) & MALLOC_ALIGN_MASK
) == 0);
2521 front_misalign
= (INTERNAL_SIZE_T
) chunk2mem (mm
) & MALLOC_ALIGN_MASK
;
2522 if (front_misalign
> 0)
2524 correction
= MALLOC_ALIGNMENT
- front_misalign
;
2525 p
= (mchunkptr
) (mm
+ correction
);
2526 set_prev_size (p
, correction
);
2527 set_head (p
, (size
- correction
) | IS_MMAPPED
);
2532 set_prev_size (p
, 0);
2533 set_head (p
, size
| IS_MMAPPED
);
2536 /* update statistics */
2538 int new = atomic_exchange_and_add (&mp_
.n_mmaps
, 1) + 1;
2539 atomic_max (&mp_
.max_n_mmaps
, new);
2542 sum
= atomic_exchange_and_add (&mp_
.mmapped_mem
, size
) + size
;
2543 atomic_max (&mp_
.max_mmapped_mem
, sum
);
2545 check_chunk (av
, p
);
2547 return chunk2mem (p
);
2552 /* There are no usable arenas and mmap also failed. */
2556 /* Record incoming configuration of top */
2559 old_size
= chunksize (old_top
);
2560 old_end
= (char *) (chunk_at_offset (old_top
, old_size
));
2562 brk
= snd_brk
= (char *) (MORECORE_FAILURE
);
2565 If not the first time through, we require old_size to be
2566 at least MINSIZE and to have prev_inuse set.
2569 assert ((old_top
== initial_top (av
) && old_size
== 0) ||
2570 ((unsigned long) (old_size
) >= MINSIZE
&&
2571 prev_inuse (old_top
) &&
2572 ((unsigned long) old_end
& (pagesize
- 1)) == 0));
2574 /* Precondition: not enough current space to satisfy nb request */
2575 assert ((unsigned long) (old_size
) < (unsigned long) (nb
+ MINSIZE
));
2578 if (av
!= &main_arena
)
2580 heap_info
*old_heap
, *heap
;
2581 size_t old_heap_size
;
2583 /* First try to extend the current heap. */
2584 old_heap
= heap_for_ptr (old_top
);
2585 old_heap_size
= old_heap
->size
;
2586 if ((long) (MINSIZE
+ nb
- old_size
) > 0
2587 && grow_heap (old_heap
, MINSIZE
+ nb
- old_size
) == 0)
2589 av
->system_mem
+= old_heap
->size
- old_heap_size
;
2590 set_head (old_top
, (((char *) old_heap
+ old_heap
->size
) - (char *) old_top
)
2593 else if ((heap
= new_heap (nb
+ (MINSIZE
+ sizeof (*heap
)), mp_
.top_pad
)))
2595 /* Use a newly allocated heap. */
2597 heap
->prev
= old_heap
;
2598 av
->system_mem
+= heap
->size
;
2599 /* Set up the new top. */
2600 top (av
) = chunk_at_offset (heap
, sizeof (*heap
));
2601 set_head (top (av
), (heap
->size
- sizeof (*heap
)) | PREV_INUSE
);
2603 /* Setup fencepost and free the old top chunk with a multiple of
2604 MALLOC_ALIGNMENT in size. */
2605 /* The fencepost takes at least MINSIZE bytes, because it might
2606 become the top chunk again later. Note that a footer is set
2607 up, too, although the chunk is marked in use. */
2608 old_size
= (old_size
- MINSIZE
) & ~MALLOC_ALIGN_MASK
;
2609 set_head (chunk_at_offset (old_top
, old_size
+ CHUNK_HDR_SZ
),
2611 if (old_size
>= MINSIZE
)
2613 set_head (chunk_at_offset (old_top
, old_size
),
2614 CHUNK_HDR_SZ
| PREV_INUSE
);
2615 set_foot (chunk_at_offset (old_top
, old_size
), CHUNK_HDR_SZ
);
2616 set_head (old_top
, old_size
| PREV_INUSE
| NON_MAIN_ARENA
);
2617 _int_free (av
, old_top
, 1);
2621 set_head (old_top
, (old_size
+ CHUNK_HDR_SZ
) | PREV_INUSE
);
2622 set_foot (old_top
, (old_size
+ CHUNK_HDR_SZ
));
2625 else if (!tried_mmap
)
2626 /* We can at least try to use to mmap memory. */
2629 else /* av == main_arena */
2632 { /* Request enough space for nb + pad + overhead */
2633 size
= nb
+ mp_
.top_pad
+ MINSIZE
;
2636 If contiguous, we can subtract out existing space that we hope to
2637 combine with new space. We add it back later only if
2638 we don't actually get contiguous space.
2641 if (contiguous (av
))
2645 Round to a multiple of page size.
2646 If MORECORE is not contiguous, this ensures that we only call it
2647 with whole-page arguments. And if MORECORE is contiguous and
2648 this is not first time through, this preserves page-alignment of
2649 previous calls. Otherwise, we correct to page-align below.
2652 size
= ALIGN_UP (size
, pagesize
);
2655 Don't try to call MORECORE if argument is so big as to appear
2656 negative. Note that since mmap takes size_t arg, it may succeed
2657 below even if we cannot call MORECORE.
2662 brk
= (char *) (MORECORE (size
));
2663 LIBC_PROBE (memory_sbrk_more
, 2, brk
, size
);
2666 if (brk
!= (char *) (MORECORE_FAILURE
))
2668 /* Call the `morecore' hook if necessary. */
2669 void (*hook
) (void) = atomic_forced_read (__after_morecore_hook
);
2670 if (__builtin_expect (hook
!= NULL
, 0))
2676 If have mmap, try using it as a backup when MORECORE fails or
2677 cannot be used. This is worth doing on systems that have "holes" in
2678 address space, so sbrk cannot extend to give contiguous space, but
2679 space is available elsewhere. Note that we ignore mmap max count
2680 and threshold limits, since the space will not be used as a
2681 segregated mmap region.
2684 /* Cannot merge with old top, so add its size back in */
2685 if (contiguous (av
))
2686 size
= ALIGN_UP (size
+ old_size
, pagesize
);
2688 /* If we are relying on mmap as backup, then use larger units */
2689 if ((unsigned long) (size
) < (unsigned long) (MMAP_AS_MORECORE_SIZE
))
2690 size
= MMAP_AS_MORECORE_SIZE
;
2692 /* Don't try if size wraps around 0 */
2693 if ((unsigned long) (size
) > (unsigned long) (nb
))
2695 char *mbrk
= (char *) (MMAP (0, size
,
2696 mtag_mmap_flags
| PROT_READ
| PROT_WRITE
,
2699 if (mbrk
!= MAP_FAILED
)
2701 /* We do not need, and cannot use, another sbrk call to find end */
2703 snd_brk
= brk
+ size
;
2706 Record that we no longer have a contiguous sbrk region.
2707 After the first time mmap is used as backup, we do not
2708 ever rely on contiguous space since this could incorrectly
2711 set_noncontiguous (av
);
2716 if (brk
!= (char *) (MORECORE_FAILURE
))
2718 if (mp_
.sbrk_base
== 0)
2719 mp_
.sbrk_base
= brk
;
2720 av
->system_mem
+= size
;
2723 If MORECORE extends previous space, we can likewise extend top size.
2726 if (brk
== old_end
&& snd_brk
== (char *) (MORECORE_FAILURE
))
2727 set_head (old_top
, (size
+ old_size
) | PREV_INUSE
);
2729 else if (contiguous (av
) && old_size
&& brk
< old_end
)
2730 /* Oops! Someone else killed our space.. Can't touch anything. */
2731 malloc_printerr ("break adjusted to free malloc space");
2734 Otherwise, make adjustments:
2736 * If the first time through or noncontiguous, we need to call sbrk
2737 just to find out where the end of memory lies.
2739 * We need to ensure that all returned chunks from malloc will meet
2742 * If there was an intervening foreign sbrk, we need to adjust sbrk
2743 request size to account for fact that we will not be able to
2744 combine new space with existing space in old_top.
2746 * Almost all systems internally allocate whole pages at a time, in
2747 which case we might as well use the whole last page of request.
2748 So we allocate enough more memory to hit a page boundary now,
2749 which in turn causes future contiguous calls to page-align.
2759 /* handle contiguous cases */
2760 if (contiguous (av
))
2762 /* Count foreign sbrk as system_mem. */
2764 av
->system_mem
+= brk
- old_end
;
2766 /* Guarantee alignment of first new chunk made from this space */
2768 front_misalign
= (INTERNAL_SIZE_T
) chunk2mem (brk
) & MALLOC_ALIGN_MASK
;
2769 if (front_misalign
> 0)
2772 Skip over some bytes to arrive at an aligned position.
2773 We don't need to specially mark these wasted front bytes.
2774 They will never be accessed anyway because
2775 prev_inuse of av->top (and any chunk created from its start)
2776 is always true after initialization.
2779 correction
= MALLOC_ALIGNMENT
- front_misalign
;
2780 aligned_brk
+= correction
;
2784 If this isn't adjacent to existing space, then we will not
2785 be able to merge with old_top space, so must add to 2nd request.
2788 correction
+= old_size
;
2790 /* Extend the end address to hit a page boundary */
2791 end_misalign
= (INTERNAL_SIZE_T
) (brk
+ size
+ correction
);
2792 correction
+= (ALIGN_UP (end_misalign
, pagesize
)) - end_misalign
;
2794 assert (correction
>= 0);
2795 snd_brk
= (char *) (MORECORE (correction
));
2798 If can't allocate correction, try to at least find out current
2799 brk. It might be enough to proceed without failing.
2801 Note that if second sbrk did NOT fail, we assume that space
2802 is contiguous with first sbrk. This is a safe assumption unless
2803 program is multithreaded but doesn't use locks and a foreign sbrk
2804 occurred between our first and second calls.
2807 if (snd_brk
== (char *) (MORECORE_FAILURE
))
2810 snd_brk
= (char *) (MORECORE (0));
2814 /* Call the `morecore' hook if necessary. */
2815 void (*hook
) (void) = atomic_forced_read (__after_morecore_hook
);
2816 if (__builtin_expect (hook
!= NULL
, 0))
2821 /* handle non-contiguous cases */
2824 if (MALLOC_ALIGNMENT
== CHUNK_HDR_SZ
)
2825 /* MORECORE/mmap must correctly align */
2826 assert (((unsigned long) chunk2mem (brk
) & MALLOC_ALIGN_MASK
) == 0);
2829 front_misalign
= (INTERNAL_SIZE_T
) chunk2mem (brk
) & MALLOC_ALIGN_MASK
;
2830 if (front_misalign
> 0)
2833 Skip over some bytes to arrive at an aligned position.
2834 We don't need to specially mark these wasted front bytes.
2835 They will never be accessed anyway because
2836 prev_inuse of av->top (and any chunk created from its start)
2837 is always true after initialization.
2840 aligned_brk
+= MALLOC_ALIGNMENT
- front_misalign
;
2844 /* Find out current end of memory */
2845 if (snd_brk
== (char *) (MORECORE_FAILURE
))
2847 snd_brk
= (char *) (MORECORE (0));
2851 /* Adjust top based on results of second sbrk */
2852 if (snd_brk
!= (char *) (MORECORE_FAILURE
))
2854 av
->top
= (mchunkptr
) aligned_brk
;
2855 set_head (av
->top
, (snd_brk
- aligned_brk
+ correction
) | PREV_INUSE
);
2856 av
->system_mem
+= correction
;
2859 If not the first time through, we either have a
2860 gap due to foreign sbrk or a non-contiguous region. Insert a
2861 double fencepost at old_top to prevent consolidation with space
2862 we don't own. These fenceposts are artificial chunks that are
2863 marked as inuse and are in any case too small to use. We need
2864 two to make sizes and alignments work out.
2870 Shrink old_top to insert fenceposts, keeping size a
2871 multiple of MALLOC_ALIGNMENT. We know there is at least
2872 enough space in old_top to do this.
2874 old_size
= (old_size
- 2 * CHUNK_HDR_SZ
) & ~MALLOC_ALIGN_MASK
;
2875 set_head (old_top
, old_size
| PREV_INUSE
);
2878 Note that the following assignments completely overwrite
2879 old_top when old_size was previously MINSIZE. This is
2880 intentional. We need the fencepost, even if old_top otherwise gets
2883 set_head (chunk_at_offset (old_top
, old_size
),
2884 CHUNK_HDR_SZ
| PREV_INUSE
);
2885 set_head (chunk_at_offset (old_top
,
2886 old_size
+ CHUNK_HDR_SZ
),
2887 CHUNK_HDR_SZ
| PREV_INUSE
);
2889 /* If possible, release the rest. */
2890 if (old_size
>= MINSIZE
)
2892 _int_free (av
, old_top
, 1);
2898 } /* if (av != &main_arena) */
2900 if ((unsigned long) av
->system_mem
> (unsigned long) (av
->max_system_mem
))
2901 av
->max_system_mem
= av
->system_mem
;
2902 check_malloc_state (av
);
2904 /* finally, do the allocation */
2906 size
= chunksize (p
);
2908 /* check that one of the above allocation paths succeeded */
2909 if ((unsigned long) (size
) >= (unsigned long) (nb
+ MINSIZE
))
2911 remainder_size
= size
- nb
;
2912 remainder
= chunk_at_offset (p
, nb
);
2913 av
->top
= remainder
;
2914 set_head (p
, nb
| PREV_INUSE
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
2915 set_head (remainder
, remainder_size
| PREV_INUSE
);
2916 check_malloced_chunk (av
, p
, nb
);
2917 return chunk2mem (p
);
2920 /* catch all failure paths */
2921 __set_errno (ENOMEM
);
2927 systrim is an inverse of sorts to sysmalloc. It gives memory back
2928 to the system (via negative arguments to sbrk) if there is unused
2929 memory at the `high' end of the malloc pool. It is called
2930 automatically by free() when top space exceeds the trim
2931 threshold. It is also called by the public malloc_trim routine. It
2932 returns 1 if it actually released any memory, else 0.
2936 systrim (size_t pad
, mstate av
)
2938 long top_size
; /* Amount of top-most memory */
2939 long extra
; /* Amount to release */
2940 long released
; /* Amount actually released */
2941 char *current_brk
; /* address returned by pre-check sbrk call */
2942 char *new_brk
; /* address returned by post-check sbrk call */
2946 pagesize
= GLRO (dl_pagesize
);
2947 top_size
= chunksize (av
->top
);
2949 top_area
= top_size
- MINSIZE
- 1;
2950 if (top_area
<= pad
)
2953 /* Release in pagesize units and round down to the nearest page. */
2954 extra
= ALIGN_DOWN(top_area
- pad
, pagesize
);
2960 Only proceed if end of memory is where we last set it.
2961 This avoids problems if there were foreign sbrk calls.
2963 current_brk
= (char *) (MORECORE (0));
2964 if (current_brk
== (char *) (av
->top
) + top_size
)
2967 Attempt to release memory. We ignore MORECORE return value,
2968 and instead call again to find out where new end of memory is.
2969 This avoids problems if first call releases less than we asked,
2970 of if failure somehow altered brk value. (We could still
2971 encounter problems if it altered brk in some very bad way,
2972 but the only thing we can do is adjust anyway, which will cause
2973 some downstream failure.)
2977 /* Call the `morecore' hook if necessary. */
2978 void (*hook
) (void) = atomic_forced_read (__after_morecore_hook
);
2979 if (__builtin_expect (hook
!= NULL
, 0))
2981 new_brk
= (char *) (MORECORE (0));
2983 LIBC_PROBE (memory_sbrk_less
, 2, new_brk
, extra
);
2985 if (new_brk
!= (char *) MORECORE_FAILURE
)
2987 released
= (long) (current_brk
- new_brk
);
2991 /* Success. Adjust top. */
2992 av
->system_mem
-= released
;
2993 set_head (av
->top
, (top_size
- released
) | PREV_INUSE
);
2994 check_malloc_state (av
);
3003 munmap_chunk (mchunkptr p
)
3005 size_t pagesize
= GLRO (dl_pagesize
);
3006 INTERNAL_SIZE_T size
= chunksize (p
);
3008 assert (chunk_is_mmapped (p
));
3010 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
3011 main arena. We never free this memory. */
3012 if (DUMPED_MAIN_ARENA_CHUNK (p
))
3015 uintptr_t mem
= (uintptr_t) chunk2mem (p
);
3016 uintptr_t block
= (uintptr_t) p
- prev_size (p
);
3017 size_t total_size
= prev_size (p
) + size
;
3018 /* Unfortunately we have to do the compilers job by hand here. Normally
3019 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3020 page size. But gcc does not recognize the optimization possibility
3021 (in the moment at least) so we combine the two values into one before
3023 if (__glibc_unlikely ((block
| total_size
) & (pagesize
- 1)) != 0
3024 || __glibc_unlikely (!powerof2 (mem
& (pagesize
- 1))))
3025 malloc_printerr ("munmap_chunk(): invalid pointer");
3027 atomic_decrement (&mp_
.n_mmaps
);
3028 atomic_add (&mp_
.mmapped_mem
, -total_size
);
3030 /* If munmap failed the process virtual memory address space is in a
3031 bad shape. Just leave the block hanging around, the process will
3032 terminate shortly anyway since not much can be done. */
3033 __munmap ((char *) block
, total_size
);
3039 mremap_chunk (mchunkptr p
, size_t new_size
)
3041 size_t pagesize
= GLRO (dl_pagesize
);
3042 INTERNAL_SIZE_T offset
= prev_size (p
);
3043 INTERNAL_SIZE_T size
= chunksize (p
);
3046 assert (chunk_is_mmapped (p
));
3048 uintptr_t block
= (uintptr_t) p
- offset
;
3049 uintptr_t mem
= (uintptr_t) chunk2mem(p
);
3050 size_t total_size
= offset
+ size
;
3051 if (__glibc_unlikely ((block
| total_size
) & (pagesize
- 1)) != 0
3052 || __glibc_unlikely (!powerof2 (mem
& (pagesize
- 1))))
3053 malloc_printerr("mremap_chunk(): invalid pointer");
3055 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3056 new_size
= ALIGN_UP (new_size
+ offset
+ SIZE_SZ
, pagesize
);
3058 /* No need to remap if the number of pages does not change. */
3059 if (total_size
== new_size
)
3062 cp
= (char *) __mremap ((char *) block
, total_size
, new_size
,
3065 if (cp
== MAP_FAILED
)
3068 p
= (mchunkptr
) (cp
+ offset
);
3070 assert (aligned_OK (chunk2mem (p
)));
3072 assert (prev_size (p
) == offset
);
3073 set_head (p
, (new_size
- offset
) | IS_MMAPPED
);
3075 INTERNAL_SIZE_T
new;
3076 new = atomic_exchange_and_add (&mp_
.mmapped_mem
, new_size
- size
- offset
)
3077 + new_size
- size
- offset
;
3078 atomic_max (&mp_
.max_mmapped_mem
, new);
3081 #endif /* HAVE_MREMAP */
3083 /*------------------------ Public wrappers. --------------------------------*/
3087 /* We overlay this structure on the user-data portion of a chunk when
3088 the chunk is stored in the per-thread cache. */
3089 typedef struct tcache_entry
3091 struct tcache_entry
*next
;
3092 /* This field exists to detect double frees. */
3093 struct tcache_perthread_struct
*key
;
3096 /* There is one of these for each thread, which contains the
3097 per-thread cache (hence "tcache_perthread_struct"). Keeping
3098 overall size low is mildly important. Note that COUNTS and ENTRIES
3099 are redundant (we could have just counted the linked list each
3100 time), this is for performance reasons. */
3101 typedef struct tcache_perthread_struct
3103 uint16_t counts
[TCACHE_MAX_BINS
];
3104 tcache_entry
*entries
[TCACHE_MAX_BINS
];
3105 } tcache_perthread_struct
;
3107 static __thread
bool tcache_shutting_down
= false;
3108 static __thread tcache_perthread_struct
*tcache
= NULL
;
3110 /* Caller must ensure that we know tc_idx is valid and there's room
3112 static __always_inline
void
3113 tcache_put (mchunkptr chunk
, size_t tc_idx
)
3115 tcache_entry
*e
= (tcache_entry
*) chunk2mem (chunk
);
3117 /* Mark this chunk as "in the tcache" so the test in _int_free will
3118 detect a double free. */
3121 e
->next
= PROTECT_PTR (&e
->next
, tcache
->entries
[tc_idx
]);
3122 tcache
->entries
[tc_idx
] = e
;
3123 ++(tcache
->counts
[tc_idx
]);
3126 /* Caller must ensure that we know tc_idx is valid and there's
3127 available chunks to remove. */
3128 static __always_inline
void *
3129 tcache_get (size_t tc_idx
)
3131 tcache_entry
*e
= tcache
->entries
[tc_idx
];
3132 if (__glibc_unlikely (!aligned_OK (e
)))
3133 malloc_printerr ("malloc(): unaligned tcache chunk detected");
3134 tcache
->entries
[tc_idx
] = REVEAL_PTR (e
->next
);
3135 --(tcache
->counts
[tc_idx
]);
3141 tcache_thread_shutdown (void)
3144 tcache_perthread_struct
*tcache_tmp
= tcache
;
3149 /* Disable the tcache and prevent it from being reinitialized. */
3151 tcache_shutting_down
= true;
3153 /* Free all of the entries and the tcache itself back to the arena
3154 heap for coalescing. */
3155 for (i
= 0; i
< TCACHE_MAX_BINS
; ++i
)
3157 while (tcache_tmp
->entries
[i
])
3159 tcache_entry
*e
= tcache_tmp
->entries
[i
];
3160 if (__glibc_unlikely (!aligned_OK (e
)))
3161 malloc_printerr ("tcache_thread_shutdown(): "
3162 "unaligned tcache chunk detected");
3163 tcache_tmp
->entries
[i
] = REVEAL_PTR (e
->next
);
3168 __libc_free (tcache_tmp
);
3176 const size_t bytes
= sizeof (tcache_perthread_struct
);
3178 if (tcache_shutting_down
)
3181 arena_get (ar_ptr
, bytes
);
3182 victim
= _int_malloc (ar_ptr
, bytes
);
3183 if (!victim
&& ar_ptr
!= NULL
)
3185 ar_ptr
= arena_get_retry (ar_ptr
, bytes
);
3186 victim
= _int_malloc (ar_ptr
, bytes
);
3191 __libc_lock_unlock (ar_ptr
->mutex
);
3193 /* In a low memory situation, we may not be able to allocate memory
3194 - in which case, we just keep trying later. However, we
3195 typically do this very early, so either there is sufficient
3196 memory, or there isn't enough memory to do non-trivial
3197 allocations anyway. */
3200 tcache
= (tcache_perthread_struct
*) victim
;
3201 memset (tcache
, 0, sizeof (tcache_perthread_struct
));
3206 # define MAYBE_INIT_TCACHE() \
3207 if (__glibc_unlikely (tcache == NULL)) \
3210 #else /* !USE_TCACHE */
3211 # define MAYBE_INIT_TCACHE()
3214 tcache_thread_shutdown (void)
3216 /* Nothing to do if there is no thread cache. */
3219 #endif /* !USE_TCACHE */
3222 __libc_malloc (size_t bytes
)
3227 _Static_assert (PTRDIFF_MAX
<= SIZE_MAX
/ 2,
3228 "PTRDIFF_MAX is not more than half of SIZE_MAX");
3230 void *(*hook
) (size_t, const void *)
3231 = atomic_forced_read (__malloc_hook
);
3232 if (__builtin_expect (hook
!= NULL
, 0))
3233 return (*hook
)(bytes
, RETURN_ADDRESS (0));
3235 /* int_free also calls request2size, be careful to not pad twice. */
3237 if (!checked_request2size (bytes
, &tbytes
))
3239 __set_errno (ENOMEM
);
3242 size_t tc_idx
= csize2tidx (tbytes
);
3244 MAYBE_INIT_TCACHE ();
3246 DIAG_PUSH_NEEDS_COMMENT
;
3247 if (tc_idx
< mp_
.tcache_bins
3249 && tcache
->counts
[tc_idx
] > 0)
3251 victim
= tcache_get (tc_idx
);
3252 return tag_new_usable (victim
);
3254 DIAG_POP_NEEDS_COMMENT
;
3257 if (SINGLE_THREAD_P
)
3259 victim
= tag_new_usable (_int_malloc (&main_arena
, bytes
));
3260 assert (!victim
|| chunk_is_mmapped (mem2chunk (victim
)) ||
3261 &main_arena
== arena_for_chunk (mem2chunk (victim
)));
3265 arena_get (ar_ptr
, bytes
);
3267 victim
= _int_malloc (ar_ptr
, bytes
);
3268 /* Retry with another arena only if we were able to find a usable arena
3270 if (!victim
&& ar_ptr
!= NULL
)
3272 LIBC_PROBE (memory_malloc_retry
, 1, bytes
);
3273 ar_ptr
= arena_get_retry (ar_ptr
, bytes
);
3274 victim
= _int_malloc (ar_ptr
, bytes
);
3278 __libc_lock_unlock (ar_ptr
->mutex
);
3280 victim
= tag_new_usable (victim
);
3282 assert (!victim
|| chunk_is_mmapped (mem2chunk (victim
)) ||
3283 ar_ptr
== arena_for_chunk (mem2chunk (victim
)));
3286 libc_hidden_def (__libc_malloc
)
3289 __libc_free (void *mem
)
3292 mchunkptr p
; /* chunk corresponding to mem */
3294 void (*hook
) (void *, const void *)
3295 = atomic_forced_read (__free_hook
);
3296 if (__builtin_expect (hook
!= NULL
, 0))
3298 (*hook
)(mem
, RETURN_ADDRESS (0));
3302 if (mem
== 0) /* free(0) has no effect */
3305 /* Quickly check that the freed pointer matches the tag for the memory.
3306 This gives a useful double-free detection. */
3307 if (__glibc_unlikely (mtag_enabled
))
3308 *(volatile char *)mem
;
3312 p
= mem2chunk (mem
);
3314 if (chunk_is_mmapped (p
)) /* release mmapped memory. */
3316 /* See if the dynamic brk/mmap threshold needs adjusting.
3317 Dumped fake mmapped chunks do not affect the threshold. */
3318 if (!mp_
.no_dyn_threshold
3319 && chunksize_nomask (p
) > mp_
.mmap_threshold
3320 && chunksize_nomask (p
) <= DEFAULT_MMAP_THRESHOLD_MAX
3321 && !DUMPED_MAIN_ARENA_CHUNK (p
))
3323 mp_
.mmap_threshold
= chunksize (p
);
3324 mp_
.trim_threshold
= 2 * mp_
.mmap_threshold
;
3325 LIBC_PROBE (memory_mallopt_free_dyn_thresholds
, 2,
3326 mp_
.mmap_threshold
, mp_
.trim_threshold
);
3332 MAYBE_INIT_TCACHE ();
3334 /* Mark the chunk as belonging to the library again. */
3335 (void)tag_region (chunk2mem (p
), memsize (p
));
3337 ar_ptr
= arena_for_chunk (p
);
3338 _int_free (ar_ptr
, p
, 0);
3343 libc_hidden_def (__libc_free
)
3346 __libc_realloc (void *oldmem
, size_t bytes
)
3349 INTERNAL_SIZE_T nb
; /* padded request size */
3351 void *newp
; /* chunk to return */
3353 void *(*hook
) (void *, size_t, const void *) =
3354 atomic_forced_read (__realloc_hook
);
3355 if (__builtin_expect (hook
!= NULL
, 0))
3356 return (*hook
)(oldmem
, bytes
, RETURN_ADDRESS (0));
3358 #if REALLOC_ZERO_BYTES_FREES
3359 if (bytes
== 0 && oldmem
!= NULL
)
3361 __libc_free (oldmem
); return 0;
3365 /* realloc of null is supposed to be same as malloc */
3367 return __libc_malloc (bytes
);
3369 /* Perform a quick check to ensure that the pointer's tag matches the
3371 if (__glibc_unlikely (mtag_enabled
))
3372 *(volatile char*) oldmem
;
3374 /* chunk corresponding to oldmem */
3375 const mchunkptr oldp
= mem2chunk (oldmem
);
3377 const INTERNAL_SIZE_T oldsize
= chunksize (oldp
);
3379 if (chunk_is_mmapped (oldp
))
3383 MAYBE_INIT_TCACHE ();
3384 ar_ptr
= arena_for_chunk (oldp
);
3387 /* Little security check which won't hurt performance: the allocator
3388 never wrapps around at the end of the address space. Therefore
3389 we can exclude some size values which might appear here by
3390 accident or by "design" from some intruder. We need to bypass
3391 this check for dumped fake mmap chunks from the old main arena
3392 because the new malloc may provide additional alignment. */
3393 if ((__builtin_expect ((uintptr_t) oldp
> (uintptr_t) -oldsize
, 0)
3394 || __builtin_expect (misaligned_chunk (oldp
), 0))
3395 && !DUMPED_MAIN_ARENA_CHUNK (oldp
))
3396 malloc_printerr ("realloc(): invalid pointer");
3398 if (!checked_request2size (bytes
, &nb
))
3400 __set_errno (ENOMEM
);
3404 if (chunk_is_mmapped (oldp
))
3406 /* If this is a faked mmapped chunk from the dumped main arena,
3407 always make a copy (and do not free the old chunk). */
3408 if (DUMPED_MAIN_ARENA_CHUNK (oldp
))
3410 /* Must alloc, copy, free. */
3411 void *newmem
= __libc_malloc (bytes
);
3414 /* Copy as many bytes as are available from the old chunk
3415 and fit into the new size. NB: The overhead for faked
3416 mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
3417 regular mmapped chunks. */
3418 if (bytes
> oldsize
- SIZE_SZ
)
3419 bytes
= oldsize
- SIZE_SZ
;
3420 memcpy (newmem
, oldmem
, bytes
);
3427 newp
= mremap_chunk (oldp
, nb
);
3430 void *newmem
= chunk2mem_tag (newp
);
3431 /* Give the new block a different tag. This helps to ensure
3432 that stale handles to the previous mapping are not
3433 reused. There's a performance hit for both us and the
3434 caller for doing this, so we might want to
3436 return tag_new_usable (newmem
);
3439 /* Note the extra SIZE_SZ overhead. */
3440 if (oldsize
- SIZE_SZ
>= nb
)
3441 return oldmem
; /* do nothing */
3443 /* Must alloc, copy, free. */
3444 newmem
= __libc_malloc (bytes
);
3446 return 0; /* propagate failure */
3448 memcpy (newmem
, oldmem
, oldsize
- CHUNK_HDR_SZ
);
3449 munmap_chunk (oldp
);
3453 if (SINGLE_THREAD_P
)
3455 newp
= _int_realloc (ar_ptr
, oldp
, oldsize
, nb
);
3456 assert (!newp
|| chunk_is_mmapped (mem2chunk (newp
)) ||
3457 ar_ptr
== arena_for_chunk (mem2chunk (newp
)));
3462 __libc_lock_lock (ar_ptr
->mutex
);
3464 newp
= _int_realloc (ar_ptr
, oldp
, oldsize
, nb
);
3466 __libc_lock_unlock (ar_ptr
->mutex
);
3467 assert (!newp
|| chunk_is_mmapped (mem2chunk (newp
)) ||
3468 ar_ptr
== arena_for_chunk (mem2chunk (newp
)));
3472 /* Try harder to allocate memory in other arenas. */
3473 LIBC_PROBE (memory_realloc_retry
, 2, bytes
, oldmem
);
3474 newp
= __libc_malloc (bytes
);
3477 size_t sz
= memsize (oldp
);
3478 memcpy (newp
, oldmem
, sz
);
3479 (void) tag_region (chunk2mem (oldp
), sz
);
3480 _int_free (ar_ptr
, oldp
, 0);
3486 libc_hidden_def (__libc_realloc
)
3489 __libc_memalign (size_t alignment
, size_t bytes
)
3491 void *address
= RETURN_ADDRESS (0);
3492 return _mid_memalign (alignment
, bytes
, address
);
3496 _mid_memalign (size_t alignment
, size_t bytes
, void *address
)
3501 void *(*hook
) (size_t, size_t, const void *) =
3502 atomic_forced_read (__memalign_hook
);
3503 if (__builtin_expect (hook
!= NULL
, 0))
3504 return (*hook
)(alignment
, bytes
, address
);
3506 /* If we need less alignment than we give anyway, just relay to malloc. */
3507 if (alignment
<= MALLOC_ALIGNMENT
)
3508 return __libc_malloc (bytes
);
3510 /* Otherwise, ensure that it is at least a minimum chunk size */
3511 if (alignment
< MINSIZE
)
3512 alignment
= MINSIZE
;
3514 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3515 power of 2 and will cause overflow in the check below. */
3516 if (alignment
> SIZE_MAX
/ 2 + 1)
3518 __set_errno (EINVAL
);
3523 /* Make sure alignment is power of 2. */
3524 if (!powerof2 (alignment
))
3526 size_t a
= MALLOC_ALIGNMENT
* 2;
3527 while (a
< alignment
)
3532 if (SINGLE_THREAD_P
)
3534 p
= _int_memalign (&main_arena
, alignment
, bytes
);
3535 assert (!p
|| chunk_is_mmapped (mem2chunk (p
)) ||
3536 &main_arena
== arena_for_chunk (mem2chunk (p
)));
3537 return tag_new_usable (p
);
3540 arena_get (ar_ptr
, bytes
+ alignment
+ MINSIZE
);
3542 p
= _int_memalign (ar_ptr
, alignment
, bytes
);
3543 if (!p
&& ar_ptr
!= NULL
)
3545 LIBC_PROBE (memory_memalign_retry
, 2, bytes
, alignment
);
3546 ar_ptr
= arena_get_retry (ar_ptr
, bytes
);
3547 p
= _int_memalign (ar_ptr
, alignment
, bytes
);
3551 __libc_lock_unlock (ar_ptr
->mutex
);
3553 assert (!p
|| chunk_is_mmapped (mem2chunk (p
)) ||
3554 ar_ptr
== arena_for_chunk (mem2chunk (p
)));
3555 return tag_new_usable (p
);
3558 weak_alias (__libc_memalign
, aligned_alloc
)
3559 libc_hidden_def (__libc_memalign
)
3562 __libc_valloc (size_t bytes
)
3564 if (__malloc_initialized
< 0)
3567 void *address
= RETURN_ADDRESS (0);
3568 size_t pagesize
= GLRO (dl_pagesize
);
3569 return _mid_memalign (pagesize
, bytes
, address
);
3573 __libc_pvalloc (size_t bytes
)
3575 if (__malloc_initialized
< 0)
3578 void *address
= RETURN_ADDRESS (0);
3579 size_t pagesize
= GLRO (dl_pagesize
);
3580 size_t rounded_bytes
;
3581 /* ALIGN_UP with overflow check. */
3582 if (__glibc_unlikely (__builtin_add_overflow (bytes
,
3586 __set_errno (ENOMEM
);
3589 rounded_bytes
= rounded_bytes
& -(pagesize
- 1);
3591 return _mid_memalign (pagesize
, rounded_bytes
, address
);
3595 __libc_calloc (size_t n
, size_t elem_size
)
3599 INTERNAL_SIZE_T sz
, oldtopsize
;
3601 unsigned long clearsize
;
3602 unsigned long nclears
;
3606 if (__glibc_unlikely (__builtin_mul_overflow (n
, elem_size
, &bytes
)))
3608 __set_errno (ENOMEM
);
3614 void *(*hook
) (size_t, const void *) =
3615 atomic_forced_read (__malloc_hook
);
3616 if (__builtin_expect (hook
!= NULL
, 0))
3618 mem
= (*hook
)(sz
, RETURN_ADDRESS (0));
3622 return memset (mem
, 0, sz
);
3625 MAYBE_INIT_TCACHE ();
3627 if (SINGLE_THREAD_P
)
3634 /* Check if we hand out the top chunk, in which case there may be no
3638 oldtopsize
= chunksize (top (av
));
3639 # if MORECORE_CLEARS < 2
3640 /* Only newly allocated memory is guaranteed to be cleared. */
3641 if (av
== &main_arena
&&
3642 oldtopsize
< mp_
.sbrk_base
+ av
->max_system_mem
- (char *) oldtop
)
3643 oldtopsize
= (mp_
.sbrk_base
+ av
->max_system_mem
- (char *) oldtop
);
3645 if (av
!= &main_arena
)
3647 heap_info
*heap
= heap_for_ptr (oldtop
);
3648 if (oldtopsize
< (char *) heap
+ heap
->mprotect_size
- (char *) oldtop
)
3649 oldtopsize
= (char *) heap
+ heap
->mprotect_size
- (char *) oldtop
;
3655 /* No usable arenas. */
3659 mem
= _int_malloc (av
, sz
);
3661 assert (!mem
|| chunk_is_mmapped (mem2chunk (mem
)) ||
3662 av
== arena_for_chunk (mem2chunk (mem
)));
3664 if (!SINGLE_THREAD_P
)
3666 if (mem
== 0 && av
!= NULL
)
3668 LIBC_PROBE (memory_calloc_retry
, 1, sz
);
3669 av
= arena_get_retry (av
, sz
);
3670 mem
= _int_malloc (av
, sz
);
3674 __libc_lock_unlock (av
->mutex
);
3677 /* Allocation failed even after a retry. */
3681 mchunkptr p
= mem2chunk (mem
);
3683 /* If we are using memory tagging, then we need to set the tags
3684 regardless of MORECORE_CLEARS, so we zero the whole block while
3686 if (__glibc_unlikely (mtag_enabled
))
3687 return tag_new_zero_region (mem
, memsize (p
));
3689 INTERNAL_SIZE_T csz
= chunksize (p
);
3691 /* Two optional cases in which clearing not necessary */
3692 if (chunk_is_mmapped (p
))
3694 if (__builtin_expect (perturb_byte
, 0))
3695 return memset (mem
, 0, sz
);
3701 if (perturb_byte
== 0 && (p
== oldtop
&& csz
> oldtopsize
))
3703 /* clear only the bytes from non-freshly-sbrked memory */
3708 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3709 contents have an odd number of INTERNAL_SIZE_T-sized words;
3711 d
= (INTERNAL_SIZE_T
*) mem
;
3712 clearsize
= csz
- SIZE_SZ
;
3713 nclears
= clearsize
/ sizeof (INTERNAL_SIZE_T
);
3714 assert (nclears
>= 3);
3717 return memset (d
, 0, clearsize
);
3745 ------------------------------ malloc ------------------------------
3749 _int_malloc (mstate av
, size_t bytes
)
3751 INTERNAL_SIZE_T nb
; /* normalized request size */
3752 unsigned int idx
; /* associated bin index */
3753 mbinptr bin
; /* associated bin */
3755 mchunkptr victim
; /* inspected/selected chunk */
3756 INTERNAL_SIZE_T size
; /* its size */
3757 int victim_index
; /* its bin index */
3759 mchunkptr remainder
; /* remainder from a split */
3760 unsigned long remainder_size
; /* its size */
3762 unsigned int block
; /* bit map traverser */
3763 unsigned int bit
; /* bit map traverser */
3764 unsigned int map
; /* current word of binmap */
3766 mchunkptr fwd
; /* misc temp for linking */
3767 mchunkptr bck
; /* misc temp for linking */
3770 size_t tcache_unsorted_count
; /* count of unsorted chunks processed */
3774 Convert request size to internal form by adding SIZE_SZ bytes
3775 overhead plus possibly more to obtain necessary alignment and/or
3776 to obtain a size of at least MINSIZE, the smallest allocatable
3777 size. Also, checked_request2size returns false for request sizes
3778 that are so large that they wrap around zero when padded and
3782 if (!checked_request2size (bytes
, &nb
))
3784 __set_errno (ENOMEM
);
3788 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3790 if (__glibc_unlikely (av
== NULL
))
3792 void *p
= sysmalloc (nb
, av
);
3794 alloc_perturb (p
, bytes
);
3799 If the size qualifies as a fastbin, first check corresponding bin.
3800 This code is safe to execute even if av is not yet initialized, so we
3801 can try it without checking, which saves some time on this fast path.
3804 #define REMOVE_FB(fb, victim, pp) \
3808 if (victim == NULL) \
3810 pp = REVEAL_PTR (victim->fd); \
3811 if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
3812 malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
3814 while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
3817 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3819 idx
= fastbin_index (nb
);
3820 mfastbinptr
*fb
= &fastbin (av
, idx
);
3826 if (__glibc_unlikely (misaligned_chunk (victim
)))
3827 malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
3829 if (SINGLE_THREAD_P
)
3830 *fb
= REVEAL_PTR (victim
->fd
);
3832 REMOVE_FB (fb
, pp
, victim
);
3833 if (__glibc_likely (victim
!= NULL
))
3835 size_t victim_idx
= fastbin_index (chunksize (victim
));
3836 if (__builtin_expect (victim_idx
!= idx
, 0))
3837 malloc_printerr ("malloc(): memory corruption (fast)");
3838 check_remalloced_chunk (av
, victim
, nb
);
3840 /* While we're here, if we see other chunks of the same size,
3841 stash them in the tcache. */
3842 size_t tc_idx
= csize2tidx (nb
);
3843 if (tcache
&& tc_idx
< mp_
.tcache_bins
)
3845 mchunkptr tc_victim
;
3847 /* While bin not empty and tcache not full, copy chunks. */
3848 while (tcache
->counts
[tc_idx
] < mp_
.tcache_count
3849 && (tc_victim
= *fb
) != NULL
)
3851 if (__glibc_unlikely (misaligned_chunk (tc_victim
)))
3852 malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
3853 if (SINGLE_THREAD_P
)
3854 *fb
= REVEAL_PTR (tc_victim
->fd
);
3857 REMOVE_FB (fb
, pp
, tc_victim
);
3858 if (__glibc_unlikely (tc_victim
== NULL
))
3861 tcache_put (tc_victim
, tc_idx
);
3865 void *p
= chunk2mem (victim
);
3866 alloc_perturb (p
, bytes
);
3873 If a small request, check regular bin. Since these "smallbins"
3874 hold one size each, no searching within bins is necessary.
3875 (For a large request, we need to wait until unsorted chunks are
3876 processed to find best fit. But for small ones, fits are exact
3877 anyway, so we can check now, which is faster.)
3880 if (in_smallbin_range (nb
))
3882 idx
= smallbin_index (nb
);
3883 bin
= bin_at (av
, idx
);
3885 if ((victim
= last (bin
)) != bin
)
3888 if (__glibc_unlikely (bck
->fd
!= victim
))
3889 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3890 set_inuse_bit_at_offset (victim
, nb
);
3894 if (av
!= &main_arena
)
3895 set_non_main_arena (victim
);
3896 check_malloced_chunk (av
, victim
, nb
);
3898 /* While we're here, if we see other chunks of the same size,
3899 stash them in the tcache. */
3900 size_t tc_idx
= csize2tidx (nb
);
3901 if (tcache
&& tc_idx
< mp_
.tcache_bins
)
3903 mchunkptr tc_victim
;
3905 /* While bin not empty and tcache not full, copy chunks over. */
3906 while (tcache
->counts
[tc_idx
] < mp_
.tcache_count
3907 && (tc_victim
= last (bin
)) != bin
)
3911 bck
= tc_victim
->bk
;
3912 set_inuse_bit_at_offset (tc_victim
, nb
);
3913 if (av
!= &main_arena
)
3914 set_non_main_arena (tc_victim
);
3918 tcache_put (tc_victim
, tc_idx
);
3923 void *p
= chunk2mem (victim
);
3924 alloc_perturb (p
, bytes
);
3930 If this is a large request, consolidate fastbins before continuing.
3931 While it might look excessive to kill all fastbins before
3932 even seeing if there is space available, this avoids
3933 fragmentation problems normally associated with fastbins.
3934 Also, in practice, programs tend to have runs of either small or
3935 large requests, but less often mixtures, so consolidation is not
3936 invoked all that often in most programs. And the programs that
3937 it is called frequently in otherwise tend to fragment.
3942 idx
= largebin_index (nb
);
3943 if (atomic_load_relaxed (&av
->have_fastchunks
))
3944 malloc_consolidate (av
);
3948 Process recently freed or remaindered chunks, taking one only if
3949 it is exact fit, or, if this a small request, the chunk is remainder from
3950 the most recent non-exact fit. Place other traversed chunks in
3951 bins. Note that this step is the only place in any routine where
3952 chunks are placed in bins.
3954 The outer loop here is needed because we might not realize until
3955 near the end of malloc that we should have consolidated, so must
3956 do so and retry. This happens at most once, and only when we would
3957 otherwise need to expand memory to service a "small" request.
3961 INTERNAL_SIZE_T tcache_nb
= 0;
3962 size_t tc_idx
= csize2tidx (nb
);
3963 if (tcache
&& tc_idx
< mp_
.tcache_bins
)
3965 int return_cached
= 0;
3967 tcache_unsorted_count
= 0;
3973 while ((victim
= unsorted_chunks (av
)->bk
) != unsorted_chunks (av
))
3976 size
= chunksize (victim
);
3977 mchunkptr next
= chunk_at_offset (victim
, size
);
3979 if (__glibc_unlikely (size
<= CHUNK_HDR_SZ
)
3980 || __glibc_unlikely (size
> av
->system_mem
))
3981 malloc_printerr ("malloc(): invalid size (unsorted)");
3982 if (__glibc_unlikely (chunksize_nomask (next
) < CHUNK_HDR_SZ
)
3983 || __glibc_unlikely (chunksize_nomask (next
) > av
->system_mem
))
3984 malloc_printerr ("malloc(): invalid next size (unsorted)");
3985 if (__glibc_unlikely ((prev_size (next
) & ~(SIZE_BITS
)) != size
))
3986 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
3987 if (__glibc_unlikely (bck
->fd
!= victim
)
3988 || __glibc_unlikely (victim
->fd
!= unsorted_chunks (av
)))
3989 malloc_printerr ("malloc(): unsorted double linked list corrupted");
3990 if (__glibc_unlikely (prev_inuse (next
)))
3991 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
3994 If a small request, try to use last remainder if it is the
3995 only chunk in unsorted bin. This helps promote locality for
3996 runs of consecutive small requests. This is the only
3997 exception to best-fit, and applies only when there is
3998 no exact fit for a small chunk.
4001 if (in_smallbin_range (nb
) &&
4002 bck
== unsorted_chunks (av
) &&
4003 victim
== av
->last_remainder
&&
4004 (unsigned long) (size
) > (unsigned long) (nb
+ MINSIZE
))
4006 /* split and reattach remainder */
4007 remainder_size
= size
- nb
;
4008 remainder
= chunk_at_offset (victim
, nb
);
4009 unsorted_chunks (av
)->bk
= unsorted_chunks (av
)->fd
= remainder
;
4010 av
->last_remainder
= remainder
;
4011 remainder
->bk
= remainder
->fd
= unsorted_chunks (av
);
4012 if (!in_smallbin_range (remainder_size
))
4014 remainder
->fd_nextsize
= NULL
;
4015 remainder
->bk_nextsize
= NULL
;
4018 set_head (victim
, nb
| PREV_INUSE
|
4019 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4020 set_head (remainder
, remainder_size
| PREV_INUSE
);
4021 set_foot (remainder
, remainder_size
);
4023 check_malloced_chunk (av
, victim
, nb
);
4024 void *p
= chunk2mem (victim
);
4025 alloc_perturb (p
, bytes
);
4029 /* remove from unsorted list */
4030 if (__glibc_unlikely (bck
->fd
!= victim
))
4031 malloc_printerr ("malloc(): corrupted unsorted chunks 3");
4032 unsorted_chunks (av
)->bk
= bck
;
4033 bck
->fd
= unsorted_chunks (av
);
4035 /* Take now instead of binning if exact fit */
4039 set_inuse_bit_at_offset (victim
, size
);
4040 if (av
!= &main_arena
)
4041 set_non_main_arena (victim
);
4043 /* Fill cache first, return to user only if cache fills.
4044 We may return one of these chunks later. */
4046 && tcache
->counts
[tc_idx
] < mp_
.tcache_count
)
4048 tcache_put (victim
, tc_idx
);
4055 check_malloced_chunk (av
, victim
, nb
);
4056 void *p
= chunk2mem (victim
);
4057 alloc_perturb (p
, bytes
);
4064 /* place chunk in bin */
4066 if (in_smallbin_range (size
))
4068 victim_index
= smallbin_index (size
);
4069 bck
= bin_at (av
, victim_index
);
4074 victim_index
= largebin_index (size
);
4075 bck
= bin_at (av
, victim_index
);
4078 /* maintain large bins in sorted order */
4081 /* Or with inuse bit to speed comparisons */
4083 /* if smaller than smallest, bypass loop below */
4084 assert (chunk_main_arena (bck
->bk
));
4085 if ((unsigned long) (size
)
4086 < (unsigned long) chunksize_nomask (bck
->bk
))
4091 victim
->fd_nextsize
= fwd
->fd
;
4092 victim
->bk_nextsize
= fwd
->fd
->bk_nextsize
;
4093 fwd
->fd
->bk_nextsize
= victim
->bk_nextsize
->fd_nextsize
= victim
;
4097 assert (chunk_main_arena (fwd
));
4098 while ((unsigned long) size
< chunksize_nomask (fwd
))
4100 fwd
= fwd
->fd_nextsize
;
4101 assert (chunk_main_arena (fwd
));
4104 if ((unsigned long) size
4105 == (unsigned long) chunksize_nomask (fwd
))
4106 /* Always insert in the second position. */
4110 victim
->fd_nextsize
= fwd
;
4111 victim
->bk_nextsize
= fwd
->bk_nextsize
;
4112 if (__glibc_unlikely (fwd
->bk_nextsize
->fd_nextsize
!= fwd
))
4113 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
4114 fwd
->bk_nextsize
= victim
;
4115 victim
->bk_nextsize
->fd_nextsize
= victim
;
4119 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
4123 victim
->fd_nextsize
= victim
->bk_nextsize
= victim
;
4126 mark_bin (av
, victim_index
);
4133 /* If we've processed as many chunks as we're allowed while
4134 filling the cache, return one of the cached ones. */
4135 ++tcache_unsorted_count
;
4137 && mp_
.tcache_unsorted_limit
> 0
4138 && tcache_unsorted_count
> mp_
.tcache_unsorted_limit
)
4140 return tcache_get (tc_idx
);
4144 #define MAX_ITERS 10000
4145 if (++iters
>= MAX_ITERS
)
4150 /* If all the small chunks we found ended up cached, return one now. */
4153 return tcache_get (tc_idx
);
4158 If a large request, scan through the chunks of current bin in
4159 sorted order to find smallest that fits. Use the skip list for this.
4162 if (!in_smallbin_range (nb
))
4164 bin
= bin_at (av
, idx
);
4166 /* skip scan if empty or largest chunk is too small */
4167 if ((victim
= first (bin
)) != bin
4168 && (unsigned long) chunksize_nomask (victim
)
4169 >= (unsigned long) (nb
))
4171 victim
= victim
->bk_nextsize
;
4172 while (((unsigned long) (size
= chunksize (victim
)) <
4173 (unsigned long) (nb
)))
4174 victim
= victim
->bk_nextsize
;
4176 /* Avoid removing the first entry for a size so that the skip
4177 list does not have to be rerouted. */
4178 if (victim
!= last (bin
)
4179 && chunksize_nomask (victim
)
4180 == chunksize_nomask (victim
->fd
))
4181 victim
= victim
->fd
;
4183 remainder_size
= size
- nb
;
4184 unlink_chunk (av
, victim
);
4187 if (remainder_size
< MINSIZE
)
4189 set_inuse_bit_at_offset (victim
, size
);
4190 if (av
!= &main_arena
)
4191 set_non_main_arena (victim
);
4196 remainder
= chunk_at_offset (victim
, nb
);
4197 /* We cannot assume the unsorted list is empty and therefore
4198 have to perform a complete insert here. */
4199 bck
= unsorted_chunks (av
);
4201 if (__glibc_unlikely (fwd
->bk
!= bck
))
4202 malloc_printerr ("malloc(): corrupted unsorted chunks");
4203 remainder
->bk
= bck
;
4204 remainder
->fd
= fwd
;
4205 bck
->fd
= remainder
;
4206 fwd
->bk
= remainder
;
4207 if (!in_smallbin_range (remainder_size
))
4209 remainder
->fd_nextsize
= NULL
;
4210 remainder
->bk_nextsize
= NULL
;
4212 set_head (victim
, nb
| PREV_INUSE
|
4213 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4214 set_head (remainder
, remainder_size
| PREV_INUSE
);
4215 set_foot (remainder
, remainder_size
);
4217 check_malloced_chunk (av
, victim
, nb
);
4218 void *p
= chunk2mem (victim
);
4219 alloc_perturb (p
, bytes
);
4225 Search for a chunk by scanning bins, starting with next largest
4226 bin. This search is strictly by best-fit; i.e., the smallest
4227 (with ties going to approximately the least recently used) chunk
4228 that fits is selected.
4230 The bitmap avoids needing to check that most blocks are nonempty.
4231 The particular case of skipping all bins during warm-up phases
4232 when no chunks have been returned yet is faster than it might look.
4236 bin
= bin_at (av
, idx
);
4237 block
= idx2block (idx
);
4238 map
= av
->binmap
[block
];
4239 bit
= idx2bit (idx
);
4243 /* Skip rest of block if there are no more set bits in this block. */
4244 if (bit
> map
|| bit
== 0)
4248 if (++block
>= BINMAPSIZE
) /* out of bins */
4251 while ((map
= av
->binmap
[block
]) == 0);
4253 bin
= bin_at (av
, (block
<< BINMAPSHIFT
));
4257 /* Advance to bin with set bit. There must be one. */
4258 while ((bit
& map
) == 0)
4260 bin
= next_bin (bin
);
4265 /* Inspect the bin. It is likely to be non-empty */
4266 victim
= last (bin
);
4268 /* If a false alarm (empty bin), clear the bit. */
4271 av
->binmap
[block
] = map
&= ~bit
; /* Write through */
4272 bin
= next_bin (bin
);
4278 size
= chunksize (victim
);
4280 /* We know the first chunk in this bin is big enough to use. */
4281 assert ((unsigned long) (size
) >= (unsigned long) (nb
));
4283 remainder_size
= size
- nb
;
4286 unlink_chunk (av
, victim
);
4289 if (remainder_size
< MINSIZE
)
4291 set_inuse_bit_at_offset (victim
, size
);
4292 if (av
!= &main_arena
)
4293 set_non_main_arena (victim
);
4299 remainder
= chunk_at_offset (victim
, nb
);
4301 /* We cannot assume the unsorted list is empty and therefore
4302 have to perform a complete insert here. */
4303 bck
= unsorted_chunks (av
);
4305 if (__glibc_unlikely (fwd
->bk
!= bck
))
4306 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4307 remainder
->bk
= bck
;
4308 remainder
->fd
= fwd
;
4309 bck
->fd
= remainder
;
4310 fwd
->bk
= remainder
;
4312 /* advertise as last remainder */
4313 if (in_smallbin_range (nb
))
4314 av
->last_remainder
= remainder
;
4315 if (!in_smallbin_range (remainder_size
))
4317 remainder
->fd_nextsize
= NULL
;
4318 remainder
->bk_nextsize
= NULL
;
4320 set_head (victim
, nb
| PREV_INUSE
|
4321 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4322 set_head (remainder
, remainder_size
| PREV_INUSE
);
4323 set_foot (remainder
, remainder_size
);
4325 check_malloced_chunk (av
, victim
, nb
);
4326 void *p
= chunk2mem (victim
);
4327 alloc_perturb (p
, bytes
);
4334 If large enough, split off the chunk bordering the end of memory
4335 (held in av->top). Note that this is in accord with the best-fit
4336 search rule. In effect, av->top is treated as larger (and thus
4337 less well fitting) than any other available chunk since it can
4338 be extended to be as large as necessary (up to system
4341 We require that av->top always exists (i.e., has size >=
4342 MINSIZE) after initialization, so if it would otherwise be
4343 exhausted by current request, it is replenished. (The main
4344 reason for ensuring it exists is that we may need MINSIZE space
4345 to put in fenceposts in sysmalloc.)
4349 size
= chunksize (victim
);
4351 if (__glibc_unlikely (size
> av
->system_mem
))
4352 malloc_printerr ("malloc(): corrupted top size");
4354 if ((unsigned long) (size
) >= (unsigned long) (nb
+ MINSIZE
))
4356 remainder_size
= size
- nb
;
4357 remainder
= chunk_at_offset (victim
, nb
);
4358 av
->top
= remainder
;
4359 set_head (victim
, nb
| PREV_INUSE
|
4360 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4361 set_head (remainder
, remainder_size
| PREV_INUSE
);
4363 check_malloced_chunk (av
, victim
, nb
);
4364 void *p
= chunk2mem (victim
);
4365 alloc_perturb (p
, bytes
);
4369 /* When we are using atomic ops to free fast chunks we can get
4370 here for all block sizes. */
4371 else if (atomic_load_relaxed (&av
->have_fastchunks
))
4373 malloc_consolidate (av
);
4374 /* restore original bin index */
4375 if (in_smallbin_range (nb
))
4376 idx
= smallbin_index (nb
);
4378 idx
= largebin_index (nb
);
4382 Otherwise, relay to handle system-dependent cases
4386 void *p
= sysmalloc (nb
, av
);
4388 alloc_perturb (p
, bytes
);
4395 ------------------------------ free ------------------------------
4399 _int_free (mstate av
, mchunkptr p
, int have_lock
)
4401 INTERNAL_SIZE_T size
; /* its size */
4402 mfastbinptr
*fb
; /* associated fastbin */
4403 mchunkptr nextchunk
; /* next contiguous chunk */
4404 INTERNAL_SIZE_T nextsize
; /* its size */
4405 int nextinuse
; /* true if nextchunk is used */
4406 INTERNAL_SIZE_T prevsize
; /* size of previous contiguous chunk */
4407 mchunkptr bck
; /* misc temp for linking */
4408 mchunkptr fwd
; /* misc temp for linking */
4410 size
= chunksize (p
);
4412 /* Little security check which won't hurt performance: the
4413 allocator never wrapps around at the end of the address space.
4414 Therefore we can exclude some size values which might appear
4415 here by accident or by "design" from some intruder. */
4416 if (__builtin_expect ((uintptr_t) p
> (uintptr_t) -size
, 0)
4417 || __builtin_expect (misaligned_chunk (p
), 0))
4418 malloc_printerr ("free(): invalid pointer");
4419 /* We know that each chunk is at least MINSIZE bytes in size or a
4420 multiple of MALLOC_ALIGNMENT. */
4421 if (__glibc_unlikely (size
< MINSIZE
|| !aligned_OK (size
)))
4422 malloc_printerr ("free(): invalid size");
4424 check_inuse_chunk(av
, p
);
4428 size_t tc_idx
= csize2tidx (size
);
4429 if (tcache
!= NULL
&& tc_idx
< mp_
.tcache_bins
)
4431 /* Check to see if it's already in the tcache. */
4432 tcache_entry
*e
= (tcache_entry
*) chunk2mem (p
);
4434 /* This test succeeds on double free. However, we don't 100%
4435 trust it (it also matches random payload data at a 1 in
4436 2^<size_t> chance), so verify it's not an unlikely
4437 coincidence before aborting. */
4438 if (__glibc_unlikely (e
->key
== tcache
))
4442 LIBC_PROBE (memory_tcache_double_free
, 2, e
, tc_idx
);
4443 for (tmp
= tcache
->entries
[tc_idx
];
4445 tmp
= REVEAL_PTR (tmp
->next
), ++cnt
)
4447 if (cnt
>= mp_
.tcache_count
)
4448 malloc_printerr ("free(): too many chunks detected in tcache");
4449 if (__glibc_unlikely (!aligned_OK (tmp
)))
4450 malloc_printerr ("free(): unaligned chunk detected in tcache 2");
4452 malloc_printerr ("free(): double free detected in tcache 2");
4453 /* If we get here, it was a coincidence. We've wasted a
4454 few cycles, but don't abort. */
4458 if (tcache
->counts
[tc_idx
] < mp_
.tcache_count
)
4460 tcache_put (p
, tc_idx
);
4468 If eligible, place chunk on a fastbin so it can be found
4469 and used quickly in malloc.
4472 if ((unsigned long)(size
) <= (unsigned long)(get_max_fast ())
4476 If TRIM_FASTBINS set, don't place chunks
4477 bordering top into fastbins
4479 && (chunk_at_offset(p
, size
) != av
->top
)
4483 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p
, size
))
4485 || __builtin_expect (chunksize (chunk_at_offset (p
, size
))
4486 >= av
->system_mem
, 0))
4489 /* We might not have a lock at this point and concurrent modifications
4490 of system_mem might result in a false positive. Redo the test after
4491 getting the lock. */
4494 __libc_lock_lock (av
->mutex
);
4495 fail
= (chunksize_nomask (chunk_at_offset (p
, size
)) <= CHUNK_HDR_SZ
4496 || chunksize (chunk_at_offset (p
, size
)) >= av
->system_mem
);
4497 __libc_lock_unlock (av
->mutex
);
4501 malloc_printerr ("free(): invalid next size (fast)");
4504 free_perturb (chunk2mem(p
), size
- CHUNK_HDR_SZ
);
4506 atomic_store_relaxed (&av
->have_fastchunks
, true);
4507 unsigned int idx
= fastbin_index(size
);
4508 fb
= &fastbin (av
, idx
);
4510 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4511 mchunkptr old
= *fb
, old2
;
4513 if (SINGLE_THREAD_P
)
4515 /* Check that the top of the bin is not the record we are going to
4516 add (i.e., double free). */
4517 if (__builtin_expect (old
== p
, 0))
4518 malloc_printerr ("double free or corruption (fasttop)");
4519 p
->fd
= PROTECT_PTR (&p
->fd
, old
);
4525 /* Check that the top of the bin is not the record we are going to
4526 add (i.e., double free). */
4527 if (__builtin_expect (old
== p
, 0))
4528 malloc_printerr ("double free or corruption (fasttop)");
4530 p
->fd
= PROTECT_PTR (&p
->fd
, old
);
4532 while ((old
= catomic_compare_and_exchange_val_rel (fb
, p
, old2
))
4535 /* Check that size of fastbin chunk at the top is the same as
4536 size of the chunk that we are adding. We can dereference OLD
4537 only if we have the lock, otherwise it might have already been
4539 if (have_lock
&& old
!= NULL
4540 && __builtin_expect (fastbin_index (chunksize (old
)) != idx
, 0))
4541 malloc_printerr ("invalid fastbin entry (free)");
4545 Consolidate other non-mmapped chunks as they arrive.
4548 else if (!chunk_is_mmapped(p
)) {
4550 /* If we're single-threaded, don't lock the arena. */
4551 if (SINGLE_THREAD_P
)
4555 __libc_lock_lock (av
->mutex
);
4557 nextchunk
= chunk_at_offset(p
, size
);
4559 /* Lightweight tests: check whether the block is already the
4561 if (__glibc_unlikely (p
== av
->top
))
4562 malloc_printerr ("double free or corruption (top)");
4563 /* Or whether the next chunk is beyond the boundaries of the arena. */
4564 if (__builtin_expect (contiguous (av
)
4565 && (char *) nextchunk
4566 >= ((char *) av
->top
+ chunksize(av
->top
)), 0))
4567 malloc_printerr ("double free or corruption (out)");
4568 /* Or whether the block is actually not marked used. */
4569 if (__glibc_unlikely (!prev_inuse(nextchunk
)))
4570 malloc_printerr ("double free or corruption (!prev)");
4572 nextsize
= chunksize(nextchunk
);
4573 if (__builtin_expect (chunksize_nomask (nextchunk
) <= CHUNK_HDR_SZ
, 0)
4574 || __builtin_expect (nextsize
>= av
->system_mem
, 0))
4575 malloc_printerr ("free(): invalid next size (normal)");
4577 free_perturb (chunk2mem(p
), size
- CHUNK_HDR_SZ
);
4579 /* consolidate backward */
4580 if (!prev_inuse(p
)) {
4581 prevsize
= prev_size (p
);
4583 p
= chunk_at_offset(p
, -((long) prevsize
));
4584 if (__glibc_unlikely (chunksize(p
) != prevsize
))
4585 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4586 unlink_chunk (av
, p
);
4589 if (nextchunk
!= av
->top
) {
4590 /* get and clear inuse bit */
4591 nextinuse
= inuse_bit_at_offset(nextchunk
, nextsize
);
4593 /* consolidate forward */
4595 unlink_chunk (av
, nextchunk
);
4598 clear_inuse_bit_at_offset(nextchunk
, 0);
4601 Place the chunk in unsorted chunk list. Chunks are
4602 not placed into regular bins until after they have
4603 been given one chance to be used in malloc.
4606 bck
= unsorted_chunks(av
);
4608 if (__glibc_unlikely (fwd
->bk
!= bck
))
4609 malloc_printerr ("free(): corrupted unsorted chunks");
4612 if (!in_smallbin_range(size
))
4614 p
->fd_nextsize
= NULL
;
4615 p
->bk_nextsize
= NULL
;
4620 set_head(p
, size
| PREV_INUSE
);
4623 check_free_chunk(av
, p
);
4627 If the chunk borders the current high end of memory,
4628 consolidate into top
4633 set_head(p
, size
| PREV_INUSE
);
4639 If freeing a large space, consolidate possibly-surrounding
4640 chunks. Then, if the total unused topmost memory exceeds trim
4641 threshold, ask malloc_trim to reduce top.
4643 Unless max_fast is 0, we don't know if there are fastbins
4644 bordering top, so we cannot tell for sure whether threshold
4645 has been reached unless fastbins are consolidated. But we
4646 don't want to consolidate on each free. As a compromise,
4647 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4651 if ((unsigned long)(size
) >= FASTBIN_CONSOLIDATION_THRESHOLD
) {
4652 if (atomic_load_relaxed (&av
->have_fastchunks
))
4653 malloc_consolidate(av
);
4655 if (av
== &main_arena
) {
4656 #ifndef MORECORE_CANNOT_TRIM
4657 if ((unsigned long)(chunksize(av
->top
)) >=
4658 (unsigned long)(mp_
.trim_threshold
))
4659 systrim(mp_
.top_pad
, av
);
4662 /* Always try heap_trim(), even if the top chunk is not
4663 large, because the corresponding heap might go away. */
4664 heap_info
*heap
= heap_for_ptr(top(av
));
4666 assert(heap
->ar_ptr
== av
);
4667 heap_trim(heap
, mp_
.top_pad
);
4672 __libc_lock_unlock (av
->mutex
);
4675 If the chunk was allocated via mmap, release via munmap().
4684 ------------------------- malloc_consolidate -------------------------
4686 malloc_consolidate is a specialized version of free() that tears
4687 down chunks held in fastbins. Free itself cannot be used for this
4688 purpose since, among other things, it might place chunks back onto
4689 fastbins. So, instead, we need to use a minor variant of the same
4693 static void malloc_consolidate(mstate av
)
4695 mfastbinptr
* fb
; /* current fastbin being consolidated */
4696 mfastbinptr
* maxfb
; /* last fastbin (for loop control) */
4697 mchunkptr p
; /* current chunk being consolidated */
4698 mchunkptr nextp
; /* next chunk to consolidate */
4699 mchunkptr unsorted_bin
; /* bin header */
4700 mchunkptr first_unsorted
; /* chunk to link to */
4702 /* These have same use as in free() */
4703 mchunkptr nextchunk
;
4704 INTERNAL_SIZE_T size
;
4705 INTERNAL_SIZE_T nextsize
;
4706 INTERNAL_SIZE_T prevsize
;
4709 atomic_store_relaxed (&av
->have_fastchunks
, false);
4711 unsorted_bin
= unsorted_chunks(av
);
4714 Remove each chunk from fast bin and consolidate it, placing it
4715 then in unsorted bin. Among other reasons for doing this,
4716 placing in unsorted bin avoids needing to calculate actual bins
4717 until malloc is sure that chunks aren't immediately going to be
4721 maxfb
= &fastbin (av
, NFASTBINS
- 1);
4722 fb
= &fastbin (av
, 0);
4724 p
= atomic_exchange_acq (fb
, NULL
);
4728 if (__glibc_unlikely (misaligned_chunk (p
)))
4729 malloc_printerr ("malloc_consolidate(): "
4730 "unaligned fastbin chunk detected");
4732 unsigned int idx
= fastbin_index (chunksize (p
));
4733 if ((&fastbin (av
, idx
)) != fb
)
4734 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4737 check_inuse_chunk(av
, p
);
4738 nextp
= REVEAL_PTR (p
->fd
);
4740 /* Slightly streamlined version of consolidation code in free() */
4741 size
= chunksize (p
);
4742 nextchunk
= chunk_at_offset(p
, size
);
4743 nextsize
= chunksize(nextchunk
);
4745 if (!prev_inuse(p
)) {
4746 prevsize
= prev_size (p
);
4748 p
= chunk_at_offset(p
, -((long) prevsize
));
4749 if (__glibc_unlikely (chunksize(p
) != prevsize
))
4750 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4751 unlink_chunk (av
, p
);
4754 if (nextchunk
!= av
->top
) {
4755 nextinuse
= inuse_bit_at_offset(nextchunk
, nextsize
);
4759 unlink_chunk (av
, nextchunk
);
4761 clear_inuse_bit_at_offset(nextchunk
, 0);
4763 first_unsorted
= unsorted_bin
->fd
;
4764 unsorted_bin
->fd
= p
;
4765 first_unsorted
->bk
= p
;
4767 if (!in_smallbin_range (size
)) {
4768 p
->fd_nextsize
= NULL
;
4769 p
->bk_nextsize
= NULL
;
4772 set_head(p
, size
| PREV_INUSE
);
4773 p
->bk
= unsorted_bin
;
4774 p
->fd
= first_unsorted
;
4780 set_head(p
, size
| PREV_INUSE
);
4784 } while ( (p
= nextp
) != 0);
4787 } while (fb
++ != maxfb
);
4791 ------------------------------ realloc ------------------------------
4795 _int_realloc(mstate av
, mchunkptr oldp
, INTERNAL_SIZE_T oldsize
,
4798 mchunkptr newp
; /* chunk to return */
4799 INTERNAL_SIZE_T newsize
; /* its size */
4800 void* newmem
; /* corresponding user mem */
4802 mchunkptr next
; /* next contiguous chunk after oldp */
4804 mchunkptr remainder
; /* extra space at end of newp */
4805 unsigned long remainder_size
; /* its size */
4808 if (__builtin_expect (chunksize_nomask (oldp
) <= CHUNK_HDR_SZ
, 0)
4809 || __builtin_expect (oldsize
>= av
->system_mem
, 0))
4810 malloc_printerr ("realloc(): invalid old size");
4812 check_inuse_chunk (av
, oldp
);
4814 /* All callers already filter out mmap'ed chunks. */
4815 assert (!chunk_is_mmapped (oldp
));
4817 next
= chunk_at_offset (oldp
, oldsize
);
4818 INTERNAL_SIZE_T nextsize
= chunksize (next
);
4819 if (__builtin_expect (chunksize_nomask (next
) <= CHUNK_HDR_SZ
, 0)
4820 || __builtin_expect (nextsize
>= av
->system_mem
, 0))
4821 malloc_printerr ("realloc(): invalid next size");
4823 if ((unsigned long) (oldsize
) >= (unsigned long) (nb
))
4825 /* already big enough; split below */
4832 /* Try to expand forward into top */
4833 if (next
== av
->top
&&
4834 (unsigned long) (newsize
= oldsize
+ nextsize
) >=
4835 (unsigned long) (nb
+ MINSIZE
))
4837 set_head_size (oldp
, nb
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4838 av
->top
= chunk_at_offset (oldp
, nb
);
4839 set_head (av
->top
, (newsize
- nb
) | PREV_INUSE
);
4840 check_inuse_chunk (av
, oldp
);
4841 return tag_new_usable (chunk2mem (oldp
));
4844 /* Try to expand forward into next chunk; split off remainder below */
4845 else if (next
!= av
->top
&&
4847 (unsigned long) (newsize
= oldsize
+ nextsize
) >=
4848 (unsigned long) (nb
))
4851 unlink_chunk (av
, next
);
4854 /* allocate, copy, free */
4857 newmem
= _int_malloc (av
, nb
- MALLOC_ALIGN_MASK
);
4859 return 0; /* propagate failure */
4861 newp
= mem2chunk (newmem
);
4862 newsize
= chunksize (newp
);
4865 Avoid copy if newp is next chunk after oldp.
4874 void *oldmem
= chunk2mem (oldp
);
4875 size_t sz
= memsize (oldp
);
4876 (void) tag_region (oldmem
, sz
);
4877 newmem
= tag_new_usable (newmem
);
4878 memcpy (newmem
, oldmem
, sz
);
4879 _int_free (av
, oldp
, 1);
4880 check_inuse_chunk (av
, newp
);
4886 /* If possible, free extra space in old or extended chunk */
4888 assert ((unsigned long) (newsize
) >= (unsigned long) (nb
));
4890 remainder_size
= newsize
- nb
;
4892 if (remainder_size
< MINSIZE
) /* not enough extra to split off */
4894 set_head_size (newp
, newsize
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4895 set_inuse_bit_at_offset (newp
, newsize
);
4897 else /* split remainder */
4899 remainder
= chunk_at_offset (newp
, nb
);
4900 /* Clear any user-space tags before writing the header. */
4901 remainder
= tag_region (remainder
, remainder_size
);
4902 set_head_size (newp
, nb
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4903 set_head (remainder
, remainder_size
| PREV_INUSE
|
4904 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4905 /* Mark remainder as inuse so free() won't complain */
4906 set_inuse_bit_at_offset (remainder
, remainder_size
);
4907 _int_free (av
, remainder
, 1);
4910 check_inuse_chunk (av
, newp
);
4911 return tag_new_usable (chunk2mem (newp
));
4915 ------------------------------ memalign ------------------------------
4919 _int_memalign (mstate av
, size_t alignment
, size_t bytes
)
4921 INTERNAL_SIZE_T nb
; /* padded request size */
4922 char *m
; /* memory returned by malloc call */
4923 mchunkptr p
; /* corresponding chunk */
4924 char *brk
; /* alignment point within p */
4925 mchunkptr newp
; /* chunk to return */
4926 INTERNAL_SIZE_T newsize
; /* its size */
4927 INTERNAL_SIZE_T leadsize
; /* leading space before alignment point */
4928 mchunkptr remainder
; /* spare room at end to split off */
4929 unsigned long remainder_size
; /* its size */
4930 INTERNAL_SIZE_T size
;
4934 if (!checked_request2size (bytes
, &nb
))
4936 __set_errno (ENOMEM
);
4941 Strategy: find a spot within that chunk that meets the alignment
4942 request, and then possibly free the leading and trailing space.
4945 /* Call malloc with worst case padding to hit alignment. */
4947 m
= (char *) (_int_malloc (av
, nb
+ alignment
+ MINSIZE
));
4950 return 0; /* propagate failure */
4954 if ((((unsigned long) (m
)) % alignment
) != 0) /* misaligned */
4957 Find an aligned spot inside chunk. Since we need to give back
4958 leading space in a chunk of at least MINSIZE, if the first
4959 calculation places us at a spot with less than MINSIZE leader,
4960 we can move to the next aligned spot -- we've allocated enough
4961 total room so that this is always possible.
4963 brk
= (char *) mem2chunk (((unsigned long) (m
+ alignment
- 1)) &
4964 - ((signed long) alignment
));
4965 if ((unsigned long) (brk
- (char *) (p
)) < MINSIZE
)
4968 newp
= (mchunkptr
) brk
;
4969 leadsize
= brk
- (char *) (p
);
4970 newsize
= chunksize (p
) - leadsize
;
4972 /* For mmapped chunks, just adjust offset */
4973 if (chunk_is_mmapped (p
))
4975 set_prev_size (newp
, prev_size (p
) + leadsize
);
4976 set_head (newp
, newsize
| IS_MMAPPED
);
4977 return chunk2mem (newp
);
4980 /* Otherwise, give back leader, use the rest */
4981 set_head (newp
, newsize
| PREV_INUSE
|
4982 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4983 set_inuse_bit_at_offset (newp
, newsize
);
4984 set_head_size (p
, leadsize
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4985 _int_free (av
, p
, 1);
4988 assert (newsize
>= nb
&&
4989 (((unsigned long) (chunk2mem (p
))) % alignment
) == 0);
4992 /* Also give back spare room at the end */
4993 if (!chunk_is_mmapped (p
))
4995 size
= chunksize (p
);
4996 if ((unsigned long) (size
) > (unsigned long) (nb
+ MINSIZE
))
4998 remainder_size
= size
- nb
;
4999 remainder
= chunk_at_offset (p
, nb
);
5000 set_head (remainder
, remainder_size
| PREV_INUSE
|
5001 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
5002 set_head_size (p
, nb
);
5003 _int_free (av
, remainder
, 1);
5007 check_inuse_chunk (av
, p
);
5008 return chunk2mem (p
);
5013 ------------------------------ malloc_trim ------------------------------
5017 mtrim (mstate av
, size_t pad
)
5019 /* Ensure all blocks are consolidated. */
5020 malloc_consolidate (av
);
5022 const size_t ps
= GLRO (dl_pagesize
);
5023 int psindex
= bin_index (ps
);
5024 const size_t psm1
= ps
- 1;
5027 for (int i
= 1; i
< NBINS
; ++i
)
5028 if (i
== 1 || i
>= psindex
)
5030 mbinptr bin
= bin_at (av
, i
);
5032 for (mchunkptr p
= last (bin
); p
!= bin
; p
= p
->bk
)
5034 INTERNAL_SIZE_T size
= chunksize (p
);
5036 if (size
> psm1
+ sizeof (struct malloc_chunk
))
5038 /* See whether the chunk contains at least one unused page. */
5039 char *paligned_mem
= (char *) (((uintptr_t) p
5040 + sizeof (struct malloc_chunk
)
5043 assert ((char *) chunk2mem (p
) + 2 * CHUNK_HDR_SZ
5045 assert ((char *) p
+ size
> paligned_mem
);
5047 /* This is the size we could potentially free. */
5048 size
-= paligned_mem
- (char *) p
;
5053 /* When debugging we simulate destroying the memory
5055 memset (paligned_mem
, 0x89, size
& ~psm1
);
5057 __madvise (paligned_mem
, size
& ~psm1
, MADV_DONTNEED
);
5065 #ifndef MORECORE_CANNOT_TRIM
5066 return result
| (av
== &main_arena
? systrim (pad
, av
) : 0);
5075 __malloc_trim (size_t s
)
5079 if (__malloc_initialized
< 0)
5082 mstate ar_ptr
= &main_arena
;
5085 __libc_lock_lock (ar_ptr
->mutex
);
5086 result
|= mtrim (ar_ptr
, s
);
5087 __libc_lock_unlock (ar_ptr
->mutex
);
5089 ar_ptr
= ar_ptr
->next
;
5091 while (ar_ptr
!= &main_arena
);
5098 ------------------------- malloc_usable_size -------------------------
5109 p
= mem2chunk (mem
);
5111 if (__builtin_expect (using_malloc_checking
== 1, 0))
5112 return malloc_check_get_size (p
);
5114 if (chunk_is_mmapped (p
))
5116 if (DUMPED_MAIN_ARENA_CHUNK (p
))
5117 result
= chunksize (p
) - SIZE_SZ
;
5119 result
= chunksize (p
) - CHUNK_HDR_SZ
;
5122 result
= memsize (p
);
5131 __malloc_usable_size (void *m
)
5135 result
= musable (m
);
5140 ------------------------------ mallinfo ------------------------------
5141 Accumulate malloc statistics for arena AV into M.
5145 int_mallinfo (mstate av
, struct mallinfo2
*m
)
5150 INTERNAL_SIZE_T avail
;
5151 INTERNAL_SIZE_T fastavail
;
5155 check_malloc_state (av
);
5157 /* Account for top */
5158 avail
= chunksize (av
->top
);
5159 nblocks
= 1; /* top always exists */
5161 /* traverse fastbins */
5165 for (i
= 0; i
< NFASTBINS
; ++i
)
5167 for (p
= fastbin (av
, i
);
5169 p
= REVEAL_PTR (p
->fd
))
5171 if (__glibc_unlikely (misaligned_chunk (p
)))
5172 malloc_printerr ("int_mallinfo(): "
5173 "unaligned fastbin chunk detected");
5175 fastavail
+= chunksize (p
);
5181 /* traverse regular bins */
5182 for (i
= 1; i
< NBINS
; ++i
)
5185 for (p
= last (b
); p
!= b
; p
= p
->bk
)
5188 avail
+= chunksize (p
);
5192 m
->smblks
+= nfastblocks
;
5193 m
->ordblks
+= nblocks
;
5194 m
->fordblks
+= avail
;
5195 m
->uordblks
+= av
->system_mem
- avail
;
5196 m
->arena
+= av
->system_mem
;
5197 m
->fsmblks
+= fastavail
;
5198 if (av
== &main_arena
)
5200 m
->hblks
= mp_
.n_mmaps
;
5201 m
->hblkhd
= mp_
.mmapped_mem
;
5203 m
->keepcost
= chunksize (av
->top
);
5209 __libc_mallinfo2 (void)
5214 if (__malloc_initialized
< 0)
5217 memset (&m
, 0, sizeof (m
));
5218 ar_ptr
= &main_arena
;
5221 __libc_lock_lock (ar_ptr
->mutex
);
5222 int_mallinfo (ar_ptr
, &m
);
5223 __libc_lock_unlock (ar_ptr
->mutex
);
5225 ar_ptr
= ar_ptr
->next
;
5227 while (ar_ptr
!= &main_arena
);
5231 libc_hidden_def (__libc_mallinfo2
)
5234 __libc_mallinfo (void)
5237 struct mallinfo2 m2
= __libc_mallinfo2 ();
5240 m
.ordblks
= m2
.ordblks
;
5241 m
.smblks
= m2
.smblks
;
5243 m
.hblkhd
= m2
.hblkhd
;
5244 m
.usmblks
= m2
.usmblks
;
5245 m
.fsmblks
= m2
.fsmblks
;
5246 m
.uordblks
= m2
.uordblks
;
5247 m
.fordblks
= m2
.fordblks
;
5248 m
.keepcost
= m2
.keepcost
;
5255 ------------------------------ malloc_stats ------------------------------
5259 __malloc_stats (void)
5263 unsigned int in_use_b
= mp_
.mmapped_mem
, system_b
= in_use_b
;
5265 if (__malloc_initialized
< 0)
5267 _IO_flockfile (stderr
);
5268 int old_flags2
= stderr
->_flags2
;
5269 stderr
->_flags2
|= _IO_FLAGS2_NOTCANCEL
;
5270 for (i
= 0, ar_ptr
= &main_arena
;; i
++)
5272 struct mallinfo2 mi
;
5274 memset (&mi
, 0, sizeof (mi
));
5275 __libc_lock_lock (ar_ptr
->mutex
);
5276 int_mallinfo (ar_ptr
, &mi
);
5277 fprintf (stderr
, "Arena %d:\n", i
);
5278 fprintf (stderr
, "system bytes = %10u\n", (unsigned int) mi
.arena
);
5279 fprintf (stderr
, "in use bytes = %10u\n", (unsigned int) mi
.uordblks
);
5280 #if MALLOC_DEBUG > 1
5282 dump_heap (heap_for_ptr (top (ar_ptr
)));
5284 system_b
+= mi
.arena
;
5285 in_use_b
+= mi
.uordblks
;
5286 __libc_lock_unlock (ar_ptr
->mutex
);
5287 ar_ptr
= ar_ptr
->next
;
5288 if (ar_ptr
== &main_arena
)
5291 fprintf (stderr
, "Total (incl. mmap):\n");
5292 fprintf (stderr
, "system bytes = %10u\n", system_b
);
5293 fprintf (stderr
, "in use bytes = %10u\n", in_use_b
);
5294 fprintf (stderr
, "max mmap regions = %10u\n", (unsigned int) mp_
.max_n_mmaps
);
5295 fprintf (stderr
, "max mmap bytes = %10lu\n",
5296 (unsigned long) mp_
.max_mmapped_mem
);
5297 stderr
->_flags2
= old_flags2
;
5298 _IO_funlockfile (stderr
);
5303 ------------------------------ mallopt ------------------------------
5305 static __always_inline
int
5306 do_set_trim_threshold (size_t value
)
5308 LIBC_PROBE (memory_mallopt_trim_threshold
, 3, value
, mp_
.trim_threshold
,
5309 mp_
.no_dyn_threshold
);
5310 mp_
.trim_threshold
= value
;
5311 mp_
.no_dyn_threshold
= 1;
5315 static __always_inline
int
5316 do_set_top_pad (size_t value
)
5318 LIBC_PROBE (memory_mallopt_top_pad
, 3, value
, mp_
.top_pad
,
5319 mp_
.no_dyn_threshold
);
5320 mp_
.top_pad
= value
;
5321 mp_
.no_dyn_threshold
= 1;
5325 static __always_inline
int
5326 do_set_mmap_threshold (size_t value
)
5328 /* Forbid setting the threshold too high. */
5329 if (value
<= HEAP_MAX_SIZE
/ 2)
5331 LIBC_PROBE (memory_mallopt_mmap_threshold
, 3, value
, mp_
.mmap_threshold
,
5332 mp_
.no_dyn_threshold
);
5333 mp_
.mmap_threshold
= value
;
5334 mp_
.no_dyn_threshold
= 1;
5340 static __always_inline
int
5341 do_set_mmaps_max (int32_t value
)
5343 LIBC_PROBE (memory_mallopt_mmap_max
, 3, value
, mp_
.n_mmaps_max
,
5344 mp_
.no_dyn_threshold
);
5345 mp_
.n_mmaps_max
= value
;
5346 mp_
.no_dyn_threshold
= 1;
5350 static __always_inline
int
5351 do_set_mallopt_check (int32_t value
)
5356 static __always_inline
int
5357 do_set_perturb_byte (int32_t value
)
5359 LIBC_PROBE (memory_mallopt_perturb
, 2, value
, perturb_byte
);
5360 perturb_byte
= value
;
5364 static __always_inline
int
5365 do_set_arena_test (size_t value
)
5367 LIBC_PROBE (memory_mallopt_arena_test
, 2, value
, mp_
.arena_test
);
5368 mp_
.arena_test
= value
;
5372 static __always_inline
int
5373 do_set_arena_max (size_t value
)
5375 LIBC_PROBE (memory_mallopt_arena_max
, 2, value
, mp_
.arena_max
);
5376 mp_
.arena_max
= value
;
5381 static __always_inline
int
5382 do_set_tcache_max (size_t value
)
5384 if (value
<= MAX_TCACHE_SIZE
)
5386 LIBC_PROBE (memory_tunable_tcache_max_bytes
, 2, value
, mp_
.tcache_max_bytes
);
5387 mp_
.tcache_max_bytes
= value
;
5388 mp_
.tcache_bins
= csize2tidx (request2size(value
)) + 1;
5394 static __always_inline
int
5395 do_set_tcache_count (size_t value
)
5397 if (value
<= MAX_TCACHE_COUNT
)
5399 LIBC_PROBE (memory_tunable_tcache_count
, 2, value
, mp_
.tcache_count
);
5400 mp_
.tcache_count
= value
;
5406 static __always_inline
int
5407 do_set_tcache_unsorted_limit (size_t value
)
5409 LIBC_PROBE (memory_tunable_tcache_unsorted_limit
, 2, value
, mp_
.tcache_unsorted_limit
);
5410 mp_
.tcache_unsorted_limit
= value
;
5417 do_set_mxfast (size_t value
)
5419 if (value
<= MAX_FAST_SIZE
)
5421 LIBC_PROBE (memory_mallopt_mxfast
, 2, value
, get_max_fast ());
5422 set_max_fast (value
);
5429 __libc_mallopt (int param_number
, int value
)
5431 mstate av
= &main_arena
;
5434 if (__malloc_initialized
< 0)
5436 __libc_lock_lock (av
->mutex
);
5438 LIBC_PROBE (memory_mallopt
, 2, param_number
, value
);
5440 /* We must consolidate main arena before changing max_fast
5441 (see definition of set_max_fast). */
5442 malloc_consolidate (av
);
5444 /* Many of these helper functions take a size_t. We do not worry
5445 about overflow here, because negative int values will wrap to
5446 very large size_t values and the helpers have sufficient range
5447 checking for such conversions. Many of these helpers are also
5448 used by the tunables macros in arena.c. */
5450 switch (param_number
)
5453 res
= do_set_mxfast (value
);
5456 case M_TRIM_THRESHOLD
:
5457 res
= do_set_trim_threshold (value
);
5461 res
= do_set_top_pad (value
);
5464 case M_MMAP_THRESHOLD
:
5465 res
= do_set_mmap_threshold (value
);
5469 res
= do_set_mmaps_max (value
);
5472 case M_CHECK_ACTION
:
5473 res
= do_set_mallopt_check (value
);
5477 res
= do_set_perturb_byte (value
);
5482 res
= do_set_arena_test (value
);
5487 res
= do_set_arena_max (value
);
5490 __libc_lock_unlock (av
->mutex
);
5493 libc_hidden_def (__libc_mallopt
)
5497 -------------------- Alternative MORECORE functions --------------------
5502 General Requirements for MORECORE.
5504 The MORECORE function must have the following properties:
5506 If MORECORE_CONTIGUOUS is false:
5508 * MORECORE must allocate in multiples of pagesize. It will
5509 only be called with arguments that are multiples of pagesize.
5511 * MORECORE(0) must return an address that is at least
5512 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5514 else (i.e. If MORECORE_CONTIGUOUS is true):
5516 * Consecutive calls to MORECORE with positive arguments
5517 return increasing addresses, indicating that space has been
5518 contiguously extended.
5520 * MORECORE need not allocate in multiples of pagesize.
5521 Calls to MORECORE need not have args of multiples of pagesize.
5523 * MORECORE need not page-align.
5527 * MORECORE may allocate more memory than requested. (Or even less,
5528 but this will generally result in a malloc failure.)
5530 * MORECORE must not allocate memory when given argument zero, but
5531 instead return one past the end address of memory from previous
5532 nonzero call. This malloc does NOT call MORECORE(0)
5533 until at least one call with positive arguments is made, so
5534 the initial value returned is not important.
5536 * Even though consecutive calls to MORECORE need not return contiguous
5537 addresses, it must be OK for malloc'ed chunks to span multiple
5538 regions in those cases where they do happen to be contiguous.
5540 * MORECORE need not handle negative arguments -- it may instead
5541 just return MORECORE_FAILURE when given negative arguments.
5542 Negative arguments are always multiples of pagesize. MORECORE
5543 must not misinterpret negative args as large positive unsigned
5544 args. You can suppress all such calls from even occurring by defining
5545 MORECORE_CANNOT_TRIM,
5547 There is some variation across systems about the type of the
5548 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5549 actually be size_t, because sbrk supports negative args, so it is
5550 normally the signed type of the same width as size_t (sometimes
5551 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5552 matter though. Internally, we use "long" as arguments, which should
5553 work across all reasonable possibilities.
5555 Additionally, if MORECORE ever returns failure for a positive
5556 request, then mmap is used as a noncontiguous system allocator. This
5557 is a useful backup strategy for systems with holes in address spaces
5558 -- in this case sbrk cannot contiguously expand the heap, but mmap
5559 may be able to map noncontiguous space.
5561 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5562 a function that always returns MORECORE_FAILURE.
5564 If you are using this malloc with something other than sbrk (or its
5565 emulation) to supply memory regions, you probably want to set
5566 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5567 allocator kindly contributed for pre-OSX macOS. It uses virtually
5568 but not necessarily physically contiguous non-paged memory (locked
5569 in, present and won't get swapped out). You can use it by
5570 uncommenting this section, adding some #includes, and setting up the
5571 appropriate defines above:
5573 *#define MORECORE osMoreCore
5574 *#define MORECORE_CONTIGUOUS 0
5576 There is also a shutdown routine that should somehow be called for
5577 cleanup upon program exit.
5579 *#define MAX_POOL_ENTRIES 100
5580 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5581 static int next_os_pool;
5582 void *our_os_pools[MAX_POOL_ENTRIES];
5584 void *osMoreCore(int size)
5587 static void *sbrk_top = 0;
5591 if (size < MINIMUM_MORECORE_SIZE)
5592 size = MINIMUM_MORECORE_SIZE;
5593 if (CurrentExecutionLevel() == kTaskLevel)
5594 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5597 return (void *) MORECORE_FAILURE;
5599 // save ptrs so they can be freed during cleanup
5600 our_os_pools[next_os_pool] = ptr;
5602 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5603 sbrk_top = (char *) ptr + size;
5608 // we don't currently support shrink behavior
5609 return (void *) MORECORE_FAILURE;
5617 // cleanup any allocated memory pools
5618 // called as last thing before shutting down driver
5620 void osCleanupMem(void)
5624 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5627 PoolDeallocate(*ptr);
5637 extern char **__libc_argv attribute_hidden
;
5640 malloc_printerr (const char *str
)
5642 __libc_message (do_abort
, "%s\n", str
);
5643 __builtin_unreachable ();
5646 /* We need a wrapper function for one of the additions of POSIX. */
5648 __posix_memalign (void **memptr
, size_t alignment
, size_t size
)
5652 /* Test whether the SIZE argument is valid. It must be a power of
5653 two multiple of sizeof (void *). */
5654 if (alignment
% sizeof (void *) != 0
5655 || !powerof2 (alignment
/ sizeof (void *))
5660 void *address
= RETURN_ADDRESS (0);
5661 mem
= _mid_memalign (alignment
, size
, address
);
5671 weak_alias (__posix_memalign
, posix_memalign
)
5675 __malloc_info (int options
, FILE *fp
)
5677 /* For now, at least. */
5682 size_t total_nblocks
= 0;
5683 size_t total_nfastblocks
= 0;
5684 size_t total_avail
= 0;
5685 size_t total_fastavail
= 0;
5686 size_t total_system
= 0;
5687 size_t total_max_system
= 0;
5688 size_t total_aspace
= 0;
5689 size_t total_aspace_mprotect
= 0;
5693 if (__malloc_initialized
< 0)
5696 fputs ("<malloc version=\"1\">\n", fp
);
5698 /* Iterate over all arenas currently in use. */
5699 mstate ar_ptr
= &main_arena
;
5702 fprintf (fp
, "<heap nr=\"%d\">\n<sizes>\n", n
++);
5705 size_t nfastblocks
= 0;
5707 size_t fastavail
= 0;
5714 } sizes
[NFASTBINS
+ NBINS
- 1];
5715 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5717 __libc_lock_lock (ar_ptr
->mutex
);
5719 /* Account for top chunk. The top-most available chunk is
5720 treated specially and is never in any bin. See "initial_top"
5722 avail
= chunksize (ar_ptr
->top
);
5723 nblocks
= 1; /* Top always exists. */
5725 for (size_t i
= 0; i
< NFASTBINS
; ++i
)
5727 mchunkptr p
= fastbin (ar_ptr
, i
);
5730 size_t nthissize
= 0;
5731 size_t thissize
= chunksize (p
);
5735 if (__glibc_unlikely (misaligned_chunk (p
)))
5736 malloc_printerr ("__malloc_info(): "
5737 "unaligned fastbin chunk detected");
5739 p
= REVEAL_PTR (p
->fd
);
5742 fastavail
+= nthissize
* thissize
;
5743 nfastblocks
+= nthissize
;
5744 sizes
[i
].from
= thissize
- (MALLOC_ALIGNMENT
- 1);
5745 sizes
[i
].to
= thissize
;
5746 sizes
[i
].count
= nthissize
;
5749 sizes
[i
].from
= sizes
[i
].to
= sizes
[i
].count
= 0;
5751 sizes
[i
].total
= sizes
[i
].count
* sizes
[i
].to
;
5756 struct malloc_chunk
*r
;
5758 for (size_t i
= 1; i
< NBINS
; ++i
)
5760 bin
= bin_at (ar_ptr
, i
);
5762 sizes
[NFASTBINS
- 1 + i
].from
= ~((size_t) 0);
5763 sizes
[NFASTBINS
- 1 + i
].to
= sizes
[NFASTBINS
- 1 + i
].total
5764 = sizes
[NFASTBINS
- 1 + i
].count
= 0;
5769 size_t r_size
= chunksize_nomask (r
);
5770 ++sizes
[NFASTBINS
- 1 + i
].count
;
5771 sizes
[NFASTBINS
- 1 + i
].total
+= r_size
;
5772 sizes
[NFASTBINS
- 1 + i
].from
5773 = MIN (sizes
[NFASTBINS
- 1 + i
].from
, r_size
);
5774 sizes
[NFASTBINS
- 1 + i
].to
= MAX (sizes
[NFASTBINS
- 1 + i
].to
,
5780 if (sizes
[NFASTBINS
- 1 + i
].count
== 0)
5781 sizes
[NFASTBINS
- 1 + i
].from
= 0;
5782 nblocks
+= sizes
[NFASTBINS
- 1 + i
].count
;
5783 avail
+= sizes
[NFASTBINS
- 1 + i
].total
;
5786 size_t heap_size
= 0;
5787 size_t heap_mprotect_size
= 0;
5788 size_t heap_count
= 0;
5789 if (ar_ptr
!= &main_arena
)
5791 /* Iterate over the arena heaps from back to front. */
5792 heap_info
*heap
= heap_for_ptr (top (ar_ptr
));
5795 heap_size
+= heap
->size
;
5796 heap_mprotect_size
+= heap
->mprotect_size
;
5800 while (heap
!= NULL
);
5803 __libc_lock_unlock (ar_ptr
->mutex
);
5805 total_nfastblocks
+= nfastblocks
;
5806 total_fastavail
+= fastavail
;
5808 total_nblocks
+= nblocks
;
5809 total_avail
+= avail
;
5811 for (size_t i
= 0; i
< nsizes
; ++i
)
5812 if (sizes
[i
].count
!= 0 && i
!= NFASTBINS
)
5814 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5815 sizes
[i
].from
, sizes
[i
].to
, sizes
[i
].total
, sizes
[i
].count
);
5817 if (sizes
[NFASTBINS
].count
!= 0)
5819 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5820 sizes
[NFASTBINS
].from
, sizes
[NFASTBINS
].to
,
5821 sizes
[NFASTBINS
].total
, sizes
[NFASTBINS
].count
);
5823 total_system
+= ar_ptr
->system_mem
;
5824 total_max_system
+= ar_ptr
->max_system_mem
;
5827 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5828 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5829 "<system type=\"current\" size=\"%zu\"/>\n"
5830 "<system type=\"max\" size=\"%zu\"/>\n",
5831 nfastblocks
, fastavail
, nblocks
, avail
,
5832 ar_ptr
->system_mem
, ar_ptr
->max_system_mem
);
5834 if (ar_ptr
!= &main_arena
)
5837 "<aspace type=\"total\" size=\"%zu\"/>\n"
5838 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5839 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5840 heap_size
, heap_mprotect_size
, heap_count
);
5841 total_aspace
+= heap_size
;
5842 total_aspace_mprotect
+= heap_mprotect_size
;
5847 "<aspace type=\"total\" size=\"%zu\"/>\n"
5848 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5849 ar_ptr
->system_mem
, ar_ptr
->system_mem
);
5850 total_aspace
+= ar_ptr
->system_mem
;
5851 total_aspace_mprotect
+= ar_ptr
->system_mem
;
5854 fputs ("</heap>\n", fp
);
5855 ar_ptr
= ar_ptr
->next
;
5857 while (ar_ptr
!= &main_arena
);
5860 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5861 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5862 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
5863 "<system type=\"current\" size=\"%zu\"/>\n"
5864 "<system type=\"max\" size=\"%zu\"/>\n"
5865 "<aspace type=\"total\" size=\"%zu\"/>\n"
5866 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5868 total_nfastblocks
, total_fastavail
, total_nblocks
, total_avail
,
5869 mp_
.n_mmaps
, mp_
.mmapped_mem
,
5870 total_system
, total_max_system
,
5871 total_aspace
, total_aspace_mprotect
);
5875 weak_alias (__malloc_info
, malloc_info
)
5878 strong_alias (__libc_calloc
, __calloc
) weak_alias (__libc_calloc
, calloc
)
5879 strong_alias (__libc_free
, __free
) strong_alias (__libc_free
, free
)
5880 strong_alias (__libc_malloc
, __malloc
) strong_alias (__libc_malloc
, malloc
)
5881 strong_alias (__libc_memalign
, __memalign
)
5882 weak_alias (__libc_memalign
, memalign
)
5883 strong_alias (__libc_realloc
, __realloc
) strong_alias (__libc_realloc
, realloc
)
5884 strong_alias (__libc_valloc
, __valloc
) weak_alias (__libc_valloc
, valloc
)
5885 strong_alias (__libc_pvalloc
, __pvalloc
) weak_alias (__libc_pvalloc
, pvalloc
)
5886 strong_alias (__libc_mallinfo
, __mallinfo
)
5887 weak_alias (__libc_mallinfo
, mallinfo
)
5888 strong_alias (__libc_mallinfo2
, __mallinfo2
)
5889 weak_alias (__libc_mallinfo2
, mallinfo2
)
5890 strong_alias (__libc_mallopt
, __mallopt
) weak_alias (__libc_mallopt
, mallopt
)
5892 weak_alias (__malloc_stats
, malloc_stats
)
5893 weak_alias (__malloc_usable_size
, malloc_usable_size
)
5894 weak_alias (__malloc_trim
, malloc_trim
)
5896 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
5897 compat_symbol (libc
, __libc_free
, cfree
, GLIBC_2_0
);
5900 /* ------------------------------------------------------------
5903 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]