1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2009, 2010, 2011 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
26 There have been substantial changes made after the integration into
27 glibc in all parts of the code. Do not look for much commonality
28 with the ptmalloc2 version.
30 * Version ptmalloc2-20011215
32 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
36 In order to compile this implementation, a Makefile is provided with
37 the ptmalloc2 distribution, which has pre-defined targets for some
38 popular systems (e.g. "make posix" for Posix threads). All that is
39 typically required with regard to compiler flags is the selection of
40 the thread package via defining one out of USE_PTHREADS, USE_THR or
41 USE_SPROC. Check the thread-m.h file for what effects this has.
42 Many/most systems will additionally require USE_TSD_DATA_HACK to be
43 defined, so this is the default for "make posix".
45 * Why use this malloc?
47 This is not the fastest, most space-conserving, most portable, or
48 most tunable malloc ever written. However it is among the fastest
49 while also being among the most space-conserving, portable and tunable.
50 Consistent balance across these factors results in a good general-purpose
51 allocator for malloc-intensive programs.
53 The main properties of the algorithms are:
54 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
55 with ties normally decided via FIFO (i.e. least recently used).
56 * For small (<= 64 bytes by default) requests, it is a caching
57 allocator, that maintains pools of quickly recycled chunks.
58 * In between, and for combinations of large and small requests, it does
59 the best it can trying to meet both goals at once.
60 * For very large requests (>= 128KB by default), it relies on system
61 memory mapping facilities, if supported.
63 For a longer but slightly out of date high-level description, see
64 http://gee.cs.oswego.edu/dl/html/malloc.html
66 You may already by default be using a C library containing a malloc
67 that is based on some version of this malloc (for example in
68 linux). You might still want to use the one in this file in order to
69 customize settings or to avoid overheads associated with library versions.
72 * Contents, described in more detail in "description of public routines" below.
74 Standard (ANSI/SVID/...) functions:
76 calloc(size_t n_elements, size_t element_size);
78 realloc(Void_t* p, size_t n);
79 memalign(size_t alignment, size_t n);
82 mallopt(int parameter_number, int parameter_value)
85 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
86 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
89 malloc_trim(size_t pad);
90 malloc_usable_size(Void_t* p);
95 Supported pointer representation: 4 or 8 bytes
96 Supported size_t representation: 4 or 8 bytes
97 Note that size_t is allowed to be 4 bytes even if pointers are 8.
98 You can adjust this by defining INTERNAL_SIZE_T
100 Alignment: 2 * sizeof(size_t) (default)
101 (i.e., 8-byte alignment with 4-byte size_t). This suffices for
102 nearly all current machines and C compilers. However, you can
103 define MALLOC_ALIGNMENT to be wider than this if necessary.
105 Minimum overhead per allocated chunk: 4 or 8 bytes
106 Each malloced chunk has a hidden word of overhead holding size
107 and status information.
109 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
110 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
112 When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
113 ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
114 needed; 4 (8) for a trailing size field and 8 (16) bytes for
115 free list pointers. Thus, the minimum allocatable size is 16/24/32 bytes.
118 Even a request for zero bytes (i.e., malloc(0)) returns a
119 pointer to something of the minimum allocatable size.
121 The maximum overhead wastage (i.e., number of extra bytes
122 allocated than were requested in malloc) is less than or equal
123 to the minimum size, except for requests >= mmap_threshold that
124 are serviced via mmap(), where the worst case wastage is 2 *
125 sizeof(size_t) bytes plus the remainder from a system page (the
126 minimal mmap unit); typically 4096 or 8192 bytes.
128 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
129 8-byte size_t: 2^64 minus about two pages
131 It is assumed that (possibly signed) size_t values suffice to
132 represent chunk sizes. `Possibly signed' is due to the fact
133 that `size_t' may be defined on a system as either a signed or
134 an unsigned type. The ISO C standard says that it must be
135 unsigned, but a few systems are known not to adhere to this.
136 Additionally, even when size_t is unsigned, sbrk (which is by
137 default used to obtain memory from system) accepts signed
138 arguments, and may not be able to handle size_t-wide arguments
139 with negative sign bit. Generally, values that would
140 appear as negative after accounting for overhead and alignment
141 are supported only via mmap(), which does not have this limitation.
144 Requests for sizes outside the allowed range will perform an optional
145 failure action and then return null. (Requests may also
146 fail because a system is out of memory.)
148 Thread-safety: thread-safe unless NO_THREADS is defined
150 Compliance: I believe it is compliant with the 1997 Single Unix Specification
151 Also SVID/XPG, ANSI C, and probably others as well.
153 * Synopsis of compile-time options:
155 People have reported using previous versions of this malloc on all
156 versions of Unix, sometimes by tweaking some of the defines
157 below. It has been tested most extensively on Solaris and
158 Linux. It is also reported to work on WIN32 platforms.
159 People also report using it in stand-alone embedded systems.
161 The implementation is in straight, hand-tuned ANSI C. It is not
162 at all modular. (Sorry!) It uses a lot of macros. To be at all
163 usable, this code should be compiled using an optimizing compiler
164 (for example gcc -O3) that can simplify expressions and control
165 paths. (FAQ: some macros import variables as arguments rather than
166 declare locals because people reported that some debuggers
167 otherwise get confused.)
171 Compilation Environment options:
173 __STD_C derived from C compiler defines
176 USE_MEMCPY 1 if HAVE_MEMCPY is defined
177 HAVE_MMAP defined as 1
179 HAVE_MREMAP 0 unless linux defined
180 USE_ARENAS the same as HAVE_MMAP
181 malloc_getpagesize derived from system #includes, or 4096 if not
182 HAVE_USR_INCLUDE_MALLOC_H NOT defined
183 LACKS_UNISTD_H NOT defined unless WIN32
184 LACKS_SYS_PARAM_H NOT defined unless WIN32
185 LACKS_SYS_MMAN_H NOT defined unless WIN32
187 Changing default word sizes:
189 INTERNAL_SIZE_T size_t
190 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
191 __alignof__ (long double))
193 Configuration and functionality options:
195 USE_DL_PREFIX NOT defined
196 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
197 USE_MALLOC_LOCK NOT defined
198 MALLOC_DEBUG NOT defined
199 REALLOC_ZERO_BYTES_FREES 1
200 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
203 Options for customizing MORECORE:
207 MORECORE_CONTIGUOUS 1
208 MORECORE_CANNOT_TRIM NOT defined
210 MMAP_AS_MORECORE_SIZE (1024 * 1024)
212 Tuning options that are also dynamically changeable via mallopt:
214 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
215 DEFAULT_TRIM_THRESHOLD 128 * 1024
217 DEFAULT_MMAP_THRESHOLD 128 * 1024
218 DEFAULT_MMAP_MAX 65536
220 There are several other #defined constants and macros that you
221 probably don't want to touch unless you are extending or adapting malloc. */
224 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
225 compiler, or a C compiler sufficiently close to ANSI to get away
230 #if defined(__STDC__) || defined(__cplusplus)
239 Void_t* is the pointer type that malloc should say it returns
243 #if (__STD_C || defined(WIN32))
251 #include <stddef.h> /* for size_t */
252 #include <stdlib.h> /* for getenv(), abort() */
254 #include <sys/types.h>
257 #include <malloc-machine.h>
260 #ifdef ATOMIC_FASTBINS
263 #include <stdio-common/_itoa.h>
264 #include <bits/wordsize.h>
265 #include <sys/sysinfo.h>
272 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
274 /* #define LACKS_UNISTD_H */
276 #ifndef LACKS_UNISTD_H
280 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
282 /* #define LACKS_SYS_PARAM_H */
285 #include <stdio.h> /* needed for malloc_stats */
286 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
291 /* For va_arg, va_start, va_end. */
294 /* For writev and struct iovec. */
297 #include <sys/syslog.h>
299 /* For various dynamic linking things. */
306 Because freed chunks may be overwritten with bookkeeping fields, this
307 malloc will often die when freed memory is overwritten by user
308 programs. This can be very effective (albeit in an annoying way)
309 in helping track down dangling pointers.
311 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
312 enabled that will catch more memory errors. You probably won't be
313 able to make much sense of the actual assertion errors, but they
314 should help you locate incorrectly overwritten memory. The checking
315 is fairly extensive, and will slow down execution
316 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
317 will attempt to check every non-mmapped allocated and free chunk in
318 the course of computing the summaries. (By nature, mmapped regions
319 cannot be checked very much automatically.)
321 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
322 this code. The assertions in the check routines spell out in more
323 detail the assumptions and invariants underlying the algorithms.
325 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
326 checking that all accesses to malloced memory stay within their
327 bounds. However, there are several add-ons and adaptations of this
328 or other mallocs available that do this.
332 # define assert(expr) ((void) 0)
334 # define assert(expr) \
337 : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))
339 extern const char *__progname;
342 __malloc_assert (const char *assertion, const char *file, unsigned int line,
343                  const char *function)
345   (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
346                      __progname, __progname[0] ? ": " : "",
348                      function ? function : "", function ? ": " : "",
357 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
360 The default version is the same as size_t.
362 While not strictly necessary, it is best to define this as an
363 unsigned type, even if size_t is a signed type. This may avoid some
364 artificial size limitations on some systems.
366 On a 64-bit machine, you may be able to reduce malloc overhead by
367 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
368 expense of not being able to handle more than 2^32 of malloced
369 space. If this limitation is acceptable, you are encouraged to set
370 this unless you are on a platform requiring 16-byte alignments. In
371 this case the alignment requirements turn out to negate any
372 potential advantages of decreasing size_t word size.
374 Implementors: Beware of the possible combinations of:
375 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
376 and might be the same width as int or as long
377 - size_t might have different width and signedness as INTERNAL_SIZE_T
378 - int and long might be 32 or 64 bits, and might be the same width
379 To deal with this, most comparisons and difference computations
380 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
381 aware of the fact that casting an unsigned int to a wider long does
382 not sign-extend. (This also makes checking for negative numbers
383 awkward.) Some of these casts result in harmless compiler warnings
387 #ifndef INTERNAL_SIZE_T
388 #define INTERNAL_SIZE_T size_t
391 /* The corresponding word size */
392 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
396 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
397 It must be a power of two at least 2 * SIZE_SZ, even on machines
398 for which smaller alignments would suffice. It may be defined as
399 larger than this though. Note however that code and data structures
400 are optimized for the case of 8-byte alignment.
404 #ifndef MALLOC_ALIGNMENT
405 /* XXX This is the correct definition. It differs from 2*SIZE_SZ only on
406 powerpc32. For the time being, changing this is causing more
407 compatibility problems due to malloc_get_state/malloc_set_state than
408 will returning blocks not adequately aligned for long double objects
409 under -mlong-double-128.
411 #define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
412 ? __alignof__ (long double) : 2 * SIZE_SZ)
414 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
417 /* The corresponding bit mask value */
418 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
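/* Illustration only: a request of `req' bytes is padded by SIZE_SZ and
   rounded up to a multiple of MALLOC_ALIGNMENT, roughly
       (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK
   subject to a minimum chunk size; the request2size macro later in this
   file performs the exact computation.  */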
423 REALLOC_ZERO_BYTES_FREES should be set if a call to
424 realloc with zero bytes should be the same as a call to free.
425 This is required by the C standard. Otherwise, since this malloc
426 returns a unique pointer for malloc(0), so does realloc(p, 0).
429 #ifndef REALLOC_ZERO_BYTES_FREES
430 #define REALLOC_ZERO_BYTES_FREES 1
434 TRIM_FASTBINS controls whether free() of a very small chunk can
435 immediately lead to trimming. Setting to true (1) can reduce memory
436 footprint, but will almost always slow down programs that use a lot
439 Define this only if you are willing to give up some speed to more
440 aggressively reduce system-level memory footprint when releasing
441 memory in programs that use many small chunks. You can get
442 essentially the same effect by setting MXFAST to 0, but this can
443 lead to even greater slowdowns in programs using many small chunks.
444 TRIM_FASTBINS is an in-between compile-time option, that disables
445 only those chunks bordering topmost memory from being placed in
449 #ifndef TRIM_FASTBINS
450 #define TRIM_FASTBINS 0
455 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
456 This is necessary when you only want to use this malloc in one part
457 of a program, using your regular system malloc elsewhere.
460 /* #define USE_DL_PREFIX */
464 Two-phase name translation.
465 All of the actual routines are given mangled names.
466 When wrappers are used, they become the public callable versions.
467 When DL_PREFIX is used, the callable names are prefixed.
471 #define public_cALLOc dlcalloc
472 #define public_fREe dlfree
473 #define public_cFREe dlcfree
474 #define public_mALLOc dlmalloc
475 #define public_mEMALIGn dlmemalign
476 #define public_rEALLOc dlrealloc
477 #define public_vALLOc dlvalloc
478 #define public_pVALLOc dlpvalloc
479 #define public_mALLINFo dlmallinfo
480 #define public_mALLOPt dlmallopt
481 #define public_mTRIm dlmalloc_trim
482 #define public_mSTATs dlmalloc_stats
483 #define public_mUSABLe dlmalloc_usable_size
484 #define public_iCALLOc dlindependent_calloc
485 #define public_iCOMALLOc dlindependent_comalloc
486 #define public_gET_STATe dlget_state
487 #define public_sET_STATe dlset_state
488 #else /* USE_DL_PREFIX */
491 /* Special defines for the GNU C library. */
492 #define public_cALLOc __libc_calloc
493 #define public_fREe __libc_free
494 #define public_cFREe __libc_cfree
495 #define public_mALLOc __libc_malloc
496 #define public_mEMALIGn __libc_memalign
497 #define public_rEALLOc __libc_realloc
498 #define public_vALLOc __libc_valloc
499 #define public_pVALLOc __libc_pvalloc
500 #define public_mALLINFo __libc_mallinfo
501 #define public_mALLOPt __libc_mallopt
502 #define public_mTRIm __malloc_trim
503 #define public_mSTATs __malloc_stats
504 #define public_mUSABLe __malloc_usable_size
505 #define public_iCALLOc __libc_independent_calloc
506 #define public_iCOMALLOc __libc_independent_comalloc
507 #define public_gET_STATe __malloc_get_state
508 #define public_sET_STATe __malloc_set_state
509 #define malloc_getpagesize __getpagesize()
512 #define munmap __munmap
513 #define mremap __mremap
514 #define mprotect __mprotect
515 #define MORECORE (*__morecore)
516 #define MORECORE_FAILURE 0
518 Void_t * __default_morecore (ptrdiff_t);
519 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
522 #define public_cALLOc calloc
523 #define public_fREe free
524 #define public_cFREe cfree
525 #define public_mALLOc malloc
526 #define public_mEMALIGn memalign
527 #define public_rEALLOc realloc
528 #define public_vALLOc valloc
529 #define public_pVALLOc pvalloc
530 #define public_mALLINFo mallinfo
531 #define public_mALLOPt mallopt
532 #define public_mTRIm malloc_trim
533 #define public_mSTATs malloc_stats
534 #define public_mUSABLe malloc_usable_size
535 #define public_iCALLOc independent_calloc
536 #define public_iCOMALLOc independent_comalloc
537 #define public_gET_STATe malloc_get_state
538 #define public_sET_STATe malloc_set_state
540 #endif /* USE_DL_PREFIX */
543 #define __builtin_expect(expr, val) (expr)
545 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
549 HAVE_MEMCPY should be defined if you are not otherwise using
550 ANSI STD C, but still have memcpy and memset in your C library
551 and want to use them in calloc and realloc. Otherwise simple
552 macro versions are defined below.
554 USE_MEMCPY should be defined as 1 if you actually want to
555 have memset and memcpy called. People report that the macro
556 versions are faster than libc versions on some systems.
558 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
559 (of <= 36 bytes) are manually unrolled in realloc and calloc.
573 #if (__STD_C || defined(HAVE_MEMCPY))
579 /* On Win32 memset and memcpy are already declared in windows.h */
582 void* memset(void*, int, size_t);
583 void* memcpy(void*, const void*, size_t);
593 /* Force a value to be in a register and stop the compiler referring
594 to the source (mostly memory location) again. */
595 #define force_reg(val) \
596 ({ __typeof (val) _v; asm ("" : "=r" (_v) : "0" (val)); _v; })
600 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
601 malloc fails to be able to return memory, either because memory is
602 exhausted or because of illegal arguments.
604 By default, sets errno if running on STD_C platform, else does nothing.
607 #ifndef MALLOC_FAILURE_ACTION
609 #define MALLOC_FAILURE_ACTION \
613 #define MALLOC_FAILURE_ACTION
618 MORECORE-related declarations. By default, rely on sbrk
622 #ifdef LACKS_UNISTD_H
623 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
625 extern Void_t*     sbrk(ptrdiff_t);
627 extern Void_t*     sbrk();
633 MORECORE is the name of the routine to call to obtain more memory
634 from the system. See below for general guidance on writing
635 alternative MORECORE functions, as well as a version for WIN32 and a
636 sample version for pre-OSX macos.
640 #define MORECORE sbrk
644 MORECORE_FAILURE is the value returned upon failure of MORECORE
645 as well as mmap. Since it cannot be an otherwise valid memory address,
646 and must reflect values of standard sys calls, you probably ought not
650 #ifndef MORECORE_FAILURE
651 #define MORECORE_FAILURE (-1)
655 If MORECORE_CONTIGUOUS is true, take advantage of fact that
656 consecutive calls to MORECORE with positive arguments always return
657 contiguous increasing addresses. This is true of unix sbrk. Even
658 if not defined, when regions happen to be contiguous, malloc will
659 permit allocations spanning regions obtained from different
660 calls. But defining this when applicable enables some stronger
661 consistency checks and space efficiencies.
664 #ifndef MORECORE_CONTIGUOUS
665 #define MORECORE_CONTIGUOUS 1
669 Define MORECORE_CANNOT_TRIM if your version of MORECORE
670 cannot release space back to the system when given negative
671 arguments. This is generally necessary only if you are using
672 a hand-crafted MORECORE function that cannot handle negative arguments.
675 /* #define MORECORE_CANNOT_TRIM */
677 /* MORECORE_CLEARS (default 1)
678 The degree to which the routine mapped to MORECORE zeroes out
679 memory: never (0), only for newly allocated space (1) or always
680 (2). The distinction between (1) and (2) is necessary because on
681 some systems, if the application first decrements and then
682 increments the break value, the contents of the reallocated space
686 #ifndef MORECORE_CLEARS
687 #define MORECORE_CLEARS 1
692 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
693 allocate very large blocks. These will be returned to the
694 operating system immediately after a free(). Also, if mmap
695 is available, it is used as a backup strategy in cases where
696 MORECORE fails to provide space from system.
698 This malloc is best tuned to work with mmap for large requests.
699 If you do not have mmap, operations involving very large chunks (1MB
700 or so) may be slower than you'd like.
707 Standard unix mmap using /dev/zero clears memory so calloc doesn't
712 #define MMAP_CLEARS 1
717 #define MMAP_CLEARS 0
723 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
724 sbrk fails, and mmap is used as a backup (which is done only if
725 HAVE_MMAP). The value must be a multiple of page size. This
726 backup strategy generally applies only when systems have "holes" in
727 address space, so sbrk cannot perform contiguous expansion, but
728 there is still space available on system. On systems for which
729 this is known to be useful (i.e. most linux kernels), this occurs
730 only when programs allocate huge amounts of memory. Between this,
731 and the fact that mmap regions tend to be limited, the size should
732 be large, to avoid too many mmap calls and thus avoid running out
736 #ifndef MMAP_AS_MORECORE_SIZE
737 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
741 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
742 large blocks. This is currently only possible on Linux with
743 kernel versions newer than 1.3.77.
748 #define HAVE_MREMAP 1
750 #define HAVE_MREMAP 0
753 #endif /* HAVE_MMAP */
755 /* Define USE_ARENAS to enable support for multiple `arenas'. These
756 are allocated using mmap(), are necessary for threads and
757 occasionally useful to overcome address space limitations affecting
761 #define USE_ARENAS HAVE_MMAP
766 The system page size. To the extent possible, this malloc manages
767 memory from the system in page-size units. Note that this value is
768 cached during initialization into a field of malloc_state. So even
769 if malloc_getpagesize is a function, it is only called once.
771 The following mechanics for getpagesize were adapted from bsd/gnu
772 getpagesize.h. If none of the system-probes here apply, a value of
773 4096 is used, which should be OK: If they don't apply, then using
774 the actual value probably doesn't impact performance.
778 #ifndef malloc_getpagesize
780 #ifndef LACKS_UNISTD_H
784 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
785 # ifndef _SC_PAGE_SIZE
786 # define _SC_PAGE_SIZE _SC_PAGESIZE
790 # ifdef _SC_PAGE_SIZE
791 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
793 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
794 extern size_t getpagesize();
795 # define malloc_getpagesize getpagesize()
797 # ifdef WIN32 /* use supplied emulation of getpagesize */
798 # define malloc_getpagesize getpagesize()
800 # ifndef LACKS_SYS_PARAM_H
801 # include <sys/param.h>
803 # ifdef EXEC_PAGESIZE
804 # define malloc_getpagesize EXEC_PAGESIZE
808 # define malloc_getpagesize NBPG
810 # define malloc_getpagesize (NBPG * CLSIZE)
814 # define malloc_getpagesize NBPC
817 # define malloc_getpagesize PAGESIZE
818 # else /* just guess */
819 # define malloc_getpagesize (4096)
830 This version of malloc supports the standard SVID/XPG mallinfo
831 routine that returns a struct containing usage properties and
832 statistics. It should work on any SVID/XPG compliant system that has
833 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
834 install such a thing yourself, cut out the preliminary declarations
835 as described above and below and save them in a malloc.h file. But
836 there's no compelling reason to bother to do this.)
838 The main declaration needed is the mallinfo struct that is returned
839 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
840 bunch of fields that are not even meaningful in this version of
841 malloc. These fields are instead filled by mallinfo() with
842 other numbers that might be of interest.
844 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
845 /usr/include/malloc.h file that includes a declaration of struct
846 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
847 version is declared below. These must be precisely the same for
848 mallinfo() to work. The original SVID version of this struct,
849 defined on most systems with mallinfo, declares all fields as
850 ints. But some others define as unsigned long. If your system
851 defines the fields using a type of different width than listed here,
852 you must #include your system version and #define
853 HAVE_USR_INCLUDE_MALLOC_H.
856 /* #define HAVE_USR_INCLUDE_MALLOC_H */
858 #ifdef HAVE_USR_INCLUDE_MALLOC_H
859 #include "/usr/include/malloc.h"
863 /* ---------- description of public routines ------------ */
867 Returns a pointer to a newly allocated chunk of at least n bytes, or null
868 if no space is available. Additionally, on failure, errno is
869 set to ENOMEM on ANSI C systems.
871 If n is zero, malloc returns a minimum-sized chunk. (The minimum
872 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
873 systems.) On most systems, size_t is an unsigned type, so calls
874 with negative arguments are interpreted as requests for huge amounts
875 of space, which will often fail. The maximum supported value of n
876 differs across systems, but is in all cases less than the maximum
877 representable value of a size_t.
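  As a minimal sketch of checking the failure case described above:

    Void_t* p = malloc(n);          /* n is any request size */
    if (p == 0) {
      /* allocation failed; on ANSI C systems errno is now ENOMEM */
    }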
880 Void_t*  public_mALLOc(size_t);
882 Void_t*  public_mALLOc();
884 #ifdef libc_hidden_proto
885 libc_hidden_proto (public_mALLOc)
890 Releases the chunk of memory pointed to by p, that had been previously
891 allocated using malloc or a related routine such as realloc.
892 It has no effect if p is null. It can have arbitrary (i.e., bad!)
893 effects if p has already been freed.
895 Unless disabled (using mallopt), freeing very large spaces will
896 when possible, automatically trigger operations that give
897 back unused memory to the system, thus reducing program footprint.
900 void     public_fREe(Void_t*);
904 #ifdef libc_hidden_proto
905 libc_hidden_proto (public_fREe)
909 calloc(size_t n_elements, size_t element_size);
910 Returns a pointer to n_elements * element_size bytes, with all locations
914 Void_t*  public_cALLOc(size_t, size_t);
916 Void_t*  public_cALLOc();
920 realloc(Void_t* p, size_t n)
921 Returns a pointer to a chunk of size n that contains the same data
922 as does chunk p up to the minimum of (n, p's size) bytes, or null
923 if no space is available.
925 The returned pointer may or may not be the same as p. The algorithm
926 prefers extending p when possible, otherwise it employs the
927 equivalent of a malloc-copy-free sequence.
929 If p is null, realloc is equivalent to malloc.
931 If space is not available, realloc returns null, errno is set (if on
932 ANSI) and p is NOT freed.
934 If n is for fewer bytes than already held by p, the newly unused
935 space is lopped off and freed if possible. Unless the #define
936 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
937 zero (re)allocates a minimum-sized chunk.
939 Large chunks that were internally obtained via mmap will always
940 be reallocated using malloc-copy-free sequences unless
941 the system supports MREMAP (currently only linux).
943 The old unix realloc convention of allowing the last-free'd chunk
944 to be used as an argument to realloc is not supported.
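  Since p is not freed on failure, a sketch of the usual safe idiom is:

    Void_t* q = realloc(p, n);
    if (q == 0) {
      /* p is still valid here and must eventually be freed by the caller */
    }
    else
      p = q;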
947 Void_t*  public_rEALLOc(Void_t*, size_t);
949 Void_t*  public_rEALLOc();
951 #ifdef libc_hidden_proto
952 libc_hidden_proto (public_rEALLOc)
956 memalign(size_t alignment, size_t n);
957 Returns a pointer to a newly allocated chunk of n bytes, aligned
958 in accord with the alignment argument.
960 The alignment argument should be a power of two. If the argument is
961 not a power of two, the nearest greater power is used.
962 8-byte alignment is guaranteed by normal malloc calls, so don't
963 bother calling memalign with an argument of 8 or less.
965 Overreliance on memalign is a sure way to fragment space.
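  A minimal usage sketch (the 64 below is just an illustrative alignment):

    Void_t* p = memalign(64, 1000);   /* 1000 usable bytes, 64-byte aligned */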
968 Void_t*  public_mEMALIGn(size_t, size_t);
970 Void_t*  public_mEMALIGn();
972 #ifdef libc_hidden_proto
973 libc_hidden_proto (public_mEMALIGn)
978 Equivalent to memalign(pagesize, n), where pagesize is the page
979 size of the system. If the pagesize is unknown, 4096 is used.
982 Void_t*  public_vALLOc(size_t);
984 Void_t*  public_vALLOc();
990 mallopt(int parameter_number, int parameter_value)
991 Sets tunable parameters. The format is to provide a
992 (parameter-number, parameter-value) pair. mallopt then sets the
993 corresponding parameter to the argument value if it can (i.e., so
994 long as the value is meaningful), and returns 1 if successful else
995 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
996 normally defined in malloc.h. Only one of these (M_MXFAST) is used
997 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
998 so setting them has no effect. But this malloc also supports four
999 other options in mallopt. See below for details. Briefly, supported
1000 parameters are as follows (listed defaults are for "typical"
1003 Symbol param # default allowed param values
1004 M_MXFAST 1 64 0-80 (0 disables fastbins)
1005 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
1007 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
1008 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
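  For example, a sketch of typical calls (a return value of 1 means the
  parameter was accepted, 0 that it was rejected):

    mallopt(M_MXFAST, 0);                 /* disable fastbins              */
    mallopt(M_TRIM_THRESHOLD, 256*1024);  /* trim less aggressively        */
    mallopt(M_MMAP_MAX, 0);               /* never service requests via mmap */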
1011 int public_mALLOPt(int, int);
1013 int public_mALLOPt();
1019 Returns (by copy) a struct containing various summary statistics:
1021 arena: current total non-mmapped bytes allocated from system
1022 ordblks: the number of free chunks
1023 smblks: the number of fastbin blocks (i.e., small chunks that
1024 have been freed but not yet reused or consolidated)
1025 hblks: current number of mmapped regions
1026 hblkhd: total bytes held in mmapped regions
1027 usmblks: the maximum total allocated space. This will be greater
1028 than current total if trimming has occurred.
1029 fsmblks: total bytes held in fastbin blocks
1030 uordblks: current total allocated space (normal or mmapped)
1031 fordblks: total free space
1032 keepcost: the maximum number of bytes that could ideally be released
1033 back to system via malloc_trim. ("ideally" means that
1034 it ignores page restrictions etc.)
1036 Because these fields are ints, but internal bookkeeping may
1037 be kept as longs, the reported values may wrap around zero and thus be inaccurate.
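  As a sketch using only the fields listed above:

    struct mallinfo mi = mallinfo();
    printf("allocated: %d  free: %d  mmapped: %d  trimmable: %d\n",
           mi.uordblks, mi.fordblks, mi.hblkhd, mi.keepcost);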
1041 struct mallinfo public_mALLINFo(void);
1043 struct mallinfo public_mALLINFo();
1048 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1050 independent_calloc is similar to calloc, but instead of returning a
1051 single cleared space, it returns an array of pointers to n_elements
1052 independent elements that can hold contents of size elem_size, each
1053 of which starts out cleared, and can be independently freed,
1054 realloc'ed etc. The elements are guaranteed to be adjacently
1055 allocated (this is not guaranteed to occur with multiple callocs or
1056 mallocs), which may also improve cache locality in some applications.
1059 The "chunks" argument is optional (i.e., may be null, which is
1060 probably the most typical usage). If it is null, the returned array
1061 is itself dynamically allocated and should also be freed when it is
1062 no longer needed. Otherwise, the chunks array must be of at least
1063 n_elements in length. It is filled in with the pointers to the
1066 In either case, independent_calloc returns this pointer array, or
1067 null if the allocation failed. If n_elements is zero and "chunks"
1068 is null, it returns a chunk representing an array with zero elements
1069 (which should be freed if not wanted).
1071 Each element must be individually freed when it is no longer
1072 needed. If you'd like to instead be able to free all at once, you
1073 should instead use regular calloc and assign pointers into this
1074 space to represent elements. (In this case though, you cannot
1075 independently free elements.)
1077 independent_calloc simplifies and speeds up implementations of many
1078 kinds of pools. It may also be useful when constructing large data
1079 structures that initially have a fixed number of fixed-sized nodes,
1080 but the number is not known at compile time, and some of the nodes
1081 may later need to be freed. For example:
1083 struct Node { int item; struct Node* next; };
1085 struct Node* build_list() {
1087 int n = read_number_of_nodes_needed();
1088 if (n <= 0) return 0;
1089 pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
1090 if (pool == 0) die();
1091 // organize into a linked list...
1092 struct Node* first = pool[0];
1093 for (int i = 0; i < n-1; ++i)
1094 pool[i]->next = pool[i+1];
1095 free(pool); // Can now free the array (or not, if it is needed later)
1100 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1102 Void_t** public_iCALLOc();
1106 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1108 independent_comalloc allocates, all at once, a set of n_elements
1109 chunks with sizes indicated in the "sizes" array. It returns
1110 an array of pointers to these elements, each of which can be
1111 independently freed, realloc'ed etc. The elements are guaranteed to
1112 be adjacently allocated (this is not guaranteed to occur with
1113 multiple callocs or mallocs), which may also improve cache locality
1114 in some applications.
1116 The "chunks" argument is optional (i.e., may be null). If it is null
1117 the returned array is itself dynamically allocated and should also
1118 be freed when it is no longer needed. Otherwise, the chunks array
1119 must be of at least n_elements in length. It is filled in with the
1120 pointers to the chunks.
1122 In either case, independent_comalloc returns this pointer array, or
1123 null if the allocation failed. If n_elements is zero and chunks is
1124 null, it returns a chunk representing an array with zero elements
1125 (which should be freed if not wanted).
1127 Each element must be individually freed when it is no longer
1128 needed. If you'd like to instead be able to free all at once, you
1129 should instead use a single regular malloc, and assign pointers at
1130 particular offsets in the aggregate space. (In this case though, you
1131 cannot independently free elements.)
1133 independent_comalloc differs from independent_calloc in that each
1134 element may have a different size, and also that it does not
1135 automatically clear elements.
1137 independent_comalloc can be used to speed up allocation in cases
1138 where several structs or objects must always be allocated at the
1139 same time. For example:
1144 void send_message(char* msg) {
1145 int msglen = strlen(msg);
1146 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1148 if (independent_comalloc(3, sizes, chunks) == 0)
1150 struct Head* head = (struct Head*)(chunks[0]);
1151 char* body = (char*)(chunks[1]);
1152 struct Foot* foot = (struct Foot*)(chunks[2]);
1156 In general though, independent_comalloc is worth using only for
1157 larger values of n_elements. For small values, you probably won't
1158 detect enough difference from series of malloc calls to bother.
1160 Overuse of independent_comalloc can increase overall memory usage,
1161 since it cannot reuse existing noncontiguous small chunks that
1162 might be available for some of the elements.
1165 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1167 Void_t** public_iCOMALLOc();
1175 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1176 round up n to nearest pagesize.
1179 Void_t*  public_pVALLOc(size_t);
1181 Void_t*  public_pVALLOc();
1186 Equivalent to free(p).
1188 cfree is needed/defined on some systems that pair it with calloc,
1189 for odd historical reasons (such as: cfree is used in example
1190 code in the first edition of K&R).
1193 void     public_cFREe(Void_t*);
1195 void     public_cFREe();
1199 malloc_trim(size_t pad);
1201 If possible, gives memory back to the system (via negative
1202 arguments to sbrk) if there is unused memory at the `high' end of
1203 the malloc pool. You can call this after freeing large blocks of
1204 memory to potentially reduce the system-level memory requirements
1205 of a program. However, it cannot guarantee to reduce memory. Under
1206 some allocation patterns, some large free blocks of memory will be
1207 locked between two used chunks, so they cannot be given back to
1210 The `pad' argument to malloc_trim represents the amount of free
1211 trailing space to leave untrimmed. If this argument is zero,
1212 only the minimum amount of memory to maintain internal data
1213 structures will be left (one page or less). Non-zero arguments
1214 can be supplied to maintain enough trailing space to service
1215 future expected allocations without having to re-obtain memory
1218 Malloc_trim returns 1 if it actually released any memory, else 0.
1219 On systems that do not support "negative sbrks", it will always return 0.
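  For example, after freeing many large blocks a program might attempt:

    if (malloc_trim(0))
      { /* some memory was actually returned to the system */ }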
1223 int public_mTRIm(size_t);
1229 malloc_usable_size(Void_t* p);
1231 Returns the number of bytes you can actually use in
1232 an allocated chunk, which may be more than you requested (although
1233 often not) due to alignment and minimum size constraints.
1234 You can use this many bytes without worrying about
1235 overwriting other allocated objects. This is not a particularly great
1236 programming practice. malloc_usable_size can be more useful in
1237 debugging and assertions, for example:
1240 assert(malloc_usable_size(p) >= 256);
1244 size_t   public_mUSABLe(Void_t*);
1246 size_t   public_mUSABLe();
1251 Prints on stderr the amount of space obtained from the system (both
1252 via sbrk and mmap), the maximum amount (which may be more than
1253 current if malloc_trim and/or munmap got called), and the current
1254 number of bytes allocated via malloc (or realloc, etc) but not yet
1255 freed. Note that this is the number of bytes allocated, not the
1256 number requested. It will be larger than the number requested
1257 because of alignment and bookkeeping overhead. Because it includes
1258 alignment wastage as being in use, this figure may be greater than
1259 zero even when no user-level chunks are allocated.
1261 The reported current and maximum system memory can be inaccurate if
1262 a program makes other calls to system memory allocation functions
1263 (normally sbrk) outside of malloc.
1265 malloc_stats prints only the most commonly interesting statistics.
1266 More information can be obtained by calling mallinfo.
1270 void public_mSTATs(void);
1272 void public_mSTATs();
1276 malloc_get_state(void);
1278 Returns the state of all malloc variables in an opaque data structure.
1282 Void_t*  public_gET_STATe(void);
1284 Void_t*  public_gET_STATe();
1288 malloc_set_state(Void_t* state);
1290 Restore the state of all malloc variables from data obtained with malloc_get_state().
1294 int      public_sET_STATe(Void_t*);
1296 int      public_sET_STATe();
1301 posix_memalign(void **memptr, size_t alignment, size_t size);
1303 POSIX wrapper like memalign(), checking for validity of size.
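  A minimal usage sketch (per POSIX, alignment must be a power of two that
  is a multiple of sizeof(void *), and failure is reported as a returned
  error number rather than through errno):

    void *p;
    if (posix_memalign(&p, 64, 1000) != 0)
      p = NULL;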
1305 int __posix_memalign(void **, size_t, size_t);
1308 /* mallopt tuning options */
1311 M_MXFAST is the maximum request size used for "fastbins", special bins
1312 that hold returned chunks without consolidating their spaces. This
1313 enables future requests for chunks of the same size to be handled
1314 very quickly, but can increase fragmentation, and thus increase the
1315 overall memory footprint of a program.
1317 This malloc manages fastbins very conservatively yet still
1318 efficiently, so fragmentation is rarely a problem for values less
1319 than or equal to the default. The maximum supported value of MXFAST
1320 is 80. You wouldn't want it any higher than this anyway. Fastbins
1321 are designed especially for use with many small structs, objects or
1322 strings -- the default handles structs/objects/arrays with sizes up
1323 to 8 4-byte fields, or small strings representing words, tokens,
1324 etc. Using fastbins for larger objects normally worsens
1325 fragmentation without improving speed.
1327 M_MXFAST is set in REQUEST size units. It is internally used in
1328 chunksize units, which adds padding and alignment. You can reduce
1329 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1330 algorithm to be a closer approximation of fifo-best-fit in all cases,
1331 not just for larger requests, but will generally cause it to be slower.
1336 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1341 #ifndef DEFAULT_MXFAST
1342 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
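/* With a 4-byte SIZE_SZ this evaluates to 64, and with an 8-byte SIZE_SZ
   to 128, matching the per-word-size defaults listed in the synopsis
   above.  */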
1347 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1348 to keep before releasing via malloc_trim in free().
1350 Automatic trimming is mainly useful in long-lived programs.
1351 Because trimming via sbrk can be slow on some systems, and can
1352 sometimes be wasteful (in cases where programs immediately
1353 afterward allocate more large chunks) the value should be high
1354 enough so that your overall system performance would improve by
1355 releasing this much memory.
1357 The trim threshold and the mmap control parameters (see below)
1358 can be traded off with one another. Trimming and mmapping are
1359 two different ways of releasing unused memory back to the
1360 system. Between these two, it is often possible to keep
1361 system-level demands of a long-lived program down to a bare
1362 minimum. For example, in one test suite of sessions measuring
1363 the XF86 X server on Linux, using a trim threshold of 128K and a
1364 mmap threshold of 192K led to near-minimal long term resource
1367 If you are using this malloc in a long-lived program, it should
1368 pay to experiment with these values. As a rough guide, you
1369 might set to a value close to the average size of a process
1370 (program) running on your system. Releasing this much memory
1371 would allow such a process to run in memory. Generally, it's
1372 worth it to tune for trimming rather than memory mapping when a
1373 program undergoes phases where several large chunks are
1374 allocated and released in ways that can reuse each other's
1375 storage, perhaps mixed with phases where there are no such
1376 chunks at all. And in well-behaved long-lived programs,
1377 controlling release of large blocks via trimming versus mapping
1380 However, in most programs, these parameters serve mainly as
1381 protection against the system-level effects of carrying around
1382 massive amounts of unneeded memory. Since frequent calls to
1383 sbrk, mmap, and munmap otherwise degrade performance, the default
1384 parameters are set to relatively high values that serve only as
1387 The trim value must be greater than page size to have any useful
1388 effect. To disable trimming completely, you can set to
1391 Trim settings interact with fastbin (MXFAST) settings: Unless
1392 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1393 freeing a chunk with size less than or equal to MXFAST. Trimming is
1394 instead delayed until subsequent freeing of larger chunks. However,
1395 you can still force an attempted trim by calling malloc_trim.
1397 Also, trimming is not generally possible in cases where
1398 the main arena is obtained via mmap.
1400 Note that the trick some people use of mallocing a huge space and
1401 then freeing it at program startup, in an attempt to reserve system
1402 memory, doesn't have the intended effect under automatic trimming,
1403 since that memory will immediately be returned to the system.
1406 #define M_TRIM_THRESHOLD -1
1408 #ifndef DEFAULT_TRIM_THRESHOLD
1409 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1413 M_TOP_PAD is the amount of extra `padding' space to allocate or
1414 retain whenever sbrk is called. It is used in two ways internally:
1416 * When sbrk is called to extend the top of the arena to satisfy
1417 a new malloc request, this much padding is added to the sbrk
1420 * When malloc_trim is called automatically from free(),
1421 it is used as the `pad' argument.
1423 In both cases, the actual amount of padding is rounded
1424 so that the end of the arena is always a system page boundary.
1426 The main reason for using padding is to avoid calling sbrk so
1427 often. Having even a small pad greatly reduces the likelihood
1428 that nearly every malloc request during program start-up (or
1429 after trimming) will invoke sbrk, which needlessly wastes
1432 Automatic rounding-up to page-size units is normally sufficient
1433 to avoid measurable overhead, so the default is 0. However, in
1434 systems where sbrk is relatively slow, it can pay to increase
1435 this value, at the expense of carrying around more memory than
1439 #define M_TOP_PAD -2
1441 #ifndef DEFAULT_TOP_PAD
1442 #define DEFAULT_TOP_PAD (0)
1446 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
1447 adjusted MMAP_THRESHOLD.
1450 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
1451 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
1454 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
1455 /* For 32-bit platforms we cannot increase the maximum mmap
1456 threshold much because it is also the minimum value for the
1457 maximum heap size and its alignment. Going above 512k (i.e., 1M
1458 for new heaps) wastes too much address space. */
1459 # if __WORDSIZE == 32
1460 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
1462 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
1467 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1468 to service a request. Requests of at least this size that cannot
1469 be allocated using already-existing space will be serviced via mmap.
1470 (If enough normal freed space already exists it is used instead.)
1472 Using mmap segregates relatively large chunks of memory so that
1473 they can be individually obtained and released from the host
1474 system. A request serviced through mmap is never reused by any
1475 other request (at least not directly; the system may just so
1476 happen to remap successive requests to the same locations).
1478 Segregating space in this way has the benefits that:
1480 1. Mmapped space can ALWAYS be individually released back
1481 to the system, which helps keep the system level memory
1482 demands of a long-lived program low.
1483 2. Mapped memory can never become `locked' between
1484 other chunks, as can happen with normally allocated chunks, which
1485 means that even trimming via malloc_trim would not release them.
1486 3. On some systems with "holes" in address spaces, mmap can obtain
1487 memory that sbrk cannot.
1489 However, it has the disadvantages that:
1491 1. The space cannot be reclaimed, consolidated, and then
1492 used to service later requests, as happens with normal chunks.
1493 2. It can lead to more wastage because of mmap page alignment
1495 3. It causes malloc performance to be more dependent on host
1496 system memory management support routines which may vary in
1497 implementation quality and may impose arbitrary
1498 limitations. Generally, servicing a request via normal
1499 malloc steps is faster than going through a system's mmap.
1501 The advantages of mmap nearly always outweigh disadvantages for
1502 "large" chunks, but the value of "large" varies across systems. The
1503 default is an empirically derived value that works well in most
1508 The above was written in 2001. Since then the world has changed a lot.
1509 Memory got bigger. Applications got bigger. The virtual address space
1510 layout in 32 bit linux changed.
1512 In the new situation, brk() and mmap space is shared and there are no
1513 artificial limits on brk size imposed by the kernel. What is more,
1514 applications have started using transient allocations larger than the
1515 128Kb as was imagined in 2001.
1517 The price for mmap is also high now; each time glibc mmaps from the
1518 kernel, the kernel is forced to zero out the memory it gives to the
1519 application. Zeroing memory is expensive and eats a lot of cache and
1520 memory bandwidth. This has nothing to do with the efficiency of the
1521 virtual memory system, by doing mmap the kernel just has no choice but
1524 In 2001, the kernel had a maximum size for brk() which was about 800
1525 megabytes on 32 bit x86, at that point brk() would hit the first
1526 mmapped shared libraries and couldn't expand anymore. With current 2.6
1527 kernels, the VA space layout is different and brk() and mmap
1528 both can span the entire heap at will.
1530 Rather than using a static threshold for the brk/mmap tradeoff,
1531 we are now using a simple dynamic one. The goal is still to avoid
1532 fragmentation. The old goals we kept are
1533 1) try to get the long lived large allocations to use mmap()
1534 2) really large allocations should always use mmap()
1535 and we're adding now:
1536 3) transient allocations should use brk() to avoid forcing the kernel
1537 having to zero memory over and over again
1539 The implementation works with a sliding threshold, which is by default
1540 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1541 out at 128Kb as per the 2001 default.
1543 This allows us to satisfy requirement 1) under the assumption that long
1544 lived allocations are made early in the process' lifespan, before it has
1545 started doing dynamic allocations of the same size (which will
1546 increase the threshold).
1548 The upper bound on the threshold satisfies requirement 2)
1550 The threshold goes up in value when the application frees memory that was
1551 allocated with the mmap allocator. The idea is that once the application
1552 starts freeing memory of a certain size, it's highly probable that this is
1553 a size the application uses for transient allocations. This estimator
1554 is there to satisfy the new third requirement.
1558 #define M_MMAP_THRESHOLD -3
1560 #ifndef DEFAULT_MMAP_THRESHOLD
1561 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1565 M_MMAP_MAX is the maximum number of requests to simultaneously
1566 service using mmap. This parameter exists because
1567 some systems have a limited number of internal tables for
1568 use by mmap, and using more than a few of them may degrade
1571 The default is set to a value that serves only as a safeguard.
1572 Setting to 0 disables use of mmap for servicing large requests. If
1573 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1574 to non-zero values in mallopt will fail.
1577 #define M_MMAP_MAX -4
1579 #ifndef DEFAULT_MMAP_MAX
1581 #define DEFAULT_MMAP_MAX (65536)
1583 #define DEFAULT_MMAP_MAX (0)
1588 } /* end of extern "C" */
1594 #define BOUNDED_N(ptr, sz) (ptr)
1596 #ifndef RETURN_ADDRESS
1597 #define RETURN_ADDRESS(X_) (NULL)
1600 /* On some platforms we can compile internal, not exported functions better.
1601 Let the environment provide a macro and define it to be empty if it
1602 is not available. */
1603 #ifndef internal_function
1604 # define internal_function
1607 /* Forward declarations. */
1608 struct malloc_chunk;
1609 typedef struct malloc_chunk* mchunkptr;
1611 /* Internal routines. */
1615 static Void_t*  _int_malloc(mstate, size_t);
1616 #ifdef ATOMIC_FASTBINS
1617 static void     _int_free(mstate, mchunkptr, int);
1619 static void     _int_free(mstate, mchunkptr);
1621 static Void_t*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1623 static Void_t*  _int_memalign(mstate, size_t, size_t);
1624 static Void_t*  _int_valloc(mstate, size_t);
1625 static Void_t*  _int_pvalloc(mstate, size_t);
1626 /*static Void_t* cALLOc(size_t, size_t);*/
1628 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1629 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1631 static int      mTRIm(mstate, size_t);
1632 static size_t   mUSABLe(Void_t*);
1633 static void     mSTATs(void);
1634 static int      mALLOPt(int, int);
1635 static struct mallinfo mALLINFo(mstate);
1636 static void malloc_printerr(int action, const char *str, void *ptr);
1638 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1639 static int internal_function top_check(void);
1640 static void internal_function munmap_chunk(mchunkptr p);
1642 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1645 static Void_t*  malloc_check(size_t sz, const Void_t *caller);
1646 static void     free_check(Void_t* mem, const Void_t *caller);
1647 static Void_t*  realloc_check(Void_t* oldmem, size_t bytes,
1648                               const Void_t *caller);
1649 static Void_t*  memalign_check(size_t alignment, size_t bytes,
1650                                const Void_t *caller);
1652 /* These routines are never needed in this configuration. */
1653 static Void_t*  malloc_atfork(size_t sz, const Void_t *caller);
1654 static void     free_atfork(Void_t* mem, const Void_t *caller);
1659 static Void_t*  _int_malloc();
1660 static void     _int_free();
1661 static Void_t*  _int_realloc();
1662 static Void_t*  _int_memalign();
1663 static Void_t*  _int_valloc();
1664 static Void_t*  _int_pvalloc();
1665 /*static Void_t* cALLOc();*/
1666 static Void_t** _int_icalloc();
1667 static Void_t** _int_icomalloc();
1669 static size_t   mUSABLe();
1670 static void     mSTATs();
1671 static int      mALLOPt();
1672 static struct mallinfo mALLINFo();
1679 /* ------------- Optional versions of memcopy ---------------- */
1685 Note: memcpy is ONLY invoked with non-overlapping regions,
1686 so the (usually slower) memmove is not needed.
1689 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1690 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1692 #else /* !USE_MEMCPY */
1694 /* Use Duff's device for good zeroing/copying performance. */
1696 #define MALLOC_ZERO(charp, nbytes) \
1698 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1699 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1701 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1703 case 0: for(;;) { *mzp++ = 0; \
1704 case 7: *mzp++ = 0; \
1705 case 6: *mzp++ = 0; \
1706 case 5: *mzp++ = 0; \
1707 case 4: *mzp++ = 0; \
1708 case 3: *mzp++ = 0; \
1709 case 2: *mzp++ = 0; \
1710 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1714 #define MALLOC_COPY(dest,src,nbytes) \
1716 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1717 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1718 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1720 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1722 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1723 case 7: *mcdst++ = *mcsrc++; \
1724 case 6: *mcdst++ = *mcsrc++; \
1725 case 5: *mcdst++ = *mcsrc++; \
1726 case 4: *mcdst++ = *mcsrc++; \
1727 case 3: *mcdst++ = *mcsrc++; \
1728 case 2: *mcdst++ = *mcsrc++; \
1729 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
/* ------------------ MMAP support ------------------ */

#if HAVE_MMAP

#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h>
#endif

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif
#if !defined(MAP_FAILED)
# define MAP_FAILED ((char*)-1)
#endif

#ifndef MAP_NORESERVE
# ifdef MAP_AUTORESRV
#  define MAP_NORESERVE MAP_AUTORESRV
# else
#  define MAP_NORESERVE 0
# endif
#endif

/*
  Nearly all versions of mmap support MAP_ANONYMOUS,
  so the following is unlikely to be needed, but is
  supplied just in case.
*/

#ifndef MAP_ANONYMOUS

static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */

#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif

#endif /* HAVE_MMAP */
/*
  ----------------------- Chunk representations -----------------------
*/


/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base. See explanation below.
*/

struct malloc_chunk {

  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};


/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                       |M|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .             (malloc_usable_size() bytes)                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory. If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. This makes it easier to
    deal with alignments etc but can be very confusing when trying
    to extend or adapt this code.

    The two exceptions to all this are

     1. The special chunk `top' doesn't bother using the
        trailing size field since there is no next contiguous chunk
        that would have to index off it. After initialization, `top'
        is forced to always exist.  If it would become less than
        MINSIZE bytes long, it is replenished.

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit M (IS_MMAPPED) set in their size fields.  Because they are
        allocated one-by-one, each must contain its own trailing size field.

*/
/*
  ---------- Size and alignment checks and conversions ----------
*/

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))

/* The smallest size we can malloc is an aligned minimal chunk */

#define MINSIZE  \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */

#define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)

#define misaligned_chunk(p) \
  ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
   & MALLOC_ALIGN_MASK)

/*
   Check if a request is so large that it would wrap around zero when
   padded and aligned. To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
*/

#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long)(req) >=                                        \
   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */

#define request2size(req)                                         \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/*  Same, except also perform argument check */

#define checked_request2size(req, sz)                             \
  if (REQUEST_OUT_OF_RANGE(req)) {                                \
    MALLOC_FAILURE_ACTION;                                        \
    return 0;                                                     \
  }                                                               \
  (sz) = request2size(req);
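/*
  A worked example of the conversion above (illustrative only; the figures
  assume a typical 64-bit build with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16,
  so MALLOC_ALIGN_MASK == 15 and MINSIZE == 32):

    request2size(100) == (100 + 8 + 15) & ~15 == 112
    request2size(0)   == MINSIZE             == 32   (padded size below MINSIZE)

  That is, a 100-byte request consumes a 112-byte chunk: the 8-byte size
  word plus alignment padding, with the prev_size word of the next chunk
  usable as the tail of the user data while this chunk is in use.
*/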
/*
  --------------- Physical chunk operations ---------------
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* extract inuse bit of previous chunk */
#define prev_inuse(p)       ((p)->size & PREV_INUSE)


/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)


/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
   from a non-main arena.  This is only set immediately before handing
   the chunk to the user, if necessary.  */
#define NON_MAIN_ARENA 0x4

/* check for chunk from non-main arena */
#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)


/*
  Bits to mask off when extracting size

  Note: IS_MMAPPED is intentionally not masked off from size field in
  macros for which mmapped chunks should never be seen. This should
  cause helpful core dumps to occur if it is tried by accident by
  people extending or adapting this malloc.
*/
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)

/* Get size, ignoring use bits */
#define chunksize(p)         ((p)->size & ~(SIZE_BITS))


/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))

/* Ptr to previous physical malloc_chunk */
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/* extract p's inuse bit */
#define inuse(p)\
  ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)

/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p)\
  ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE

#define clear_inuse(p)\
  ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)


/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s)\
  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))


/* Set size at head, without disturbing its use bit */
#define set_head_size(p, s)  ((p)->size = (((p)->size & SIZE_BITS) | (s)))

/* Set size/use field */
#define set_head(p, s)       ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */
#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
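/*
  Illustrative sketch (not part of the allocator): how the macros above are
  typically combined to navigate boundary tags.  The next physical chunk is
  always reachable from the size field; the previous one is reachable only
  when prev_inuse(p) is clear.  The name example_neighbor_walk is
  hypothetical and the code is excluded from the build.
*/
#if 0
static mchunkptr example_neighbor_walk(mchunkptr p)
{
  mchunkptr nxt = next_chunk(p);      /* forward: p + chunksize(p)          */
  if (!prev_inuse(p)) {
    mchunkptr prv = prev_chunk(p);    /* backward: valid only when the      */
    assert(next_chunk(prv) == p);     /* previous chunk is free             */
  }
  return nxt;
}
#endif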
/*
  -------------------- Internal data structures --------------------

   All internal state is held in an instance of malloc_state defined
   below. There are no other static variables, except in two optional
   cases:
   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
   * If HAVE_MMAP is true, but mmap doesn't support
     MAP_ANONYMOUS, a dummy file descriptor for mmap.

   Beware of lots of tricks that minimize the total bookkeeping space
   requirements. The result is a little over 1K bytes (for 4byte
   pointers and size_t.)
*/

/*
  Bins

    An array of bin headers for free chunks. Each bin is doubly
    linked.  The bins are approximately proportionally (log) spaced.
    There are a lot of these bins (128). This may look excessive, but
    works very well in practice.  Most bins hold sizes that are
    unusual as malloc request sizes, but are more usual for fragments
    and consolidated sets of chunks, which is what these bins hold, so
    they can be found quickly.  All procedures maintain the invariant
    that no consolidated chunk physically borders another one, so each
    chunk in a list is known to be preceded and followed by either
    inuse chunks or the ends of memory.

    Chunks in bins are kept in size order, with ties going to the
    approximately least recently used chunk. Ordering isn't needed
    for the small bins, which all contain the same-sized chunks, but
    facilitates best-fit allocation for larger chunks. These lists
    are just sequential. Keeping them in order almost never requires
    enough traversal to warrant using fancier ordered data
    structures.

    Chunks of the same size are linked with the most
    recently freed at the front, and allocations are taken from the
    back.  This results in LRU (FIFO) allocation order, which tends
    to give each chunk an equal opportunity to be consolidated with
    adjacent freed chunks, resulting in larger free chunks and less
    fragmentation.

    To simplify use in double-linked lists, each bin header acts
    as a malloc_chunk. This avoids special-casing for headers.
    But to conserve space and improve locality, we allocate
    only the fd/bk pointers of bins, and then use repositioning tricks
    to treat these as the fields of a malloc_chunk*.
*/
typedef struct malloc_chunk* mbinptr;

/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) \
  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))                          \
             - offsetof (struct malloc_chunk, fd))

/* analog of ++bin */
#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))

/* Reminders about list directionality within bins */
#define first(b)     ((b)->fd)
#define last(b)      ((b)->bk)

/* Take a chunk off a bin list */
#define unlink(P, BK, FD) {                                            \
  FD = P->fd;                                                          \
  BK = P->bk;                                                          \
  if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                \
    malloc_printerr (check_action, "corrupted double-linked list", P); \
  else {                                                               \
    FD->bk = BK;                                                       \
    BK->fd = FD;                                                       \
    if (!in_smallbin_range (P->size)                                   \
        && __builtin_expect (P->fd_nextsize != NULL, 0)) {             \
      assert (P->fd_nextsize->bk_nextsize == P);                       \
      assert (P->bk_nextsize->fd_nextsize == P);                       \
      if (FD->fd_nextsize == NULL) {                                   \
        if (P->fd_nextsize == P)                                       \
          FD->fd_nextsize = FD->bk_nextsize = FD;                      \
        else {                                                         \
          FD->fd_nextsize = P->fd_nextsize;                            \
          FD->bk_nextsize = P->bk_nextsize;                            \
          P->fd_nextsize->bk_nextsize = FD;                            \
          P->bk_nextsize->fd_nextsize = FD;                            \
        }                                                              \
      } else {                                                         \
        P->fd_nextsize->bk_nextsize = P->bk_nextsize;                  \
        P->bk_nextsize->fd_nextsize = P->fd_nextsize;                  \
      }                                                                \
    }                                                                  \
  }                                                                    \
}
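/*
  Illustrative sketch (not part of the allocator): the core of the unlink
  operation without the nextsize bookkeeping, showing why the fd/bk
  cross-check above defeats the classic "unlink" heap-overflow trick: a
  corrupted neighbour is reported instead of being written through.  The
  name example_unlink_checked is hypothetical and the code is excluded
  from the build.
*/
#if 0
static void example_unlink_checked(mchunkptr P)
{
  mchunkptr FD = P->fd;
  mchunkptr BK = P->bk;
  /* a consistent list must point back at P from both neighbours */
  if (__builtin_expect (FD->bk != P || BK->fd != P, 0))
    malloc_printerr (check_action, "corrupted double-linked list", P);
  else {
    FD->bk = BK;
    BK->fd = FD;
  }
}
#endif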
/*
  Indexing

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically spaced:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The bins top out around 1MB because we expect to service large
    requests via mmap.
*/

#define NBINS             128
#define NSMALLBINS         64
#define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
#define MIN_LARGE_SIZE    (NSMALLBINS * SMALLBIN_WIDTH)

#define in_smallbin_range(sz)  \
  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz) \
  (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))

#define largebin_index_32(sz)                                                \
(((((unsigned long)(sz)) >>  6) <= 38)?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

// XXX It remains to be seen whether it is good to keep the widths of
// XXX the buckets the same or whether it should be scaled by a factor
// XXX of two as well.
#define largebin_index_64(sz)                                                \
(((((unsigned long)(sz)) >>  6) <= 48)?  48 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

#define largebin_index(sz) \
  (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))

#define bin_index(sz) \
  ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
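/*
  A worked example of the indexing above (illustrative only; the figures
  assume a 32-bit build with SIZE_SZ == 4, SMALLBIN_WIDTH == 8 and
  MIN_LARGE_SIZE == 512):

    bin_index(48)   == smallbin_index(48)   == 48 >> 3 == 6
    bin_index(1024) == largebin_index_32(1024):
                       1024 >> 6 == 16, which is <= 38, so the
                       index is 56 + 16 == 72
*/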
/*
  Unsorted chunks

    All remainders from chunk splits, as well as all returned chunks,
    are first placed in the "unsorted" bin. They are then placed
    in regular bins after malloc gives them ONE chance to be used before
    binning. So, basically, the unsorted_chunks list acts as a queue,
    with chunks being placed on it in free (and malloc_consolidate),
    and taken off (to be either used or placed in bins) in malloc.

    The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
    does not have to be taken into account in size comparisons.
*/

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
#define unsorted_chunks(M)          (bin_at(M, 1))
/*
  Top

    The top-most available chunk (i.e., the one bordering the end of
    available memory) is treated specially. It is never included in
    any bin, is used only if no other chunk is available, and is
    released back to the system if it is very large (see
    M_TRIM_THRESHOLD).  Because top initially
    points to its own bin with initial zero size, thus forcing
    extension on the first malloc request, we avoid having any special
    code in malloc to check whether it even exists yet. But we still
    need to do so when getting memory from system, so we make
    initial_top treat the bin as a legal but unusable chunk during the
    interval between initialization and the first call to
    sYSMALLOc. (This is somewhat delicate, since it relies on
    the 2 preceding words to be zero during this interval as well.)
*/

/* Conveniently, the unsorted bin can be used as dummy top on first call */
#define initial_top(M)              (unsorted_chunks(M))
/*
  Binmap

    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binmap' is a
    bitvector recording whether bins are definitely empty so they can
    be skipped over during traversals.  The bits are NOT always
    cleared as soon as bins are empty, but instead only
    when they are noticed to be empty during traversal in malloc.
*/

/* Conservatively use 32 bits per map word, even if on 64bit system */
#define BINMAPSHIFT      5
#define BITSPERMAP       (1U << BINMAPSHIFT)
#define BINMAPSIZE       (NBINS / BITSPERMAP)

#define idx2block(i)     ((i) >> BINMAPSHIFT)
#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
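/*
  Illustrative sketch (not part of the allocator): marking and testing one
  bin in the one-level index.  Bin i lives in 32-bit word i >> 5, at bit
  i & 31.  The name example_binmap_usage is hypothetical and the code is
  excluded from the build.
*/
#if 0
static int example_binmap_usage(mstate m)
{
  mark_bin(m, 70);                   /* word 70>>5 == 2, bit 70&31 == 6 */
  int was_set = get_binmap(m, 70) != 0;
  unmark_bin(m, 70);                 /* clear it again */
  return was_set;                    /* 1: the bit round-trips */
}
#endif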
/*
  Fastbins

    An array of lists holding recently freed small chunks.  Fastbins
    are not doubly linked.  It is faster to single-link them, and
    since chunks are never removed from the middles of these lists,
    double linking is not necessary. Also, unlike regular bins, they
    are not even processed in FIFO order (they use faster LIFO) since
    ordering doesn't much matter in the transient contexts in which
    fastbins are normally used.

    Chunks in fastbins keep their inuse bit set, so they cannot
    be consolidated with other free chunks. malloc_consolidate
    releases all chunks in fastbins and consolidates them with
    other free chunks.
*/

typedef struct malloc_chunk* mfastbinptr;
#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])

/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz) \
  ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)


/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)

#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
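/*
  A worked example of the fastbin sizing above (illustrative only; the
  figures assume a 64-bit build with SIZE_SZ == 8 and 16-byte alignment):

    MAX_FAST_SIZE                     == 80 * 8 / 4     == 160
    fastbin_index(32)                 == (32 >> 4) - 2  == 0   (smallest chunk)
    fastbin_index(request2size(160))  == (176 >> 4) - 2 == 9
    NFASTBINS                         == 9 + 1          == 10
*/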
/*
  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
  that triggers automatic consolidation of possibly-surrounding
  fastbin chunks. This is a heuristic, so the exact value should not
  matter too much. It is defined at half the default trim threshold as a
  compromise heuristic to only attempt consolidation if it is likely
  to lead to trimming. However, it is not dynamically tunable, since
  consolidation reduces fragmentation surrounding large chunks even
  if trimming is not used.
*/

#define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)

/*
  Since the lowest 2 bits in max_fast don't matter in size comparisons,
  they are used as flags.
*/

/*
  FASTCHUNKS_BIT held in max_fast indicates that there are probably
  some fastbin chunks. It is set true on entering a chunk into any
  fastbin, and cleared only in malloc_consolidate.

  The truth value is inverted so that have_fastchunks will be true
  upon startup (since statics are zero-filled), simplifying
  initialization checks.
*/

#define FASTCHUNKS_BIT        (1U)

#define have_fastchunks(M)     (((M)->flags &  FASTCHUNKS_BIT) == 0)
#ifdef ATOMIC_FASTBINS
#define clear_fastchunks(M)    catomic_or (&(M)->flags, FASTCHUNKS_BIT)
#define set_fastchunks(M)      catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
#else
#define clear_fastchunks(M)    ((M)->flags |=  FASTCHUNKS_BIT)
#define set_fastchunks(M)      ((M)->flags &= ~FASTCHUNKS_BIT)
#endif
/*
  NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
  regions.  Otherwise, contiguity is exploited in merging together,
  when possible, results from consecutive MORECORE calls.

  The initial value comes from MORECORE_CONTIGUOUS, but is
  changed dynamically if mmap is ever used as an sbrk substitute.
*/

#define NONCONTIGUOUS_BIT     (2U)

#define contiguous(M)          (((M)->flags &  NONCONTIGUOUS_BIT) == 0)
#define noncontiguous(M)       (((M)->flags &  NONCONTIGUOUS_BIT) != 0)
#define set_noncontiguous(M)   ((M)->flags |=  NONCONTIGUOUS_BIT)
#define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)

/*
   Set value of max_fast.
   Use impossibly small value if 0.
   Precondition: there are no existing fastbin chunks.
   Setting the value clears fastchunk bit but preserves noncontiguous bit.
*/

#define set_max_fast(s) \
  global_max_fast = (((s) == 0) \
                     ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
#define get_max_fast() global_max_fast
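/*
  A worked example of set_max_fast (illustrative only; assumes SIZE_SZ == 8
  and MALLOC_ALIGN_MASK == 15):

    set_max_fast(64) stores (64 + 8) & ~15 == 64, so padded requests of up
    to 64 bytes are eligible for fastbins.
    set_max_fast(0) stores SMALLBIN_WIDTH, an impossibly small chunk size,
    which effectively disables fastbins.
*/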
/*
   ----------- Internal state representation and initialization -----------
*/

struct malloc_state {
  /* Serialize access.  */
  mutex_t mutex;

  /* Flags (formerly in max_fast).  */
  int flags;

#if THREAD_STATS
  /* Statistics for locking.  Only used if THREAD_STATS is defined.  */
  long stat_lock_direct, stat_lock_loop, stat_lock_wait;
#endif

  /* Fastbins */
  mfastbinptr      fastbinsY[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr        top;

  /* The remainder from the most recent split of a small request */
  mchunkptr        last_remainder;

  /* Normal bins packed as described above */
  mchunkptr        bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int     binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  */
  struct malloc_state *next_free;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};

struct malloc_par {
  /* Tunable parameters */
  unsigned long    trim_threshold;
  INTERNAL_SIZE_T  top_pad;
  INTERNAL_SIZE_T  mmap_threshold;
  INTERNAL_SIZE_T  arena_test;
  INTERNAL_SIZE_T  arena_max;

  /* Memory map support */
  int              n_mmaps;
  int              n_mmaps_max;
  int              max_n_mmaps;
  /* the mmap_threshold is dynamic, until the user sets
     it manually, at which point we need to disable any
     dynamic behavior. */
  int              no_dyn_threshold;

  /* Cache malloc_getpagesize */
  unsigned int     pagesize;

  /* Statistics */
  INTERNAL_SIZE_T  mmapped_mem;
  /*INTERNAL_SIZE_T  sbrked_mem;*/
  /*INTERNAL_SIZE_T  max_sbrked_mem;*/
  INTERNAL_SIZE_T  max_mmapped_mem;
  INTERNAL_SIZE_T  max_total_mem; /* only kept for NO_THREADS */

  /* First address handed out by MORECORE/sbrk.  */
  char*            sbrk_base;
};

/* There are several instances of this struct ("arenas") in this
   malloc.  If you are adapting this malloc in a way that does NOT use
   a static or mmapped malloc_state, you MUST explicitly zero-fill it
   before using. This malloc relies on the property that malloc_state
   is initialized to all zeroes (as is true of C statics).  */

static struct malloc_state main_arena;

/* There is only one instance of the malloc parameters.  */

static struct malloc_par mp_;
/* Non public mallopt parameters.  */
#define M_ARENA_TEST -7
#define M_ARENA_MAX  -8


/* Maximum size of memory handled in fastbins.  */
static INTERNAL_SIZE_T global_max_fast;

/*
  Initialize a malloc_state struct.

  This is called only from within malloc_consolidate, which needs
  be called in the same contexts anyway.  It is never called directly
  outside of malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all. (Inlining it in malloc_consolidate is fine though.)
*/

#if __STD_C
static void malloc_init_state(mstate av)
#else
static void malloc_init_state(av) mstate av;
#endif
{
  int     i;
  mbinptr bin;

  /* Establish circular links for normal bins */
  for (i = 1; i < NBINS; ++i) {
    bin = bin_at(av,i);
    bin->fd = bin->bk = bin;
  }

#if MORECORE_CONTIGUOUS
  if (av != &main_arena)
#endif
    set_noncontiguous(av);
  if (av == &main_arena)
    set_max_fast(DEFAULT_MXFAST);
  av->flags |= FASTCHUNKS_BIT;

  av->top = initial_top(av);
}
/*
   Other internal utilities operating on mstates
*/

#if __STD_C
static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
static int      sYSTRIm(size_t, mstate);
static void     malloc_consolidate(mstate);
static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
#else
static Void_t*  sYSMALLOc();
static int      sYSTRIm();
static void     malloc_consolidate();
static Void_t** iALLOc();
#endif
/* -------------- Early definitions for debugging hooks ---------------- */

/* Define and initialize the hook variables.  These weak definitions must
   appear before any use of the variables in a function (arena.c uses one).  */
#ifndef weak_variable
#ifndef _LIBC
#define weak_variable /**/
#else
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs.  */
#define weak_variable weak_function
#endif
#endif

/* Forward declarations.  */
static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
                                            const __malloc_ptr_t caller));
static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
                                             const __malloc_ptr_t caller));
static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
                                              const __malloc_ptr_t caller));

void weak_variable (*__malloc_initialize_hook) (void) = NULL;
void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
                                   const __malloc_ptr_t) = NULL;
__malloc_ptr_t weak_variable (*__malloc_hook)
     (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
__malloc_ptr_t weak_variable (*__realloc_hook)
     (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
     = realloc_hook_ini;
__malloc_ptr_t weak_variable (*__memalign_hook)
     (size_t __alignment, size_t __size, const __malloc_ptr_t)
     = memalign_hook_ini;
void weak_variable (*__after_morecore_hook) (void) = NULL;
/* ---------------- Error behavior ------------------------------------ */

#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 3
#endif

static int check_action = DEFAULT_CHECK_ACTION;


/* ------------------ Testing support ----------------------------------*/

static int perturb_byte;

#define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
#define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
/* ------------------- Support for multiple arenas -------------------- */
#include "arena.c"

/*
  Debugging support

  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if ! MALLOC_DEBUG

#define check_chunk(A,P)
#define check_free_chunk(A,P)
#define check_inuse_chunk(A,P)
#define check_remalloced_chunk(A,P,N)
#define check_malloced_chunk(A,P,N)
#define check_malloc_state(A)

#else

#define check_chunk(A,P)              do_check_chunk(A,P)
#define check_free_chunk(A,P)         do_check_free_chunk(A,P)
#define check_inuse_chunk(A,P)        do_check_inuse_chunk(A,P)
#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
#define check_malloced_chunk(A,P,N)   do_check_malloced_chunk(A,P,N)
#define check_malloc_state(A)         do_check_malloc_state(A)
/*
  Properties of all chunks
*/

#if __STD_C
static void do_check_chunk(mstate av, mchunkptr p)
#else
static void do_check_chunk(av, p) mstate av; mchunkptr p;
#endif
{
  unsigned long sz = chunksize(p);
  /* min and max possible addresses assuming contiguous allocation */
  char* max_address = (char*)(av->top) + chunksize(av->top);
  char* min_address = max_address - av->system_mem;

  if (!chunk_is_mmapped(p)) {

    /* Has legal address ... */
    if (p != av->top) {
      if (contiguous(av)) {
        assert(((char*)p) >= min_address);
        assert(((char*)p + sz) <= ((char*)(av->top)));
      }
    }
    else {
      /* top size is always at least MINSIZE */
      assert((unsigned long)(sz) >= MINSIZE);
      /* top predecessor always marked inuse */
      assert(prev_inuse(p));
    }

  }
  else {
#if HAVE_MMAP
    /* address is outside main heap  */
    if (contiguous(av) && av->top != initial_top(av)) {
      assert(((char*)p) < min_address || ((char*)p) >= max_address);
    }
    /* chunk is page-aligned */
    assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
    /* mem is aligned */
    assert(aligned_OK(chunk2mem(p)));
#else
    /* force an appropriate assert violation if debug set */
    assert(!chunk_is_mmapped(p));
#endif
  }
}
/*
  Properties of free chunks
*/

#if __STD_C
static void do_check_free_chunk(mstate av, mchunkptr p)
#else
static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(av, p);

  /* Chunk must claim to be free ... */
  assert(!inuse(p));
  assert (!chunk_is_mmapped(p));

  /* Unless a special marker, must have OK fields */
  if ((unsigned long)(sz) >= MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == av->top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}
/*
  Properties of inuse chunks
*/

#if __STD_C
static void do_check_inuse_chunk(mstate av, mchunkptr p)
#else
static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
#endif
{
  mchunkptr next;

  do_check_chunk(av, p);

  if (chunk_is_mmapped(p))
    return; /* mmapped chunks have no next/prev */

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  next = next_chunk(p);

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))  {
    /* Note that we cannot even look at prev unless it is not inuse */
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(av, prv);
  }

  if (next == av->top) {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(av, next);
}
/*
  Properties of chunks recycled from fastbins
*/

#if __STD_C
static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_remalloced_chunk(av, p, s)
mstate av; mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);

  if (!chunk_is_mmapped(p)) {
    assert(av == arena_for_chunk(p));
    if (chunk_non_main_arena(p))
      assert(av != &main_arena);
    else
      assert(av == &main_arena);
  }

  do_check_inuse_chunk(av, p);

  /* Legal size ... */
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert((unsigned long)(sz) >= MINSIZE);
  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));
  /* chunk is less than MINSIZE more than request */
  assert((long)(sz) - (long)(s) >= 0);
  assert((long)(sz) - (long)(s + MINSIZE) < 0);
}
/*
  Properties of nonrecycled chunks at the point they are malloced
*/

#if __STD_C
static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(av, p, s)
mstate av; mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  /* same as recycled case ... */
  do_check_remalloced_chunk(av, p, s);

  /*
    ... plus, must obey implementation invariant that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use
    chunk, or the base of its memory arena. This is ensured
    by making all allocations from the `lowest' part of any found
    chunk.  This does not necessarily hold however for chunks
    recycled via fastbins.
  */

  assert(prev_inuse(p));
}
/*
  Properties of malloc_state.

  This may be useful for debugging malloc, as well as detecting user
  programmer errors that somehow write into malloc_state.

  If you are extending or experimenting with this malloc, you can
  probably figure out how to hack this routine to print out or
  display chunk addresses, sizes, bins, and other instrumentation.
*/

static void do_check_malloc_state(mstate av)
{
  int i;
  mchunkptr p;
  mchunkptr q;
  mbinptr b;
  unsigned int idx;
  INTERNAL_SIZE_T size;
  unsigned long total = 0;
  int max_fast_bin;

  /* internal size_t must be no wider than pointer type */
  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));

  /* alignment is a power of 2 */
  assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);

  /* cannot run remaining checks until fully initialized */
  if (av->top == 0 || av->top == initial_top(av))
    return;

  /* pagesize is a power of 2 */
  assert((mp_.pagesize & (mp_.pagesize-1)) == 0);

  /* A contiguous main_arena is consistent with sbrk_base.  */
  if (av == &main_arena && contiguous(av))
    assert((char*)mp_.sbrk_base + av->system_mem ==
           (char*)av->top + chunksize(av->top));

  /* properties of fastbins */

  /* max_fast is in allowed range */
  assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));

  max_fast_bin = fastbin_index(get_max_fast ());

  for (i = 0; i < NFASTBINS; ++i) {
    p = fastbin (av, i);

    /* The following test can only be performed for the main arena.
       While mallopt calls malloc_consolidate to get rid of all fast
       bins (especially those larger than the new maximum) this does
       only happen for the main arena.  Trying to do this for any
       other arena would mean those arenas have to be locked and
       malloc_consolidate be called for them.  This is excessive.  And
       even if this is acceptable to somebody it still cannot solve
       the problem completely since if the arena is locked a
       concurrent malloc call might create a new arena which then
       could use the newly invalid fast bins.  */

    /* all bins past max_fast are empty */
    if (av == &main_arena && i > max_fast_bin)
      assert(p == 0);

    while (p != 0) {
      /* each chunk claims to be inuse */
      do_check_inuse_chunk(av, p);
      total += chunksize(p);
      /* chunk belongs in this bin */
      assert(fastbin_index(chunksize(p)) == i);
      p = p->fd;
    }
  }

  if (total != 0)
    assert(have_fastchunks(av));
  else if (!have_fastchunks(av))
    assert(total == 0);

  /* check normal bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av,i);

    /* binmap is accurate (except for bin 1 == unsorted_chunks) */
    if (i >= 2) {
      unsigned int binbit = get_binmap(av,i);
      int empty = last(b) == b;
      if (!binbit)
        assert(empty);
      else if (!empty)
        assert(binbit);
    }

    for (p = last(b); p != b; p = p->bk) {
      /* each chunk claims to be free */
      do_check_free_chunk(av, p);
      size = chunksize(p);
      total += size;
      if (i >= 2) {
        /* chunk belongs in bin */
        idx = bin_index(size);
        assert(idx == i);
        /* lists are sorted */
        assert(p->bk == b ||
               (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));

        if (!in_smallbin_range(size))
          {
            if (p->fd_nextsize != NULL)
              {
                if (p->fd_nextsize == p)
                  assert (p->bk_nextsize == p);
                else
                  {
                    if (p->fd_nextsize == first (b))
                      assert (chunksize (p) < chunksize (p->fd_nextsize));
                    else
                      assert (chunksize (p) > chunksize (p->fd_nextsize));

                    if (p == first (b))
                      assert (chunksize (p) > chunksize (p->bk_nextsize));
                    else
                      assert (chunksize (p) < chunksize (p->bk_nextsize));
                  }
              }
            else
              assert (p->bk_nextsize == NULL);
          }
      } else if (!in_smallbin_range(size))
        assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
      /* chunk is followed by a legal chain of inuse chunks */
      for (q = next_chunk(p);
           (q != av->top && inuse(q) &&
             (unsigned long)(chunksize(q)) >= MINSIZE);
           q = next_chunk(q))
        do_check_inuse_chunk(av, q);
    }
  }

  /* top chunk is OK */
  check_chunk(av, av->top);

  /* sanity checks for statistics */

#ifdef NO_THREADS
  assert(total <= (unsigned long)(mp_.max_total_mem));
  assert(mp_.n_mmaps >= 0);
#endif
  assert(mp_.n_mmaps <= mp_.max_n_mmaps);

  assert((unsigned long)(av->system_mem) <=
         (unsigned long)(av->max_system_mem));

  assert((unsigned long)(mp_.mmapped_mem) <=
         (unsigned long)(mp_.max_mmapped_mem));

#ifdef NO_THREADS
  assert((unsigned long)(mp_.max_total_mem) >=
         (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
#endif
}
#endif /* MALLOC_DEBUG */


/* ----------------- Support for debugging hooks -------------------- */
#include "hooks.c"


/* ----------- Routines dealing with system allocation -------------- */

/*
  sysmalloc handles malloc cases requiring more memory from the system.
  On entry, it is assumed that av->top does not have enough
  space to service request for nb bytes, thus requiring that av->top
  be extended or replaced.
*/
#if __STD_C
static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
#else
static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
#endif
{
  mchunkptr       old_top;        /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char*           old_end;        /* its end address */

  long            size;           /* arg to first MORECORE or mmap call */
  char*           brk;            /* return value from MORECORE */

  long            correction;     /* arg to 2nd MORECORE call */
  char*           snd_brk;        /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char*           aligned_brk;    /* aligned offset into brk */

  mchunkptr       p;              /* the allocated/returned chunk */
  mchunkptr       remainder;      /* remainder from allocation */
  unsigned long   remainder_size; /* its size */

  unsigned long   sum;            /* for updating stats */

  size_t          pagemask   = mp_.pagesize - 1;
  bool            tried_mmap = false;
#if HAVE_MMAP

  /*
    If have mmap, and the request size meets the mmap threshold, and
    the system supports mmap, and there are few enough currently
    allocated mmapped regions, try to directly map this request
    rather than expanding top.
  */

  if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
      (mp_.n_mmaps < mp_.n_mmaps_max)) {

    char* mm;             /* return value from mmap call*/

  try_mmap:
    /*
      Round up size to nearest page.  For mmapped chunks, the overhead
      is one SIZE_SZ unit larger than for normal chunks, because there
      is no following chunk whose prev_size field could be used.
    */
#if 1
    /* See the front_misalign handling below, for glibc there is no
       need for further alignments.  */
    size = (nb + SIZE_SZ + pagemask) & ~pagemask;
#else
    size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
#endif
    tried_mmap = true;

    /* Don't try if size wraps around 0 */
    if ((unsigned long)(size) > (unsigned long)(nb)) {

      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

      if (mm != MAP_FAILED) {

        /*
          The offset to the start of the mmapped region is stored
          in the prev_size field of the chunk. This allows us to adjust
          returned start address to meet alignment requirements here
          and in memalign(), and still be able to compute proper
          address argument for later munmap in free() and realloc().
        */

#if 1
        /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
           MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
           aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
        assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
#else
        front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {
          correction = MALLOC_ALIGNMENT - front_misalign;
          p = (mchunkptr)(mm + correction);
          p->prev_size = correction;
          set_head(p, (size - correction) |IS_MMAPPED);
        }
        else
#endif
          {
            p = (mchunkptr)mm;
            set_head(p, size|IS_MMAPPED);
          }

        /* update statistics */

        if (++mp_.n_mmaps > mp_.max_n_mmaps)
          mp_.max_n_mmaps = mp_.n_mmaps;

        sum = mp_.mmapped_mem += size;
        if (sum > (unsigned long)(mp_.max_mmapped_mem))
          mp_.max_mmapped_mem = sum;
#ifdef NO_THREADS
        sum += av->system_mem;
        if (sum > (unsigned long)(mp_.max_total_mem))
          mp_.max_total_mem = sum;
#endif

        check_chunk(av, p);

        return chunk2mem(p);
      }
    }
  }
#endif
3071 old_size
= chunksize(old_top
);
3072 old_end
= (char*)(chunk_at_offset(old_top
, old_size
));
3074 brk
= snd_brk
= (char*)(MORECORE_FAILURE
);
3077 If not the first time through, we require old_size to be
3078 at least MINSIZE and to have prev_inuse set.
3081 assert((old_top
== initial_top(av
) && old_size
== 0) ||
3082 ((unsigned long) (old_size
) >= MINSIZE
&&
3083 prev_inuse(old_top
) &&
3084 ((unsigned long)old_end
& pagemask
) == 0));
3086 /* Precondition: not enough current space to satisfy nb request */
3087 assert((unsigned long)(old_size
) < (unsigned long)(nb
+ MINSIZE
));
3089 #ifndef ATOMIC_FASTBINS
3090 /* Precondition: all fastbins are consolidated */
3091 assert(!have_fastchunks(av
));
  if (av != &main_arena) {

    heap_info *old_heap, *heap;
    size_t old_heap_size;

    /* First try to extend the current heap. */
    old_heap = heap_for_ptr(old_top);
    old_heap_size = old_heap->size;
    if ((long) (MINSIZE + nb - old_size) > 0
        && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
      av->system_mem += old_heap->size - old_heap_size;
      arena_mem += old_heap->size - old_heap_size;
#if 0
      if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
        max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
      set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
               | PREV_INUSE);
    }
    else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
      /* Use a newly allocated heap.  */
      heap->ar_ptr = av;
      heap->prev = old_heap;
      av->system_mem += heap->size;
      arena_mem += heap->size;
#if 0
      if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
        max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
      /* Set up the new top.  */
      top(av) = chunk_at_offset(heap, sizeof(*heap));
      set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);

      /* Setup fencepost and free the old top chunk. */
      /* The fencepost takes at least MINSIZE bytes, because it might
         become the top chunk again later.  Note that a footer is set
         up, too, although the chunk is marked in use. */
      old_size -= MINSIZE;
      set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
      if (old_size >= MINSIZE) {
        set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
        set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
        set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
#ifdef ATOMIC_FASTBINS
        _int_free(av, old_top, 1);
#else
        _int_free(av, old_top);
#endif
      } else {
        set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
        set_foot(old_top, (old_size + 2*SIZE_SZ));
      }
    }
    else if (!tried_mmap)
      /* We can at least try to use to mmap memory.  */
      goto try_mmap;

  } else { /* av == main_arena */
  /* Request enough space for nb + pad + overhead */

  size = nb + mp_.top_pad + MINSIZE;

  /*
    If contiguous, we can subtract out existing space that we hope to
    combine with new space. We add it back later only if
    we don't actually get contiguous space.
  */

  if (contiguous(av))
    size -= old_size;

  /*
    Round to a multiple of page size.
    If MORECORE is not contiguous, this ensures that we only call it
    with whole-page arguments.  And if MORECORE is contiguous and
    this is not first time through, this preserves page-alignment of
    previous calls. Otherwise, we correct to page-align below.
  */

  size = (size + pagemask) & ~pagemask;

  /*
    Don't try to call MORECORE if argument is so big as to appear
    negative. Note that since mmap takes size_t arg, it may succeed
    below even if we cannot call MORECORE.
  */

  if (size > 0)
    brk = (char*)(MORECORE(size));

  if (brk != (char*)(MORECORE_FAILURE)) {
    /* Call the `morecore' hook if necessary.  */
    void (*hook) (void) = force_reg (__after_morecore_hook);
    if (__builtin_expect (hook != NULL, 0))
      (*hook) ();
  }

  /*
    If have mmap, try using it as a backup when MORECORE fails or
    cannot be used. This is worth doing on systems that have "holes" in
    address space, so sbrk cannot extend to give contiguous space, but
    space is available elsewhere.  Note that we ignore mmap max count
    and threshold limits, since the space will not be used as a
    segregated mmap region.
  */

#if HAVE_MMAP
  if (brk == (char*)(MORECORE_FAILURE)) {

    /* Cannot merge with old top, so add its size back in */
    if (contiguous(av))
      size = (size + old_size + pagemask) & ~pagemask;

    /* If we are relying on mmap as backup, then use larger units */
    if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
      size = MMAP_AS_MORECORE_SIZE;

    /* Don't try if size wraps around 0 */
    if ((unsigned long)(size) > (unsigned long)(nb)) {

      char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

      if (mbrk != MAP_FAILED) {

        /* We do not need, and cannot use, another sbrk call to find end */
        brk = mbrk;
        snd_brk = brk + size;

        /*
           Record that we no longer have a contiguous sbrk region.
           After the first time mmap is used as backup, we do not
           ever rely on contiguous space since this could incorrectly
           bridge regions.
        */
        set_noncontiguous(av);
      }
    }
  }
#endif

  if (brk != (char*)(MORECORE_FAILURE)) {
    if (mp_.sbrk_base == 0)
      mp_.sbrk_base = brk;
    av->system_mem += size;

    /*
      If MORECORE extends previous space, we can likewise extend top size.
    */

    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
      set_head(old_top, (size + old_size) | PREV_INUSE);

    else if (contiguous(av) && old_size && brk < old_end) {
      /* Oops!  Someone else killed our space..  Can't touch anything.  */
      malloc_printerr (3, "break adjusted to free malloc space", brk);
    }

    /*
      Otherwise, make adjustments:

      * If the first time through or noncontiguous, we need to call sbrk
        just to find out where the end of memory lies.

      * We need to ensure that all returned chunks from malloc will meet
        MALLOC_ALIGNMENT

      * If there was an intervening foreign sbrk, we need to adjust sbrk
        request size to account for fact that we will not be able to
        combine new space with existing space in old_top.

      * Almost all systems internally allocate whole pages at a time, in
        which case we might as well use the whole last page of request.
        So we allocate enough more memory to hit a page boundary now,
        which in turn causes future contiguous calls to page-align.
    */

    else {
      front_misalign = 0;
      end_misalign = 0;
      correction = 0;
      aligned_brk = brk;

      /* handle contiguous cases */
      if (contiguous(av)) {

        /* Count foreign sbrk as system_mem.  */
        if (old_size)
          av->system_mem += brk - old_end;

        /* Guarantee alignment of first new chunk made from this space */

        front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {

          /*
            Skip over some bytes to arrive at an aligned position.
            We don't need to specially mark these wasted front bytes.
            They will never be accessed anyway because
            prev_inuse of av->top (and any chunk created from its start)
            is always true after initialization.
          */

          correction = MALLOC_ALIGNMENT - front_misalign;
          aligned_brk += correction;
        }

        /*
          If this isn't adjacent to existing space, then we will not
          be able to merge with old_top space, so must add to 2nd request.
        */

        correction += old_size;

        /* Extend the end address to hit a page boundary */
        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;

        assert(correction >= 0);
        snd_brk = (char*)(MORECORE(correction));

        /*
          If can't allocate correction, try to at least find out current
          brk.  It might be enough to proceed without failing.

          Note that if second sbrk did NOT fail, we assume that space
          is contiguous with first sbrk. This is a safe assumption unless
          program is multithreaded but doesn't use locks and a foreign sbrk
          occurred between our first and second calls.
        */

        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          correction = 0;
          snd_brk = (char*)(MORECORE(0));
        } else {
          /* Call the `morecore' hook if necessary.  */
          void (*hook) (void) = force_reg (__after_morecore_hook);
          if (__builtin_expect (hook != NULL, 0))
            (*hook) ();
        }
      }

      /* handle non-contiguous cases */
      else {
        /* MORECORE/mmap must correctly align */
        assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);

        /* Find out current end of memory */
        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          snd_brk = (char*)(MORECORE(0));
        }
      }

      /* Adjust top based on results of second sbrk */
      if (snd_brk != (char*)(MORECORE_FAILURE)) {
        av->top = (mchunkptr)aligned_brk;
        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
        av->system_mem += correction;

        /*
          If not the first time through, we either have a
          gap due to foreign sbrk or a non-contiguous region.  Insert a
          double fencepost at old_top to prevent consolidation with space
          we don't own. These fenceposts are artificial chunks that are
          marked as inuse and are in any case too small to use.  We need
          two to make sizes and alignments work out.
        */

        if (old_size != 0) {
          /*
             Shrink old_top to insert fenceposts, keeping size a
             multiple of MALLOC_ALIGNMENT. We know there is at least
             enough space in old_top to do this.
          */
          old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
          set_head(old_top, old_size | PREV_INUSE);

          /*
            Note that the following assignments completely overwrite
            old_top when old_size was previously MINSIZE.  This is
            intentional. We need the fencepost, even if old_top otherwise gets
            lost.
          */
          chunk_at_offset(old_top, old_size            )->size =
            (2*SIZE_SZ)|PREV_INUSE;

          chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
            (2*SIZE_SZ)|PREV_INUSE;

          /* If possible, release the rest. */
          if (old_size >= MINSIZE) {
#ifdef ATOMIC_FASTBINS
            _int_free(av, old_top, 1);
#else
            _int_free(av, old_top);
#endif
          }
        }
      }
    }

    /* Update statistics */
#ifdef NO_THREADS
    sum = av->system_mem + mp_.mmapped_mem;
    if (sum > (unsigned long)(mp_.max_total_mem))
      mp_.max_total_mem = sum;
#endif
  }

  } /* if (av != &main_arena) */
  if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
    av->max_system_mem = av->system_mem;
  check_malloc_state(av);

  /* finally, do the allocation */
  p = av->top;
  size = chunksize(p);

  /* check that one of the above allocation paths succeeded */
  if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
    remainder_size = size - nb;
    remainder = chunk_at_offset(p, nb);
    av->top = remainder;
    set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
    set_head(remainder, remainder_size | PREV_INUSE);
    check_malloced_chunk(av, p, nb);
    return chunk2mem(p);
  }

  /* catch all failure paths */
  MALLOC_FAILURE_ACTION;
  return 0;
}
/*
  sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
  to the system (via negative arguments to sbrk) if there is unused
  memory at the `high' end of the malloc pool. It is called
  automatically by free() when top space exceeds the trim
  threshold. It is also called by the public malloc_trim routine.  It
  returns 1 if it actually released any memory, else 0.
*/

#if __STD_C
static int sYSTRIm(size_t pad, mstate av)
#else
static int sYSTRIm(pad, av) size_t pad; mstate av;
#endif
{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  long  released;        /* Amount actually released */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by post-check sbrk call */
  size_t pagesz;

  pagesz = mp_.pagesize;
  top_size = chunksize(av->top);

  /* Release in pagesize units, keeping at least one page */
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);

  if (extra > 0) {

    /*
      Only proceed if end of memory is where we last set it.
      This avoids problems if there were foreign sbrk calls.
    */
    current_brk = (char*)(MORECORE(0));
    if (current_brk == (char*)(av->top) + top_size) {

      /*
        Attempt to release memory. We ignore MORECORE return value,
        and instead call again to find out where new end of memory is.
        This avoids problems if first call releases less than we asked,
        of if failure somehow altered brk value. (We could still
        encounter problems if it altered brk in some very bad way,
        but the only thing we can do is adjust anyway, which will cause
        some downstream failure.)
      */

      MORECORE(-extra);
      /* Call the `morecore' hook if necessary.  */
      void (*hook) (void) = force_reg (__after_morecore_hook);
      if (__builtin_expect (hook != NULL, 0))
        (*hook) ();
      new_brk = (char*)(MORECORE(0));

      if (new_brk != (char*)MORECORE_FAILURE) {
        released = (long)(current_brk - new_brk);

        if (released != 0) {
          /* Success. Adjust top. */
          av->system_mem -= released;
          set_head(av->top, (top_size - released) | PREV_INUSE);
          check_malloc_state(av);
          return 1;
        }
      }
    }
  }
  return 0;
}
#ifdef HAVE_MMAP

static void
internal_function
#if __STD_C
munmap_chunk(mchunkptr p)
#else
munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);

  assert (chunk_is_mmapped(p));
#if 0
  assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
  assert((mp_.n_mmaps > 0));
#endif

  uintptr_t block = (uintptr_t) p - p->prev_size;
  size_t total_size = p->prev_size + size;
  /* Unfortunately we have to do the compilers job by hand here.  Normally
     we would test BLOCK and TOTAL-SIZE separately for compliance with the
     page size.  But gcc does not recognize the optimization possibility
     (in the moment at least) so we combine the two values into one before
     the bit test.  */
  if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
    {
      malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
                       chunk2mem (p));
      return;
    }

  mp_.n_mmaps--;
  mp_.mmapped_mem -= total_size;

  int ret __attribute__ ((unused)) = munmap((char *)block, total_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}
#if HAVE_MREMAP

static mchunkptr
internal_function
#if __STD_C
mremap_chunk(mchunkptr p, size_t new_size)
#else
mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = mp_.pagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
#if 0
  assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
  assert((mp_.n_mmaps > 0));
#endif
  assert(((size + offset) & (mp_.pagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  /* No need to remap if the number of pages does not change.  */
  if (size + offset == new_size)
    return p;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size,
                      MREMAP_MAYMOVE);

  if (cp == MAP_FAILED) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mp_.mmapped_mem -= size + offset;
  mp_.mmapped_mem += new_size;
  if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
    mp_.max_mmapped_mem = mp_.mmapped_mem;
#ifdef NO_THREADS
  if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
      mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */
/*------------------------ Public wrappers. --------------------------------*/

Void_t*
public_mALLOc(size_t bytes)
{
  mstate ar_ptr;
  Void_t *victim;

  __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t)
    = force_reg (__malloc_hook);
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_lookup(ar_ptr);
#if 0
  // XXX We need double-word CAS and fastbins must be extended to also
  // XXX hold a generation counter for each entry.
  if (ar_ptr) {
    INTERNAL_SIZE_T nb;               /* normalized request size */
    checked_request2size(bytes, nb);
    if (nb <= get_max_fast ()) {
      long int idx = fastbin_index(nb);
      mfastbinptr* fb = &fastbin (ar_ptr, idx);
      mchunkptr pp = *fb;
      mchunkptr v;
      do
        {
          v = pp;
          if (v == NULL)
            break;
        }
      while ((pp = catomic_compare_and_exchange_val_acq (fb, v->fd, v)) != v);
      if (v != 0) {
        if (__builtin_expect (fastbin_index (chunksize (v)) != idx, 0))
          malloc_printerr (check_action, "malloc(): memory corruption (fast)",
                           chunk2mem (v));
        check_remalloced_chunk(ar_ptr, v, nb);
        void *p = chunk2mem(v);
        if (__builtin_expect (perturb_byte, 0))
          alloc_perturb (p, bytes);
        return p;
      }
    }
  }
#endif

  arena_lock(ar_ptr, bytes);
  if(!ar_ptr)
    return 0;
  victim = _int_malloc(ar_ptr, bytes);
  if(!victim) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_unlock(&ar_ptr->mutex);
      ar_ptr = &main_arena;
      (void)mutex_lock(&ar_ptr->mutex);
      victim = _int_malloc(ar_ptr, bytes);
      (void)mutex_unlock(&ar_ptr->mutex);
    } else {
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
      (void)mutex_unlock(&main_arena.mutex);
      if(ar_ptr) {
        victim = _int_malloc(ar_ptr, bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
    }
  } else
    (void)mutex_unlock(&ar_ptr->mutex);
  assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
         ar_ptr == arena_for_chunk(mem2chunk(victim)));
  return victim;
}
#ifdef libc_hidden_def
libc_hidden_def(public_mALLOc)
#endif
3679 public_fREe(Void_t
* mem
)
3682 mchunkptr p
; /* chunk corresponding to mem */
3684 void (*hook
) (__malloc_ptr_t
, __const __malloc_ptr_t
)
3685 = force_reg (__free_hook
);
3686 if (__builtin_expect (hook
!= NULL
, 0)) {
3687 (*hook
)(mem
, RETURN_ADDRESS (0));
3691 if (mem
== 0) /* free(0) has no effect */
3697 if (chunk_is_mmapped(p
)) /* release mmapped memory. */
3699 /* see if the dynamic brk/mmap threshold needs adjusting */
3700 if (!mp_
.no_dyn_threshold
3701 && p
->size
> mp_
.mmap_threshold
3702 && p
->size
<= DEFAULT_MMAP_THRESHOLD_MAX
)
3704 mp_
.mmap_threshold
= chunksize (p
);
3705 mp_
.trim_threshold
= 2 * mp_
.mmap_threshold
;
3712 ar_ptr
= arena_for_chunk(p
);
3713 #ifdef ATOMIC_FASTBINS
3714 _int_free(ar_ptr
, p
, 0);
3717 if(!mutex_trylock(&ar_ptr
->mutex
))
3718 ++(ar_ptr
->stat_lock_direct
);
3720 (void)mutex_lock(&ar_ptr
->mutex
);
3721 ++(ar_ptr
->stat_lock_wait
);
3724 (void)mutex_lock(&ar_ptr
->mutex
);
3726 _int_free(ar_ptr
, p
);
3727 (void)mutex_unlock(&ar_ptr
->mutex
);
3730 #ifdef libc_hidden_def
3731 libc_hidden_def (public_fREe
)
3735 public_rEALLOc(Void_t
* oldmem
, size_t bytes
)
3738 INTERNAL_SIZE_T nb
; /* padded request size */
3740 Void_t
* newp
; /* chunk to return */
3742 __malloc_ptr_t (*hook
) (__malloc_ptr_t
, size_t, __const __malloc_ptr_t
) =
3743 force_reg (__realloc_hook
);
3744 if (__builtin_expect (hook
!= NULL
, 0))
3745 return (*hook
)(oldmem
, bytes
, RETURN_ADDRESS (0));
3747 #if REALLOC_ZERO_BYTES_FREES
3748 if (bytes
== 0 && oldmem
!= NULL
) { public_fREe(oldmem
); return 0; }
3751 /* realloc of null is supposed to be same as malloc */
3752 if (oldmem
== 0) return public_mALLOc(bytes
);
3754 /* chunk corresponding to oldmem */
3755 const mchunkptr oldp
= mem2chunk(oldmem
);
3757 const INTERNAL_SIZE_T oldsize
= chunksize(oldp
);
3759 /* Little security check which won't hurt performance: the
3760 allocator never wrapps around at the end of the address space.
3761 Therefore we can exclude some size values which might appear
3762 here by accident or by "design" from some intruder. */
3763 if (__builtin_expect ((uintptr_t) oldp
> (uintptr_t) -oldsize
, 0)
3764 || __builtin_expect (misaligned_chunk (oldp
), 0))
3766 malloc_printerr (check_action
, "realloc(): invalid pointer", oldmem
);
3770 checked_request2size(bytes
, nb
);
3773 if (chunk_is_mmapped(oldp
))
3778 newp
= mremap_chunk(oldp
, nb
);
3779 if(newp
) return chunk2mem(newp
);
3781 /* Note the extra SIZE_SZ overhead. */
3782 if(oldsize
- SIZE_SZ
>= nb
) return oldmem
; /* do nothing */
3783 /* Must alloc, copy, free. */
3784 newmem
= public_mALLOc(bytes
);
3785 if (newmem
== 0) return 0; /* propagate failure */
3786 MALLOC_COPY(newmem
, oldmem
, oldsize
- 2*SIZE_SZ
);
3792 ar_ptr
= arena_for_chunk(oldp
);
3794 if(!mutex_trylock(&ar_ptr
->mutex
))
3795 ++(ar_ptr
->stat_lock_direct
);
3797 (void)mutex_lock(&ar_ptr
->mutex
);
3798 ++(ar_ptr
->stat_lock_wait
);
3801 (void)mutex_lock(&ar_ptr
->mutex
);
3804 #if !defined NO_THREADS && !defined PER_THREAD
3805 /* As in malloc(), remember this arena for the next allocation. */
3806 tsd_setspecific(arena_key
, (Void_t
*)ar_ptr
);
3809 newp
= _int_realloc(ar_ptr
, oldp
, oldsize
, nb
);
3811 (void)mutex_unlock(&ar_ptr
->mutex
);
3812 assert(!newp
|| chunk_is_mmapped(mem2chunk(newp
)) ||
3813 ar_ptr
== arena_for_chunk(mem2chunk(newp
)));
3817 /* Try harder to allocate memory in other arenas. */
3818 newp
= public_mALLOc(bytes
);
3821 MALLOC_COPY (newp
, oldmem
, oldsize
- SIZE_SZ
);
3822 #ifdef ATOMIC_FASTBINS
3823 _int_free(ar_ptr
, oldp
, 0);
3826 if(!mutex_trylock(&ar_ptr
->mutex
))
3827 ++(ar_ptr
->stat_lock_direct
);
3829 (void)mutex_lock(&ar_ptr
->mutex
);
3830 ++(ar_ptr
->stat_lock_wait
);
3833 (void)mutex_lock(&ar_ptr
->mutex
);
3835 _int_free(ar_ptr
, oldp
);
3836 (void)mutex_unlock(&ar_ptr
->mutex
);
3843 #ifdef libc_hidden_def
3844 libc_hidden_def (public_rEALLOc
)
3848 public_mEMALIGn(size_t alignment
, size_t bytes
)
3853 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, size_t,
3854 __const __malloc_ptr_t
)) =
3855 force_reg (__memalign_hook
);
3856 if (__builtin_expect (hook
!= NULL
, 0))
3857 return (*hook
)(alignment
, bytes
, RETURN_ADDRESS (0));
3859 /* If need less alignment than we give anyway, just relay to malloc */
3860 if (alignment
<= MALLOC_ALIGNMENT
) return public_mALLOc(bytes
);
3862 /* Otherwise, ensure that it is at least a minimum chunk size */
3863 if (alignment
< MINSIZE
) alignment
= MINSIZE
;
3865 arena_get(ar_ptr
, bytes
+ alignment
+ MINSIZE
);
3868 p
= _int_memalign(ar_ptr
, alignment
, bytes
);
3870 /* Maybe the failure is due to running out of mmapped areas. */
3871 if(ar_ptr
!= &main_arena
) {
3872 (void)mutex_unlock(&ar_ptr
->mutex
);
3873 ar_ptr
= &main_arena
;
3874 (void)mutex_lock(&ar_ptr
->mutex
);
3875 p
= _int_memalign(ar_ptr
, alignment
, bytes
);
3876 (void)mutex_unlock(&ar_ptr
->mutex
);
3879 /* ... or sbrk() has failed and there is still a chance to mmap() */
3880 mstate prev
= ar_ptr
->next
? ar_ptr
: 0;
3881 (void)mutex_unlock(&ar_ptr
->mutex
);
3882 ar_ptr
= arena_get2(prev
, bytes
);
3884 p
= _int_memalign(ar_ptr
, alignment
, bytes
);
3885 (void)mutex_unlock(&ar_ptr
->mutex
);
3890 (void)mutex_unlock(&ar_ptr
->mutex
);
3891 assert(!p
|| chunk_is_mmapped(mem2chunk(p
)) ||
3892 ar_ptr
== arena_for_chunk(mem2chunk(p
)));
3895 #ifdef libc_hidden_def
3896 libc_hidden_def (public_mEMALIGn
)
3900 public_vALLOc(size_t bytes
)
3905 if(__malloc_initialized
< 0)
3908 size_t pagesz
= mp_
.pagesize
;
3910 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, size_t,
3911 __const __malloc_ptr_t
)) =
3912 force_reg (__memalign_hook
);
3913 if (__builtin_expect (hook
!= NULL
, 0))
3914 return (*hook
)(pagesz
, bytes
, RETURN_ADDRESS (0));
3916 arena_get(ar_ptr
, bytes
+ pagesz
+ MINSIZE
);
3919 p
= _int_valloc(ar_ptr
, bytes
);
3920 (void)mutex_unlock(&ar_ptr
->mutex
);
3922 /* Maybe the failure is due to running out of mmapped areas. */
3923 if(ar_ptr
!= &main_arena
) {
3924 ar_ptr
= &main_arena
;
3925 (void)mutex_lock(&ar_ptr
->mutex
);
3926 p
= _int_memalign(ar_ptr
, pagesz
, bytes
);
3927 (void)mutex_unlock(&ar_ptr
->mutex
);
3930 /* ... or sbrk() has failed and there is still a chance to mmap() */
3931 ar_ptr
= arena_get2(ar_ptr
->next
? ar_ptr
: 0, bytes
);
3933 p
= _int_memalign(ar_ptr
, pagesz
, bytes
);
3934 (void)mutex_unlock(&ar_ptr
->mutex
);
3939 assert(!p
|| chunk_is_mmapped(mem2chunk(p
)) ||
3940 ar_ptr
== arena_for_chunk(mem2chunk(p
)));
3946 public_pVALLOc(size_t bytes
)
3951 if(__malloc_initialized
< 0)
3954 size_t pagesz
= mp_
.pagesize
;
3955 size_t page_mask
= mp_
.pagesize
- 1;
3956 size_t rounded_bytes
= (bytes
+ page_mask
) & ~(page_mask
);
3958 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, size_t,
3959 __const __malloc_ptr_t
)) =
3960 force_reg (__memalign_hook
);
3961 if (__builtin_expect (hook
!= NULL
, 0))
3962 return (*hook
)(pagesz
, rounded_bytes
, RETURN_ADDRESS (0));
3964 arena_get(ar_ptr
, bytes
+ 2*pagesz
+ MINSIZE
);
3965 p
= _int_pvalloc(ar_ptr
, bytes
);
3966 (void)mutex_unlock(&ar_ptr
->mutex
);
3968 /* Maybe the failure is due to running out of mmapped areas. */
3969 if(ar_ptr
!= &main_arena
) {
3970 ar_ptr
= &main_arena
;
3971 (void)mutex_lock(&ar_ptr
->mutex
);
3972 p
= _int_memalign(ar_ptr
, pagesz
, rounded_bytes
);
3973 (void)mutex_unlock(&ar_ptr
->mutex
);
3976 /* ... or sbrk() has failed and there is still a chance to mmap() */
3977 ar_ptr
= arena_get2(ar_ptr
->next
? ar_ptr
: 0,
3978 bytes
+ 2*pagesz
+ MINSIZE
);
3980 p
= _int_memalign(ar_ptr
, pagesz
, rounded_bytes
);
3981 (void)mutex_unlock(&ar_ptr
->mutex
);
3986 assert(!p
|| chunk_is_mmapped(mem2chunk(p
)) ||
3987 ar_ptr
== arena_for_chunk(mem2chunk(p
)));
3993 public_cALLOc(size_t n
, size_t elem_size
)
3996 mchunkptr oldtop
, p
;
3997 INTERNAL_SIZE_T bytes
, sz
, csz
, oldtopsize
;
3999 unsigned long clearsize
;
4000 unsigned long nclears
;
4003 /* size_t is unsigned so the behavior on overflow is defined. */
4004 bytes
= n
* elem_size
;
4005 #define HALF_INTERNAL_SIZE_T \
4006 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
4007 if (__builtin_expect ((n
| elem_size
) >= HALF_INTERNAL_SIZE_T
, 0)) {
4008 if (elem_size
!= 0 && bytes
/ elem_size
!= n
) {
4009 MALLOC_FAILURE_ACTION
;
4014 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, __const __malloc_ptr_t
)) =
4015 force_reg (__malloc_hook
);
4016 if (__builtin_expect (hook
!= NULL
, 0)) {
4018 mem
= (*hook
)(sz
, RETURN_ADDRESS (0));
4022 return memset(mem
, 0, sz
);
4024 while(sz
> 0) ((char*)mem
)[--sz
] = 0; /* rather inefficient */
4035 /* Check if we hand out the top chunk, in which case there may be no
4039 oldtopsize
= chunksize(top(av
));
4040 #if MORECORE_CLEARS < 2
4041 /* Only newly allocated memory is guaranteed to be cleared. */
4042 if (av
== &main_arena
&&
4043 oldtopsize
< mp_
.sbrk_base
+ av
->max_system_mem
- (char *)oldtop
)
4044 oldtopsize
= (mp_
.sbrk_base
+ av
->max_system_mem
- (char *)oldtop
);
4046 if (av
!= &main_arena
)
4048 heap_info
*heap
= heap_for_ptr (oldtop
);
4049 if (oldtopsize
< (char *) heap
+ heap
->mprotect_size
- (char *) oldtop
)
4050 oldtopsize
= (char *) heap
+ heap
->mprotect_size
- (char *) oldtop
;
4053 mem
= _int_malloc(av
, sz
);
4055 /* Only clearing follows, so we can unlock early. */
4056 (void)mutex_unlock(&av
->mutex
);
4058 assert(!mem
|| chunk_is_mmapped(mem2chunk(mem
)) ||
4059 av
== arena_for_chunk(mem2chunk(mem
)));
4062 /* Maybe the failure is due to running out of mmapped areas. */
4063 if(av
!= &main_arena
) {
4064 (void)mutex_lock(&main_arena
.mutex
);
4065 mem
= _int_malloc(&main_arena
, sz
);
4066 (void)mutex_unlock(&main_arena
.mutex
);
4069 /* ... or sbrk() has failed and there is still a chance to mmap() */
4070 (void)mutex_lock(&main_arena
.mutex
);
4071 av
= arena_get2(av
->next
? av
: 0, sz
);
4072 (void)mutex_unlock(&main_arena
.mutex
);
4074 mem
= _int_malloc(av
, sz
);
4075 (void)mutex_unlock(&av
->mutex
);
4079 if (mem
== 0) return 0;
4083 /* Two optional cases in which clearing not necessary */
4085 if (chunk_is_mmapped (p
))
4087 if (__builtin_expect (perturb_byte
, 0))
4088 MALLOC_ZERO (mem
, sz
);
4096 if (perturb_byte
== 0 && (p
== oldtop
&& csz
> oldtopsize
)) {
4097 /* clear only the bytes from non-freshly-sbrked memory */
4102 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
4103 contents have an odd number of INTERNAL_SIZE_T-sized words;
4105 d
= (INTERNAL_SIZE_T
*)mem
;
4106 clearsize
= csz
- SIZE_SZ
;
4107 nclears
= clearsize
/ sizeof(INTERNAL_SIZE_T
);
4108 assert(nclears
>= 3);
4111 MALLOC_ZERO(d
, clearsize
);
4137 public_iCALLOc(size_t n
, size_t elem_size
, Void_t
** chunks
)
4142 arena_get(ar_ptr
, n
*elem_size
);
4146 m
= _int_icalloc(ar_ptr
, n
, elem_size
, chunks
);
4147 (void)mutex_unlock(&ar_ptr
->mutex
);
4152 public_iCOMALLOc(size_t n
, size_t sizes
[], Void_t
** chunks
)
4157 arena_get(ar_ptr
, 0);
4161 m
= _int_icomalloc(ar_ptr
, n
, sizes
, chunks
);
4162 (void)mutex_unlock(&ar_ptr
->mutex
);
4167 public_cFREe(Void_t
* m
)
4175 public_mTRIm(size_t s
)
4179 if(__malloc_initialized
< 0)
4182 mstate ar_ptr
= &main_arena
;
4185 (void) mutex_lock (&ar_ptr
->mutex
);
4186 result
|= mTRIm (ar_ptr
, s
);
4187 (void) mutex_unlock (&ar_ptr
->mutex
);
4189 ar_ptr
= ar_ptr
->next
;
4191 while (ar_ptr
!= &main_arena
);
4197 public_mUSABLe(Void_t
* m
)
4201 result
= mUSABLe(m
);
4211 struct mallinfo
public_mALLINFo()
4215 if(__malloc_initialized
< 0)
4217 (void)mutex_lock(&main_arena
.mutex
);
4218 m
= mALLINFo(&main_arena
);
4219 (void)mutex_unlock(&main_arena
.mutex
);
4224 public_mALLOPt(int p
, int v
)
4227 result
= mALLOPt(p
, v
);
4232 ------------------------------ malloc ------------------------------
4236 _int_malloc(mstate av
, size_t bytes
)
4238 INTERNAL_SIZE_T nb
; /* normalized request size */
4239 unsigned int idx
; /* associated bin index */
4240 mbinptr bin
; /* associated bin */
4242 mchunkptr victim
; /* inspected/selected chunk */
4243 INTERNAL_SIZE_T size
; /* its size */
4244 int victim_index
; /* its bin index */
4246 mchunkptr remainder
; /* remainder from a split */
4247 unsigned long remainder_size
; /* its size */
4249 unsigned int block
; /* bit map traverser */
4250 unsigned int bit
; /* bit map traverser */
4251 unsigned int map
; /* current word of binmap */
4253 mchunkptr fwd
; /* misc temp for linking */
4254 mchunkptr bck
; /* misc temp for linking */
4256 const char *errstr
= NULL
;
4259 Convert request size to internal form by adding SIZE_SZ bytes
4260 overhead plus possibly more to obtain necessary alignment and/or
4261 to obtain a size of at least MINSIZE, the smallest allocatable
4262 size. Also, checked_request2size traps (returning 0) request sizes
4263 that are so large that they wrap around zero when padded and
4267 checked_request2size(bytes
, nb
);
4270 If the size qualifies as a fastbin, first check corresponding bin.
4271 This code is safe to execute even if av is not yet initialized, so we
4272 can try it without checking, which saves some time on this fast path.
4275 if ((unsigned long)(nb
) <= (unsigned long)(get_max_fast ())) {
4276 idx
= fastbin_index(nb
);
4277 mfastbinptr
* fb
= &fastbin (av
, idx
);
4278 #ifdef ATOMIC_FASTBINS
4286 while ((pp
= catomic_compare_and_exchange_val_acq (fb
, victim
->fd
, victim
))
4292 if (__builtin_expect (fastbin_index (chunksize (victim
)) != idx
, 0))
4294 errstr
= "malloc(): memory corruption (fast)";
4296 malloc_printerr (check_action
, errstr
, chunk2mem (victim
));
4299 #ifndef ATOMIC_FASTBINS
4302 check_remalloced_chunk(av
, victim
, nb
);
4303 void *p
= chunk2mem(victim
);
4304 if (__builtin_expect (perturb_byte
, 0))
4305 alloc_perturb (p
, bytes
);
4311 If a small request, check regular bin. Since these "smallbins"
4312 hold one size each, no searching within bins is necessary.
4313 (For a large request, we need to wait until unsorted chunks are
4314 processed to find best fit. But for small ones, fits are exact
4315 anyway, so we can check now, which is faster.)
4318 if (in_smallbin_range(nb
)) {
4319 idx
= smallbin_index(nb
);
4320 bin
= bin_at(av
,idx
);
4322 if ( (victim
= last(bin
)) != bin
) {
4323 if (victim
== 0) /* initialization check */
4324 malloc_consolidate(av
);
4327 if (__builtin_expect (bck
->fd
!= victim
, 0))
4329 errstr
= "malloc(): smallbin double linked list corrupted";
4332 set_inuse_bit_at_offset(victim
, nb
);
4336 if (av
!= &main_arena
)
4337 victim
->size
|= NON_MAIN_ARENA
;
4338 check_malloced_chunk(av
, victim
, nb
);
4339 void *p
= chunk2mem(victim
);
4340 if (__builtin_expect (perturb_byte
, 0))
4341 alloc_perturb (p
, bytes
);
4348 If this is a large request, consolidate fastbins before continuing.
4349 While it might look excessive to kill all fastbins before
4350 even seeing if there is space available, this avoids
4351 fragmentation problems normally associated with fastbins.
4352 Also, in practice, programs tend to have runs of either small or
4353 large requests, but less often mixtures, so consolidation is not
4354 invoked all that often in most programs. And the programs that
4355 it is called frequently in otherwise tend to fragment.
4359 idx
= largebin_index(nb
);
4360 if (have_fastchunks(av
))
4361 malloc_consolidate(av
);
4365 Process recently freed or remaindered chunks, taking one only if
4366 it is exact fit, or, if this a small request, the chunk is remainder from
4367 the most recent non-exact fit. Place other traversed chunks in
4368 bins. Note that this step is the only place in any routine where
4369 chunks are placed in bins.
4371 The outer loop here is needed because we might not realize until
4372 near the end of malloc that we should have consolidated, so must
4373 do so and retry. This happens at most once, and only when we would
4374 otherwise need to expand memory to service a "small" request.
4380 while ( (victim
= unsorted_chunks(av
)->bk
) != unsorted_chunks(av
)) {
4382 if (__builtin_expect (victim
->size
<= 2 * SIZE_SZ
, 0)
4383 || __builtin_expect (victim
->size
> av
->system_mem
, 0))
4384 malloc_printerr (check_action
, "malloc(): memory corruption",
4385 chunk2mem (victim
));
4386 size
= chunksize(victim
);
4389 If a small request, try to use last remainder if it is the
4390 only chunk in unsorted bin. This helps promote locality for
4391 runs of consecutive small requests. This is the only
4392 exception to best-fit, and applies only when there is
4393 no exact fit for a small chunk.
4396 if (in_smallbin_range(nb
) &&
4397 bck
== unsorted_chunks(av
) &&
4398 victim
== av
->last_remainder
&&
4399 (unsigned long)(size
) > (unsigned long)(nb
+ MINSIZE
)) {
4401 /* split and reattach remainder */
4402 remainder_size
= size
- nb
;
4403 remainder
= chunk_at_offset(victim
, nb
);
4404 unsorted_chunks(av
)->bk
= unsorted_chunks(av
)->fd
= remainder
;
4405 av
->last_remainder
= remainder
;
4406 remainder
->bk
= remainder
->fd
= unsorted_chunks(av
);
4407 if (!in_smallbin_range(remainder_size
))
4409 remainder
->fd_nextsize
= NULL
;
4410 remainder
->bk_nextsize
= NULL
;
4413 set_head(victim
, nb
| PREV_INUSE
|
4414 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4415 set_head(remainder
, remainder_size
| PREV_INUSE
);
4416 set_foot(remainder
, remainder_size
);
4418 check_malloced_chunk(av
, victim
, nb
);
4419 void *p
= chunk2mem(victim
);
4420 if (__builtin_expect (perturb_byte
, 0))
4421 alloc_perturb (p
, bytes
);
4425 /* remove from unsorted list */
4426 unsorted_chunks(av
)->bk
= bck
;
4427 bck
->fd
= unsorted_chunks(av
);
4429 /* Take now instead of binning if exact fit */
4432 set_inuse_bit_at_offset(victim
, size
);
4433 if (av
!= &main_arena
)
4434 victim
->size
|= NON_MAIN_ARENA
;
4435 check_malloced_chunk(av
, victim
, nb
);
4436 void *p
= chunk2mem(victim
);
4437 if (__builtin_expect (perturb_byte
, 0))
4438 alloc_perturb (p
, bytes
);
4442 /* place chunk in bin */
4444 if (in_smallbin_range(size
)) {
4445 victim_index
= smallbin_index(size
);
4446 bck
= bin_at(av
, victim_index
);
4450 victim_index
= largebin_index(size
);
4451 bck
= bin_at(av
, victim_index
);
4454 /* maintain large bins in sorted order */
4456 /* Or with inuse bit to speed comparisons */
4458 /* if smaller than smallest, bypass loop below */
4459 assert((bck
->bk
->size
& NON_MAIN_ARENA
) == 0);
4460 if ((unsigned long)(size
) < (unsigned long)(bck
->bk
->size
)) {
4464 victim
->fd_nextsize
= fwd
->fd
;
4465 victim
->bk_nextsize
= fwd
->fd
->bk_nextsize
;
4466 fwd
->fd
->bk_nextsize
= victim
->bk_nextsize
->fd_nextsize
= victim
;
4469 assert((fwd
->size
& NON_MAIN_ARENA
) == 0);
4470 while ((unsigned long) size
< fwd
->size
)
4472 fwd
= fwd
->fd_nextsize
;
4473 assert((fwd
->size
& NON_MAIN_ARENA
) == 0);
4476 if ((unsigned long) size
== (unsigned long) fwd
->size
)
4477 /* Always insert in the second position. */
4481 victim
->fd_nextsize
= fwd
;
4482 victim
->bk_nextsize
= fwd
->bk_nextsize
;
4483 fwd
->bk_nextsize
= victim
;
4484 victim
->bk_nextsize
->fd_nextsize
= victim
;
4489 victim
->fd_nextsize
= victim
->bk_nextsize
= victim
;
4492 mark_bin(av
, victim_index
);
4498 #define MAX_ITERS 10000
4499 if (++iters
>= MAX_ITERS
)
4504 If a large request, scan through the chunks of current bin in
4505 sorted order to find smallest that fits. Use the skip list for this.
4508 if (!in_smallbin_range(nb
)) {
4509 bin
= bin_at(av
, idx
);
4511 /* skip scan if empty or largest chunk is too small */
4512 if ((victim
= first(bin
)) != bin
&&
4513 (unsigned long)(victim
->size
) >= (unsigned long)(nb
)) {
4515 victim
= victim
->bk_nextsize
;
4516 while (((unsigned long)(size
= chunksize(victim
)) <
4517 (unsigned long)(nb
)))
4518 victim
= victim
->bk_nextsize
;
4520 /* Avoid removing the first entry for a size so that the skip
4521 list does not have to be rerouted. */
4522 if (victim
!= last(bin
) && victim
->size
== victim
->fd
->size
)
4523 victim
= victim
->fd
;
4525 remainder_size
= size
- nb
;
4526 unlink(victim
, bck
, fwd
);
4529 if (remainder_size
< MINSIZE
) {
4530 set_inuse_bit_at_offset(victim
, size
);
4531 if (av
!= &main_arena
)
4532 victim
->size
|= NON_MAIN_ARENA
;
4536 remainder
= chunk_at_offset(victim
, nb
);
4537 /* We cannot assume the unsorted list is empty and therefore
4538 have to perform a complete insert here. */
4539 bck
= unsorted_chunks(av
);
4541 if (__builtin_expect (fwd
->bk
!= bck
, 0))
4543 errstr
= "malloc(): corrupted unsorted chunks";
4546 remainder
->bk
= bck
;
4547 remainder
->fd
= fwd
;
4548 bck
->fd
= remainder
;
4549 fwd
->bk
= remainder
;
4550 if (!in_smallbin_range(remainder_size
))
4552 remainder
->fd_nextsize
= NULL
;
4553 remainder
->bk_nextsize
= NULL
;
4555 set_head(victim
, nb
| PREV_INUSE
|
4556 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4557 set_head(remainder
, remainder_size
| PREV_INUSE
);
4558 set_foot(remainder
, remainder_size
);
4560 check_malloced_chunk(av
, victim
, nb
);
4561 void *p
= chunk2mem(victim
);
4562 if (__builtin_expect (perturb_byte
, 0))
4563 alloc_perturb (p
, bytes
);
4569 Search for a chunk by scanning bins, starting with next largest
4570 bin. This search is strictly by best-fit; i.e., the smallest
4571 (with ties going to approximately the least recently used) chunk
4572 that fits is selected.
4574 The bitmap avoids needing to check that most blocks are nonempty.
4575 The particular case of skipping all bins during warm-up phases
4576 when no chunks have been returned yet is faster than it might look.
4580 bin
= bin_at(av
,idx
);
4581 block
= idx2block(idx
);
4582 map
= av
->binmap
[block
];
4587 /* Skip rest of block if there are no more set bits in this block. */
4588 if (bit
> map
|| bit
== 0) {
4590 if (++block
>= BINMAPSIZE
) /* out of bins */
4592 } while ( (map
= av
->binmap
[block
]) == 0);
4594 bin
= bin_at(av
, (block
<< BINMAPSHIFT
));
4598 /* Advance to bin with set bit. There must be one. */
4599 while ((bit
& map
) == 0) {
4600 bin
= next_bin(bin
);
4605 /* Inspect the bin. It is likely to be non-empty */
4608 /* If a false alarm (empty bin), clear the bit. */
4609 if (victim
== bin
) {
4610 av
->binmap
[block
] = map
&= ~bit
; /* Write through */
4611 bin
= next_bin(bin
);
4616 size
= chunksize(victim
);
4618 /* We know the first chunk in this bin is big enough to use. */
4619 assert((unsigned long)(size
) >= (unsigned long)(nb
));
4621 remainder_size
= size
- nb
;
4624 unlink(victim
, bck
, fwd
);
4627 if (remainder_size
< MINSIZE
) {
4628 set_inuse_bit_at_offset(victim
, size
);
4629 if (av
!= &main_arena
)
4630 victim
->size
|= NON_MAIN_ARENA
;
4635 remainder
= chunk_at_offset(victim
, nb
);
4637 /* We cannot assume the unsorted list is empty and therefore
4638 have to perform a complete insert here. */
4639 bck
= unsorted_chunks(av
);
4641 if (__builtin_expect (fwd
->bk
!= bck
, 0))
4643 errstr
= "malloc(): corrupted unsorted chunks 2";
4646 remainder
->bk
= bck
;
4647 remainder
->fd
= fwd
;
4648 bck
->fd
= remainder
;
4649 fwd
->bk
= remainder
;
4651 /* advertise as last remainder */
4652 if (in_smallbin_range(nb
))
4653 av
->last_remainder
= remainder
;
4654 if (!in_smallbin_range(remainder_size
))
4656 remainder
->fd_nextsize
= NULL
;
4657 remainder
->bk_nextsize
= NULL
;
4659 set_head(victim
, nb
| PREV_INUSE
|
4660 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4661 set_head(remainder
, remainder_size
| PREV_INUSE
);
4662 set_foot(remainder
, remainder_size
);
4664 check_malloced_chunk(av
, victim
, nb
);
4665 void *p
= chunk2mem(victim
);
4666 if (__builtin_expect (perturb_byte
, 0))
4667 alloc_perturb (p
, bytes
);
4674 If large enough, split off the chunk bordering the end of memory
4675 (held in av->top). Note that this is in accord with the best-fit
4676 search rule. In effect, av->top is treated as larger (and thus
4677 less well fitting) than any other available chunk since it can
4678 be extended to be as large as necessary (up to system
4681 We require that av->top always exists (i.e., has size >=
4682 MINSIZE) after initialization, so if it would otherwise be
4683 exhausted by current request, it is replenished. (The main
4684 reason for ensuring it exists is that we may need MINSIZE space
4685 to put in fenceposts in sysmalloc.)
4689 size
= chunksize(victim
);
4691 if ((unsigned long)(size
) >= (unsigned long)(nb
+ MINSIZE
)) {
4692 remainder_size
= size
- nb
;
4693 remainder
= chunk_at_offset(victim
, nb
);
4694 av
->top
= remainder
;
4695 set_head(victim
, nb
| PREV_INUSE
|
4696 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4697 set_head(remainder
, remainder_size
| PREV_INUSE
);
4699 check_malloced_chunk(av
, victim
, nb
);
4700 void *p
= chunk2mem(victim
);
4701 if (__builtin_expect (perturb_byte
, 0))
4702 alloc_perturb (p
, bytes
);
4706 #ifdef ATOMIC_FASTBINS
4707 /* When we are using atomic ops to free fast chunks we can get
4708 here for all block sizes. */
4709 else if (have_fastchunks(av
)) {
4710 malloc_consolidate(av
);
4711 /* restore original bin index */
4712 if (in_smallbin_range(nb
))
4713 idx
= smallbin_index(nb
);
4715 idx
= largebin_index(nb
);
4719 If there is space available in fastbins, consolidate and retry,
4720 to possibly avoid expanding memory. This can occur only if nb is
4721 in smallbin range so we didn't consolidate upon entry.
4724 else if (have_fastchunks(av
)) {
4725 assert(in_smallbin_range(nb
));
4726 malloc_consolidate(av
);
4727 idx
= smallbin_index(nb
); /* restore original bin index */
4732 Otherwise, relay to handle system-dependent cases
4735 void *p
= sYSMALLOc(nb
, av
);
4736 if (p
!= NULL
&& __builtin_expect (perturb_byte
, 0))
4737 alloc_perturb (p
, bytes
);
4744 ------------------------------ free ------------------------------
4748 #ifdef ATOMIC_FASTBINS
4749 _int_free(mstate av
, mchunkptr p
, int have_lock
)
4751 _int_free(mstate av
, mchunkptr p
)
4754 INTERNAL_SIZE_T size
; /* its size */
4755 mfastbinptr
* fb
; /* associated fastbin */
4756 mchunkptr nextchunk
; /* next contiguous chunk */
4757 INTERNAL_SIZE_T nextsize
; /* its size */
4758 int nextinuse
; /* true if nextchunk is used */
4759 INTERNAL_SIZE_T prevsize
; /* size of previous contiguous chunk */
4760 mchunkptr bck
; /* misc temp for linking */
4761 mchunkptr fwd
; /* misc temp for linking */
4763 const char *errstr
= NULL
;
4764 #ifdef ATOMIC_FASTBINS
4768 size
= chunksize(p
);
4770 /* Little security check which won't hurt performance: the
4771 allocator never wrapps around at the end of the address space.
4772 Therefore we can exclude some size values which might appear
4773 here by accident or by "design" from some intruder. */
4774 if (__builtin_expect ((uintptr_t) p
> (uintptr_t) -size
, 0)
4775 || __builtin_expect (misaligned_chunk (p
), 0))
4777 errstr
= "free(): invalid pointer";
4779 #ifdef ATOMIC_FASTBINS
4780 if (! have_lock
&& locked
)
4781 (void)mutex_unlock(&av
->mutex
);
4783 malloc_printerr (check_action
, errstr
, chunk2mem(p
));
4786 /* We know that each chunk is at least MINSIZE bytes in size. */
4787 if (__builtin_expect (size
< MINSIZE
, 0))
4789 errstr
= "free(): invalid size";
4793 check_inuse_chunk(av
, p
);
4796 If eligible, place chunk on a fastbin so it can be found
4797 and used quickly in malloc.
4800 if ((unsigned long)(size
) <= (unsigned long)(get_max_fast ())
4804 If TRIM_FASTBINS set, don't place chunks
4805 bordering top into fastbins
4807 && (chunk_at_offset(p
, size
) != av
->top
)
4811 if (__builtin_expect (chunk_at_offset (p
, size
)->size
<= 2 * SIZE_SZ
, 0)
4812 || __builtin_expect (chunksize (chunk_at_offset (p
, size
))
4813 >= av
->system_mem
, 0))
4815 #ifdef ATOMIC_FASTBINS
4816 /* We might not have a lock at this point and concurrent modifications
4817 of system_mem might have let to a false positive. Redo the test
4818 after getting the lock. */
4820 || ({ assert (locked
== 0);
4821 mutex_lock(&av
->mutex
);
4823 chunk_at_offset (p
, size
)->size
<= 2 * SIZE_SZ
4824 || chunksize (chunk_at_offset (p
, size
)) >= av
->system_mem
;
4828 errstr
= "free(): invalid next size (fast)";
4831 #ifdef ATOMIC_FASTBINS
4834 (void)mutex_unlock(&av
->mutex
);
4840 if (__builtin_expect (perturb_byte
, 0))
4841 free_perturb (chunk2mem(p
), size
- 2 * SIZE_SZ
);
4844 unsigned int idx
= fastbin_index(size
);
4845 fb
= &fastbin (av
, idx
);
4847 #ifdef ATOMIC_FASTBINS
4849 mchunkptr old
= *fb
;
4850 unsigned int old_idx
= ~0u;
4853 /* Another simple check: make sure the top of the bin is not the
4854 record we are going to add (i.e., double free). */
4855 if (__builtin_expect (old
== p
, 0))
4857 errstr
= "double free or corruption (fasttop)";
4861 old_idx
= fastbin_index(chunksize(old
));
4864 while ((old
= catomic_compare_and_exchange_val_rel (fb
, p
, fd
)) != fd
);
4866 if (fd
!= NULL
&& __builtin_expect (old_idx
!= idx
, 0))
4868 errstr
= "invalid fastbin entry (free)";
4872 /* Another simple check: make sure the top of the bin is not the
4873 record we are going to add (i.e., double free). */
4874 if (__builtin_expect (*fb
== p
, 0))
4876 errstr
= "double free or corruption (fasttop)";
4880 && __builtin_expect (fastbin_index(chunksize(*fb
)) != idx
, 0))
4882 errstr
= "invalid fastbin entry (free)";
4892 Consolidate other non-mmapped chunks as they arrive.
4895 else if (!chunk_is_mmapped(p
)) {
4896 #ifdef ATOMIC_FASTBINS
4899 if(!mutex_trylock(&av
->mutex
))
4900 ++(av
->stat_lock_direct
);
4902 (void)mutex_lock(&av
->mutex
);
4903 ++(av
->stat_lock_wait
);
4906 (void)mutex_lock(&av
->mutex
);
4912 nextchunk
= chunk_at_offset(p
, size
);
4914 /* Lightweight tests: check whether the block is already the
4916 if (__builtin_expect (p
== av
->top
, 0))
4918 errstr
= "double free or corruption (top)";
4921 /* Or whether the next chunk is beyond the boundaries of the arena. */
4922 if (__builtin_expect (contiguous (av
)
4923 && (char *) nextchunk
4924 >= ((char *) av
->top
+ chunksize(av
->top
)), 0))
4926 errstr
= "double free or corruption (out)";
4929 /* Or whether the block is actually not marked used. */
4930 if (__builtin_expect (!prev_inuse(nextchunk
), 0))
4932 errstr
= "double free or corruption (!prev)";
4936 nextsize
= chunksize(nextchunk
);
4937 if (__builtin_expect (nextchunk
->size
<= 2 * SIZE_SZ
, 0)
4938 || __builtin_expect (nextsize
>= av
->system_mem
, 0))
4940 errstr
= "free(): invalid next size (normal)";
4944 if (__builtin_expect (perturb_byte
, 0))
4945 free_perturb (chunk2mem(p
), size
- 2 * SIZE_SZ
);
4947 /* consolidate backward */
4948 if (!prev_inuse(p
)) {
4949 prevsize
= p
->prev_size
;
4951 p
= chunk_at_offset(p
, -((long) prevsize
));
4952 unlink(p
, bck
, fwd
);
4955 if (nextchunk
!= av
->top
) {
4956 /* get and clear inuse bit */
4957 nextinuse
= inuse_bit_at_offset(nextchunk
, nextsize
);
4959 /* consolidate forward */
4961 unlink(nextchunk
, bck
, fwd
);
4964 clear_inuse_bit_at_offset(nextchunk
, 0);
4967 Place the chunk in unsorted chunk list. Chunks are
4968 not placed into regular bins until after they have
4969 been given one chance to be used in malloc.
4972 bck
= unsorted_chunks(av
);
4974 if (__builtin_expect (fwd
->bk
!= bck
, 0))
4976 errstr
= "free(): corrupted unsorted chunks";
4981 if (!in_smallbin_range(size
))
4983 p
->fd_nextsize
= NULL
;
4984 p
->bk_nextsize
= NULL
;
4989 set_head(p
, size
| PREV_INUSE
);
4992 check_free_chunk(av
, p
);
4996 If the chunk borders the current high end of memory,
4997 consolidate into top
5002 set_head(p
, size
| PREV_INUSE
);
5008 If freeing a large space, consolidate possibly-surrounding
5009 chunks. Then, if the total unused topmost memory exceeds trim
5010 threshold, ask malloc_trim to reduce top.
5012 Unless max_fast is 0, we don't know if there are fastbins
5013 bordering top, so we cannot tell for sure whether threshold
5014 has been reached unless fastbins are consolidated. But we
5015 don't want to consolidate on each free. As a compromise,
5016 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
5020 if ((unsigned long)(size
) >= FASTBIN_CONSOLIDATION_THRESHOLD
) {
5021 if (have_fastchunks(av
))
5022 malloc_consolidate(av
);
5024 if (av
== &main_arena
) {
5025 #ifndef MORECORE_CANNOT_TRIM
5026 if ((unsigned long)(chunksize(av
->top
)) >=
5027 (unsigned long)(mp_
.trim_threshold
))
5028 sYSTRIm(mp_
.top_pad
, av
);
5031 /* Always try heap_trim(), even if the top chunk is not
5032 large, because the corresponding heap might go away. */
5033 heap_info
*heap
= heap_for_ptr(top(av
));
5035 assert(heap
->ar_ptr
== av
);
5036 heap_trim(heap
, mp_
.top_pad
);
5040 #ifdef ATOMIC_FASTBINS
5043 (void)mutex_unlock(&av
->mutex
);
5048 If the chunk was allocated via mmap, release via munmap(). Note
5049 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
5050 user must have overwritten memory. There's nothing we can do to
5051 catch this error unless MALLOC_DEBUG is set, in which case
5052 check_inuse_chunk (above) will have triggered error.
5063 ------------------------- malloc_consolidate -------------------------
5065 malloc_consolidate is a specialized version of free() that tears
5066 down chunks held in fastbins. Free itself cannot be used for this
5067 purpose since, among other things, it might place chunks back onto
5068 fastbins. So, instead, we need to use a minor variant of the same
5071 Also, because this routine needs to be called the first time through
5072 malloc anyway, it turns out to be the perfect place to trigger
5073 initialization code.
5077 static void malloc_consolidate(mstate av
)
5079 static void malloc_consolidate(av
) mstate av
;
5082 mfastbinptr
* fb
; /* current fastbin being consolidated */
5083 mfastbinptr
* maxfb
; /* last fastbin (for loop control) */
5084 mchunkptr p
; /* current chunk being consolidated */
5085 mchunkptr nextp
; /* next chunk to consolidate */
5086 mchunkptr unsorted_bin
; /* bin header */
5087 mchunkptr first_unsorted
; /* chunk to link to */
5089 /* These have same use as in free() */
5090 mchunkptr nextchunk
;
5091 INTERNAL_SIZE_T size
;
5092 INTERNAL_SIZE_T nextsize
;
5093 INTERNAL_SIZE_T prevsize
;
5099 If max_fast is 0, we know that av hasn't
5100 yet been initialized, in which case do so below
5103 if (get_max_fast () != 0) {
5104 clear_fastchunks(av
);
5106 unsorted_bin
= unsorted_chunks(av
);
5109 Remove each chunk from fast bin and consolidate it, placing it
5110 then in unsorted bin. Among other reasons for doing this,
5111 placing in unsorted bin avoids needing to calculate actual bins
5112 until malloc is sure that chunks aren't immediately going to be
5117 /* It is wrong to limit the fast bins to search using get_max_fast
5118 because, except for the main arena, all the others might have
5119 blocks in the high fast bins. It's not worth it anyway, just
5120 search all bins all the time. */
5121 maxfb
= &fastbin (av
, fastbin_index(get_max_fast ()));
5123 maxfb
= &fastbin (av
, NFASTBINS
- 1);
5125 fb
= &fastbin (av
, 0);
5127 #ifdef ATOMIC_FASTBINS
5128 p
= atomic_exchange_acq (fb
, 0);
5133 #ifndef ATOMIC_FASTBINS
5137 check_inuse_chunk(av
, p
);
5140 /* Slightly streamlined version of consolidation code in free() */
5141 size
= p
->size
& ~(PREV_INUSE
|NON_MAIN_ARENA
);
5142 nextchunk
= chunk_at_offset(p
, size
);
5143 nextsize
= chunksize(nextchunk
);
5145 if (!prev_inuse(p
)) {
5146 prevsize
= p
->prev_size
;
5148 p
= chunk_at_offset(p
, -((long) prevsize
));
5149 unlink(p
, bck
, fwd
);
5152 if (nextchunk
!= av
->top
) {
5153 nextinuse
= inuse_bit_at_offset(nextchunk
, nextsize
);
5157 unlink(nextchunk
, bck
, fwd
);
5159 clear_inuse_bit_at_offset(nextchunk
, 0);
5161 first_unsorted
= unsorted_bin
->fd
;
5162 unsorted_bin
->fd
= p
;
5163 first_unsorted
->bk
= p
;
5165 if (!in_smallbin_range (size
)) {
5166 p
->fd_nextsize
= NULL
;
5167 p
->bk_nextsize
= NULL
;
5170 set_head(p
, size
| PREV_INUSE
);
5171 p
->bk
= unsorted_bin
;
5172 p
->fd
= first_unsorted
;
5178 set_head(p
, size
| PREV_INUSE
);
5182 } while ( (p
= nextp
) != 0);
5185 } while (fb
++ != maxfb
);
5188 malloc_init_state(av
);
5189 check_malloc_state(av
);
5194 ------------------------------ realloc ------------------------------
5198 _int_realloc(mstate av
, mchunkptr oldp
, INTERNAL_SIZE_T oldsize
,
5201 mchunkptr newp
; /* chunk to return */
5202 INTERNAL_SIZE_T newsize
; /* its size */
5203 Void_t
* newmem
; /* corresponding user mem */
5205 mchunkptr next
; /* next contiguous chunk after oldp */
5207 mchunkptr remainder
; /* extra space at end of newp */
5208 unsigned long remainder_size
; /* its size */
5210 mchunkptr bck
; /* misc temp for linking */
5211 mchunkptr fwd
; /* misc temp for linking */
5213 unsigned long copysize
; /* bytes to copy */
5214 unsigned int ncopies
; /* INTERNAL_SIZE_T words to copy */
5215 INTERNAL_SIZE_T
* s
; /* copy source */
5216 INTERNAL_SIZE_T
* d
; /* copy destination */
5218 const char *errstr
= NULL
;
5221 if (__builtin_expect (oldp
->size
<= 2 * SIZE_SZ
, 0)
5222 || __builtin_expect (oldsize
>= av
->system_mem
, 0))
5224 errstr
= "realloc(): invalid old size";
5226 malloc_printerr (check_action
, errstr
, chunk2mem(oldp
));
5230 check_inuse_chunk(av
, oldp
);
5232 /* All callers already filter out mmap'ed chunks. */
5234 if (!chunk_is_mmapped(oldp
))
5236 assert (!chunk_is_mmapped(oldp
));
5240 next
= chunk_at_offset(oldp
, oldsize
);
5241 INTERNAL_SIZE_T nextsize
= chunksize(next
);
5242 if (__builtin_expect (next
->size
<= 2 * SIZE_SZ
, 0)
5243 || __builtin_expect (nextsize
>= av
->system_mem
, 0))
5245 errstr
= "realloc(): invalid next size";
5249 if ((unsigned long)(oldsize
) >= (unsigned long)(nb
)) {
5250 /* already big enough; split below */
5256 /* Try to expand forward into top */
5257 if (next
== av
->top
&&
5258 (unsigned long)(newsize
= oldsize
+ nextsize
) >=
5259 (unsigned long)(nb
+ MINSIZE
)) {
5260 set_head_size(oldp
, nb
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
5261 av
->top
= chunk_at_offset(oldp
, nb
);
5262 set_head(av
->top
, (newsize
- nb
) | PREV_INUSE
);
5263 check_inuse_chunk(av
, oldp
);
5264 return chunk2mem(oldp
);
5267 /* Try to expand forward into next chunk; split off remainder below */
5268 else if (next
!= av
->top
&&
5270 (unsigned long)(newsize
= oldsize
+ nextsize
) >=
5271 (unsigned long)(nb
)) {
5273 unlink(next
, bck
, fwd
);
5276 /* allocate, copy, free */
5278 newmem
= _int_malloc(av
, nb
- MALLOC_ALIGN_MASK
);
5280 return 0; /* propagate failure */
5282 newp
= mem2chunk(newmem
);
5283 newsize
= chunksize(newp
);
5286 Avoid copy if newp is next chunk after oldp.
5294 Unroll copy of <= 36 bytes (72 if 8byte sizes)
5295 We know that contents have an odd number of
5296 INTERNAL_SIZE_T-sized words; minimally 3.
5299 copysize
= oldsize
- SIZE_SZ
;
5300 s
= (INTERNAL_SIZE_T
*)(chunk2mem(oldp
));
5301 d
= (INTERNAL_SIZE_T
*)(newmem
);
5302 ncopies
= copysize
/ sizeof(INTERNAL_SIZE_T
);
5303 assert(ncopies
>= 3);
5306 MALLOC_COPY(d
, s
, copysize
);
5326 #ifdef ATOMIC_FASTBINS
5327 _int_free(av
, oldp
, 1);
5329 _int_free(av
, oldp
);
5331 check_inuse_chunk(av
, newp
);
5332 return chunk2mem(newp
);
5337 /* If possible, free extra space in old or extended chunk */
5339 assert((unsigned long)(newsize
) >= (unsigned long)(nb
));
5341 remainder_size
= newsize
- nb
;
5343 if (remainder_size
< MINSIZE
) { /* not enough extra to split off */
5344 set_head_size(newp
, newsize
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
5345 set_inuse_bit_at_offset(newp
, newsize
);
5347 else { /* split remainder */
5348 remainder
= chunk_at_offset(newp
, nb
);
5349 set_head_size(newp
, nb
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
5350 set_head(remainder
, remainder_size
| PREV_INUSE
|
5351 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
5352 /* Mark remainder as inuse so free() won't complain */
5353 set_inuse_bit_at_offset(remainder
, remainder_size
);
5354 #ifdef ATOMIC_FASTBINS
5355 _int_free(av
, remainder
, 1);
5357 _int_free(av
, remainder
);
5361 check_inuse_chunk(av
, newp
);
5362 return chunk2mem(newp
);
5374 INTERNAL_SIZE_T offset
= oldp
->prev_size
;
5375 size_t pagemask
= mp_
.pagesize
- 1;
5379 /* Note the extra SIZE_SZ overhead */
5380 newsize
= (nb
+ offset
+ SIZE_SZ
+ pagemask
) & ~pagemask
;
5382 /* don't need to remap if still within same page */
5383 if (oldsize
== newsize
- offset
)
5384 return chunk2mem(oldp
);
5386 cp
= (char*)mremap((char*)oldp
- offset
, oldsize
+ offset
, newsize
, 1);
5388 if (cp
!= MAP_FAILED
) {
5390 newp
= (mchunkptr
)(cp
+ offset
);
5391 set_head(newp
, (newsize
- offset
)|IS_MMAPPED
);
5393 assert(aligned_OK(chunk2mem(newp
)));
5394 assert((newp
->prev_size
== offset
));
5396 /* update statistics */
5397 sum
= mp_
.mmapped_mem
+= newsize
- oldsize
;
5398 if (sum
> (unsigned long)(mp_
.max_mmapped_mem
))
5399 mp_
.max_mmapped_mem
= sum
;
5401 sum
+= main_arena
.system_mem
;
5402 if (sum
> (unsigned long)(mp_
.max_total_mem
))
5403 mp_
.max_total_mem
= sum
;
5406 return chunk2mem(newp
);
5410 /* Note the extra SIZE_SZ overhead. */
5411 if ((unsigned long)(oldsize
) >= (unsigned long)(nb
+ SIZE_SZ
))
5412 newmem
= chunk2mem(oldp
); /* do nothing */
5414 /* Must alloc, copy, free. */
5415 newmem
= _int_malloc(av
, nb
- MALLOC_ALIGN_MASK
);
5417 MALLOC_COPY(newmem
, chunk2mem(oldp
), oldsize
- 2*SIZE_SZ
);
5418 #ifdef ATOMIC_FASTBINS
5419 _int_free(av
, oldp
, 1);
5421 _int_free(av
, oldp
);
5428 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
5429 check_malloc_state(av
);
5430 MALLOC_FAILURE_ACTION
;
5438 ------------------------------ memalign ------------------------------
5442 _int_memalign(mstate av
, size_t alignment
, size_t bytes
)
5444 INTERNAL_SIZE_T nb
; /* padded request size */
5445 char* m
; /* memory returned by malloc call */
5446 mchunkptr p
; /* corresponding chunk */
5447 char* brk
; /* alignment point within p */
5448 mchunkptr newp
; /* chunk to return */
5449 INTERNAL_SIZE_T newsize
; /* its size */
5450 INTERNAL_SIZE_T leadsize
; /* leading space before alignment point */
5451 mchunkptr remainder
; /* spare room at end to split off */
5452 unsigned long remainder_size
; /* its size */
5453 INTERNAL_SIZE_T size
;
5455 /* If need less alignment than we give anyway, just relay to malloc */
5457 if (alignment
<= MALLOC_ALIGNMENT
) return _int_malloc(av
, bytes
);
5459 /* Otherwise, ensure that it is at least a minimum chunk size */
5461 if (alignment
< MINSIZE
) alignment
= MINSIZE
;
5463 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
5464 if ((alignment
& (alignment
- 1)) != 0) {
5465 size_t a
= MALLOC_ALIGNMENT
* 2;
5466 while ((unsigned long)a
< (unsigned long)alignment
) a
<<= 1;
5470 checked_request2size(bytes
, nb
);
5473 Strategy: find a spot within that chunk that meets the alignment
5474 request, and then possibly free the leading and trailing space.
5478 /* Call malloc with worst case padding to hit alignment. */
5480 m
= (char*)(_int_malloc(av
, nb
+ alignment
+ MINSIZE
));
5482 if (m
== 0) return 0; /* propagate failure */
5486 if ((((unsigned long)(m
)) % alignment
) != 0) { /* misaligned */
5489 Find an aligned spot inside chunk. Since we need to give back
5490 leading space in a chunk of at least MINSIZE, if the first
5491 calculation places us at a spot with less than MINSIZE leader,
5492 we can move to the next aligned spot -- we've allocated enough
5493 total room so that this is always possible.
5496 brk
= (char*)mem2chunk(((unsigned long)(m
+ alignment
- 1)) &
5497 -((signed long) alignment
));
5498 if ((unsigned long)(brk
- (char*)(p
)) < MINSIZE
)
5501 newp
= (mchunkptr
)brk
;
5502 leadsize
= brk
- (char*)(p
);
5503 newsize
= chunksize(p
) - leadsize
;
5505 /* For mmapped chunks, just adjust offset */
5506 if (chunk_is_mmapped(p
)) {
5507 newp
->prev_size
= p
->prev_size
+ leadsize
;
5508 set_head(newp
, newsize
|IS_MMAPPED
);
5509 return chunk2mem(newp
);
5512 /* Otherwise, give back leader, use the rest */
5513 set_head(newp
, newsize
| PREV_INUSE
|
5514 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
5515 set_inuse_bit_at_offset(newp
, newsize
);
5516 set_head_size(p
, leadsize
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
5517 #ifdef ATOMIC_FASTBINS
5518 _int_free(av
, p
, 1);
5524 assert (newsize
>= nb
&&
5525 (((unsigned long)(chunk2mem(p
))) % alignment
) == 0);
5528 /* Also give back spare room at the end */
5529 if (!chunk_is_mmapped(p
)) {
5530 size
= chunksize(p
);
5531 if ((unsigned long)(size
) > (unsigned long)(nb
+ MINSIZE
)) {
5532 remainder_size
= size
- nb
;
5533 remainder
= chunk_at_offset(p
, nb
);
5534 set_head(remainder
, remainder_size
| PREV_INUSE
|
5535 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
5536 set_head_size(p
, nb
);
5537 #ifdef ATOMIC_FASTBINS
5538 _int_free(av
, remainder
, 1);
5540 _int_free(av
, remainder
);
5545 check_inuse_chunk(av
, p
);
5546 return chunk2mem(p
);
5551 ------------------------------ calloc ------------------------------
5555 Void_t
* cALLOc(size_t n_elements
, size_t elem_size
)
5557 Void_t
* cALLOc(n_elements
, elem_size
) size_t n_elements
; size_t elem_size
;
5561 unsigned long clearsize
;
5562 unsigned long nclears
;
5565 Void_t
* mem
= mALLOc(n_elements
* elem_size
);
5571 if (!chunk_is_mmapped(p
)) /* don't need to clear mmapped space */
5575 Unroll clear of <= 36 bytes (72 if 8byte sizes)
5576 We know that contents have an odd number of
5577 INTERNAL_SIZE_T-sized words; minimally 3.
5580 d
= (INTERNAL_SIZE_T
*)mem
;
5581 clearsize
= chunksize(p
) - SIZE_SZ
;
5582 nclears
= clearsize
/ sizeof(INTERNAL_SIZE_T
);
5583 assert(nclears
>= 3);
5586 MALLOC_ZERO(d
, clearsize
);
5613 ------------------------- independent_calloc -------------------------
5618 _int_icalloc(mstate av
, size_t n_elements
, size_t elem_size
, Void_t
* chunks
[])
5620 _int_icalloc(av
, n_elements
, elem_size
, chunks
)
5621 mstate av
; size_t n_elements
; size_t elem_size
; Void_t
* chunks
[];
5624 size_t sz
= elem_size
; /* serves as 1-element array */
5625 /* opts arg of 3 means all elements are same size, and should be cleared */
5626 return iALLOc(av
, n_elements
, &sz
, 3, chunks
);
5630 ------------------------- independent_comalloc -------------------------
5635 _int_icomalloc(mstate av
, size_t n_elements
, size_t sizes
[], Void_t
* chunks
[])
5637 _int_icomalloc(av
, n_elements
, sizes
, chunks
)
5638 mstate av
; size_t n_elements
; size_t sizes
[]; Void_t
* chunks
[];
5641 return iALLOc(av
, n_elements
, sizes
, 0, chunks
);
5646 ------------------------------ ialloc ------------------------------
5647 ialloc provides common support for independent_X routines, handling all of
5648 the combinations that can result.
5651 bit 0 set if all elements are same size (using sizes[0])
5652 bit 1 set if elements should be zeroed
5658 iALLOc(mstate av
, size_t n_elements
, size_t* sizes
, int opts
, Void_t
* chunks
[])
5660 iALLOc(av
, n_elements
, sizes
, opts
, chunks
)
5661 mstate av
; size_t n_elements
; size_t* sizes
; int opts
; Void_t
* chunks
[];
5664 INTERNAL_SIZE_T element_size
; /* chunksize of each element, if all same */
5665 INTERNAL_SIZE_T contents_size
; /* total size of elements */
5666 INTERNAL_SIZE_T array_size
; /* request size of pointer array */
5667 Void_t
* mem
; /* malloced aggregate space */
5668 mchunkptr p
; /* corresponding chunk */
5669 INTERNAL_SIZE_T remainder_size
; /* remaining bytes while splitting */
5670 Void_t
** marray
; /* either "chunks" or malloced ptr array */
5671 mchunkptr array_chunk
; /* chunk for malloced ptr array */
5672 int mmx
; /* to disable mmap */
5673 INTERNAL_SIZE_T size
;
5674 INTERNAL_SIZE_T size_flags
;
5677 /* Ensure initialization/consolidation */
5678 if (have_fastchunks(av
)) malloc_consolidate(av
);
5680 /* compute array length, if needed */
5682 if (n_elements
== 0)
5683 return chunks
; /* nothing to do */
5688 /* if empty req, must still return chunk representing empty array */
5689 if (n_elements
== 0)
5690 return (Void_t
**) _int_malloc(av
, 0);
5692 array_size
= request2size(n_elements
* (sizeof(Void_t
*)));
5695 /* compute total element size */
5696 if (opts
& 0x1) { /* all-same-size */
5697 element_size
= request2size(*sizes
);
5698 contents_size
= n_elements
* element_size
;
5700 else { /* add up all the sizes */
5703 for (i
= 0; i
!= n_elements
; ++i
)
5704 contents_size
+= request2size(sizes
[i
]);
5707 /* subtract out alignment bytes from total to minimize overallocation */
5708 size
= contents_size
+ array_size
- MALLOC_ALIGN_MASK
;
5711 Allocate the aggregate chunk.
5712 But first disable mmap so malloc won't use it, since
5713 we would not be able to later free/realloc space internal
5714 to a segregated mmap region.
5716 mmx
= mp_
.n_mmaps_max
; /* disable mmap */
5717 mp_
.n_mmaps_max
= 0;
5718 mem
= _int_malloc(av
, size
);
5719 mp_
.n_mmaps_max
= mmx
; /* reset mmap */
5724 assert(!chunk_is_mmapped(p
));
5725 remainder_size
= chunksize(p
);
5727 if (opts
& 0x2) { /* optionally clear the elements */
5728 MALLOC_ZERO(mem
, remainder_size
- SIZE_SZ
- array_size
);
5731 size_flags
= PREV_INUSE
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0);
5733 /* If not provided, allocate the pointer array as final part of chunk */
5735 array_chunk
= chunk_at_offset(p
, contents_size
);
5736 marray
= (Void_t
**) (chunk2mem(array_chunk
));
5737 set_head(array_chunk
, (remainder_size
- contents_size
) | size_flags
);
5738 remainder_size
= contents_size
;
5741 /* split out elements */
5742 for (i
= 0; ; ++i
) {
5743 marray
[i
] = chunk2mem(p
);
5744 if (i
!= n_elements
-1) {
5745 if (element_size
!= 0)
5746 size
= element_size
;
5748 size
= request2size(sizes
[i
]);
5749 remainder_size
-= size
;
5750 set_head(p
, size
| size_flags
);
5751 p
= chunk_at_offset(p
, size
);
5753 else { /* the final element absorbs any overallocation slop */
5754 set_head(p
, remainder_size
| size_flags
);
5760 if (marray
!= chunks
) {
5761 /* final element must have exactly exhausted chunk */
5762 if (element_size
!= 0)
5763 assert(remainder_size
== element_size
);
5765 assert(remainder_size
== request2size(sizes
[i
]));
5766 check_inuse_chunk(av
, mem2chunk(marray
));
5769 for (i
= 0; i
!= n_elements
; ++i
)
5770 check_inuse_chunk(av
, mem2chunk(marray
[i
]));
5779 ------------------------------ valloc ------------------------------
5784 _int_valloc(mstate av
, size_t bytes
)
5786 _int_valloc(av
, bytes
) mstate av
; size_t bytes
;
5789 /* Ensure initialization/consolidation */
5790 if (have_fastchunks(av
)) malloc_consolidate(av
);
5791 return _int_memalign(av
, mp_
.pagesize
, bytes
);
5795 ------------------------------ pvalloc ------------------------------
5801 _int_pvalloc(mstate av
, size_t bytes
)
5803 _int_pvalloc(av
, bytes
) mstate av
, size_t bytes
;
5808 /* Ensure initialization/consolidation */
5809 if (have_fastchunks(av
)) malloc_consolidate(av
);
5810 pagesz
= mp_
.pagesize
;
5811 return _int_memalign(av
, pagesz
, (bytes
+ pagesz
- 1) & ~(pagesz
- 1));
5816 ------------------------------ malloc_trim ------------------------------
5820 static int mTRIm(mstate av
, size_t pad
)
5822 static int mTRIm(av
, pad
) mstate av
; size_t pad
;
5825 /* Ensure initialization/consolidation */
5826 malloc_consolidate (av
);
5828 const size_t ps
= mp_
.pagesize
;
5829 int psindex
= bin_index (ps
);
5830 const size_t psm1
= ps
- 1;
5833 for (int i
= 1; i
< NBINS
; ++i
)
5834 if (i
== 1 || i
>= psindex
)
5836 mbinptr bin
= bin_at (av
, i
);
5838 for (mchunkptr p
= last (bin
); p
!= bin
; p
= p
->bk
)
5840 INTERNAL_SIZE_T size
= chunksize (p
);
5842 if (size
> psm1
+ sizeof (struct malloc_chunk
))
5844 /* See whether the chunk contains at least one unused page. */
5845 char *paligned_mem
= (char *) (((uintptr_t) p
5846 + sizeof (struct malloc_chunk
)
5849 assert ((char *) chunk2mem (p
) + 4 * SIZE_SZ
<= paligned_mem
);
5850 assert ((char *) p
+ size
> paligned_mem
);
5852 /* This is the size we could potentially free. */
5853 size
-= paligned_mem
- (char *) p
;
5858 /* When debugging we simulate destroying the memory
5860 memset (paligned_mem
, 0x89, size
& ~psm1
);
5862 madvise (paligned_mem
, size
& ~psm1
, MADV_DONTNEED
);
5870 #ifndef MORECORE_CANNOT_TRIM
5871 return result
| (av
== &main_arena
? sYSTRIm (pad
, av
) : 0);
5879 ------------------------- malloc_usable_size -------------------------
5883 size_t mUSABLe(Void_t
* mem
)
5885 size_t mUSABLe(mem
) Void_t
* mem
;
5891 if (chunk_is_mmapped(p
))
5892 return chunksize(p
) - 2*SIZE_SZ
;
5894 return chunksize(p
) - SIZE_SZ
;
5900 ------------------------------ mallinfo ------------------------------
5903 struct mallinfo
mALLINFo(mstate av
)
5909 INTERNAL_SIZE_T avail
;
5910 INTERNAL_SIZE_T fastavail
;
5914 /* Ensure initialization */
5915 if (av
->top
== 0) malloc_consolidate(av
);
5917 check_malloc_state(av
);
5919 /* Account for top */
5920 avail
= chunksize(av
->top
);
5921 nblocks
= 1; /* top always exists */
5923 /* traverse fastbins */
5927 for (i
= 0; i
< NFASTBINS
; ++i
) {
5928 for (p
= fastbin (av
, i
); p
!= 0; p
= p
->fd
) {
5930 fastavail
+= chunksize(p
);
5936 /* traverse regular bins */
5937 for (i
= 1; i
< NBINS
; ++i
) {
5939 for (p
= last(b
); p
!= b
; p
= p
->bk
) {
5941 avail
+= chunksize(p
);
5945 mi
.smblks
= nfastblocks
;
5946 mi
.ordblks
= nblocks
;
5947 mi
.fordblks
= avail
;
5948 mi
.uordblks
= av
->system_mem
- avail
;
5949 mi
.arena
= av
->system_mem
;
5950 mi
.hblks
= mp_
.n_mmaps
;
5951 mi
.hblkhd
= mp_
.mmapped_mem
;
5952 mi
.fsmblks
= fastavail
;
5953 mi
.keepcost
= chunksize(av
->top
);
5954 mi
.usmblks
= mp_
.max_total_mem
;
5959 ------------------------------ malloc_stats ------------------------------
5967 unsigned int in_use_b
= mp_
.mmapped_mem
, system_b
= in_use_b
;
5969 long stat_lock_direct
= 0, stat_lock_loop
= 0, stat_lock_wait
= 0;
5972 if(__malloc_initialized
< 0)
5975 _IO_flockfile (stderr
);
5976 int old_flags2
= ((_IO_FILE
*) stderr
)->_flags2
;
5977 ((_IO_FILE
*) stderr
)->_flags2
|= _IO_FLAGS2_NOTCANCEL
;
5979 for (i
=0, ar_ptr
= &main_arena
;; i
++) {
5980 (void)mutex_lock(&ar_ptr
->mutex
);
5981 mi
= mALLINFo(ar_ptr
);
5982 fprintf(stderr
, "Arena %d:\n", i
);
5983 fprintf(stderr
, "system bytes = %10u\n", (unsigned int)mi
.arena
);
5984 fprintf(stderr
, "in use bytes = %10u\n", (unsigned int)mi
.uordblks
);
5985 #if MALLOC_DEBUG > 1
5987 dump_heap(heap_for_ptr(top(ar_ptr
)));
5989 system_b
+= mi
.arena
;
5990 in_use_b
+= mi
.uordblks
;
5992 stat_lock_direct
+= ar_ptr
->stat_lock_direct
;
5993 stat_lock_loop
+= ar_ptr
->stat_lock_loop
;
5994 stat_lock_wait
+= ar_ptr
->stat_lock_wait
;
5996 (void)mutex_unlock(&ar_ptr
->mutex
);
5997 ar_ptr
= ar_ptr
->next
;
5998 if(ar_ptr
== &main_arena
) break;
6001 fprintf(stderr
, "Total (incl. mmap):\n");
6003 fprintf(stderr
, "Total:\n");
6005 fprintf(stderr
, "system bytes = %10u\n", system_b
);
6006 fprintf(stderr
, "in use bytes = %10u\n", in_use_b
);
6008 fprintf(stderr
, "max system bytes = %10u\n", (unsigned int)mp_
.max_total_mem
);
6011 fprintf(stderr
, "max mmap regions = %10u\n", (unsigned int)mp_
.max_n_mmaps
);
6012 fprintf(stderr
, "max mmap bytes = %10lu\n",
6013 (unsigned long)mp_
.max_mmapped_mem
);
6016 fprintf(stderr
, "heaps created = %10d\n", stat_n_heaps
);
6017 fprintf(stderr
, "locked directly = %10ld\n", stat_lock_direct
);
6018 fprintf(stderr
, "locked in loop = %10ld\n", stat_lock_loop
);
6019 fprintf(stderr
, "locked waiting = %10ld\n", stat_lock_wait
);
6020 fprintf(stderr
, "locked total = %10ld\n",
6021 stat_lock_direct
+ stat_lock_loop
+ stat_lock_wait
);
6024 ((_IO_FILE
*) stderr
)->_flags2
|= old_flags2
;
6025 _IO_funlockfile (stderr
);
6031 ------------------------------ mallopt ------------------------------
#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  mstate av = &main_arena;
  int res = 1;

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  (void)mutex_lock(&av->mutex);
  /* Ensure initialization/consolidation */
  malloc_consolidate(av);

  switch(param_number) {
  case M_MXFAST:
    if (value >= 0 && value <= MAX_FAST_SIZE) {
      set_max_fast(value);
    }
    else
      res = 0;
    break;

  case M_TRIM_THRESHOLD:
    mp_.trim_threshold = value;
    mp_.no_dyn_threshold = 1;
    break;

  case M_TOP_PAD:
    mp_.top_pad = value;
    mp_.no_dyn_threshold = 1;
    break;

  case M_MMAP_THRESHOLD:
    /* Forbid setting the threshold too high. */
    if((unsigned long)value > HEAP_MAX_SIZE/2)
      res = 0;
    else {
      mp_.mmap_threshold = value;
      mp_.no_dyn_threshold = 1;
    }
    break;

  case M_MMAP_MAX:
    mp_.n_mmaps_max = value;
    mp_.no_dyn_threshold = 1;
    break;

  case M_CHECK_ACTION:
    check_action = value;
    break;

  case M_PERTURB:
    perturb_byte = value;
    break;

  case M_ARENA_TEST:
    mp_.arena_test = value;
    break;

  case M_ARENA_MAX:
    mp_.arena_max = value;
    break;
  }
  (void)mutex_unlock(&av->mutex);
  return res;
}
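/* Example (a sketch, not part of ptmalloc2 proper): tuning the allocator
   through the public mallopt() interface; the M_* constants are the standard
   <malloc.h> names handled by the switch above.  mallopt() returns 1 on
   success and 0 on failure.  */
#if 0
#include <malloc.h>

static int tune_allocator (void)
{
  /* Serve requests below 1 MiB from the heap instead of mmap.  */
  if (mallopt (M_MMAP_THRESHOLD, 1024 * 1024) == 0)
    return -1;
  /* Allow up to 256 KiB of free space at the top before trimming.  */
  if (mallopt (M_TRIM_THRESHOLD, 256 * 1024) == 0)
    return -1;
  return 0;
}
#endif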
/*
  -------------------- Alternative MORECORE functions --------------------
*/
  General Requirements for MORECORE.

  The MORECORE function must have the following properties:

  If MORECORE_CONTIGUOUS is false:

    * MORECORE must allocate in multiples of pagesize. It will
      only be called with arguments that are multiples of pagesize.

    * MORECORE(0) must return an address that is at least
      MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)

  else (i.e. If MORECORE_CONTIGUOUS is true):

    * Consecutive calls to MORECORE with positive arguments
      return increasing addresses, indicating that space has been
      contiguously extended.

    * MORECORE need not allocate in multiples of pagesize.
      Calls to MORECORE need not have args of multiples of pagesize.

    * MORECORE need not page-align.

  In either case:

    * MORECORE may allocate more memory than requested. (Or even less,
      but this will generally result in a malloc failure.)

    * MORECORE must not allocate memory when given argument zero, but
      instead return one past the end address of memory from the previous
      nonzero call. This malloc does NOT call MORECORE(0)
      until at least one call with positive arguments is made, so
      the initial value returned is not important.

    * Even though consecutive calls to MORECORE need not return contiguous
      addresses, it must be OK for malloc'ed chunks to span multiple
      regions in those cases where they do happen to be contiguous.

    * MORECORE need not handle negative arguments -- it may instead
      just return MORECORE_FAILURE when given negative arguments.
      Negative arguments are always multiples of pagesize. MORECORE
      must not misinterpret negative args as large positive unsigned
      args. You can suppress all such calls from even occurring by
      defining MORECORE_CANNOT_TRIM.

  There is some variation across systems about the type of the
  argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
  actually be size_t, because sbrk supports negative args, so it is
  normally the signed type of the same width as size_t (sometimes
  declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
  matter though. Internally, we use "long" as arguments, which should
  work across all reasonable possibilities.

  Additionally, if MORECORE ever returns failure for a positive
  request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
  system allocator. This is a useful backup strategy for systems with
  holes in address spaces -- in this case sbrk cannot contiguously
  expand the heap, but mmap may be able to map noncontiguous space.

  If you'd like mmap to ALWAYS be used, you can define MORECORE to be
  a function that always returns MORECORE_FAILURE, for example as
  sketched below.
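  A minimal sketch of such a stub (the name "fail_morecore" and the long
  argument type are illustrative assumptions, not part of this distribution;
  the text above only requires that the function always fail):

      /* Never extend the heap, so every request falls back to mmap. */
      static void *fail_morecore (long increment)
      {
        (void) increment;                 /* size is ignored; always fail */
        return (void *) MORECORE_FAILURE;
      }

      #define MORECORE fail_morecore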
  If you are using this malloc with something other than sbrk (or its
  emulation) to supply memory regions, you probably want to set
  MORECORE_CONTIGUOUS as false. As an example, here is a custom
  allocator kindly contributed for pre-OSX macOS. It uses virtually
  but not necessarily physically contiguous non-paged memory (locked
  in, present and won't get swapped out). You can use it by
  uncommenting this section, adding some #includes, and setting up the
  appropriate defines above:

      #define MORECORE osMoreCore
      #define MORECORE_CONTIGUOUS 0

  There is also a shutdown routine that should somehow be called for
  cleanup upon program exit.
  #define MAX_POOL_ENTRIES 100
  #define MINIMUM_MORECORE_SIZE  (64 * 1024)
  static int next_os_pool;
  void *our_os_pools[MAX_POOL_ENTRIES];

  void *osMoreCore(int size)
  {
    void *ptr = 0;
    static void *sbrk_top = 0;

    if (size > 0)
    {
      if (size < MINIMUM_MORECORE_SIZE)
         size = MINIMUM_MORECORE_SIZE;
      if (CurrentExecutionLevel() == kTaskLevel)
         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
      if (ptr == 0)
        return (void *) MORECORE_FAILURE;
      // save ptrs so they can be freed during cleanup
      our_os_pools[next_os_pool++] = ptr;
      ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
      sbrk_top = (char *) ptr + size;
      return ptr;
    }
    else if (size < 0)
    {
      // we don't currently support shrink behavior
      return (void *) MORECORE_FAILURE;
    }
    else
      return sbrk_top;
  }

  // cleanup any allocated memory pools
  // called as last thing before shutting down driver
  void osCleanupMem(void)
  {
    void **ptr;
    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
      if (*ptr)
      {
         PoolDeallocate(*ptr);
         *ptr = 0;
      }
  }
extern char **__libc_argv attribute_hidden;

static void
malloc_printerr(int action, const char *str, void *ptr)
{
  if ((action & 5) == 5)
    __libc_message (action & 2, "%s\n", str);
  else if (action & 1)
    {
      char buf[2 * sizeof (uintptr_t) + 1];

      buf[sizeof (buf) - 1] = '\0';
      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
      while (cp > buf)
        *--cp = '0';

      __libc_message (action & 2,
                      "*** glibc detected *** %s: %s: 0x%s ***\n",
                      __libc_argv[0] ?: "<unknown>", str, cp);
    }
  else if (action & 2)
    abort ();
}
# include <sys/param.h>

/* We need a wrapper function for one of the additions of POSIX.  */
int
__posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* Test whether the SIZE argument is valid.  It must be a power of
     two multiple of sizeof (void *).  */
  if (alignment % sizeof (void *) != 0
      || !powerof2 (alignment / sizeof (void *)) != 0
      || alignment == 0)
    return EINVAL;

  /* Call the hook here, so that caller is posix_memalign's caller
     and not posix_memalign itself.  */
  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                        __const __malloc_ptr_t)) =
    force_reg (__memalign_hook);
  if (__builtin_expect (hook != NULL, 0))
    mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
  else
    mem = public_mEMALIGn (alignment, size);

  if (mem != NULL)
    {
      *memptr = mem;
      return 0;
    }

  return ENOMEM;
}
weak_alias (__posix_memalign, posix_memalign)
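/* Example (a sketch, not part of ptmalloc2 proper): a caller of the POSIX
   interface wrapped above.  posix_memalign() reports failure through its
   return value (EINVAL or ENOMEM), not through errno.  */
#if 0
#include <stdlib.h>
#include <stdio.h>

static void *aligned_buffer (size_t len)
{
  void *buf;
  /* 64 is a power-of-two multiple of sizeof (void *), as required above.  */
  int err = posix_memalign (&buf, 64, len);
  if (err != 0)
    {
      fprintf (stderr, "posix_memalign: error %d\n", err);
      return NULL;
    }
  return buf;
}
#endif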
int
malloc_info (int options, FILE *fp)
{
  /* For now, at least.  */
  if (options != 0)
    return EINVAL;

  int n = 0;
  size_t total_nblocks = 0;
  size_t total_nfastblocks = 0;
  size_t total_avail = 0;
  size_t total_fastavail = 0;
  size_t total_system = 0;
  size_t total_max_system = 0;
  size_t total_aspace = 0;
  size_t total_aspace_mprotect = 0;

  void mi_arena (mstate ar_ptr)
  {
    fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);

    size_t nblocks = 0;
    size_t nfastblocks = 0;
    size_t avail = 0;
    size_t fastavail = 0;
    struct
    {
      size_t from;
      size_t to;
      size_t total;
      size_t count;
    } sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))

    mutex_lock (&ar_ptr->mutex);

    /* Fast bins: every chunk in fast bin i has the same size.  */
    for (size_t i = 0; i < NFASTBINS; ++i)
      {
        mchunkptr p = fastbin (ar_ptr, i);
        if (p != NULL)
          {
            size_t nthissize = 0;
            size_t thissize = chunksize (p);

            while (p != NULL)
              {
                ++nthissize;
                p = p->fd;
              }

            fastavail += nthissize * thissize;
            nfastblocks += nthissize;
            sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
            sizes[i].to = thissize;
            sizes[i].count = nthissize;
          }
        else
          sizes[i].from = sizes[i].to = sizes[i].count = 0;

        sizes[i].total = sizes[i].count * sizes[i].to;
      }

    /* The unsorted bin (bin 1) is reported separately.  */
    mbinptr bin = bin_at (ar_ptr, 1);
    struct malloc_chunk *r = bin->fd;
    if (r != NULL)
      {
        while (r != bin)
          {
            ++sizes[NFASTBINS].count;
            sizes[NFASTBINS].total += r->size;
            sizes[NFASTBINS].from = MIN (sizes[NFASTBINS].from, r->size);
            sizes[NFASTBINS].to = MAX (sizes[NFASTBINS].to, r->size);
            r = r->fd;
          }
        nblocks += sizes[NFASTBINS].count;
        avail += sizes[NFASTBINS].total;
      }

    /* The remaining regular bins.  */
    for (size_t i = 2; i < NBINS; ++i)
      {
        bin = bin_at (ar_ptr, i);
        r = bin->fd;
        sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
        sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
          = sizes[NFASTBINS - 1 + i].count = 0;

        if (r != NULL)
          while (r != bin)
            {
              ++sizes[NFASTBINS - 1 + i].count;
              sizes[NFASTBINS - 1 + i].total += r->size;
              sizes[NFASTBINS - 1 + i].from
                = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
              sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
                                                 r->size);
              r = r->fd;
            }

        if (sizes[NFASTBINS - 1 + i].count == 0)
          sizes[NFASTBINS - 1 + i].from = 0;
        nblocks += sizes[NFASTBINS - 1 + i].count;
        avail += sizes[NFASTBINS - 1 + i].total;
      }

    mutex_unlock (&ar_ptr->mutex);

    total_nfastblocks += nfastblocks;
    total_fastavail += fastavail;

    total_nblocks += nblocks;
    total_avail += avail;

    for (size_t i = 0; i < nsizes; ++i)
      if (sizes[i].count != 0 && i != NFASTBINS)
        fprintf (fp, "\
<size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);

    if (sizes[NFASTBINS].count != 0)
      fprintf (fp, "\
<unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
               sizes[NFASTBINS].from, sizes[NFASTBINS].to,
               sizes[NFASTBINS].total, sizes[NFASTBINS].count);

    total_system += ar_ptr->system_mem;
    total_max_system += ar_ptr->max_system_mem;

    fprintf (fp,
             "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
             "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
             "<system type=\"current\" size=\"%zu\"/>\n"
             "<system type=\"max\" size=\"%zu\"/>\n",
             nfastblocks, fastavail, nblocks, avail,
             ar_ptr->system_mem, ar_ptr->max_system_mem);

    if (ar_ptr != &main_arena)
      {
        heap_info *heap = heap_for_ptr(top(ar_ptr));
        fprintf (fp,
                 "<aspace type=\"total\" size=\"%zu\"/>\n"
                 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
                 heap->size, heap->mprotect_size);
        total_aspace += heap->size;
        total_aspace_mprotect += heap->mprotect_size;
      }
    else
      {
        fprintf (fp,
                 "<aspace type=\"total\" size=\"%zu\"/>\n"
                 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
                 ar_ptr->system_mem, ar_ptr->system_mem);
        total_aspace += ar_ptr->system_mem;
        total_aspace_mprotect += ar_ptr->system_mem;
      }

    fputs ("</heap>\n", fp);
  }

  if(__malloc_initialized < 0)
    ptmalloc_init ();

  fputs ("<malloc version=\"1\">\n", fp);

  /* Iterate over all arenas currently in use.  */
  mstate ar_ptr = &main_arena;
  do
    {
      mi_arena (ar_ptr);
      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  fprintf (fp,
           "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
           "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
           "<system type=\"current\" size=\"%zu\"/>\n"
           "<system type=\"max\" size=\"%zu\"/>\n"
           "<aspace type=\"total\" size=\"%zu\"/>\n"
           "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
           "</malloc>\n",
           total_nfastblocks, total_fastavail, total_nblocks, total_avail,
           total_system, total_max_system,
           total_aspace, total_aspace_mprotect);

  return 0;
}
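/* Example (a sketch, not part of ptmalloc2 proper): writing the XML report
   produced by malloc_info() above to a caller-chosen stream.  The options
   argument must currently be 0.  */
#if 0
#include <malloc.h>
#include <stdio.h>

static int dump_malloc_info (const char *path)
{
  FILE *f = fopen (path, "w");
  if (f == NULL)
    return -1;
  int rc = malloc_info (0, f);   /* returns 0 on success */
  fclose (f);
  return rc;
}
#endif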
strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
strong_alias (__libc_memalign, __memalign)
weak_alias (__libc_memalign, memalign)
strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
strong_alias (__libc_mallinfo, __mallinfo)
weak_alias (__libc_mallinfo, mallinfo)
strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)

weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)
weak_alias (__malloc_get_state, malloc_get_state)
weak_alias (__malloc_set_state, malloc_set_state)
/* ------------------------------------------------------------
   History:

   [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
*/