]> git.ipfire.org Git - thirdparty/glibc.git/blame - malloc/malloc.c
[BZ #420]
[thirdparty/glibc.git] / malloc / malloc.c
CommitLineData
d3290b09 1 /* Malloc implementation for multiple threads without lock contention.
aa420660 2 Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
f65fd747 3 This file is part of the GNU C Library.
fa8d436c
UD
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
f65fd747
UD
6
7 The GNU C Library is free software; you can redistribute it and/or
cc7375ce
RM
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
fa8d436c 10 License, or (at your option) any later version.
f65fd747
UD
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
cc7375ce 15 Lesser General Public License for more details.
f65fd747 16
cc7375ce 17 You should have received a copy of the GNU Lesser General Public
fa8d436c
UD
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
f65fd747 21
fa8d436c
UD
22/*
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
25
26* Version ptmalloc2-20011215
27 $Id$
28 based on:
29 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
f65fd747 30
fa8d436c
UD
31 Note: There may be an updated version of this malloc obtainable at
32 http://www.malloc.de/malloc/ptmalloc2.tar.gz
33 Check before installing!
f65fd747 34
fa8d436c 35* Quickstart
f65fd747 36
fa8d436c
UD
37 In order to compile this implementation, a Makefile is provided with
38 the ptmalloc2 distribution, which has pre-defined targets for some
39 popular systems (e.g. "make posix" for Posix threads). All that is
40 typically required with regard to compiler flags is the selection of
41 the thread package via defining one out of USE_PTHREADS, USE_THR or
42 USE_SPROC. Check the thread-m.h file for what effects this has.
43 Many/most systems will additionally require USE_TSD_DATA_HACK to be
44 defined, so this is the default for "make posix".
f65fd747
UD
45
46* Why use this malloc?
47
48 This is not the fastest, most space-conserving, most portable, or
49 most tunable malloc ever written. However it is among the fastest
50 while also being among the most space-conserving, portable and tunable.
51 Consistent balance across these factors results in a good general-purpose
fa8d436c
UD
52 allocator for malloc-intensive programs.
53
54 The main properties of the algorithms are:
55 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
56 with ties normally decided via FIFO (i.e. least recently used).
57 * For small (<= 64 bytes by default) requests, it is a caching
58 allocator, that maintains pools of quickly recycled chunks.
59 * In between, and for combinations of large and small requests, it does
60 the best it can trying to meet both goals at once.
61 * For very large requests (>= 128KB by default), it relies on system
62 memory mapping facilities, if supported.
63
64 For a longer but slightly out of date high-level description, see
65 http://gee.cs.oswego.edu/dl/html/malloc.html
66
67 You may already by default be using a C library containing a malloc
68 that is based on some version of this malloc (for example in
69 linux). You might still want to use the one in this file in order to
70 customize settings or to avoid overheads associated with library
71 versions.
72
73* Contents, described in more detail in "description of public routines" below.
74
75 Standard (ANSI/SVID/...) functions:
76 malloc(size_t n);
77 calloc(size_t n_elements, size_t element_size);
78 free(Void_t* p);
79 realloc(Void_t* p, size_t n);
80 memalign(size_t alignment, size_t n);
81 valloc(size_t n);
82 mallinfo()
83 mallopt(int parameter_number, int parameter_value)
84
85 Additional functions:
86 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
87 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
88 pvalloc(size_t n);
89 cfree(Void_t* p);
90 malloc_trim(size_t pad);
91 malloc_usable_size(Void_t* p);
92 malloc_stats();
f65fd747
UD
93
94* Vital statistics:
95
fa8d436c 96 Supported pointer representation: 4 or 8 bytes
a9177ff5 97 Supported size_t representation: 4 or 8 bytes
f65fd747 98 Note that size_t is allowed to be 4 bytes even if pointers are 8.
fa8d436c
UD
99 You can adjust this by defining INTERNAL_SIZE_T
100
101 Alignment: 2 * sizeof(size_t) (default)
102 (i.e., 8 byte alignment with 4byte size_t). This suffices for
103 nearly all current machines and C compilers. However, you can
104 define MALLOC_ALIGNMENT to be wider than this if necessary.
f65fd747 105
fa8d436c
UD
106 Minimum overhead per allocated chunk: 4 or 8 bytes
107 Each malloced chunk has a hidden word of overhead holding size
f65fd747
UD
108 and status information.
109
110 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
111 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
112
113 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
114 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
fa8d436c
UD
115 needed; 4 (8) for a trailing size field and 8 (16) bytes for
116 free list pointers. Thus, the minimum allocatable size is
117 16/24/32 bytes.
f65fd747
UD
118
119 Even a request for zero bytes (i.e., malloc(0)) returns a
120 pointer to something of the minimum allocatable size.
121
fa8d436c
UD
122 The maximum overhead wastage (i.e., number of extra bytes
123 allocated than were requested in malloc) is less than or equal
124 to the minimum size, except for requests >= mmap_threshold that
125 are serviced via mmap(), where the worst case wastage is 2 *
126 sizeof(size_t) bytes plus the remainder from a system page (the
127 minimal mmap unit); typically 4096 or 8192 bytes.
f65fd747 128
a9177ff5 129 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
fa8d436c
UD
130 8-byte size_t: 2^64 minus about two pages
131
132 It is assumed that (possibly signed) size_t values suffice to
f65fd747
UD
133 represent chunk sizes. `Possibly signed' is due to the fact
134 that `size_t' may be defined on a system as either a signed or
fa8d436c
UD
135 an unsigned type. The ISO C standard says that it must be
136 unsigned, but a few systems are known not to adhere to this.
137 Additionally, even when size_t is unsigned, sbrk (which is by
138 default used to obtain memory from system) accepts signed
139 arguments, and may not be able to handle size_t-wide arguments
140 with negative sign bit. Generally, values that would
141 appear as negative after accounting for overhead and alignment
142 are supported only via mmap(), which does not have this
143 limitation.
144
145 Requests for sizes outside the allowed range will perform an optional
146 failure action and then return null. (Requests may also
147 also fail because a system is out of memory.)
148
149 Thread-safety: thread-safe unless NO_THREADS is defined
150
151 Compliance: I believe it is compliant with the 1997 Single Unix Specification
a9177ff5 152 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
fa8d436c 153 others as well.
f65fd747
UD
154
155* Synopsis of compile-time options:
156
157 People have reported using previous versions of this malloc on all
158 versions of Unix, sometimes by tweaking some of the defines
159 below. It has been tested most extensively on Solaris and
fa8d436c
UD
160 Linux. It is also reported to work on WIN32 platforms.
161 People also report using it in stand-alone embedded systems.
162
163 The implementation is in straight, hand-tuned ANSI C. It is not
164 at all modular. (Sorry!) It uses a lot of macros. To be at all
165 usable, this code should be compiled using an optimizing compiler
166 (for example gcc -O3) that can simplify expressions and control
167 paths. (FAQ: some macros import variables as arguments rather than
168 declare locals because people reported that some debuggers
169 otherwise get confused.)
170
171 OPTION DEFAULT VALUE
172
173 Compilation Environment options:
174
175 __STD_C derived from C compiler defines
176 WIN32 NOT defined
177 HAVE_MEMCPY defined
178 USE_MEMCPY 1 if HAVE_MEMCPY is defined
a9177ff5 179 HAVE_MMAP defined as 1
fa8d436c
UD
180 MMAP_CLEARS 1
181 HAVE_MREMAP 0 unless linux defined
182 USE_ARENAS the same as HAVE_MMAP
183 malloc_getpagesize derived from system #includes, or 4096 if not
184 HAVE_USR_INCLUDE_MALLOC_H NOT defined
185 LACKS_UNISTD_H NOT defined unless WIN32
186 LACKS_SYS_PARAM_H NOT defined unless WIN32
187 LACKS_SYS_MMAN_H NOT defined unless WIN32
188
189 Changing default word sizes:
190
191 INTERNAL_SIZE_T size_t
192 MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
193
194 Configuration and functionality options:
195
196 USE_DL_PREFIX NOT defined
197 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
198 USE_MALLOC_LOCK NOT defined
199 MALLOC_DEBUG NOT defined
200 REALLOC_ZERO_BYTES_FREES 1
201 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
202 TRIM_FASTBINS 0
203
204 Options for customizing MORECORE:
205
206 MORECORE sbrk
207 MORECORE_FAILURE -1
a9177ff5 208 MORECORE_CONTIGUOUS 1
fa8d436c
UD
209 MORECORE_CANNOT_TRIM NOT defined
210 MORECORE_CLEARS 1
a9177ff5 211 MMAP_AS_MORECORE_SIZE (1024 * 1024)
fa8d436c
UD
212
213 Tuning options that are also dynamically changeable via mallopt:
214
215 DEFAULT_MXFAST 64
216 DEFAULT_TRIM_THRESHOLD 128 * 1024
217 DEFAULT_TOP_PAD 0
218 DEFAULT_MMAP_THRESHOLD 128 * 1024
219 DEFAULT_MMAP_MAX 65536
220
221 There are several other #defined constants and macros that you
222 probably don't want to touch unless you are extending or adapting malloc. */
f65fd747
UD
223
224/*
fa8d436c
UD
225 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
226 compiler, or a C compiler sufficiently close to ANSI to get away
227 with it.
f65fd747
UD
228*/
229
f65fd747 230#ifndef __STD_C
fa8d436c 231#if defined(__STDC__) || defined(__cplusplus)
f65fd747
UD
232#define __STD_C 1
233#else
234#define __STD_C 0
a9177ff5 235#endif
f65fd747
UD
236#endif /*__STD_C*/
237
fa8d436c
UD
238
239/*
240 Void_t* is the pointer type that malloc should say it returns
241*/
242
f65fd747 243#ifndef Void_t
fa8d436c 244#if (__STD_C || defined(WIN32))
f65fd747
UD
245#define Void_t void
246#else
247#define Void_t char
248#endif
249#endif /*Void_t*/
250
251#if __STD_C
fa8d436c
UD
252#include <stddef.h> /* for size_t */
253#include <stdlib.h> /* for getenv(), abort() */
f65fd747 254#else
fa8d436c 255#include <sys/types.h>
f65fd747
UD
256#endif
257
3c6904fb
UD
258#include <malloc-machine.h>
259
f65fd747
UD
260#ifdef __cplusplus
261extern "C" {
262#endif
263
fa8d436c 264/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
f65fd747 265
fa8d436c 266/* #define LACKS_UNISTD_H */
f65fd747 267
fa8d436c
UD
268#ifndef LACKS_UNISTD_H
269#include <unistd.h>
270#endif
f65fd747 271
fa8d436c
UD
272/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
273
274/* #define LACKS_SYS_PARAM_H */
275
276
277#include <stdio.h> /* needed for malloc_stats */
278#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
f65fd747 279
5d78bb43
UD
280/* For uintptr_t. */
281#include <stdint.h>
f65fd747 282
3e030bd5
UD
283/* For va_arg, va_start, va_end. */
284#include <stdarg.h>
285
6bf4302e
UD
286/* For writev and struct iovec. */
287#include <sys/uio.h>
54915e9e
UD
288 /* For syslog. */
289#include <sys/syslog.h>
6bf4302e 290
fa8d436c
UD
291/*
292 Debugging:
293
294 Because freed chunks may be overwritten with bookkeeping fields, this
295 malloc will often die when freed memory is overwritten by user
296 programs. This can be very effective (albeit in an annoying way)
297 in helping track down dangling pointers.
298
299 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
300 enabled that will catch more memory errors. You probably won't be
301 able to make much sense of the actual assertion errors, but they
302 should help you locate incorrectly overwritten memory. The checking
303 is fairly extensive, and will slow down execution
304 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
305 will attempt to check every non-mmapped allocated and free chunk in
306 the course of computing the summmaries. (By nature, mmapped regions
307 cannot be checked very much automatically.)
308
309 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
310 this code. The assertions in the check routines spell out in more
311 detail the assumptions and invariants underlying the algorithms.
312
313 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
314 checking that all accesses to malloced memory stay within their
315 bounds. However, there are several add-ons and adaptations of this
316 or other mallocs available that do this.
f65fd747
UD
317*/
318
319#if MALLOC_DEBUG
320#include <assert.h>
321#else
57449fa3 322#undef assert
f65fd747
UD
323#define assert(x) ((void)0)
324#endif
325
326
327/*
328 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
fa8d436c
UD
329 of chunk sizes.
330
331 The default version is the same as size_t.
332
333 While not strictly necessary, it is best to define this as an
334 unsigned type, even if size_t is a signed type. This may avoid some
335 artificial size limitations on some systems.
336
337 On a 64-bit machine, you may be able to reduce malloc overhead by
338 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
339 expense of not being able to handle more than 2^32 of malloced
340 space. If this limitation is acceptable, you are encouraged to set
341 this unless you are on a platform requiring 16byte alignments. In
342 this case the alignment requirements turn out to negate any
343 potential advantages of decreasing size_t word size.
344
345 Implementors: Beware of the possible combinations of:
346 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
347 and might be the same width as int or as long
348 - size_t might have different width and signedness as INTERNAL_SIZE_T
349 - int and long might be 32 or 64 bits, and might be the same width
350 To deal with this, most comparisons and difference computations
351 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
352 aware of the fact that casting an unsigned int to a wider long does
353 not sign-extend. (This also makes checking for negative numbers
354 awkward.) Some of these casts result in harmless compiler warnings
355 on some systems.
f65fd747
UD
356*/
357
358#ifndef INTERNAL_SIZE_T
359#define INTERNAL_SIZE_T size_t
360#endif
361
fa8d436c
UD
362/* The corresponding word size */
363#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
364
365
366/*
367 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
368 It must be a power of two at least 2 * SIZE_SZ, even on machines
369 for which smaller alignments would suffice. It may be defined as
370 larger than this though. Note however that code and data structures
371 are optimized for the case of 8-byte alignment.
372*/
373
374
375#ifndef MALLOC_ALIGNMENT
376#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
377#endif
378
379/* The corresponding bit mask value */
380#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
381
382
383
384/*
385 REALLOC_ZERO_BYTES_FREES should be set if a call to
386 realloc with zero bytes should be the same as a call to free.
387 This is required by the C standard. Otherwise, since this malloc
388 returns a unique pointer for malloc(0), so does realloc(p, 0).
389*/
390
391#ifndef REALLOC_ZERO_BYTES_FREES
392#define REALLOC_ZERO_BYTES_FREES 1
393#endif
394
395/*
396 TRIM_FASTBINS controls whether free() of a very small chunk can
397 immediately lead to trimming. Setting to true (1) can reduce memory
398 footprint, but will almost always slow down programs that use a lot
399 of small chunks.
400
401 Define this only if you are willing to give up some speed to more
402 aggressively reduce system-level memory footprint when releasing
403 memory in programs that use many small chunks. You can get
404 essentially the same effect by setting MXFAST to 0, but this can
405 lead to even greater slowdowns in programs using many small chunks.
406 TRIM_FASTBINS is an in-between compile-time option, that disables
407 only those chunks bordering topmost memory from being placed in
408 fastbins.
409*/
410
411#ifndef TRIM_FASTBINS
412#define TRIM_FASTBINS 0
413#endif
414
415
f65fd747 416/*
fa8d436c 417 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
a9177ff5 418 This is necessary when you only want to use this malloc in one part
fa8d436c
UD
419 of a program, using your regular system malloc elsewhere.
420*/
421
422/* #define USE_DL_PREFIX */
423
424
a9177ff5 425/*
fa8d436c
UD
426 Two-phase name translation.
427 All of the actual routines are given mangled names.
428 When wrappers are used, they become the public callable versions.
429 When DL_PREFIX is used, the callable names are prefixed.
f65fd747
UD
430*/
431
fa8d436c
UD
432#ifdef USE_DL_PREFIX
433#define public_cALLOc dlcalloc
434#define public_fREe dlfree
435#define public_cFREe dlcfree
436#define public_mALLOc dlmalloc
437#define public_mEMALIGn dlmemalign
438#define public_rEALLOc dlrealloc
439#define public_vALLOc dlvalloc
440#define public_pVALLOc dlpvalloc
441#define public_mALLINFo dlmallinfo
442#define public_mALLOPt dlmallopt
443#define public_mTRIm dlmalloc_trim
444#define public_mSTATs dlmalloc_stats
445#define public_mUSABLe dlmalloc_usable_size
446#define public_iCALLOc dlindependent_calloc
447#define public_iCOMALLOc dlindependent_comalloc
448#define public_gET_STATe dlget_state
449#define public_sET_STATe dlset_state
450#else /* USE_DL_PREFIX */
451#ifdef _LIBC
452
453/* Special defines for the GNU C library. */
454#define public_cALLOc __libc_calloc
455#define public_fREe __libc_free
456#define public_cFREe __libc_cfree
457#define public_mALLOc __libc_malloc
458#define public_mEMALIGn __libc_memalign
459#define public_rEALLOc __libc_realloc
460#define public_vALLOc __libc_valloc
461#define public_pVALLOc __libc_pvalloc
462#define public_mALLINFo __libc_mallinfo
463#define public_mALLOPt __libc_mallopt
464#define public_mTRIm __malloc_trim
465#define public_mSTATs __malloc_stats
466#define public_mUSABLe __malloc_usable_size
467#define public_iCALLOc __libc_independent_calloc
468#define public_iCOMALLOc __libc_independent_comalloc
469#define public_gET_STATe __malloc_get_state
470#define public_sET_STATe __malloc_set_state
471#define malloc_getpagesize __getpagesize()
472#define open __open
473#define mmap __mmap
474#define munmap __munmap
475#define mremap __mremap
476#define mprotect __mprotect
477#define MORECORE (*__morecore)
478#define MORECORE_FAILURE 0
479
480Void_t * __default_morecore (ptrdiff_t);
481Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
f65fd747 482
fa8d436c
UD
483#else /* !_LIBC */
484#define public_cALLOc calloc
485#define public_fREe free
486#define public_cFREe cfree
487#define public_mALLOc malloc
488#define public_mEMALIGn memalign
489#define public_rEALLOc realloc
490#define public_vALLOc valloc
491#define public_pVALLOc pvalloc
492#define public_mALLINFo mallinfo
493#define public_mALLOPt mallopt
494#define public_mTRIm malloc_trim
495#define public_mSTATs malloc_stats
496#define public_mUSABLe malloc_usable_size
497#define public_iCALLOc independent_calloc
498#define public_iCOMALLOc independent_comalloc
499#define public_gET_STATe malloc_get_state
500#define public_sET_STATe malloc_set_state
501#endif /* _LIBC */
502#endif /* USE_DL_PREFIX */
f65fd747 503
d9af917d
UD
504#ifndef _LIBC
505#define __builtin_expect(expr, val) (expr)
3ba06713
UD
506
507#define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
d9af917d 508#endif
f65fd747
UD
509
510/*
511 HAVE_MEMCPY should be defined if you are not otherwise using
512 ANSI STD C, but still have memcpy and memset in your C library
513 and want to use them in calloc and realloc. Otherwise simple
fa8d436c 514 macro versions are defined below.
f65fd747
UD
515
516 USE_MEMCPY should be defined as 1 if you actually want to
517 have memset and memcpy called. People report that the macro
fa8d436c 518 versions are faster than libc versions on some systems.
a9177ff5 519
fa8d436c
UD
520 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
521 (of <= 36 bytes) are manually unrolled in realloc and calloc.
f65fd747
UD
522*/
523
fa8d436c 524#define HAVE_MEMCPY
f65fd747
UD
525
526#ifndef USE_MEMCPY
527#ifdef HAVE_MEMCPY
528#define USE_MEMCPY 1
529#else
530#define USE_MEMCPY 0
531#endif
532#endif
533
fa8d436c 534
f65fd747
UD
535#if (__STD_C || defined(HAVE_MEMCPY))
536
c2afe833
RM
537#ifdef _LIBC
538# include <string.h>
539#else
fa8d436c
UD
540#ifdef WIN32
541/* On Win32 memset and memcpy are already declared in windows.h */
542#else
f65fd747
UD
543#if __STD_C
544void* memset(void*, int, size_t);
545void* memcpy(void*, const void*, size_t);
546#else
547Void_t* memset();
548Void_t* memcpy();
fa8d436c 549#endif
f65fd747
UD
550#endif
551#endif
c2afe833 552#endif
f65fd747 553
fa8d436c
UD
554/*
555 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
556 malloc fails to be able to return memory, either because memory is
557 exhausted or because of illegal arguments.
a9177ff5
RM
558
559 By default, sets errno if running on STD_C platform, else does nothing.
fa8d436c 560*/
09f5e163 561
fa8d436c
UD
562#ifndef MALLOC_FAILURE_ACTION
563#if __STD_C
564#define MALLOC_FAILURE_ACTION \
565 errno = ENOMEM;
f65fd747 566
fa8d436c
UD
567#else
568#define MALLOC_FAILURE_ACTION
569#endif
570#endif
f65fd747 571
fa8d436c
UD
572/*
573 MORECORE-related declarations. By default, rely on sbrk
574*/
09f5e163 575
f65fd747 576
fa8d436c
UD
577#ifdef LACKS_UNISTD_H
578#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
579#if __STD_C
580extern Void_t* sbrk(ptrdiff_t);
581#else
582extern Void_t* sbrk();
583#endif
584#endif
585#endif
f65fd747 586
fa8d436c
UD
587/*
588 MORECORE is the name of the routine to call to obtain more memory
589 from the system. See below for general guidance on writing
590 alternative MORECORE functions, as well as a version for WIN32 and a
591 sample version for pre-OSX macos.
592*/
f65fd747 593
fa8d436c
UD
594#ifndef MORECORE
595#define MORECORE sbrk
596#endif
f65fd747 597
fa8d436c
UD
598/*
599 MORECORE_FAILURE is the value returned upon failure of MORECORE
600 as well as mmap. Since it cannot be an otherwise valid memory address,
601 and must reflect values of standard sys calls, you probably ought not
602 try to redefine it.
603*/
09f5e163 604
fa8d436c
UD
605#ifndef MORECORE_FAILURE
606#define MORECORE_FAILURE (-1)
607#endif
608
609/*
610 If MORECORE_CONTIGUOUS is true, take advantage of fact that
611 consecutive calls to MORECORE with positive arguments always return
612 contiguous increasing addresses. This is true of unix sbrk. Even
613 if not defined, when regions happen to be contiguous, malloc will
614 permit allocations spanning regions obtained from different
615 calls. But defining this when applicable enables some stronger
616 consistency checks and space efficiencies.
617*/
f65fd747 618
fa8d436c
UD
619#ifndef MORECORE_CONTIGUOUS
620#define MORECORE_CONTIGUOUS 1
f65fd747
UD
621#endif
622
fa8d436c
UD
623/*
624 Define MORECORE_CANNOT_TRIM if your version of MORECORE
625 cannot release space back to the system when given negative
626 arguments. This is generally necessary only if you are using
627 a hand-crafted MORECORE function that cannot handle negative arguments.
628*/
629
630/* #define MORECORE_CANNOT_TRIM */
f65fd747 631
fa8d436c
UD
632/* MORECORE_CLEARS (default 1)
633 The degree to which the routine mapped to MORECORE zeroes out
634 memory: never (0), only for newly allocated space (1) or always
635 (2). The distinction between (1) and (2) is necessary because on
636 some systems, if the application first decrements and then
637 increments the break value, the contents of the reallocated space
638 are unspecified.
639*/
640
641#ifndef MORECORE_CLEARS
642#define MORECORE_CLEARS 1
7cabd57c
UD
643#endif
644
fa8d436c 645
f65fd747 646/*
fa8d436c
UD
647 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
648 allocate very large blocks. These will be returned to the
649 operating system immediately after a free(). Also, if mmap
650 is available, it is used as a backup strategy in cases where
651 MORECORE fails to provide space from system.
652
653 This malloc is best tuned to work with mmap for large requests.
654 If you do not have mmap, operations involving very large chunks (1MB
655 or so) may be slower than you'd like.
f65fd747
UD
656*/
657
658#ifndef HAVE_MMAP
fa8d436c
UD
659#define HAVE_MMAP 1
660
a9177ff5 661/*
fa8d436c
UD
662 Standard unix mmap using /dev/zero clears memory so calloc doesn't
663 need to.
664*/
665
666#ifndef MMAP_CLEARS
667#define MMAP_CLEARS 1
668#endif
669
670#else /* no mmap */
671#ifndef MMAP_CLEARS
672#define MMAP_CLEARS 0
673#endif
674#endif
675
676
a9177ff5 677/*
fa8d436c
UD
678 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
679 sbrk fails, and mmap is used as a backup (which is done only if
680 HAVE_MMAP). The value must be a multiple of page size. This
681 backup strategy generally applies only when systems have "holes" in
682 address space, so sbrk cannot perform contiguous expansion, but
683 there is still space available on system. On systems for which
684 this is known to be useful (i.e. most linux kernels), this occurs
685 only when programs allocate huge amounts of memory. Between this,
686 and the fact that mmap regions tend to be limited, the size should
687 be large, to avoid too many mmap calls and thus avoid running out
688 of kernel resources.
689*/
690
691#ifndef MMAP_AS_MORECORE_SIZE
692#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
f65fd747
UD
693#endif
694
695/*
696 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
697 large blocks. This is currently only possible on Linux with
698 kernel versions newer than 1.3.77.
699*/
700
701#ifndef HAVE_MREMAP
fa8d436c
UD
702#ifdef linux
703#define HAVE_MREMAP 1
704#else
705#define HAVE_MREMAP 0
f65fd747
UD
706#endif
707
fa8d436c
UD
708#endif /* HAVE_MMAP */
709
e9b3e3c5
UD
710/* Define USE_ARENAS to enable support for multiple `arenas'. These
711 are allocated using mmap(), are necessary for threads and
712 occasionally useful to overcome address space limitations affecting
713 sbrk(). */
714
715#ifndef USE_ARENAS
716#define USE_ARENAS HAVE_MMAP
717#endif
718
f65fd747
UD
719
720/*
fa8d436c
UD
721 The system page size. To the extent possible, this malloc manages
722 memory from the system in page-size units. Note that this value is
723 cached during initialization into a field of malloc_state. So even
724 if malloc_getpagesize is a function, it is only called once.
725
726 The following mechanics for getpagesize were adapted from bsd/gnu
727 getpagesize.h. If none of the system-probes here apply, a value of
728 4096 is used, which should be OK: If they don't apply, then using
729 the actual value probably doesn't impact performance.
f65fd747
UD
730*/
731
fa8d436c 732
f65fd747 733#ifndef malloc_getpagesize
fa8d436c
UD
734
735#ifndef LACKS_UNISTD_H
736# include <unistd.h>
737#endif
738
f65fd747
UD
739# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
740# ifndef _SC_PAGE_SIZE
741# define _SC_PAGE_SIZE _SC_PAGESIZE
742# endif
743# endif
fa8d436c 744
f65fd747
UD
745# ifdef _SC_PAGE_SIZE
746# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
747# else
748# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
749 extern size_t getpagesize();
750# define malloc_getpagesize getpagesize()
751# else
fa8d436c 752# ifdef WIN32 /* use supplied emulation of getpagesize */
a9177ff5 753# define malloc_getpagesize getpagesize()
f65fd747 754# else
fa8d436c
UD
755# ifndef LACKS_SYS_PARAM_H
756# include <sys/param.h>
757# endif
758# ifdef EXEC_PAGESIZE
759# define malloc_getpagesize EXEC_PAGESIZE
f65fd747 760# else
fa8d436c
UD
761# ifdef NBPG
762# ifndef CLSIZE
763# define malloc_getpagesize NBPG
764# else
765# define malloc_getpagesize (NBPG * CLSIZE)
766# endif
f65fd747 767# else
fa8d436c
UD
768# ifdef NBPC
769# define malloc_getpagesize NBPC
f65fd747 770# else
fa8d436c
UD
771# ifdef PAGESIZE
772# define malloc_getpagesize PAGESIZE
773# else /* just guess */
a9177ff5 774# define malloc_getpagesize (4096)
fa8d436c 775# endif
f65fd747
UD
776# endif
777# endif
778# endif
779# endif
780# endif
781# endif
782#endif
783
f65fd747 784/*
f65fd747 785 This version of malloc supports the standard SVID/XPG mallinfo
fa8d436c
UD
786 routine that returns a struct containing usage properties and
787 statistics. It should work on any SVID/XPG compliant system that has
788 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
789 install such a thing yourself, cut out the preliminary declarations
790 as described above and below and save them in a malloc.h file. But
791 there's no compelling reason to bother to do this.)
f65fd747
UD
792
793 The main declaration needed is the mallinfo struct that is returned
794 (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a
fa8d436c
UD
795 bunch of fields that are not even meaningful in this version of
796 malloc. These fields are are instead filled by mallinfo() with
797 other numbers that might be of interest.
f65fd747
UD
798
799 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
800 /usr/include/malloc.h file that includes a declaration of struct
801 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
802 version is declared below. These must be precisely the same for
fa8d436c
UD
803 mallinfo() to work. The original SVID version of this struct,
804 defined on most systems with mallinfo, declares all fields as
805 ints. But some others define as unsigned long. If your system
806 defines the fields using a type of different width than listed here,
807 you must #include your system version and #define
808 HAVE_USR_INCLUDE_MALLOC_H.
f65fd747
UD
809*/
810
811/* #define HAVE_USR_INCLUDE_MALLOC_H */
812
fa8d436c
UD
813#ifdef HAVE_USR_INCLUDE_MALLOC_H
814#include "/usr/include/malloc.h"
f65fd747
UD
815#endif
816
f65fd747 817
fa8d436c 818/* ---------- description of public routines ------------ */
f65fd747
UD
819
820/*
fa8d436c
UD
821 malloc(size_t n)
822 Returns a pointer to a newly allocated chunk of at least n bytes, or null
823 if no space is available. Additionally, on failure, errno is
824 set to ENOMEM on ANSI C systems.
825
826 If n is zero, malloc returns a minumum-sized chunk. (The minimum
827 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
828 systems.) On most systems, size_t is an unsigned type, so calls
829 with negative arguments are interpreted as requests for huge amounts
830 of space, which will often fail. The maximum supported value of n
831 differs across systems, but is in all cases less than the maximum
832 representable value of a size_t.
f65fd747 833*/
fa8d436c
UD
834#if __STD_C
835Void_t* public_mALLOc(size_t);
836#else
837Void_t* public_mALLOc();
838#endif
aa420660
UD
839#ifdef libc_hidden_proto
840libc_hidden_proto (public_mALLOc)
841#endif
f65fd747 842
fa8d436c
UD
843/*
844 free(Void_t* p)
845 Releases the chunk of memory pointed to by p, that had been previously
846 allocated using malloc or a related routine such as realloc.
847 It has no effect if p is null. It can have arbitrary (i.e., bad!)
848 effects if p has already been freed.
849
850 Unless disabled (using mallopt), freeing very large spaces will
851 when possible, automatically trigger operations that give
852 back unused memory to the system, thus reducing program footprint.
853*/
854#if __STD_C
855void public_fREe(Void_t*);
856#else
857void public_fREe();
858#endif
aa420660
UD
859#ifdef libc_hidden_proto
860libc_hidden_proto (public_fREe)
861#endif
f65fd747 862
fa8d436c
UD
863/*
864 calloc(size_t n_elements, size_t element_size);
865 Returns a pointer to n_elements * element_size bytes, with all locations
866 set to zero.
867*/
868#if __STD_C
869Void_t* public_cALLOc(size_t, size_t);
870#else
871Void_t* public_cALLOc();
f65fd747
UD
872#endif
873
874/*
fa8d436c
UD
875 realloc(Void_t* p, size_t n)
876 Returns a pointer to a chunk of size n that contains the same data
877 as does chunk p up to the minimum of (n, p's size) bytes, or null
a9177ff5 878 if no space is available.
f65fd747 879
fa8d436c
UD
880 The returned pointer may or may not be the same as p. The algorithm
881 prefers extending p when possible, otherwise it employs the
882 equivalent of a malloc-copy-free sequence.
f65fd747 883
a9177ff5 884 If p is null, realloc is equivalent to malloc.
f65fd747 885
fa8d436c
UD
886 If space is not available, realloc returns null, errno is set (if on
887 ANSI) and p is NOT freed.
f65fd747 888
fa8d436c
UD
889 if n is for fewer bytes than already held by p, the newly unused
890 space is lopped off and freed if possible. Unless the #define
891 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
892 zero (re)allocates a minimum-sized chunk.
f65fd747 893
fa8d436c
UD
894 Large chunks that were internally obtained via mmap will always
895 be reallocated using malloc-copy-free sequences unless
896 the system supports MREMAP (currently only linux).
f65fd747 897
fa8d436c
UD
898 The old unix realloc convention of allowing the last-free'd chunk
899 to be used as an argument to realloc is not supported.
f65fd747 900*/
fa8d436c
UD
901#if __STD_C
902Void_t* public_rEALLOc(Void_t*, size_t);
903#else
904Void_t* public_rEALLOc();
905#endif
aa420660
UD
906#ifdef libc_hidden_proto
907libc_hidden_proto (public_rEALLOc)
908#endif
f65fd747 909
fa8d436c
UD
910/*
911 memalign(size_t alignment, size_t n);
912 Returns a pointer to a newly allocated chunk of n bytes, aligned
913 in accord with the alignment argument.
914
915 The alignment argument should be a power of two. If the argument is
916 not a power of two, the nearest greater power is used.
917 8-byte alignment is guaranteed by normal malloc calls, so don't
918 bother calling memalign with an argument of 8 or less.
919
920 Overreliance on memalign is a sure way to fragment space.
921*/
922#if __STD_C
923Void_t* public_mEMALIGn(size_t, size_t);
924#else
925Void_t* public_mEMALIGn();
f65fd747 926#endif
aa420660
UD
927#ifdef libc_hidden_proto
928libc_hidden_proto (public_mEMALIGn)
929#endif
f65fd747
UD
930
931/*
fa8d436c
UD
932 valloc(size_t n);
933 Equivalent to memalign(pagesize, n), where pagesize is the page
934 size of the system. If the pagesize is unknown, 4096 is used.
935*/
936#if __STD_C
937Void_t* public_vALLOc(size_t);
938#else
939Void_t* public_vALLOc();
940#endif
941
f65fd747 942
f65fd747 943
fa8d436c
UD
944/*
945 mallopt(int parameter_number, int parameter_value)
946 Sets tunable parameters The format is to provide a
947 (parameter-number, parameter-value) pair. mallopt then sets the
948 corresponding parameter to the argument value if it can (i.e., so
949 long as the value is meaningful), and returns 1 if successful else
950 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
951 normally defined in malloc.h. Only one of these (M_MXFAST) is used
952 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
953 so setting them has no effect. But this malloc also supports four
954 other options in mallopt. See below for details. Briefly, supported
955 parameters are as follows (listed defaults are for "typical"
956 configurations).
957
958 Symbol param # default allowed param values
959 M_MXFAST 1 64 0-80 (0 disables fastbins)
960 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
a9177ff5 961 M_TOP_PAD -2 0 any
fa8d436c
UD
962 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
963 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
964*/
965#if __STD_C
966int public_mALLOPt(int, int);
967#else
968int public_mALLOPt();
969#endif
970
971
972/*
973 mallinfo()
974 Returns (by copy) a struct containing various summary statistics:
975
a9177ff5
RM
976 arena: current total non-mmapped bytes allocated from system
977 ordblks: the number of free chunks
fa8d436c
UD
978 smblks: the number of fastbin blocks (i.e., small chunks that
979 have been freed but not use resused or consolidated)
a9177ff5
RM
980 hblks: current number of mmapped regions
981 hblkhd: total bytes held in mmapped regions
fa8d436c
UD
982 usmblks: the maximum total allocated space. This will be greater
983 than current total if trimming has occurred.
a9177ff5 984 fsmblks: total bytes held in fastbin blocks
fa8d436c 985 uordblks: current total allocated space (normal or mmapped)
a9177ff5 986 fordblks: total free space
fa8d436c
UD
987 keepcost: the maximum number of bytes that could ideally be released
988 back to system via malloc_trim. ("ideally" means that
989 it ignores page restrictions etc.)
990
991 Because these fields are ints, but internal bookkeeping may
a9177ff5 992 be kept as longs, the reported values may wrap around zero and
fa8d436c
UD
993 thus be inaccurate.
994*/
995#if __STD_C
996struct mallinfo public_mALLINFo(void);
997#else
998struct mallinfo public_mALLINFo();
999#endif
f65fd747 1000
fa8d436c
UD
1001/*
1002 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1003
1004 independent_calloc is similar to calloc, but instead of returning a
1005 single cleared space, it returns an array of pointers to n_elements
1006 independent elements that can hold contents of size elem_size, each
1007 of which starts out cleared, and can be independently freed,
1008 realloc'ed etc. The elements are guaranteed to be adjacently
1009 allocated (this is not guaranteed to occur with multiple callocs or
1010 mallocs), which may also improve cache locality in some
1011 applications.
1012
1013 The "chunks" argument is optional (i.e., may be null, which is
1014 probably the most typical usage). If it is null, the returned array
1015 is itself dynamically allocated and should also be freed when it is
1016 no longer needed. Otherwise, the chunks array must be of at least
1017 n_elements in length. It is filled in with the pointers to the
1018 chunks.
1019
1020 In either case, independent_calloc returns this pointer array, or
1021 null if the allocation failed. If n_elements is zero and "chunks"
1022 is null, it returns a chunk representing an array with zero elements
1023 (which should be freed if not wanted).
1024
1025 Each element must be individually freed when it is no longer
1026 needed. If you'd like to instead be able to free all at once, you
1027 should instead use regular calloc and assign pointers into this
1028 space to represent elements. (In this case though, you cannot
1029 independently free elements.)
a9177ff5 1030
fa8d436c
UD
1031 independent_calloc simplifies and speeds up implementations of many
1032 kinds of pools. It may also be useful when constructing large data
1033 structures that initially have a fixed number of fixed-sized nodes,
1034 but the number is not known at compile time, and some of the nodes
1035 may later need to be freed. For example:
1036
1037 struct Node { int item; struct Node* next; };
a9177ff5 1038
fa8d436c
UD
1039 struct Node* build_list() {
1040 struct Node** pool;
1041 int n = read_number_of_nodes_needed();
1042 if (n <= 0) return 0;
1043 pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
a9177ff5
RM
1044 if (pool == 0) die();
1045 // organize into a linked list...
fa8d436c 1046 struct Node* first = pool[0];
a9177ff5 1047 for (i = 0; i < n-1; ++i)
fa8d436c
UD
1048 pool[i]->next = pool[i+1];
1049 free(pool); // Can now free the array (or not, if it is needed later)
1050 return first;
1051 }
1052*/
1053#if __STD_C
1054Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1055#else
1056Void_t** public_iCALLOc();
1057#endif
f65fd747 1058
fa8d436c
UD
1059/*
1060 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1061
1062 independent_comalloc allocates, all at once, a set of n_elements
1063 chunks with sizes indicated in the "sizes" array. It returns
1064 an array of pointers to these elements, each of which can be
1065 independently freed, realloc'ed etc. The elements are guaranteed to
1066 be adjacently allocated (this is not guaranteed to occur with
1067 multiple callocs or mallocs), which may also improve cache locality
1068 in some applications.
1069
1070 The "chunks" argument is optional (i.e., may be null). If it is null
1071 the returned array is itself dynamically allocated and should also
1072 be freed when it is no longer needed. Otherwise, the chunks array
1073 must be of at least n_elements in length. It is filled in with the
1074 pointers to the chunks.
1075
1076 In either case, independent_comalloc returns this pointer array, or
1077 null if the allocation failed. If n_elements is zero and chunks is
1078 null, it returns a chunk representing an array with zero elements
1079 (which should be freed if not wanted).
a9177ff5 1080
fa8d436c
UD
1081 Each element must be individually freed when it is no longer
1082 needed. If you'd like to instead be able to free all at once, you
1083 should instead use a single regular malloc, and assign pointers at
a9177ff5 1084 particular offsets in the aggregate space. (In this case though, you
fa8d436c
UD
1085 cannot independently free elements.)
1086
1087 independent_comallac differs from independent_calloc in that each
1088 element may have a different size, and also that it does not
1089 automatically clear elements.
1090
1091 independent_comalloc can be used to speed up allocation in cases
1092 where several structs or objects must always be allocated at the
1093 same time. For example:
1094
1095 struct Head { ... }
1096 struct Foot { ... }
1097
1098 void send_message(char* msg) {
1099 int msglen = strlen(msg);
1100 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1101 void* chunks[3];
1102 if (independent_comalloc(3, sizes, chunks) == 0)
1103 die();
1104 struct Head* head = (struct Head*)(chunks[0]);
1105 char* body = (char*)(chunks[1]);
1106 struct Foot* foot = (struct Foot*)(chunks[2]);
1107 // ...
1108 }
f65fd747 1109
fa8d436c
UD
1110 In general though, independent_comalloc is worth using only for
1111 larger values of n_elements. For small values, you probably won't
1112 detect enough difference from series of malloc calls to bother.
f65fd747 1113
fa8d436c
UD
1114 Overuse of independent_comalloc can increase overall memory usage,
1115 since it cannot reuse existing noncontiguous small chunks that
1116 might be available for some of the elements.
f65fd747 1117*/
fa8d436c
UD
1118#if __STD_C
1119Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1120#else
1121Void_t** public_iCOMALLOc();
1122#endif
f65fd747
UD
1123
1124
fa8d436c
UD
1125/*
1126 pvalloc(size_t n);
1127 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1128 round up n to nearest pagesize.
1129 */
1130#if __STD_C
1131Void_t* public_pVALLOc(size_t);
1132#else
1133Void_t* public_pVALLOc();
1134#endif
f65fd747 1135
fa8d436c
UD
1136/*
1137 cfree(Void_t* p);
1138 Equivalent to free(p).
1139
1140 cfree is needed/defined on some systems that pair it with calloc,
a9177ff5 1141 for odd historical reasons (such as: cfree is used in example
fa8d436c
UD
1142 code in the first edition of K&R).
1143*/
1144#if __STD_C
1145void public_cFREe(Void_t*);
f65fd747 1146#else
fa8d436c
UD
1147void public_cFREe();
1148#endif
1149
1150/*
1151 malloc_trim(size_t pad);
1152
1153 If possible, gives memory back to the system (via negative
1154 arguments to sbrk) if there is unused memory at the `high' end of
1155 the malloc pool. You can call this after freeing large blocks of
1156 memory to potentially reduce the system-level memory requirements
1157 of a program. However, it cannot guarantee to reduce memory. Under
1158 some allocation patterns, some large free blocks of memory will be
1159 locked between two used chunks, so they cannot be given back to
1160 the system.
a9177ff5 1161
fa8d436c
UD
1162 The `pad' argument to malloc_trim represents the amount of free
1163 trailing space to leave untrimmed. If this argument is zero,
1164 only the minimum amount of memory to maintain internal data
1165 structures will be left (one page or less). Non-zero arguments
1166 can be supplied to maintain enough trailing space to service
1167 future expected allocations without having to re-obtain memory
1168 from the system.
a9177ff5 1169
fa8d436c
UD
1170 Malloc_trim returns 1 if it actually released any memory, else 0.
1171 On systems that do not support "negative sbrks", it will always
1172 rreturn 0.
1173*/
1174#if __STD_C
1175int public_mTRIm(size_t);
1176#else
1177int public_mTRIm();
1178#endif
1179
1180/*
1181 malloc_usable_size(Void_t* p);
1182
1183 Returns the number of bytes you can actually use in
1184 an allocated chunk, which may be more than you requested (although
1185 often not) due to alignment and minimum size constraints.
1186 You can use this many bytes without worrying about
1187 overwriting other allocated objects. This is not a particularly great
1188 programming practice. malloc_usable_size can be more useful in
1189 debugging and assertions, for example:
1190
1191 p = malloc(n);
1192 assert(malloc_usable_size(p) >= 256);
1193
1194*/
1195#if __STD_C
1196size_t public_mUSABLe(Void_t*);
1197#else
1198size_t public_mUSABLe();
f65fd747 1199#endif
fa8d436c
UD
1200
1201/*
1202 malloc_stats();
1203 Prints on stderr the amount of space obtained from the system (both
1204 via sbrk and mmap), the maximum amount (which may be more than
1205 current if malloc_trim and/or munmap got called), and the current
1206 number of bytes allocated via malloc (or realloc, etc) but not yet
1207 freed. Note that this is the number of bytes allocated, not the
1208 number requested. It will be larger than the number requested
1209 because of alignment and bookkeeping overhead. Because it includes
1210 alignment wastage as being in use, this figure may be greater than
1211 zero even when no user-level chunks are allocated.
1212
1213 The reported current and maximum system memory can be inaccurate if
1214 a program makes other calls to system memory allocation functions
1215 (normally sbrk) outside of malloc.
1216
1217 malloc_stats prints only the most commonly interesting statistics.
1218 More information can be obtained by calling mallinfo.
1219
1220*/
1221#if __STD_C
1222void public_mSTATs(void);
1223#else
1224void public_mSTATs();
f65fd747
UD
1225#endif
1226
f7ddf3d3
UD
1227/*
1228 malloc_get_state(void);
1229
1230 Returns the state of all malloc variables in an opaque data
1231 structure.
1232*/
1233#if __STD_C
1234Void_t* public_gET_STATe(void);
1235#else
1236Void_t* public_gET_STATe();
1237#endif
1238
1239/*
1240 malloc_set_state(Void_t* state);
1241
1242 Restore the state of all malloc variables from data obtained with
1243 malloc_get_state().
1244*/
1245#if __STD_C
1246int public_sET_STATe(Void_t*);
1247#else
1248int public_sET_STATe();
1249#endif
1250
1251#ifdef _LIBC
1252/*
1253 posix_memalign(void **memptr, size_t alignment, size_t size);
1254
1255 POSIX wrapper like memalign(), checking for validity of size.
1256*/
1257int __posix_memalign(void **, size_t, size_t);
1258#endif
1259
fa8d436c
UD
1260/* mallopt tuning options */
1261
f65fd747 1262/*
fa8d436c
UD
1263 M_MXFAST is the maximum request size used for "fastbins", special bins
1264 that hold returned chunks without consolidating their spaces. This
1265 enables future requests for chunks of the same size to be handled
1266 very quickly, but can increase fragmentation, and thus increase the
1267 overall memory footprint of a program.
1268
1269 This malloc manages fastbins very conservatively yet still
1270 efficiently, so fragmentation is rarely a problem for values less
1271 than or equal to the default. The maximum supported value of MXFAST
1272 is 80. You wouldn't want it any higher than this anyway. Fastbins
1273 are designed especially for use with many small structs, objects or
1274 strings -- the default handles structs/objects/arrays with sizes up
1275 to 8 4byte fields, or small strings representing words, tokens,
1276 etc. Using fastbins for larger objects normally worsens
1277 fragmentation without improving speed.
1278
1279 M_MXFAST is set in REQUEST size units. It is internally used in
1280 chunksize units, which adds padding and alignment. You can reduce
1281 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1282 algorithm to be a closer approximation of fifo-best-fit in all cases,
1283 not just for larger requests, but will generally cause it to be
1284 slower.
f65fd747
UD
1285*/
1286
1287
fa8d436c
UD
1288/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1289#ifndef M_MXFAST
a9177ff5 1290#define M_MXFAST 1
fa8d436c 1291#endif
f65fd747 1292
fa8d436c
UD
1293#ifndef DEFAULT_MXFAST
1294#define DEFAULT_MXFAST 64
10dc2a90
UD
1295#endif
1296
10dc2a90 1297
fa8d436c
UD
1298/*
1299 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1300 to keep before releasing via malloc_trim in free().
1301
1302 Automatic trimming is mainly useful in long-lived programs.
1303 Because trimming via sbrk can be slow on some systems, and can
1304 sometimes be wasteful (in cases where programs immediately
1305 afterward allocate more large chunks) the value should be high
1306 enough so that your overall system performance would improve by
1307 releasing this much memory.
1308
1309 The trim threshold and the mmap control parameters (see below)
1310 can be traded off with one another. Trimming and mmapping are
1311 two different ways of releasing unused memory back to the
1312 system. Between these two, it is often possible to keep
1313 system-level demands of a long-lived program down to a bare
1314 minimum. For example, in one test suite of sessions measuring
1315 the XF86 X server on Linux, using a trim threshold of 128K and a
1316 mmap threshold of 192K led to near-minimal long term resource
1317 consumption.
1318
1319 If you are using this malloc in a long-lived program, it should
1320 pay to experiment with these values. As a rough guide, you
1321 might set to a value close to the average size of a process
1322 (program) running on your system. Releasing this much memory
1323 would allow such a process to run in memory. Generally, it's
1324 worth it to tune for trimming rather tham memory mapping when a
1325 program undergoes phases where several large chunks are
1326 allocated and released in ways that can reuse each other's
1327 storage, perhaps mixed with phases where there are no such
1328 chunks at all. And in well-behaved long-lived programs,
1329 controlling release of large blocks via trimming versus mapping
1330 is usually faster.
1331
1332 However, in most programs, these parameters serve mainly as
1333 protection against the system-level effects of carrying around
1334 massive amounts of unneeded memory. Since frequent calls to
1335 sbrk, mmap, and munmap otherwise degrade performance, the default
1336 parameters are set to relatively high values that serve only as
1337 safeguards.
1338
1339 The trim value It must be greater than page size to have any useful
a9177ff5 1340 effect. To disable trimming completely, you can set to
fa8d436c
UD
1341 (unsigned long)(-1)
1342
1343 Trim settings interact with fastbin (MXFAST) settings: Unless
1344 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1345 freeing a chunk with size less than or equal to MXFAST. Trimming is
1346 instead delayed until subsequent freeing of larger chunks. However,
1347 you can still force an attempted trim by calling malloc_trim.
1348
1349 Also, trimming is not generally possible in cases where
1350 the main arena is obtained via mmap.
1351
1352 Note that the trick some people use of mallocing a huge space and
1353 then freeing it at program startup, in an attempt to reserve system
1354 memory, doesn't have the intended effect under automatic trimming,
1355 since that memory will immediately be returned to the system.
1356*/
1357
1358#define M_TRIM_THRESHOLD -1
1359
1360#ifndef DEFAULT_TRIM_THRESHOLD
1361#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1362#endif
1363
1364/*
1365 M_TOP_PAD is the amount of extra `padding' space to allocate or
1366 retain whenever sbrk is called. It is used in two ways internally:
1367
1368 * When sbrk is called to extend the top of the arena to satisfy
1369 a new malloc request, this much padding is added to the sbrk
1370 request.
1371
1372 * When malloc_trim is called automatically from free(),
1373 it is used as the `pad' argument.
1374
1375 In both cases, the actual amount of padding is rounded
1376 so that the end of the arena is always a system page boundary.
1377
1378 The main reason for using padding is to avoid calling sbrk so
1379 often. Having even a small pad greatly reduces the likelihood
1380 that nearly every malloc request during program start-up (or
1381 after trimming) will invoke sbrk, which needlessly wastes
1382 time.
1383
1384 Automatic rounding-up to page-size units is normally sufficient
1385 to avoid measurable overhead, so the default is 0. However, in
1386 systems where sbrk is relatively slow, it can pay to increase
1387 this value, at the expense of carrying around more memory than
1388 the program needs.
1389*/
10dc2a90 1390
fa8d436c 1391#define M_TOP_PAD -2
10dc2a90 1392
fa8d436c
UD
1393#ifndef DEFAULT_TOP_PAD
1394#define DEFAULT_TOP_PAD (0)
1395#endif
f65fd747 1396
fa8d436c
UD
1397/*
1398 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1399 to service a request. Requests of at least this size that cannot
1400 be allocated using already-existing space will be serviced via mmap.
1401 (If enough normal freed space already exists it is used instead.)
1402
1403 Using mmap segregates relatively large chunks of memory so that
1404 they can be individually obtained and released from the host
1405 system. A request serviced through mmap is never reused by any
1406 other request (at least not directly; the system may just so
1407 happen to remap successive requests to the same locations).
1408
1409 Segregating space in this way has the benefits that:
1410
a9177ff5
RM
1411 1. Mmapped space can ALWAYS be individually released back
1412 to the system, which helps keep the system level memory
1413 demands of a long-lived program low.
fa8d436c
UD
1414 2. Mapped memory can never become `locked' between
1415 other chunks, as can happen with normally allocated chunks, which
1416 means that even trimming via malloc_trim would not release them.
1417 3. On some systems with "holes" in address spaces, mmap can obtain
1418 memory that sbrk cannot.
1419
1420 However, it has the disadvantages that:
1421
1422 1. The space cannot be reclaimed, consolidated, and then
1423 used to service later requests, as happens with normal chunks.
1424 2. It can lead to more wastage because of mmap page alignment
1425 requirements
1426 3. It causes malloc performance to be more dependent on host
1427 system memory management support routines which may vary in
1428 implementation quality and may impose arbitrary
1429 limitations. Generally, servicing a request via normal
1430 malloc steps is faster than going through a system's mmap.
1431
1432 The advantages of mmap nearly always outweigh disadvantages for
1433 "large" chunks, but the value of "large" varies across systems. The
1434 default is an empirically derived value that works well in most
1435 systems.
f65fd747
UD
1436*/
1437
fa8d436c 1438#define M_MMAP_THRESHOLD -3
f65fd747 1439
fa8d436c
UD
1440#ifndef DEFAULT_MMAP_THRESHOLD
1441#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
1442#endif
1443
1444/*
1445 M_MMAP_MAX is the maximum number of requests to simultaneously
1446 service using mmap. This parameter exists because
1447 some systems have a limited number of internal tables for
1448 use by mmap, and using more than a few of them may degrade
1449 performance.
1450
1451 The default is set to a value that serves only as a safeguard.
1452 Setting to 0 disables use of mmap for servicing large requests. If
1453 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1454 to non-zero values in mallopt will fail.
1455*/
f65fd747 1456
fa8d436c
UD
1457#define M_MMAP_MAX -4
1458
1459#ifndef DEFAULT_MMAP_MAX
1460#if HAVE_MMAP
1461#define DEFAULT_MMAP_MAX (65536)
1462#else
1463#define DEFAULT_MMAP_MAX (0)
1464#endif
f65fd747
UD
1465#endif
1466
fa8d436c 1467#ifdef __cplusplus
3c6904fb 1468} /* end of extern "C" */
fa8d436c 1469#endif
f65fd747 1470
100351c3 1471#include <malloc.h>
f65fd747 1472
fa8d436c
UD
1473#ifndef BOUNDED_N
1474#define BOUNDED_N(ptr, sz) (ptr)
1475#endif
1476#ifndef RETURN_ADDRESS
1477#define RETURN_ADDRESS(X_) (NULL)
9ae6fc54 1478#endif
431c33c0
UD
1479
1480/* On some platforms we can compile internal, not exported functions better.
1481 Let the environment provide a macro and define it to be empty if it
1482 is not available. */
1483#ifndef internal_function
1484# define internal_function
1485#endif
1486
fa8d436c
UD
1487/* Forward declarations. */
1488struct malloc_chunk;
1489typedef struct malloc_chunk* mchunkptr;
431c33c0 1490
fa8d436c 1491/* Internal routines. */
f65fd747 1492
fa8d436c 1493#if __STD_C
f65fd747 1494
f1c5213d
RM
1495Void_t* _int_malloc(mstate, size_t);
1496void _int_free(mstate, Void_t*);
1497Void_t* _int_realloc(mstate, Void_t*, size_t);
1498Void_t* _int_memalign(mstate, size_t, size_t);
1499Void_t* _int_valloc(mstate, size_t);
fa8d436c
UD
1500static Void_t* _int_pvalloc(mstate, size_t);
1501/*static Void_t* cALLOc(size_t, size_t);*/
1502static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1503static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1504static int mTRIm(size_t);
1505static size_t mUSABLe(Void_t*);
1506static void mSTATs(void);
1507static int mALLOPt(int, int);
1508static struct mallinfo mALLINFo(mstate);
6bf4302e 1509static void malloc_printerr(int action, const char *str, void *ptr);
fa8d436c
UD
1510
1511static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1512static int internal_function top_check(void);
1513static void internal_function munmap_chunk(mchunkptr p);
a9177ff5 1514#if HAVE_MREMAP
fa8d436c 1515static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
a9177ff5 1516#endif
fa8d436c
UD
1517
1518static Void_t* malloc_check(size_t sz, const Void_t *caller);
1519static void free_check(Void_t* mem, const Void_t *caller);
1520static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1521 const Void_t *caller);
1522static Void_t* memalign_check(size_t alignment, size_t bytes,
1523 const Void_t *caller);
1524#ifndef NO_THREADS
fde89ad0
RM
1525# ifdef _LIBC
1526# if USE___THREAD || (defined USE_TLS && !defined SHARED)
1527 /* These routines are never needed in this configuration. */
1528# define NO_STARTER
1529# endif
1530# endif
1531# ifdef NO_STARTER
1532# undef NO_STARTER
1533# else
fa8d436c 1534static Void_t* malloc_starter(size_t sz, const Void_t *caller);
fde89ad0 1535static Void_t* memalign_starter(size_t aln, size_t sz, const Void_t *caller);
fa8d436c 1536static void free_starter(Void_t* mem, const Void_t *caller);
fde89ad0 1537# endif
fa8d436c
UD
1538static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1539static void free_atfork(Void_t* mem, const Void_t *caller);
1540#endif
f65fd747 1541
fa8d436c 1542#else
f65fd747 1543
fa8d436c
UD
1544Void_t* _int_malloc();
1545void _int_free();
1546Void_t* _int_realloc();
1547Void_t* _int_memalign();
1548Void_t* _int_valloc();
1549Void_t* _int_pvalloc();
1550/*static Void_t* cALLOc();*/
1551static Void_t** _int_icalloc();
1552static Void_t** _int_icomalloc();
1553static int mTRIm();
1554static size_t mUSABLe();
1555static void mSTATs();
1556static int mALLOPt();
1557static struct mallinfo mALLINFo();
f65fd747 1558
fa8d436c 1559#endif
f65fd747 1560
f65fd747 1561
f65fd747 1562
f65fd747 1563
fa8d436c 1564/* ------------- Optional versions of memcopy ---------------- */
f65fd747 1565
a1648746 1566
fa8d436c 1567#if USE_MEMCPY
a1648746 1568
a9177ff5 1569/*
fa8d436c
UD
1570 Note: memcpy is ONLY invoked with non-overlapping regions,
1571 so the (usually slower) memmove is not needed.
1572*/
a1648746 1573
fa8d436c
UD
1574#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1575#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
f65fd747 1576
fa8d436c 1577#else /* !USE_MEMCPY */
f65fd747 1578
fa8d436c 1579/* Use Duff's device for good zeroing/copying performance. */
f65fd747 1580
fa8d436c
UD
1581#define MALLOC_ZERO(charp, nbytes) \
1582do { \
1583 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1584 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1585 long mcn; \
1586 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1587 switch (mctmp) { \
1588 case 0: for(;;) { *mzp++ = 0; \
1589 case 7: *mzp++ = 0; \
1590 case 6: *mzp++ = 0; \
1591 case 5: *mzp++ = 0; \
1592 case 4: *mzp++ = 0; \
1593 case 3: *mzp++ = 0; \
1594 case 2: *mzp++ = 0; \
1595 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1596 } \
1597} while(0)
f65fd747 1598
fa8d436c
UD
1599#define MALLOC_COPY(dest,src,nbytes) \
1600do { \
1601 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1602 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1603 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1604 long mcn; \
1605 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1606 switch (mctmp) { \
1607 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1608 case 7: *mcdst++ = *mcsrc++; \
1609 case 6: *mcdst++ = *mcsrc++; \
1610 case 5: *mcdst++ = *mcsrc++; \
1611 case 4: *mcdst++ = *mcsrc++; \
1612 case 3: *mcdst++ = *mcsrc++; \
1613 case 2: *mcdst++ = *mcsrc++; \
1614 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1615 } \
1616} while(0)
f65fd747 1617
f65fd747
UD
1618#endif
1619
fa8d436c 1620/* ------------------ MMAP support ------------------ */
f65fd747 1621
f65fd747 1622
fa8d436c 1623#if HAVE_MMAP
f65fd747 1624
fa8d436c
UD
1625#include <fcntl.h>
1626#ifndef LACKS_SYS_MMAN_H
1627#include <sys/mman.h>
1628#endif
f65fd747 1629
fa8d436c
UD
1630#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1631# define MAP_ANONYMOUS MAP_ANON
1632#endif
1633#if !defined(MAP_FAILED)
1634# define MAP_FAILED ((char*)-1)
1635#endif
f65fd747 1636
fa8d436c
UD
1637#ifndef MAP_NORESERVE
1638# ifdef MAP_AUTORESRV
1639# define MAP_NORESERVE MAP_AUTORESRV
1640# else
1641# define MAP_NORESERVE 0
1642# endif
f65fd747
UD
1643#endif
1644
a9177ff5
RM
1645/*
1646 Nearly all versions of mmap support MAP_ANONYMOUS,
fa8d436c
UD
1647 so the following is unlikely to be needed, but is
1648 supplied just in case.
1649*/
f65fd747 1650
fa8d436c 1651#ifndef MAP_ANONYMOUS
f65fd747 1652
fa8d436c 1653static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
2f6d1f1b 1654
fa8d436c
UD
1655#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1656 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1657 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1658 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
f65fd747 1659
fa8d436c 1660#else
f65fd747 1661
fa8d436c
UD
1662#define MMAP(addr, size, prot, flags) \
1663 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
f65fd747 1664
e9b3e3c5 1665#endif
f65fd747
UD
1666
1667
fa8d436c
UD
1668#endif /* HAVE_MMAP */
1669
1670
f65fd747 1671/*
fa8d436c 1672 ----------------------- Chunk representations -----------------------
f65fd747
UD
1673*/
1674
1675
fa8d436c
UD
1676/*
1677 This struct declaration is misleading (but accurate and necessary).
1678 It declares a "view" into memory allowing access to necessary
1679 fields at known offsets from a given base. See explanation below.
1680*/
1681
1682struct malloc_chunk {
1683
1684 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1685 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1686
1687 struct malloc_chunk* fd; /* double links -- used only if free. */
f65fd747
UD
1688 struct malloc_chunk* bk;
1689};
1690
f65fd747
UD
1691
1692/*
f65fd747
UD
1693 malloc_chunk details:
1694
1695 (The following includes lightly edited explanations by Colin Plumb.)
1696
1697 Chunks of memory are maintained using a `boundary tag' method as
1698 described in e.g., Knuth or Standish. (See the paper by Paul
1699 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1700 survey of such techniques.) Sizes of free chunks are stored both
1701 in the front of each chunk and at the end. This makes
1702 consolidating fragmented chunks into bigger chunks very fast. The
1703 size fields also hold bits representing whether chunks are free or
1704 in use.
1705
1706 An allocated chunk looks like this:
1707
1708
1709 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1710 | Size of previous chunk, if allocated | |
1711 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1712 | Size of chunk, in bytes |P|
1713 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1714 | User data starts here... .
1715 . .
1716 . (malloc_usable_space() bytes) .
1717 . |
1718nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1719 | Size of chunk |
1720 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1721
1722
1723 Where "chunk" is the front of the chunk for the purpose of most of
1724 the malloc code, but "mem" is the pointer that is returned to the
1725 user. "Nextchunk" is the beginning of the next contiguous chunk.
1726
fa8d436c 1727 Chunks always begin on even word boundaries, so the mem portion
f65fd747 1728 (which is returned to the user) is also on an even word boundary, and
fa8d436c 1729 thus at least double-word aligned.
f65fd747
UD
1730
1731 Free chunks are stored in circular doubly-linked lists, and look like this:
1732
1733 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1734 | Size of previous chunk |
1735 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1736 `head:' | Size of chunk, in bytes |P|
1737 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1738 | Forward pointer to next chunk in list |
1739 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1740 | Back pointer to previous chunk in list |
1741 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1742 | Unused space (may be 0 bytes long) .
1743 . .
1744 . |
1745nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1746 `foot:' | Size of chunk, in bytes |
1747 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1748
1749 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1750 chunk size (which is always a multiple of two words), is an in-use
1751 bit for the *previous* chunk. If that bit is *clear*, then the
1752 word before the current chunk size contains the previous chunk
1753 size, and can be used to find the front of the previous chunk.
fa8d436c
UD
1754 The very first chunk allocated always has this bit set,
1755 preventing access to non-existent (or non-owned) memory. If
1756 prev_inuse is set for any given chunk, then you CANNOT determine
1757 the size of the previous chunk, and might even get a memory
1758 addressing fault when trying to do so.
f65fd747
UD
1759
1760 Note that the `foot' of the current chunk is actually represented
fa8d436c
UD
1761 as the prev_size of the NEXT chunk. This makes it easier to
1762 deal with alignments etc but can be very confusing when trying
1763 to extend or adapt this code.
f65fd747
UD
1764
1765 The two exceptions to all this are
1766
fa8d436c
UD
1767 1. The special chunk `top' doesn't bother using the
1768 trailing size field since there is no next contiguous chunk
1769 that would have to index off it. After initialization, `top'
1770 is forced to always exist. If it would become less than
1771 MINSIZE bytes long, it is replenished.
f65fd747
UD
1772
1773 2. Chunks allocated via mmap, which have the second-lowest-order
1774 bit (IS_MMAPPED) set in their size fields. Because they are
fa8d436c 1775 allocated one-by-one, each must contain its own trailing size field.
f65fd747
UD
1776
1777*/
1778
1779/*
fa8d436c
UD
1780 ---------- Size and alignment checks and conversions ----------
1781*/
f65fd747 1782
fa8d436c 1783/* conversion from malloc headers to user pointers, and back */
f65fd747 1784
fa8d436c
UD
1785#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1786#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
f65fd747 1787
fa8d436c
UD
1788/* The smallest possible chunk */
1789#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
f65fd747 1790
fa8d436c 1791/* The smallest size we can malloc is an aligned minimal chunk */
f65fd747 1792
fa8d436c
UD
1793#define MINSIZE \
1794 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
f65fd747 1795
fa8d436c 1796/* Check if m has acceptable alignment */
f65fd747 1797
fa8d436c 1798#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
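
/*
  Worked example (assuming 4-byte SIZE_SZ and 8-byte MALLOC_ALIGNMENT,
  as on a typical 32-bit configuration): for a chunk header at address
  0x1000, prev_size sits at 0x1000 and size at 0x1004, so chunk2mem
  yields the user pointer 0x1008 and mem2chunk(0x1008) recovers 0x1000.
  MIN_CHUNK_SIZE and MINSIZE are then both 16: the smallest chunk must
  be able to hold the two size fields plus fd/bk while it is free.
*/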
f65fd747 1799
f65fd747 1800
a9177ff5 1801/*
fa8d436c
UD
1802 Check if a request is so large that it would wrap around zero when
1803 padded and aligned. To simplify some other code, the bound is made
1804 low enough so that adding MINSIZE will also not wrap around zero.
1805*/
f65fd747 1806
fa8d436c
UD
1807#define REQUEST_OUT_OF_RANGE(req) \
1808 ((unsigned long)(req) >= \
a9177ff5 1809 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
f65fd747 1810
fa8d436c 1811/* pad request bytes into a usable size -- internal version */
f65fd747 1812
fa8d436c
UD
1813#define request2size(req) \
1814 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1815 MINSIZE : \
1816 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
f65fd747 1817
fa8d436c 1818/* Same, except also perform argument check */
f65fd747 1819
fa8d436c
UD
1820#define checked_request2size(req, sz) \
1821 if (REQUEST_OUT_OF_RANGE(req)) { \
1822 MALLOC_FAILURE_ACTION; \
1823 return 0; \
1824 } \
a9177ff5 1825 (sz) = request2size(req);
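
/*
  Worked example (again assuming 4-byte SIZE_SZ and 8-byte alignment,
  hence MINSIZE == 16): request2size(20) == (20 + 4 + 7) & ~7 == 24, so
  malloc(20) consumes a 24-byte chunk, while request2size(1) falls
  below MINSIZE and yields 16.  Only SIZE_SZ (not 2*SIZE_SZ) of
  overhead is added because user data of an in-use chunk may spill into
  the prev_size field of the next chunk, which is unused while this
  chunk is allocated.
*/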
f65fd747
UD
1826
1827/*
fa8d436c 1828 --------------- Physical chunk operations ---------------
f65fd747
UD
1829*/
1830
10dc2a90 1831
fa8d436c
UD
1832/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1833#define PREV_INUSE 0x1
f65fd747 1834
fa8d436c
UD
1835/* extract inuse bit of previous chunk */
1836#define prev_inuse(p) ((p)->size & PREV_INUSE)
f65fd747 1837
f65fd747 1838
fa8d436c
UD
1839/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1840#define IS_MMAPPED 0x2
f65fd747 1841
fa8d436c
UD
1842/* check for mmap()'ed chunk */
1843#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
f65fd747 1844
f65fd747 1845
fa8d436c
UD
1846/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1847 from a non-main arena. This is only set immediately before handing
1848 the chunk to the user, if necessary. */
1849#define NON_MAIN_ARENA 0x4
f65fd747 1850
fa8d436c
UD
1851/* check for chunk from non-main arena */
1852#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
f65fd747
UD
1853
1854
a9177ff5
RM
1855/*
1856 Bits to mask off when extracting size
f65fd747 1857
fa8d436c
UD
1858 Note: IS_MMAPPED is intentionally not masked off from size field in
1859 macros for which mmapped chunks should never be seen. This should
1860 cause helpful core dumps to occur if it is tried by accident by
1861 people extending or adapting this malloc.
f65fd747 1862*/
fa8d436c 1863#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
f65fd747 1864
fa8d436c
UD
1865/* Get size, ignoring use bits */
1866#define chunksize(p) ((p)->size & ~(SIZE_BITS))
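
/*
  Worked example: a size field holding 0x19 (== 24 | PREV_INUSE)
  describes a 24-byte chunk whose physically preceding chunk is in use;
  chunksize() masks off the three flag bits and returns 24, prev_inuse()
  is nonzero, and chunk_is_mmapped() is zero.
*/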
f65fd747 1867
f65fd747 1868
fa8d436c
UD
1869/* Ptr to next physical malloc_chunk. */
1870#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
f65fd747 1871
fa8d436c
UD
1872/* Ptr to previous physical malloc_chunk */
1873#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
f65fd747 1874
fa8d436c
UD
1875/* Treat space at ptr + offset as a chunk */
1876#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1877
1878/* extract p's inuse bit */
1879#define inuse(p)\
1880((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
f65fd747 1881
fa8d436c
UD
1882/* set/clear chunk as being inuse without otherwise disturbing */
1883#define set_inuse(p)\
1884((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
f65fd747 1885
fa8d436c
UD
1886#define clear_inuse(p)\
1887((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
f65fd747
UD
1888
1889
fa8d436c
UD
1890/* check/set/clear inuse bits in known places */
1891#define inuse_bit_at_offset(p, s)\
1892 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
f65fd747 1893
fa8d436c
UD
1894#define set_inuse_bit_at_offset(p, s)\
1895 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
f65fd747 1896
fa8d436c
UD
1897#define clear_inuse_bit_at_offset(p, s)\
1898 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
f65fd747 1899
f65fd747 1900
fa8d436c
UD
1901/* Set size at head, without disturbing its use bit */
1902#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
f65fd747 1903
fa8d436c
UD
1904/* Set size/use field */
1905#define set_head(p, s) ((p)->size = (s))
f65fd747 1906
fa8d436c
UD
1907/* Set size at footer (only when chunk is not in use) */
1908#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
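
/*
  Illustrative sketch of how these are typically combined: to turn a
  chunk p of sz bytes into a properly tagged free chunk, the code in
  effect performs

      set_head(p, sz | PREV_INUSE);    (size plus preceding-inuse flag)
      set_foot(p, sz);                 (mirror sz into next->prev_size)

  so that the following chunk can later locate p through prev_chunk()
  when consolidating.
*/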
f65fd747
UD
1909
1910
fa8d436c
UD
1911/*
1912 -------------------- Internal data structures --------------------
1913
1914 All internal state is held in an instance of malloc_state defined
1915 below. There are no other static variables, except in two optional
a9177ff5
RM
1916 cases:
1917 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
fa8d436c
UD
1918 * If HAVE_MMAP is true, but mmap doesn't support
1919 MAP_ANONYMOUS, a dummy file descriptor for mmap.
1920
1921 Beware of lots of tricks that minimize the total bookkeeping space
 1922 requirements. The result is a little over 1K bytes (for 4-byte
1923 pointers and size_t.)
1924*/
f65fd747
UD
1925
1926/*
fa8d436c
UD
1927 Bins
1928
1929 An array of bin headers for free chunks. Each bin is doubly
1930 linked. The bins are approximately proportionally (log) spaced.
1931 There are a lot of these bins (128). This may look excessive, but
1932 works very well in practice. Most bins hold sizes that are
1933 unusual as malloc request sizes, but are more usual for fragments
1934 and consolidated sets of chunks, which is what these bins hold, so
1935 they can be found quickly. All procedures maintain the invariant
1936 that no consolidated chunk physically borders another one, so each
 1937 chunk in a list is known to be preceded and followed by either
1938 inuse chunks or the ends of memory.
1939
1940 Chunks in bins are kept in size order, with ties going to the
1941 approximately least recently used chunk. Ordering isn't needed
1942 for the small bins, which all contain the same-sized chunks, but
1943 facilitates best-fit allocation for larger chunks. These lists
1944 are just sequential. Keeping them in order almost never requires
1945 enough traversal to warrant using fancier ordered data
a9177ff5 1946 structures.
fa8d436c
UD
1947
1948 Chunks of the same size are linked with the most
1949 recently freed at the front, and allocations are taken from the
1950 back. This results in LRU (FIFO) allocation order, which tends
1951 to give each chunk an equal opportunity to be consolidated with
1952 adjacent freed chunks, resulting in larger free chunks and less
1953 fragmentation.
1954
1955 To simplify use in double-linked lists, each bin header acts
1956 as a malloc_chunk. This avoids special-casing for headers.
1957 But to conserve space and improve locality, we allocate
1958 only the fd/bk pointers of bins, and then use repositioning tricks
a9177ff5 1959 to treat these as the fields of a malloc_chunk*.
f65fd747
UD
1960*/
1961
fa8d436c 1962typedef struct malloc_chunk* mbinptr;
f65fd747 1963
fa8d436c
UD
1964/* addressing -- note that bin_at(0) does not exist */
1965#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
f65fd747 1966
fa8d436c
UD
1967/* analog of ++bin */
1968#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
f65fd747 1969
fa8d436c
UD
1970/* Reminders about list directionality within bins */
1971#define first(b) ((b)->fd)
1972#define last(b) ((b)->bk)
f65fd747 1973
fa8d436c
UD
1974/* Take a chunk off a bin list */
1975#define unlink(P, BK, FD) { \
1976 FD = P->fd; \
1977 BK = P->bk; \
3e030bd5 1978 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
6bf4302e
UD
1979 malloc_printerr (check_action, "corrupted double-linked list", P); \
1980 else { \
1981 FD->bk = BK; \
1982 BK->fd = FD; \
1983 } \
fa8d436c 1984}
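
/*
  Usage sketch (names are illustrative): callers pass two scratch
  pointers that receive the neighbours, e.g.

      mchunkptr fwd, bck;
      unlink(victim, bck, fwd);

  which splices victim out of its bin list, unless the integrity check
  on its fd/bk links fails first and malloc_printerr is called instead.
*/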
f65fd747 1985
fa8d436c
UD
1986/*
1987 Indexing
1988
1989 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1990 8 bytes apart. Larger bins are approximately logarithmically spaced:
f65fd747 1991
fa8d436c
UD
1992 64 bins of size 8
1993 32 bins of size 64
1994 16 bins of size 512
1995 8 bins of size 4096
1996 4 bins of size 32768
1997 2 bins of size 262144
1998 1 bin of size what's left
f65fd747 1999
fa8d436c
UD
2000 There is actually a little bit of slop in the numbers in bin_index
2001 for the sake of speed. This makes no difference elsewhere.
f65fd747 2002
fa8d436c
UD
2003 The bins top out around 1MB because we expect to service large
2004 requests via mmap.
2005*/
f65fd747 2006
fa8d436c
UD
2007#define NBINS 128
2008#define NSMALLBINS 64
2009#define SMALLBIN_WIDTH 8
2010#define MIN_LARGE_SIZE 512
f65fd747 2011
fa8d436c
UD
2012#define in_smallbin_range(sz) \
2013 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
f65fd747 2014
fa8d436c 2015#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
f65fd747 2016
fa8d436c
UD
2017#define largebin_index(sz) \
2018(((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
2019 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2020 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2021 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2022 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2023 126)
f65fd747 2024
fa8d436c
UD
2025#define bin_index(sz) \
2026 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
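
/*
  Worked examples: a 24-byte chunk is "small", so bin_index(24) ==
  smallbin_index(24) == 3; a 640-byte chunk is "large" and 640 >> 6 ==
  10 <= 32, so largebin_index(640) == 56 + 10 == 66; chunks of a
  megabyte or more all land in the final catch-all bin, index 126.
*/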
f65fd747 2027
f65fd747
UD
2028
2029/*
fa8d436c
UD
2030 Unsorted chunks
2031
2032 All remainders from chunk splits, as well as all returned chunks,
2033 are first placed in the "unsorted" bin. They are then placed
2034 in regular bins after malloc gives them ONE chance to be used before
2035 binning. So, basically, the unsorted_chunks list acts as a queue,
2036 with chunks being placed on it in free (and malloc_consolidate),
2037 and taken off (to be either used or placed in bins) in malloc.
2038
2039 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
2040 does not have to be taken into account in size comparisons.
f65fd747
UD
2041*/
2042
fa8d436c
UD
2043/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2044#define unsorted_chunks(M) (bin_at(M, 1))
f65fd747 2045
fa8d436c
UD
2046/*
2047 Top
2048
2049 The top-most available chunk (i.e., the one bordering the end of
2050 available memory) is treated specially. It is never included in
2051 any bin, is used only if no other chunk is available, and is
2052 released back to the system if it is very large (see
2053 M_TRIM_THRESHOLD). Because top initially
2054 points to its own bin with initial zero size, thus forcing
2055 extension on the first malloc request, we avoid having any special
2056 code in malloc to check whether it even exists yet. But we still
2057 need to do so when getting memory from system, so we make
2058 initial_top treat the bin as a legal but unusable chunk during the
2059 interval between initialization and the first call to
2060 sYSMALLOc. (This is somewhat delicate, since it relies on
2061 the 2 preceding words to be zero during this interval as well.)
2062*/
f65fd747 2063
fa8d436c
UD
2064/* Conveniently, the unsorted bin can be used as dummy top on first call */
2065#define initial_top(M) (unsorted_chunks(M))
f65fd747 2066
fa8d436c
UD
2067/*
2068 Binmap
f65fd747 2069
fa8d436c
UD
2070 To help compensate for the large number of bins, a one-level index
2071 structure is used for bin-by-bin searching. `binmap' is a
2072 bitvector recording whether bins are definitely empty so they can
 2073 be skipped over during traversals. The bits are NOT always
2074 cleared as soon as bins are empty, but instead only
2075 when they are noticed to be empty during traversal in malloc.
2076*/
f65fd747 2077
fa8d436c
UD
2078/* Conservatively use 32 bits per map word, even if on 64bit system */
2079#define BINMAPSHIFT 5
2080#define BITSPERMAP (1U << BINMAPSHIFT)
2081#define BINMAPSIZE (NBINS / BITSPERMAP)
f65fd747 2082
fa8d436c
UD
2083#define idx2block(i) ((i) >> BINMAPSHIFT)
2084#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
f65fd747 2085
fa8d436c
UD
2086#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2087#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2088#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
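
/*
  Worked example: with NBINS == 128 and 32-bit map words, BINMAPSIZE ==
  4.  Bin 70 lives in block idx2block(70) == 2 and corresponds to bit
  idx2bit(70) == 1U << (70 & 31) == 1U << 6, so mark_bin(m, 70) sets
  bit 6 of m->binmap[2].
*/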
f65fd747 2089
fa8d436c
UD
2090/*
2091 Fastbins
2092
2093 An array of lists holding recently freed small chunks. Fastbins
2094 are not doubly linked. It is faster to single-link them, and
2095 since chunks are never removed from the middles of these lists,
2096 double linking is not necessary. Also, unlike regular bins, they
2097 are not even processed in FIFO order (they use faster LIFO) since
2098 ordering doesn't much matter in the transient contexts in which
2099 fastbins are normally used.
2100
2101 Chunks in fastbins keep their inuse bit set, so they cannot
2102 be consolidated with other free chunks. malloc_consolidate
2103 releases all chunks in fastbins and consolidates them with
a9177ff5 2104 other free chunks.
fa8d436c 2105*/
f65fd747 2106
fa8d436c 2107typedef struct malloc_chunk* mfastbinptr;
f65fd747 2108
fa8d436c
UD
2109/* offset 2 to use otherwise unindexable first 2 bins */
2110#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
f65fd747 2111
fa8d436c
UD
2112/* The maximum fastbin request size we support */
2113#define MAX_FAST_SIZE 80
f65fd747 2114
fa8d436c 2115#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
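
/*
  Worked example (assuming 4-byte SIZE_SZ, so MINSIZE == 16): the
  smallest possible chunk maps to fastbin_index(16) == (16 >> 3) - 2 ==
  0 and a 24-byte chunk to index 1; request2size(MAX_FAST_SIZE) ==
  request2size(80) == 88, so the largest index is fastbin_index(88) ==
  9 and NFASTBINS == 10.
*/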
f65fd747
UD
2116
2117/*
fa8d436c
UD
2118 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2119 that triggers automatic consolidation of possibly-surrounding
2120 fastbin chunks. This is a heuristic, so the exact value should not
2121 matter too much. It is defined at half the default trim threshold as a
2122 compromise heuristic to only attempt consolidation if it is likely
2123 to lead to trimming. However, it is not dynamically tunable, since
a9177ff5 2124 consolidation reduces fragmentation surrounding large chunks even
fa8d436c 2125 if trimming is not used.
f65fd747
UD
2126*/
2127
fa8d436c 2128#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
f65fd747
UD
2129
2130/*
a9177ff5 2131 Since the lowest 2 bits in max_fast don't matter in size comparisons,
fa8d436c 2132 they are used as flags.
f65fd747
UD
2133*/
2134
fa8d436c
UD
2135/*
2136 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2137 some fastbin chunks. It is set true on entering a chunk into any
2138 fastbin, and cleared only in malloc_consolidate.
f65fd747 2139
fa8d436c
UD
2140 The truth value is inverted so that have_fastchunks will be true
2141 upon startup (since statics are zero-filled), simplifying
2142 initialization checks.
2143*/
f65fd747 2144
fa8d436c 2145#define FASTCHUNKS_BIT (1U)
f65fd747 2146
fa8d436c
UD
2147#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT) == 0)
2148#define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT)
2149#define set_fastchunks(M) ((M)->max_fast &= ~FASTCHUNKS_BIT)
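
/*
  Note on the inverted encoding: right after set_max_fast() runs in
  malloc_init_state, FASTCHUNKS_BIT is set and have_fastchunks(M) is
  therefore false.  The first free() that stashes a chunk in a fastbin
  calls set_fastchunks(M), which clears the bit, and malloc_consolidate
  eventually sets it again via clear_fastchunks(M).
*/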
f65fd747
UD
2150
2151/*
fa8d436c
UD
2152 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2153 regions. Otherwise, contiguity is exploited in merging together,
2154 when possible, results from consecutive MORECORE calls.
f65fd747 2155
fa8d436c
UD
2156 The initial value comes from MORECORE_CONTIGUOUS, but is
2157 changed dynamically if mmap is ever used as an sbrk substitute.
f65fd747
UD
2158*/
2159
fa8d436c 2160#define NONCONTIGUOUS_BIT (2U)
f65fd747 2161
fa8d436c
UD
2162#define contiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) == 0)
2163#define noncontiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) != 0)
2164#define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT)
2165#define set_contiguous(M) ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
f65fd747 2166
a9177ff5
RM
2167/*
2168 Set value of max_fast.
fa8d436c
UD
2169 Use impossibly small value if 0.
2170 Precondition: there are no existing fastbin chunks.
2171 Setting the value clears fastchunk bit but preserves noncontiguous bit.
f65fd747
UD
2172*/
2173
fa8d436c
UD
2174#define set_max_fast(M, s) \
2175 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2176 FASTCHUNKS_BIT | \
2177 ((M)->max_fast & NONCONTIGUOUS_BIT)
f65fd747 2178
f65fd747
UD
2179
2180/*
fa8d436c 2181 ----------- Internal state representation and initialization -----------
f65fd747
UD
2182*/
2183
fa8d436c
UD
2184struct malloc_state {
2185 /* Serialize access. */
2186 mutex_t mutex;
f65fd747 2187
fa8d436c
UD
2188 /* Statistics for locking. Only used if THREAD_STATS is defined. */
2189 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
2190 long pad0_[1]; /* try to give the mutex its own cacheline */
f65fd747 2191
fa8d436c
UD
2192 /* The maximum chunk size to be eligible for fastbin */
2193 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
f65fd747 2194
fa8d436c
UD
2195 /* Fastbins */
2196 mfastbinptr fastbins[NFASTBINS];
f65fd747 2197
fa8d436c
UD
2198 /* Base of the topmost chunk -- not otherwise kept in a bin */
2199 mchunkptr top;
f65fd747 2200
fa8d436c
UD
2201 /* The remainder from the most recent split of a small request */
2202 mchunkptr last_remainder;
f65fd747 2203
fa8d436c
UD
2204 /* Normal bins packed as described above */
2205 mchunkptr bins[NBINS * 2];
f65fd747 2206
fa8d436c
UD
2207 /* Bitmap of bins */
2208 unsigned int binmap[BINMAPSIZE];
f65fd747 2209
fa8d436c
UD
2210 /* Linked list */
2211 struct malloc_state *next;
f65fd747 2212
fa8d436c
UD
2213 /* Memory allocated from the system in this arena. */
2214 INTERNAL_SIZE_T system_mem;
2215 INTERNAL_SIZE_T max_system_mem;
2216};
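
/*
  Back-of-the-envelope size check (4-byte pointers and INTERNAL_SIZE_T
  assumed): bins[] dominates at NBINS * 2 * 4 == 1024 bytes, fastbins[]
  adds NFASTBINS * 4 == 40, binmap[] adds 16, and the mutex, statistics
  and remaining scalars make up the rest -- consistent with the "little
  over 1K bytes" figure quoted above.
*/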
f65fd747 2217
fa8d436c
UD
2218struct malloc_par {
2219 /* Tunable parameters */
2220 unsigned long trim_threshold;
2221 INTERNAL_SIZE_T top_pad;
2222 INTERNAL_SIZE_T mmap_threshold;
2223
2224 /* Memory map support */
2225 int n_mmaps;
2226 int n_mmaps_max;
2227 int max_n_mmaps;
2228
2229 /* Cache malloc_getpagesize */
a9177ff5 2230 unsigned int pagesize;
fa8d436c
UD
2231
2232 /* Statistics */
2233 INTERNAL_SIZE_T mmapped_mem;
2234 /*INTERNAL_SIZE_T sbrked_mem;*/
2235 /*INTERNAL_SIZE_T max_sbrked_mem;*/
2236 INTERNAL_SIZE_T max_mmapped_mem;
2237 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
2238
2239 /* First address handed out by MORECORE/sbrk. */
2240 char* sbrk_base;
2241};
f65fd747 2242
fa8d436c
UD
2243/* There are several instances of this struct ("arenas") in this
2244 malloc. If you are adapting this malloc in a way that does NOT use
2245 a static or mmapped malloc_state, you MUST explicitly zero-fill it
2246 before using. This malloc relies on the property that malloc_state
2247 is initialized to all zeroes (as is true of C statics). */
f65fd747 2248
fa8d436c 2249static struct malloc_state main_arena;
f65fd747 2250
fa8d436c 2251/* There is only one instance of the malloc parameters. */
f65fd747 2252
fa8d436c 2253static struct malloc_par mp_;
f65fd747 2254
fa8d436c
UD
2255/*
2256 Initialize a malloc_state struct.
f65fd747 2257
fa8d436c
UD
2258 This is called only from within malloc_consolidate, which needs
 2259 to be called in the same contexts anyway. It is never called directly
2260 outside of malloc_consolidate because some optimizing compilers try
2261 to inline it at all call points, which turns out not to be an
2262 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2263*/
f65fd747 2264
fa8d436c
UD
2265#if __STD_C
2266static void malloc_init_state(mstate av)
2267#else
2268static void malloc_init_state(av) mstate av;
2269#endif
2270{
2271 int i;
2272 mbinptr bin;
a9177ff5 2273
fa8d436c 2274 /* Establish circular links for normal bins */
a9177ff5 2275 for (i = 1; i < NBINS; ++i) {
fa8d436c
UD
2276 bin = bin_at(av,i);
2277 bin->fd = bin->bk = bin;
2278 }
f65fd747 2279
fa8d436c
UD
2280#if MORECORE_CONTIGUOUS
2281 if (av != &main_arena)
2282#endif
2283 set_noncontiguous(av);
f65fd747 2284
fa8d436c 2285 set_max_fast(av, DEFAULT_MXFAST);
f65fd747 2286
fa8d436c
UD
2287 av->top = initial_top(av);
2288}
e9b3e3c5 2289
a9177ff5 2290/*
fa8d436c
UD
2291 Other internal utilities operating on mstates
2292*/
f65fd747 2293
fa8d436c
UD
2294#if __STD_C
2295static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2296static int sYSTRIm(size_t, mstate);
2297static void malloc_consolidate(mstate);
2298static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
831372e7 2299#else
fa8d436c
UD
2300static Void_t* sYSMALLOc();
2301static int sYSTRIm();
2302static void malloc_consolidate();
2303static Void_t** iALLOc();
831372e7 2304#endif
7e3be507 2305
404d4cef
RM
2306
2307/* -------------- Early definitions for debugging hooks ---------------- */
2308
2309/* Define and initialize the hook variables. These weak definitions must
2310 appear before any use of the variables in a function (arena.c uses one). */
2311#ifndef weak_variable
2312#ifndef _LIBC
2313#define weak_variable /**/
2314#else
2315/* In GNU libc we want the hook variables to be weak definitions to
2316 avoid a problem with Emacs. */
2317#define weak_variable weak_function
2318#endif
2319#endif
2320
2321/* Forward declarations. */
2322static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
2323 const __malloc_ptr_t caller));
2324static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
2325 const __malloc_ptr_t caller));
2326static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
2327 const __malloc_ptr_t caller));
2328
06d6611a
UD
2329void weak_variable (*__malloc_initialize_hook) (void) = NULL;
2330void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
2331 const __malloc_ptr_t) = NULL;
404d4cef 2332__malloc_ptr_t weak_variable (*__malloc_hook)
06d6611a 2333 (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
404d4cef 2334__malloc_ptr_t weak_variable (*__realloc_hook)
06d6611a 2335 (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
404d4cef
RM
2336 = realloc_hook_ini;
2337__malloc_ptr_t weak_variable (*__memalign_hook)
06d6611a 2338 (size_t __alignment, size_t __size, const __malloc_ptr_t)
404d4cef 2339 = memalign_hook_ini;
06d6611a 2340void weak_variable (*__after_morecore_hook) (void) = NULL;
404d4cef
RM
2341
2342
3e030bd5
UD
2343/* ---------------- Error behavior ------------------------------------ */
2344
2345#ifndef DEFAULT_CHECK_ACTION
2346#define DEFAULT_CHECK_ACTION 3
2347#endif
2348
2349static int check_action = DEFAULT_CHECK_ACTION;
2350
2351
fa8d436c
UD
2352/* ------------------- Support for multiple arenas -------------------- */
2353#include "arena.c"
f65fd747 2354
fa8d436c
UD
2355/*
2356 Debugging support
f65fd747 2357
fa8d436c
UD
2358 These routines make a number of assertions about the states
2359 of data structures that should be true at all times. If any
2360 are not true, it's very likely that a user program has somehow
2361 trashed memory. (It's also possible that there is a coding error
2362 in malloc. In which case, please report it!)
2363*/
ee74a442 2364
fa8d436c 2365#if ! MALLOC_DEBUG
d8f00d46 2366
fa8d436c
UD
2367#define check_chunk(A,P)
2368#define check_free_chunk(A,P)
2369#define check_inuse_chunk(A,P)
2370#define check_remalloced_chunk(A,P,N)
2371#define check_malloced_chunk(A,P,N)
2372#define check_malloc_state(A)
d8f00d46 2373
fa8d436c 2374#else
ca34d7a7 2375
fa8d436c
UD
2376#define check_chunk(A,P) do_check_chunk(A,P)
2377#define check_free_chunk(A,P) do_check_free_chunk(A,P)
2378#define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2379#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2380#define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2381#define check_malloc_state(A) do_check_malloc_state(A)
ca34d7a7 2382
fa8d436c
UD
2383/*
2384 Properties of all chunks
2385*/
ca34d7a7 2386
fa8d436c
UD
2387#if __STD_C
2388static void do_check_chunk(mstate av, mchunkptr p)
2389#else
2390static void do_check_chunk(av, p) mstate av; mchunkptr p;
ca34d7a7 2391#endif
ca34d7a7 2392{
fa8d436c
UD
2393 unsigned long sz = chunksize(p);
2394 /* min and max possible addresses assuming contiguous allocation */
2395 char* max_address = (char*)(av->top) + chunksize(av->top);
2396 char* min_address = max_address - av->system_mem;
2397
2398 if (!chunk_is_mmapped(p)) {
a9177ff5 2399
fa8d436c
UD
2400 /* Has legal address ... */
2401 if (p != av->top) {
2402 if (contiguous(av)) {
2403 assert(((char*)p) >= min_address);
2404 assert(((char*)p + sz) <= ((char*)(av->top)));
2405 }
2406 }
2407 else {
2408 /* top size is always at least MINSIZE */
2409 assert((unsigned long)(sz) >= MINSIZE);
2410 /* top predecessor always marked inuse */
2411 assert(prev_inuse(p));
2412 }
a9177ff5 2413
ca34d7a7 2414 }
fa8d436c
UD
2415 else {
2416#if HAVE_MMAP
2417 /* address is outside main heap */
2418 if (contiguous(av) && av->top != initial_top(av)) {
2419 assert(((char*)p) < min_address || ((char*)p) > max_address);
2420 }
2421 /* chunk is page-aligned */
2422 assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
2423 /* mem is aligned */
2424 assert(aligned_OK(chunk2mem(p)));
2425#else
2426 /* force an appropriate assert violation if debug set */
2427 assert(!chunk_is_mmapped(p));
eb406346 2428#endif
eb406346 2429 }
eb406346
UD
2430}
2431
fa8d436c
UD
2432/*
2433 Properties of free chunks
2434*/
ee74a442 2435
fa8d436c
UD
2436#if __STD_C
2437static void do_check_free_chunk(mstate av, mchunkptr p)
2438#else
2439static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
10dc2a90 2440#endif
67c94753 2441{
fa8d436c
UD
2442 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2443 mchunkptr next = chunk_at_offset(p, sz);
67c94753 2444
fa8d436c 2445 do_check_chunk(av, p);
67c94753 2446
fa8d436c
UD
2447 /* Chunk must claim to be free ... */
2448 assert(!inuse(p));
2449 assert (!chunk_is_mmapped(p));
67c94753 2450
fa8d436c
UD
2451 /* Unless a special marker, must have OK fields */
2452 if ((unsigned long)(sz) >= MINSIZE)
2453 {
2454 assert((sz & MALLOC_ALIGN_MASK) == 0);
2455 assert(aligned_OK(chunk2mem(p)));
2456 /* ... matching footer field */
2457 assert(next->prev_size == sz);
2458 /* ... and is fully consolidated */
2459 assert(prev_inuse(p));
2460 assert (next == av->top || inuse(next));
2461
2462 /* ... and has minimally sane links */
2463 assert(p->fd->bk == p);
2464 assert(p->bk->fd == p);
2465 }
2466 else /* markers are always of size SIZE_SZ */
2467 assert(sz == SIZE_SZ);
67c94753 2468}
67c94753 2469
fa8d436c
UD
2470/*
2471 Properties of inuse chunks
2472*/
2473
2474#if __STD_C
2475static void do_check_inuse_chunk(mstate av, mchunkptr p)
f65fd747 2476#else
fa8d436c 2477static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
f65fd747
UD
2478#endif
2479{
fa8d436c 2480 mchunkptr next;
f65fd747 2481
fa8d436c 2482 do_check_chunk(av, p);
f65fd747 2483
fa8d436c
UD
2484 if (chunk_is_mmapped(p))
2485 return; /* mmapped chunks have no next/prev */
ca34d7a7 2486
fa8d436c
UD
2487 /* Check whether it claims to be in use ... */
2488 assert(inuse(p));
10dc2a90 2489
fa8d436c 2490 next = next_chunk(p);
10dc2a90 2491
fa8d436c
UD
2492 /* ... and is surrounded by OK chunks.
2493 Since more things can be checked with free chunks than inuse ones,
2494 if an inuse chunk borders them and debug is on, it's worth doing them.
2495 */
2496 if (!prev_inuse(p)) {
2497 /* Note that we cannot even look at prev unless it is not inuse */
2498 mchunkptr prv = prev_chunk(p);
2499 assert(next_chunk(prv) == p);
2500 do_check_free_chunk(av, prv);
2501 }
2502
2503 if (next == av->top) {
2504 assert(prev_inuse(next));
2505 assert(chunksize(next) >= MINSIZE);
2506 }
2507 else if (!inuse(next))
2508 do_check_free_chunk(av, next);
10dc2a90
UD
2509}
2510
fa8d436c
UD
2511/*
2512 Properties of chunks recycled from fastbins
2513*/
2514
10dc2a90 2515#if __STD_C
fa8d436c 2516static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2517#else
fa8d436c
UD
2518static void do_check_remalloced_chunk(av, p, s)
2519mstate av; mchunkptr p; INTERNAL_SIZE_T s;
a2b08ee5 2520#endif
10dc2a90 2521{
fa8d436c
UD
2522 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2523
2524 if (!chunk_is_mmapped(p)) {
2525 assert(av == arena_for_chunk(p));
2526 if (chunk_non_main_arena(p))
2527 assert(av != &main_arena);
2528 else
2529 assert(av == &main_arena);
2530 }
2531
2532 do_check_inuse_chunk(av, p);
2533
2534 /* Legal size ... */
2535 assert((sz & MALLOC_ALIGN_MASK) == 0);
2536 assert((unsigned long)(sz) >= MINSIZE);
2537 /* ... and alignment */
2538 assert(aligned_OK(chunk2mem(p)));
2539 /* chunk is less than MINSIZE more than request */
2540 assert((long)(sz) - (long)(s) >= 0);
2541 assert((long)(sz) - (long)(s + MINSIZE) < 0);
10dc2a90
UD
2542}
2543
fa8d436c
UD
2544/*
2545 Properties of nonrecycled chunks at the point they are malloced
2546*/
2547
10dc2a90 2548#if __STD_C
fa8d436c 2549static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2550#else
fa8d436c
UD
2551static void do_check_malloced_chunk(av, p, s)
2552mstate av; mchunkptr p; INTERNAL_SIZE_T s;
a2b08ee5 2553#endif
10dc2a90 2554{
fa8d436c
UD
2555 /* same as recycled case ... */
2556 do_check_remalloced_chunk(av, p, s);
10dc2a90 2557
fa8d436c
UD
2558 /*
2559 ... plus, must obey implementation invariant that prev_inuse is
2560 always true of any allocated chunk; i.e., that each allocated
2561 chunk borders either a previously allocated and still in-use
2562 chunk, or the base of its memory arena. This is ensured
 2563 by making all allocations from the `lowest' part of any found
2564 chunk. This does not necessarily hold however for chunks
2565 recycled via fastbins.
2566 */
10dc2a90 2567
fa8d436c
UD
2568 assert(prev_inuse(p));
2569}
10dc2a90 2570
f65fd747 2571
fa8d436c
UD
2572/*
2573 Properties of malloc_state.
f65fd747 2574
fa8d436c
UD
2575 This may be useful for debugging malloc, as well as detecting user
2576 programmer errors that somehow write into malloc_state.
f65fd747 2577
fa8d436c
UD
2578 If you are extending or experimenting with this malloc, you can
2579 probably figure out how to hack this routine to print out or
2580 display chunk addresses, sizes, bins, and other instrumentation.
2581*/
f65fd747 2582
fa8d436c
UD
2583static void do_check_malloc_state(mstate av)
2584{
2585 int i;
2586 mchunkptr p;
2587 mchunkptr q;
2588 mbinptr b;
2589 unsigned int binbit;
2590 int empty;
2591 unsigned int idx;
2592 INTERNAL_SIZE_T size;
2593 unsigned long total = 0;
2594 int max_fast_bin;
f65fd747 2595
fa8d436c
UD
2596 /* internal size_t must be no wider than pointer type */
2597 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
f65fd747 2598
fa8d436c
UD
2599 /* alignment is a power of 2 */
2600 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
f65fd747 2601
fa8d436c
UD
2602 /* cannot run remaining checks until fully initialized */
2603 if (av->top == 0 || av->top == initial_top(av))
2604 return;
f65fd747 2605
fa8d436c
UD
2606 /* pagesize is a power of 2 */
2607 assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
f65fd747 2608
fa8d436c
UD
2609 /* A contiguous main_arena is consistent with sbrk_base. */
2610 if (av == &main_arena && contiguous(av))
2611 assert((char*)mp_.sbrk_base + av->system_mem ==
2612 (char*)av->top + chunksize(av->top));
2613
2614 /* properties of fastbins */
2615
2616 /* max_fast is in allowed range */
2617 assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE));
2618
2619 max_fast_bin = fastbin_index(av->max_fast);
2620
2621 for (i = 0; i < NFASTBINS; ++i) {
2622 p = av->fastbins[i];
2623
2624 /* all bins past max_fast are empty */
2625 if (i > max_fast_bin)
2626 assert(p == 0);
2627
2628 while (p != 0) {
2629 /* each chunk claims to be inuse */
2630 do_check_inuse_chunk(av, p);
2631 total += chunksize(p);
2632 /* chunk belongs in this bin */
2633 assert(fastbin_index(chunksize(p)) == i);
2634 p = p->fd;
2635 }
2636 }
2637
2638 if (total != 0)
2639 assert(have_fastchunks(av));
2640 else if (!have_fastchunks(av))
2641 assert(total == 0);
2642
2643 /* check normal bins */
2644 for (i = 1; i < NBINS; ++i) {
2645 b = bin_at(av,i);
2646
2647 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2648 if (i >= 2) {
2649 binbit = get_binmap(av,i);
2650 empty = last(b) == b;
2651 if (!binbit)
2652 assert(empty);
2653 else if (!empty)
2654 assert(binbit);
2655 }
2656
2657 for (p = last(b); p != b; p = p->bk) {
2658 /* each chunk claims to be free */
2659 do_check_free_chunk(av, p);
2660 size = chunksize(p);
2661 total += size;
2662 if (i >= 2) {
2663 /* chunk belongs in bin */
2664 idx = bin_index(size);
2665 assert(idx == i);
2666 /* lists are sorted */
a9177ff5 2667 assert(p->bk == b ||
fa8d436c
UD
2668 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2669 }
2670 /* chunk is followed by a legal chain of inuse chunks */
2671 for (q = next_chunk(p);
a9177ff5 2672 (q != av->top && inuse(q) &&
fa8d436c
UD
2673 (unsigned long)(chunksize(q)) >= MINSIZE);
2674 q = next_chunk(q))
2675 do_check_inuse_chunk(av, q);
2676 }
2677 }
f65fd747 2678
fa8d436c
UD
2679 /* top chunk is OK */
2680 check_chunk(av, av->top);
2681
2682 /* sanity checks for statistics */
2683
2684#ifdef NO_THREADS
2685 assert(total <= (unsigned long)(mp_.max_total_mem));
2686 assert(mp_.n_mmaps >= 0);
f65fd747 2687#endif
fa8d436c
UD
2688 assert(mp_.n_mmaps <= mp_.n_mmaps_max);
2689 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2690
2691 assert((unsigned long)(av->system_mem) <=
2692 (unsigned long)(av->max_system_mem));
f65fd747 2693
fa8d436c
UD
2694 assert((unsigned long)(mp_.mmapped_mem) <=
2695 (unsigned long)(mp_.max_mmapped_mem));
2696
2697#ifdef NO_THREADS
2698 assert((unsigned long)(mp_.max_total_mem) >=
2699 (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
dfd2257a 2700#endif
fa8d436c
UD
2701}
2702#endif
2703
2704
2705/* ----------------- Support for debugging hooks -------------------- */
2706#include "hooks.c"
2707
2708
2709/* ----------- Routines dealing with system allocation -------------- */
2710
2711/*
2712 sysmalloc handles malloc cases requiring more memory from the system.
2713 On entry, it is assumed that av->top does not have enough
2714 space to service request for nb bytes, thus requiring that av->top
2715 be extended or replaced.
2716*/
2717
f65fd747 2718#if __STD_C
fa8d436c 2719static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
f65fd747 2720#else
fa8d436c 2721static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
f65fd747
UD
2722#endif
2723{
fa8d436c
UD
2724 mchunkptr old_top; /* incoming value of av->top */
2725 INTERNAL_SIZE_T old_size; /* its size */
2726 char* old_end; /* its end address */
f65fd747 2727
fa8d436c
UD
2728 long size; /* arg to first MORECORE or mmap call */
2729 char* brk; /* return value from MORECORE */
f65fd747 2730
fa8d436c
UD
2731 long correction; /* arg to 2nd MORECORE call */
2732 char* snd_brk; /* 2nd return val */
f65fd747 2733
fa8d436c
UD
2734 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2735 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2736 char* aligned_brk; /* aligned offset into brk */
f65fd747 2737
fa8d436c
UD
2738 mchunkptr p; /* the allocated/returned chunk */
2739 mchunkptr remainder; /* remainder from allocation */
2740 unsigned long remainder_size; /* its size */
2741
2742 unsigned long sum; /* for updating stats */
2743
2744 size_t pagemask = mp_.pagesize - 1;
2745
2746
2747#if HAVE_MMAP
2748
2749 /*
2750 If have mmap, and the request size meets the mmap threshold, and
2751 the system supports mmap, and there are few enough currently
2752 allocated mmapped regions, try to directly map this request
2753 rather than expanding top.
2754 */
f65fd747 2755
fa8d436c
UD
2756 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2757 (mp_.n_mmaps < mp_.n_mmaps_max)) {
f65fd747 2758
fa8d436c
UD
2759 char* mm; /* return value from mmap call*/
2760
2761 /*
2762 Round up size to nearest page. For mmapped chunks, the overhead
2763 is one SIZE_SZ unit larger than for normal chunks, because there
2764 is no following chunk whose prev_size field could be used.
2765 */
2766 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
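    /*
      For instance, with 4KB pages and 4-byte SIZE_SZ, a 200000-byte
      request is rounded up here to 49 pages == 200704 bytes (example
      values only).
    */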
2767
2768 /* Don't try if size wraps around 0 */
2769 if ((unsigned long)(size) > (unsigned long)(nb)) {
2770
2771 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
a9177ff5 2772
fa8d436c 2773 if (mm != MAP_FAILED) {
a9177ff5 2774
fa8d436c
UD
2775 /*
2776 The offset to the start of the mmapped region is stored
2777 in the prev_size field of the chunk. This allows us to adjust
a9177ff5 2778 returned start address to meet alignment requirements here
fa8d436c
UD
2779 and in memalign(), and still be able to compute proper
2780 address argument for later munmap in free() and realloc().
2781 */
a9177ff5 2782
fa8d436c
UD
2783 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2784 if (front_misalign > 0) {
2785 correction = MALLOC_ALIGNMENT - front_misalign;
2786 p = (mchunkptr)(mm + correction);
2787 p->prev_size = correction;
2788 set_head(p, (size - correction) |IS_MMAPPED);
2789 }
2790 else {
2791 p = (mchunkptr)mm;
2792 set_head(p, size|IS_MMAPPED);
2793 }
a9177ff5 2794
fa8d436c 2795 /* update statistics */
a9177ff5
RM
2796
2797 if (++mp_.n_mmaps > mp_.max_n_mmaps)
fa8d436c 2798 mp_.max_n_mmaps = mp_.n_mmaps;
a9177ff5 2799
fa8d436c 2800 sum = mp_.mmapped_mem += size;
a9177ff5 2801 if (sum > (unsigned long)(mp_.max_mmapped_mem))
fa8d436c 2802 mp_.max_mmapped_mem = sum;
8a4b65b4 2803#ifdef NO_THREADS
fa8d436c 2804 sum += av->system_mem;
a9177ff5 2805 if (sum > (unsigned long)(mp_.max_total_mem))
fa8d436c 2806 mp_.max_total_mem = sum;
8a4b65b4 2807#endif
fa8d436c
UD
2808
2809 check_chunk(av, p);
a9177ff5 2810
fa8d436c
UD
2811 return chunk2mem(p);
2812 }
2813 }
2814 }
2815#endif
2816
2817 /* Record incoming configuration of top */
2818
2819 old_top = av->top;
2820 old_size = chunksize(old_top);
2821 old_end = (char*)(chunk_at_offset(old_top, old_size));
2822
a9177ff5 2823 brk = snd_brk = (char*)(MORECORE_FAILURE);
fa8d436c 2824
a9177ff5 2825 /*
fa8d436c
UD
2826 If not the first time through, we require old_size to be
2827 at least MINSIZE and to have prev_inuse set.
2828 */
2829
a9177ff5 2830 assert((old_top == initial_top(av) && old_size == 0) ||
fa8d436c
UD
2831 ((unsigned long) (old_size) >= MINSIZE &&
2832 prev_inuse(old_top) &&
2833 ((unsigned long)old_end & pagemask) == 0));
2834
2835 /* Precondition: not enough current space to satisfy nb request */
2836 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2837
2838 /* Precondition: all fastbins are consolidated */
2839 assert(!have_fastchunks(av));
2840
2841
2842 if (av != &main_arena) {
2843
2844 heap_info *old_heap, *heap;
2845 size_t old_heap_size;
2846
2847 /* First try to extend the current heap. */
2848 old_heap = heap_for_ptr(old_top);
2849 old_heap_size = old_heap->size;
2850 if (grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
2851 av->system_mem += old_heap->size - old_heap_size;
2852 arena_mem += old_heap->size - old_heap_size;
2853#if 0
2854 if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
2855 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2856#endif
2857 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2858 | PREV_INUSE);
e6ac0e78
UD
2859 }
2860 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
2861 /* Use a newly allocated heap. */
2862 heap->ar_ptr = av;
2863 heap->prev = old_heap;
2864 av->system_mem += heap->size;
2865 arena_mem += heap->size;
fa8d436c 2866#if 0
e6ac0e78
UD
2867 if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
2868 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
fa8d436c 2869#endif
fa8d436c
UD
2870 /* Set up the new top. */
2871 top(av) = chunk_at_offset(heap, sizeof(*heap));
2872 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2873
2874 /* Setup fencepost and free the old top chunk. */
2875 /* The fencepost takes at least MINSIZE bytes, because it might
2876 become the top chunk again later. Note that a footer is set
2877 up, too, although the chunk is marked in use. */
2878 old_size -= MINSIZE;
2879 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
2880 if (old_size >= MINSIZE) {
2881 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
2882 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
2883 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
2884 _int_free(av, chunk2mem(old_top));
2885 } else {
2886 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
2887 set_foot(old_top, (old_size + 2*SIZE_SZ));
2888 }
2889 }
2890
2891 } else { /* av == main_arena */
2892
2893
2894 /* Request enough space for nb + pad + overhead */
2895
2896 size = nb + mp_.top_pad + MINSIZE;
2897
2898 /*
2899 If contiguous, we can subtract out existing space that we hope to
2900 combine with new space. We add it back later only if
2901 we don't actually get contiguous space.
2902 */
2903
2904 if (contiguous(av))
2905 size -= old_size;
2906
2907 /*
2908 Round to a multiple of page size.
2909 If MORECORE is not contiguous, this ensures that we only call it
2910 with whole-page arguments. And if MORECORE is contiguous and
2911 this is not first time through, this preserves page-alignment of
2912 previous calls. Otherwise, we correct to page-align below.
2913 */
2914
2915 size = (size + pagemask) & ~pagemask;
2916
2917 /*
2918 Don't try to call MORECORE if argument is so big as to appear
2919 negative. Note that since mmap takes size_t arg, it may succeed
2920 below even if we cannot call MORECORE.
2921 */
2922
a9177ff5 2923 if (size > 0)
fa8d436c
UD
2924 brk = (char*)(MORECORE(size));
2925
2926 if (brk != (char*)(MORECORE_FAILURE)) {
2927 /* Call the `morecore' hook if necessary. */
2928 if (__after_morecore_hook)
2929 (*__after_morecore_hook) ();
2930 } else {
2931 /*
2932 If have mmap, try using it as a backup when MORECORE fails or
2933 cannot be used. This is worth doing on systems that have "holes" in
2934 address space, so sbrk cannot extend to give contiguous space, but
2935 space is available elsewhere. Note that we ignore mmap max count
2936 and threshold limits, since the space will not be used as a
2937 segregated mmap region.
2938 */
2939
2940#if HAVE_MMAP
2941 /* Cannot merge with old top, so add its size back in */
2942 if (contiguous(av))
2943 size = (size + old_size + pagemask) & ~pagemask;
2944
2945 /* If we are relying on mmap as backup, then use larger units */
2946 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
2947 size = MMAP_AS_MORECORE_SIZE;
2948
2949 /* Don't try if size wraps around 0 */
2950 if ((unsigned long)(size) > (unsigned long)(nb)) {
2951
75bfdfc7 2952 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
a9177ff5 2953
75bfdfc7 2954 if (mbrk != MAP_FAILED) {
a9177ff5 2955
fa8d436c 2956 /* We do not need, and cannot use, another sbrk call to find end */
75bfdfc7 2957 brk = mbrk;
fa8d436c 2958 snd_brk = brk + size;
a9177ff5
RM
2959
2960 /*
2961 Record that we no longer have a contiguous sbrk region.
fa8d436c
UD
2962 After the first time mmap is used as backup, we do not
2963 ever rely on contiguous space since this could incorrectly
2964 bridge regions.
2965 */
2966 set_noncontiguous(av);
2967 }
2968 }
2969#endif
2970 }
2971
2972 if (brk != (char*)(MORECORE_FAILURE)) {
2973 if (mp_.sbrk_base == 0)
2974 mp_.sbrk_base = brk;
2975 av->system_mem += size;
2976
2977 /*
2978 If MORECORE extends previous space, we can likewise extend top size.
2979 */
a9177ff5 2980
fa8d436c
UD
2981 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
2982 set_head(old_top, (size + old_size) | PREV_INUSE);
2983
886d5973 2984 else if (contiguous(av) && old_size && brk < old_end) {
fa8d436c
UD
2985 /* Oops! Someone else killed our space.. Can't touch anything. */
2986 assert(0);
2987 }
2988
2989 /*
2990 Otherwise, make adjustments:
a9177ff5 2991
fa8d436c
UD
2992 * If the first time through or noncontiguous, we need to call sbrk
2993 just to find out where the end of memory lies.
2994
2995 * We need to ensure that all returned chunks from malloc will meet
2996 MALLOC_ALIGNMENT
2997
2998 * If there was an intervening foreign sbrk, we need to adjust sbrk
2999 request size to account for fact that we will not be able to
3000 combine new space with existing space in old_top.
3001
3002 * Almost all systems internally allocate whole pages at a time, in
3003 which case we might as well use the whole last page of request.
3004 So we allocate enough more memory to hit a page boundary now,
3005 which in turn causes future contiguous calls to page-align.
3006 */
a9177ff5 3007
fa8d436c 3008 else {
fa8d436c
UD
3009 front_misalign = 0;
3010 end_misalign = 0;
3011 correction = 0;
3012 aligned_brk = brk;
a9177ff5 3013
fa8d436c 3014 /* handle contiguous cases */
a9177ff5
RM
3015 if (contiguous(av)) {
3016
0cb71e02
UD
3017 /* Count foreign sbrk as system_mem. */
3018 if (old_size)
3019 av->system_mem += brk - old_end;
3020
fa8d436c
UD
3021 /* Guarantee alignment of first new chunk made from this space */
3022
3023 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3024 if (front_misalign > 0) {
3025
3026 /*
3027 Skip over some bytes to arrive at an aligned position.
3028 We don't need to specially mark these wasted front bytes.
3029 They will never be accessed anyway because
3030 prev_inuse of av->top (and any chunk created from its start)
3031 is always true after initialization.
3032 */
3033
3034 correction = MALLOC_ALIGNMENT - front_misalign;
3035 aligned_brk += correction;
3036 }
a9177ff5 3037
fa8d436c
UD
3038 /*
3039 If this isn't adjacent to existing space, then we will not
3040 be able to merge with old_top space, so must add to 2nd request.
3041 */
a9177ff5 3042
fa8d436c 3043 correction += old_size;
a9177ff5 3044
fa8d436c
UD
3045 /* Extend the end address to hit a page boundary */
3046 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3047 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
a9177ff5 3048
fa8d436c
UD
3049 assert(correction >= 0);
3050 snd_brk = (char*)(MORECORE(correction));
a9177ff5 3051
fa8d436c
UD
3052 /*
3053 If we can't allocate the correction, try to at least find out the
3054 current brk. It might be enough to proceed without failing.
a9177ff5 3055
fa8d436c
UD
3056 Note that if second sbrk did NOT fail, we assume that space
3057 is contiguous with first sbrk. This is a safe assumption unless
3058 program is multithreaded but doesn't use locks and a foreign sbrk
3059 occurred between our first and second calls.
3060 */
a9177ff5 3061
fa8d436c
UD
3062 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3063 correction = 0;
3064 snd_brk = (char*)(MORECORE(0));
3065 } else
3066 /* Call the `morecore' hook if necessary. */
3067 if (__after_morecore_hook)
3068 (*__after_morecore_hook) ();
3069 }
a9177ff5 3070
fa8d436c 3071 /* handle non-contiguous cases */
a9177ff5 3072 else {
fa8d436c
UD
3073 /* MORECORE/mmap must correctly align */
3074 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
a9177ff5 3075
fa8d436c
UD
3076 /* Find out current end of memory */
3077 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3078 snd_brk = (char*)(MORECORE(0));
3079 }
3080 }
a9177ff5 3081
fa8d436c
UD
3082 /* Adjust top based on results of second sbrk */
3083 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3084 av->top = (mchunkptr)aligned_brk;
3085 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3086 av->system_mem += correction;
a9177ff5 3087
fa8d436c
UD
3088 /*
3089 If not the first time through, we either have a
3090 gap due to foreign sbrk or a non-contiguous region. Insert a
3091 double fencepost at old_top to prevent consolidation with space
3092 we don't own. These fenceposts are artificial chunks that are
3093 marked as inuse and are in any case too small to use. We need
3094 two to make sizes and alignments work out.
3095 */
a9177ff5 3096
fa8d436c 3097 if (old_size != 0) {
a9177ff5 3098 /*
fa8d436c
UD
3099 Shrink old_top to insert fenceposts, keeping size a
3100 multiple of MALLOC_ALIGNMENT. We know there is at least
3101 enough space in old_top to do this.
3102 */
3103 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3104 set_head(old_top, old_size | PREV_INUSE);
a9177ff5 3105
fa8d436c
UD
3106 /*
3107 Note that the following assignments completely overwrite
3108 old_top when old_size was previously MINSIZE. This is
3109 intentional. We need the fencepost, even if old_top otherwise gets
3110 lost.
3111 */
3112 chunk_at_offset(old_top, old_size )->size =
3113 (2*SIZE_SZ)|PREV_INUSE;
3114
3115 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
3116 (2*SIZE_SZ)|PREV_INUSE;
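  /*
    Rough sketch of the resulting layout (illustration only, not to scale):

      [ shrunk old_top, head old_size|PREV_INUSE ]
      [ fencepost 1, head 2*SIZE_SZ|PREV_INUSE ]
      [ fencepost 2, head 2*SIZE_SZ|PREV_INUSE ]
      ... space we do not own ...

    Both fenceposts look like in-use chunks that are too small to allocate,
    so nothing will ever try to coalesce across the foreign gap behind them.
  */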
3117
3118 /* If possible, release the rest. */
3119 if (old_size >= MINSIZE) {
3120 _int_free(av, chunk2mem(old_top));
3121 }
3122
3123 }
3124 }
3125 }
a9177ff5 3126
fa8d436c
UD
3127 /* Update statistics */
3128#ifdef NO_THREADS
3129 sum = av->system_mem + mp_.mmapped_mem;
3130 if (sum > (unsigned long)(mp_.max_total_mem))
3131 mp_.max_total_mem = sum;
3132#endif
3133
3134 }
3135
3136 } /* if (av != &main_arena) */
3137
3138 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
3139 av->max_system_mem = av->system_mem;
3140 check_malloc_state(av);
a9177ff5 3141
fa8d436c
UD
3142 /* finally, do the allocation */
3143 p = av->top;
3144 size = chunksize(p);
3145
3146 /* check that one of the above allocation paths succeeded */
3147 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3148 remainder_size = size - nb;
3149 remainder = chunk_at_offset(p, nb);
3150 av->top = remainder;
3151 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
3152 set_head(remainder, remainder_size | PREV_INUSE);
3153 check_malloced_chunk(av, p, nb);
3154 return chunk2mem(p);
3155 }
3156
3157 /* catch all failure paths */
3158 MALLOC_FAILURE_ACTION;
3159 return 0;
3160}
3161
3162
3163/*
3164 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3165 to the system (via negative arguments to sbrk) if there is unused
3166 memory at the `high' end of the malloc pool. It is called
3167 automatically by free() when top space exceeds the trim
3168 threshold. It is also called by the public malloc_trim routine. It
3169 returns 1 if it actually released any memory, else 0.
3170*/
3171
3172#if __STD_C
3173static int sYSTRIm(size_t pad, mstate av)
3174#else
3175static int sYSTRIm(pad, av) size_t pad; mstate av;
3176#endif
3177{
3178 long top_size; /* Amount of top-most memory */
3179 long extra; /* Amount to release */
3180 long released; /* Amount actually released */
3181 char* current_brk; /* address returned by pre-check sbrk call */
3182 char* new_brk; /* address returned by post-check sbrk call */
3183 size_t pagesz;
3184
3185 pagesz = mp_.pagesize;
3186 top_size = chunksize(av->top);
a9177ff5 3187
fa8d436c
UD
3188 /* Release in pagesize units, keeping at least one page */
3189 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
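  /*
    Worked example (assuming pagesz == 4096, pad == 0 and MINSIZE == 16):
    with top_size == 300000, extra == ((300000 - 0 - 16 + 4095) / 4096 - 1)
    * 4096 == 73 * 4096 == 299008, so about 73 whole pages can be returned
    while the last partial page plus MINSIZE stays behind for av->top.
  */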
a9177ff5 3190
fa8d436c 3191 if (extra > 0) {
a9177ff5 3192
fa8d436c
UD
3193 /*
3194 Only proceed if end of memory is where we last set it.
3195 This avoids problems if there were foreign sbrk calls.
3196 */
3197 current_brk = (char*)(MORECORE(0));
3198 if (current_brk == (char*)(av->top) + top_size) {
a9177ff5 3199
fa8d436c
UD
3200 /*
3201 Attempt to release memory. We ignore MORECORE return value,
3202 and instead call again to find out where new end of memory is.
3203 This avoids problems if the first call releases less than we asked,
3204 or if a failure somehow altered the brk value. (We could still
3205 encounter problems if it altered brk in some very bad way,
3206 but the only thing we can do is adjust anyway, which will cause
3207 some downstream failure.)
3208 */
a9177ff5 3209
fa8d436c
UD
3210 MORECORE(-extra);
3211 /* Call the `morecore' hook if necessary. */
3212 if (__after_morecore_hook)
3213 (*__after_morecore_hook) ();
3214 new_brk = (char*)(MORECORE(0));
a9177ff5 3215
fa8d436c
UD
3216 if (new_brk != (char*)MORECORE_FAILURE) {
3217 released = (long)(current_brk - new_brk);
a9177ff5 3218
fa8d436c
UD
3219 if (released != 0) {
3220 /* Success. Adjust top. */
3221 av->system_mem -= released;
3222 set_head(av->top, (top_size - released) | PREV_INUSE);
3223 check_malloc_state(av);
3224 return 1;
3225 }
3226 }
3227 }
3228 }
3229 return 0;
f65fd747
UD
3230}
3231
fa8d436c
UD
3232#ifdef HAVE_MMAP
3233
431c33c0
UD
3234static void
3235internal_function
f65fd747 3236#if __STD_C
431c33c0 3237munmap_chunk(mchunkptr p)
f65fd747 3238#else
431c33c0 3239munmap_chunk(p) mchunkptr p;
f65fd747
UD
3240#endif
3241{
3242 INTERNAL_SIZE_T size = chunksize(p);
3243 int ret;
3244
3245 assert (chunk_is_mmapped(p));
fa8d436c
UD
3246#if 0
3247 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3248 assert((mp_.n_mmaps > 0));
3249#endif
3250 assert(((p->prev_size + size) & (mp_.pagesize-1)) == 0);
f65fd747 3251
fa8d436c
UD
3252 mp_.n_mmaps--;
3253 mp_.mmapped_mem -= (size + p->prev_size);
f65fd747
UD
3254
3255 ret = munmap((char *)p - p->prev_size, size + p->prev_size);
3256
3257 /* munmap returns non-zero on failure */
3258 assert(ret == 0);
3259}
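/*
  Why prev_size is subtracted above (an illustration, not part of the
  original sources): an mmapped chunk need not start at the beginning of
  its mapping, because the returned user memory has to be aligned.  The
  number of leading padding bytes is stashed in p->prev_size, so the region
  that was originally mapped starts at (char *)p - p->prev_size and spans
  size + p->prev_size bytes, and that whole page-aligned range is what has
  to be handed back to munmap.
*/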
3260
3261#if HAVE_MREMAP
3262
431c33c0
UD
3263static mchunkptr
3264internal_function
f65fd747 3265#if __STD_C
431c33c0 3266mremap_chunk(mchunkptr p, size_t new_size)
f65fd747 3267#else
431c33c0 3268mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
f65fd747
UD
3269#endif
3270{
fa8d436c 3271 size_t page_mask = mp_.pagesize - 1;
f65fd747
UD
3272 INTERNAL_SIZE_T offset = p->prev_size;
3273 INTERNAL_SIZE_T size = chunksize(p);
3274 char *cp;
3275
3276 assert (chunk_is_mmapped(p));
fa8d436c
UD
3277#if 0
3278 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3279 assert((mp_.n_mmaps > 0));
3280#endif
3281 assert(((size + offset) & (mp_.pagesize-1)) == 0);
f65fd747
UD
3282
3283 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3284 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
3285
3286 cp = (char *)mremap((char *)p - offset, size + offset, new_size,
3287 MREMAP_MAYMOVE);
3288
431c33c0 3289 if (cp == MAP_FAILED) return 0;
f65fd747
UD
3290
3291 p = (mchunkptr)(cp + offset);
3292
3293 assert(aligned_OK(chunk2mem(p)));
3294
3295 assert((p->prev_size == offset));
3296 set_head(p, (new_size - offset)|IS_MMAPPED);
3297
fa8d436c
UD
3298 mp_.mmapped_mem -= size + offset;
3299 mp_.mmapped_mem += new_size;
3300 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
3301 mp_.max_mmapped_mem = mp_.mmapped_mem;
8a4b65b4 3302#ifdef NO_THREADS
fa8d436c
UD
3303 if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
3304 mp_.max_total_mem)
3305 mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
8a4b65b4 3306#endif
f65fd747
UD
3307 return p;
3308}
3309
3310#endif /* HAVE_MREMAP */
3311
3312#endif /* HAVE_MMAP */
3313
fa8d436c 3314/*------------------------ Public wrappers. --------------------------------*/
f65fd747 3315
fa8d436c
UD
3316Void_t*
3317public_mALLOc(size_t bytes)
3318{
3319 mstate ar_ptr;
3320 Void_t *victim;
f65fd747 3321
06d6611a 3322 __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook;
fa8d436c
UD
3323 if (hook != NULL)
3324 return (*hook)(bytes, RETURN_ADDRESS (0));
f65fd747 3325
fa8d436c
UD
3326 arena_get(ar_ptr, bytes);
3327 if(!ar_ptr)
f65fd747 3328 return 0;
fa8d436c
UD
3329 victim = _int_malloc(ar_ptr, bytes);
3330 if(!victim) {
3331 /* Maybe the failure is due to running out of mmapped areas. */
3332 if(ar_ptr != &main_arena) {
3333 (void)mutex_unlock(&ar_ptr->mutex);
3334 (void)mutex_lock(&main_arena.mutex);
3335 victim = _int_malloc(&main_arena, bytes);
3336 (void)mutex_unlock(&main_arena.mutex);
3337 } else {
3338#if USE_ARENAS
3339 /* ... or sbrk() has failed and there is still a chance to mmap() */
3340 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3341 (void)mutex_unlock(&main_arena.mutex);
3342 if(ar_ptr) {
3343 victim = _int_malloc(ar_ptr, bytes);
3344 (void)mutex_unlock(&ar_ptr->mutex);
3345 }
3346#endif
60f0e64b 3347 }
fa8d436c
UD
3348 } else
3349 (void)mutex_unlock(&ar_ptr->mutex);
3350 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
3351 ar_ptr == arena_for_chunk(mem2chunk(victim)));
3352 return victim;
f65fd747 3353}
aa420660
UD
3354#ifdef libc_hidden_def
3355libc_hidden_def(public_mALLOc)
3356#endif
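/*
  The hook test at the top of public_mALLOc is the legacy interception
  mechanism exported through <malloc.h>.  A minimal sketch of how an
  application might have used it (illustrative only, not part of this
  file, and not thread safe):
*/
#if 0
#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
logging_malloc_hook (size_t size, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;   /* restore so the call below does not recurse */
  result = malloc (size);
  fprintf (stderr, "malloc (%zu) from %p returns %p\n", size, caller, result);
  old_malloc_hook = __malloc_hook;   /* malloc may have reset the hook */
  __malloc_hook = logging_malloc_hook;
  return result;
}

static void
install_logging_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = logging_malloc_hook;
}
#endif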
f65fd747 3357
fa8d436c
UD
3358void
3359public_fREe(Void_t* mem)
f65fd747 3360{
fa8d436c
UD
3361 mstate ar_ptr;
3362 mchunkptr p; /* chunk corresponding to mem */
3363
06d6611a 3364 void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook;
fa8d436c
UD
3365 if (hook != NULL) {
3366 (*hook)(mem, RETURN_ADDRESS (0));
3367 return;
f65fd747 3368 }
f65fd747 3369
fa8d436c
UD
3370 if (mem == 0) /* free(0) has no effect */
3371 return;
f65fd747 3372
fa8d436c 3373 p = mem2chunk(mem);
f65fd747 3374
fa8d436c
UD
3375#if HAVE_MMAP
3376 if (chunk_is_mmapped(p)) /* release mmapped memory. */
3377 {
3378 munmap_chunk(p);
3379 return;
8a4b65b4 3380 }
f65fd747 3381#endif
f65fd747 3382
fa8d436c
UD
3383 ar_ptr = arena_for_chunk(p);
3384#if THREAD_STATS
3385 if(!mutex_trylock(&ar_ptr->mutex))
3386 ++(ar_ptr->stat_lock_direct);
3387 else {
3388 (void)mutex_lock(&ar_ptr->mutex);
3389 ++(ar_ptr->stat_lock_wait);
f65fd747 3390 }
f65fd747 3391#else
fa8d436c 3392 (void)mutex_lock(&ar_ptr->mutex);
f65fd747 3393#endif
fa8d436c
UD
3394 _int_free(ar_ptr, mem);
3395 (void)mutex_unlock(&ar_ptr->mutex);
f65fd747 3396}
aa420660
UD
3397#ifdef libc_hidden_def
3398libc_hidden_def (public_fREe)
3399#endif
f65fd747 3400
fa8d436c
UD
3401Void_t*
3402public_rEALLOc(Void_t* oldmem, size_t bytes)
f65fd747 3403{
fa8d436c
UD
3404 mstate ar_ptr;
3405 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 3406
fa8d436c
UD
3407 mchunkptr oldp; /* chunk corresponding to oldmem */
3408 INTERNAL_SIZE_T oldsize; /* its size */
8a4b65b4 3409
fa8d436c 3410 Void_t* newp; /* chunk to return */
f65fd747 3411
06d6611a 3412 __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
fa8d436c
UD
3413 __realloc_hook;
3414 if (hook != NULL)
3415 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
f65fd747 3416
fa8d436c
UD
3417#if REALLOC_ZERO_BYTES_FREES
3418 if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
f65fd747 3419#endif
f65fd747 3420
fa8d436c
UD
3421 /* realloc of null is supposed to be same as malloc */
3422 if (oldmem == 0) return public_mALLOc(bytes);
f65fd747 3423
fa8d436c
UD
3424 oldp = mem2chunk(oldmem);
3425 oldsize = chunksize(oldp);
f65fd747 3426
fa8d436c 3427 checked_request2size(bytes, nb);
f65fd747 3428
fa8d436c
UD
3429#if HAVE_MMAP
3430 if (chunk_is_mmapped(oldp))
3431 {
3432 Void_t* newmem;
f65fd747 3433
fa8d436c
UD
3434#if HAVE_MREMAP
3435 newp = mremap_chunk(oldp, nb);
3436 if(newp) return chunk2mem(newp);
f65fd747 3437#endif
fa8d436c
UD
3438 /* Note the extra SIZE_SZ overhead. */
3439 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3440 /* Must alloc, copy, free. */
3441 newmem = public_mALLOc(bytes);
3442 if (newmem == 0) return 0; /* propagate failure */
3443 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3444 munmap_chunk(oldp);
3445 return newmem;
3446 }
dfd2257a 3447#endif
fa8d436c
UD
3448
3449 ar_ptr = arena_for_chunk(oldp);
3450#if THREAD_STATS
3451 if(!mutex_trylock(&ar_ptr->mutex))
3452 ++(ar_ptr->stat_lock_direct);
3453 else {
3454 (void)mutex_lock(&ar_ptr->mutex);
3455 ++(ar_ptr->stat_lock_wait);
3456 }
f65fd747 3457#else
fa8d436c 3458 (void)mutex_lock(&ar_ptr->mutex);
f65fd747 3459#endif
f65fd747 3460
fa8d436c
UD
3461#ifndef NO_THREADS
3462 /* As in malloc(), remember this arena for the next allocation. */
3463 tsd_setspecific(arena_key, (Void_t *)ar_ptr);
f65fd747
UD
3464#endif
3465
fa8d436c 3466 newp = _int_realloc(ar_ptr, oldmem, bytes);
f65fd747 3467
fa8d436c
UD
3468 (void)mutex_unlock(&ar_ptr->mutex);
3469 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3470 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3471 return newp;
3472}
aa420660
UD
3473#ifdef libc_hidden_def
3474libc_hidden_def (public_rEALLOc)
3475#endif
f65fd747 3476
fa8d436c
UD
3477Void_t*
3478public_mEMALIGn(size_t alignment, size_t bytes)
3479{
3480 mstate ar_ptr;
3481 Void_t *p;
f65fd747 3482
fa8d436c
UD
3483 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3484 __const __malloc_ptr_t)) =
3485 __memalign_hook;
3486 if (hook != NULL)
3487 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
f65fd747 3488
fa8d436c
UD
3489 /* If need less alignment than we give anyway, just relay to malloc */
3490 if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
1228ed5c 3491
fa8d436c
UD
3492 /* Otherwise, ensure that it is at least a minimum chunk size */
3493 if (alignment < MINSIZE) alignment = MINSIZE;
f65fd747 3494
fa8d436c
UD
3495 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3496 if(!ar_ptr)
3497 return 0;
3498 p = _int_memalign(ar_ptr, alignment, bytes);
3499 (void)mutex_unlock(&ar_ptr->mutex);
3500 if(!p) {
3501 /* Maybe the failure is due to running out of mmapped areas. */
3502 if(ar_ptr != &main_arena) {
3503 (void)mutex_lock(&main_arena.mutex);
3504 p = _int_memalign(&main_arena, alignment, bytes);
3505 (void)mutex_unlock(&main_arena.mutex);
f65fd747 3506 } else {
e9b3e3c5 3507#if USE_ARENAS
fa8d436c
UD
3508 /* ... or sbrk() has failed and there is still a chance to mmap() */
3509 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3510 if(ar_ptr) {
3511 p = _int_memalign(ar_ptr, alignment, bytes);
3512 (void)mutex_unlock(&ar_ptr->mutex);
3513 }
e9b3e3c5 3514#endif
f65fd747
UD
3515 }
3516 }
fa8d436c
UD
3517 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3518 ar_ptr == arena_for_chunk(mem2chunk(p)));
3519 return p;
f65fd747 3520}
aa420660
UD
3521#ifdef libc_hidden_def
3522libc_hidden_def (public_mEMALIGn)
3523#endif
f65fd747 3524
fa8d436c
UD
3525Void_t*
3526public_vALLOc(size_t bytes)
3527{
3528 mstate ar_ptr;
3529 Void_t *p;
f65fd747 3530
fa8d436c
UD
3531 if(__malloc_initialized < 0)
3532 ptmalloc_init ();
3533 arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
3534 if(!ar_ptr)
3535 return 0;
3536 p = _int_valloc(ar_ptr, bytes);
3537 (void)mutex_unlock(&ar_ptr->mutex);
3538 return p;
3539}
f65fd747 3540
fa8d436c
UD
3541Void_t*
3542public_pVALLOc(size_t bytes)
3543{
3544 mstate ar_ptr;
3545 Void_t *p;
f65fd747 3546
fa8d436c
UD
3547 if(__malloc_initialized < 0)
3548 ptmalloc_init ();
3549 arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
3550 p = _int_pvalloc(ar_ptr, bytes);
3551 (void)mutex_unlock(&ar_ptr->mutex);
3552 return p;
3553}
f65fd747 3554
fa8d436c
UD
3555Void_t*
3556public_cALLOc(size_t n, size_t elem_size)
f65fd747 3557{
fa8d436c
UD
3558 mstate av;
3559 mchunkptr oldtop, p;
0950889b 3560 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
fa8d436c
UD
3561 Void_t* mem;
3562 unsigned long clearsize;
3563 unsigned long nclears;
3564 INTERNAL_SIZE_T* d;
6c6bb055 3565 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
fa8d436c 3566 __malloc_hook;
0950889b
UD
3567
3568 /* size_t is unsigned so the behavior on overflow is defined. */
3569 bytes = n * elem_size;
d9af917d
UD
3570#define HALF_INTERNAL_SIZE_T \
3571 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3572 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
0be405c2 3573 if (elem_size != 0 && bytes / elem_size != n) {
d9af917d
UD
3574 MALLOC_FAILURE_ACTION;
3575 return 0;
3576 }
0950889b
UD
3577 }
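  /*
    Worked example of the check above (assuming a 32-bit INTERNAL_SIZE_T,
    so HALF_INTERNAL_SIZE_T == 1 << 16): when n and elem_size are both
    below 65536 their product cannot exceed 2^32, so no division is needed.
    Only when one operand has a high bit set do we pay for the
    bytes / elem_size == n test, which catches e.g. n == 0x10000 with
    elem_size == 0x10001, whose 32-bit product wraps to 0x10000.
  */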
3578
6c6bb055 3579 if (hook != NULL) {
0950889b 3580 sz = bytes;
fa8d436c
UD
3581 mem = (*hook)(sz, RETURN_ADDRESS (0));
3582 if(mem == 0)
3583 return 0;
3584#ifdef HAVE_MEMCPY
3585 return memset(mem, 0, sz);
a2b08ee5 3586#else
fa8d436c
UD
3587 while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3588 return mem;
a2b08ee5 3589#endif
10dc2a90 3590 }
10dc2a90 3591
0950889b 3592 sz = bytes;
fa8d436c
UD
3593
3594 arena_get(av, sz);
3595 if(!av)
f65fd747 3596 return 0;
fa8d436c
UD
3597
3598 /* Check if we hand out the top chunk, in which case there may be no
3599 need to clear. */
3600#if MORECORE_CLEARS
3601 oldtop = top(av);
3602 oldtopsize = chunksize(top(av));
3603#if MORECORE_CLEARS < 2
3604 /* Only newly allocated memory is guaranteed to be cleared. */
3605 if (av == &main_arena &&
3606 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3607 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3608#endif
3609#endif
3610 mem = _int_malloc(av, sz);
3611
3612 /* Only clearing follows, so we can unlock early. */
3613 (void)mutex_unlock(&av->mutex);
3614
3615 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3616 av == arena_for_chunk(mem2chunk(mem)));
3617
3618 if (mem == 0) {
7799b7b3 3619 /* Maybe the failure is due to running out of mmapped areas. */
fa8d436c 3620 if(av != &main_arena) {
7799b7b3 3621 (void)mutex_lock(&main_arena.mutex);
fa8d436c 3622 mem = _int_malloc(&main_arena, sz);
7799b7b3 3623 (void)mutex_unlock(&main_arena.mutex);
e9b3e3c5
UD
3624 } else {
3625#if USE_ARENAS
3626 /* ... or sbrk() has failed and there is still a chance to mmap() */
fa8d436c
UD
3627 (void)mutex_lock(&main_arena.mutex);
3628 av = arena_get2(av->next ? av : 0, sz);
e9b3e3c5 3629 (void)mutex_unlock(&main_arena.mutex);
fa8d436c
UD
3630 if(av) {
3631 mem = _int_malloc(av, sz);
3632 (void)mutex_unlock(&av->mutex);
e9b3e3c5
UD
3633 }
3634#endif
7799b7b3 3635 }
fa8d436c
UD
3636 if (mem == 0) return 0;
3637 }
3638 p = mem2chunk(mem);
f65fd747 3639
fa8d436c
UD
3640 /* Two optional cases in which clearing is not necessary */
3641#if HAVE_MMAP
3642 if (chunk_is_mmapped(p))
3643 return mem;
f65fd747 3644#endif
f65fd747 3645
fa8d436c 3646 csz = chunksize(p);
f65fd747 3647
fa8d436c
UD
3648#if MORECORE_CLEARS
3649 if (p == oldtop && csz > oldtopsize) {
3650 /* clear only the bytes from non-freshly-sbrked memory */
3651 csz = oldtopsize;
f65fd747 3652 }
fa8d436c 3653#endif
f65fd747 3654
fa8d436c
UD
3655 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3656 contents have an odd number of INTERNAL_SIZE_T-sized words;
3657 minimally 3. */
3658 d = (INTERNAL_SIZE_T*)mem;
3659 clearsize = csz - SIZE_SZ;
3660 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3661 assert(nclears >= 3);
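  /*
    Why nclears is at least 3 (illustration, assuming 4-byte sizes, so
    SIZE_SZ == 4 and MINSIZE == 16): the smallest chunk handed back has
    csz == 16, so clearsize == 12 and nclears == 3.  With the default
    MALLOC_ALIGNMENT of 2*SIZE_SZ, csz/SIZE_SZ is even and nclears odd,
    which is what lets the code below clear words in pairs after the
    first three.
  */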
f65fd747 3662
fa8d436c
UD
3663 if (nclears > 9)
3664 MALLOC_ZERO(d, clearsize);
f65fd747 3665
fa8d436c
UD
3666 else {
3667 *(d+0) = 0;
3668 *(d+1) = 0;
3669 *(d+2) = 0;
3670 if (nclears > 4) {
3671 *(d+3) = 0;
3672 *(d+4) = 0;
3673 if (nclears > 6) {
3674 *(d+5) = 0;
3675 *(d+6) = 0;
3676 if (nclears > 8) {
3677 *(d+7) = 0;
3678 *(d+8) = 0;
3679 }
f65fd747
UD
3680 }
3681 }
f65fd747
UD
3682 }
3683
fa8d436c
UD
3684 return mem;
3685}
f65fd747 3686
fa8d436c
UD
3687Void_t**
3688public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
3689{
3690 mstate ar_ptr;
3691 Void_t** m;
f65fd747 3692
fa8d436c
UD
3693 arena_get(ar_ptr, n*elem_size);
3694 if(!ar_ptr)
3695 return 0;
f65fd747 3696
fa8d436c
UD
3697 m = _int_icalloc(ar_ptr, n, elem_size, chunks);
3698 (void)mutex_unlock(&ar_ptr->mutex);
3699 return m;
3700}
f65fd747 3701
fa8d436c
UD
3702Void_t**
3703public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
3704{
3705 mstate ar_ptr;
3706 Void_t** m;
f65fd747 3707
fa8d436c
UD
3708 arena_get(ar_ptr, 0);
3709 if(!ar_ptr)
3710 return 0;
f65fd747 3711
fa8d436c
UD
3712 m = _int_icomalloc(ar_ptr, n, sizes, chunks);
3713 (void)mutex_unlock(&ar_ptr->mutex);
3714 return m;
3715}
f65fd747 3716
fa8d436c 3717#ifndef _LIBC
f65fd747 3718
fa8d436c
UD
3719void
3720public_cFREe(Void_t* m)
3721{
3722 public_fREe(m);
3723}
f65fd747 3724
fa8d436c 3725#endif /* _LIBC */
f65fd747 3726
fa8d436c
UD
3727int
3728public_mTRIm(size_t s)
3729{
3730 int result;
f65fd747 3731
fa8d436c
UD
3732 (void)mutex_lock(&main_arena.mutex);
3733 result = mTRIm(s);
3734 (void)mutex_unlock(&main_arena.mutex);
3735 return result;
3736}
f65fd747 3737
fa8d436c
UD
3738size_t
3739public_mUSABLe(Void_t* m)
3740{
3741 size_t result;
f65fd747 3742
fa8d436c
UD
3743 result = mUSABLe(m);
3744 return result;
3745}
f65fd747 3746
fa8d436c
UD
3747void
3748public_mSTATs()
3749{
3750 mSTATs();
3751}
f65fd747 3752
fa8d436c
UD
3753struct mallinfo public_mALLINFo()
3754{
3755 struct mallinfo m;
f65fd747 3756
6a00759b
UD
3757 if(__malloc_initialized < 0)
3758 ptmalloc_init ();
fa8d436c
UD
3759 (void)mutex_lock(&main_arena.mutex);
3760 m = mALLINFo(&main_arena);
3761 (void)mutex_unlock(&main_arena.mutex);
3762 return m;
f65fd747
UD
3763}
3764
fa8d436c
UD
3765int
3766public_mALLOPt(int p, int v)
3767{
3768 int result;
3769 result = mALLOPt(p, v);
3770 return result;
3771}
f65fd747
UD
3772
3773/*
fa8d436c 3774 ------------------------------ malloc ------------------------------
f65fd747
UD
3775*/
3776
f1c5213d 3777Void_t*
fa8d436c 3778_int_malloc(mstate av, size_t bytes)
f65fd747 3779{
fa8d436c
UD
3780 INTERNAL_SIZE_T nb; /* normalized request size */
3781 unsigned int idx; /* associated bin index */
3782 mbinptr bin; /* associated bin */
3783 mfastbinptr* fb; /* associated fastbin */
f65fd747 3784
fa8d436c
UD
3785 mchunkptr victim; /* inspected/selected chunk */
3786 INTERNAL_SIZE_T size; /* its size */
3787 int victim_index; /* its bin index */
f65fd747 3788
fa8d436c
UD
3789 mchunkptr remainder; /* remainder from a split */
3790 unsigned long remainder_size; /* its size */
8a4b65b4 3791
fa8d436c
UD
3792 unsigned int block; /* bit map traverser */
3793 unsigned int bit; /* bit map traverser */
3794 unsigned int map; /* current word of binmap */
8a4b65b4 3795
fa8d436c
UD
3796 mchunkptr fwd; /* misc temp for linking */
3797 mchunkptr bck; /* misc temp for linking */
8a4b65b4 3798
fa8d436c
UD
3799 /*
3800 Convert request size to internal form by adding SIZE_SZ bytes
3801 overhead plus possibly more to obtain necessary alignment and/or
3802 to obtain a size of at least MINSIZE, the smallest allocatable
3803 size. Also, checked_request2size traps (returning 0) request sizes
3804 that are so large that they wrap around zero when padded and
3805 aligned.
3806 */
f65fd747 3807
fa8d436c 3808 checked_request2size(bytes, nb);
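  /*
    Illustrative padding (assuming SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8,
    the classic 32-bit configuration): a request of 25 bytes becomes
    (25 + 4 + 7) & ~7 == 32, i.e. one size word of overhead rounded up to
    the alignment; anything smaller than MINSIZE is bumped up to MINSIZE.
  */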
f65fd747 3809
fa8d436c
UD
3810 /*
3811 If the size qualifies as a fastbin, first check corresponding bin.
3812 This code is safe to execute even if av is not yet initialized, so we
3813 can try it without checking, which saves some time on this fast path.
3814 */
f65fd747 3815
a9177ff5 3816 if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
fa8d436c
UD
3817 fb = &(av->fastbins[(fastbin_index(nb))]);
3818 if ( (victim = *fb) != 0) {
3819 *fb = victim->fd;
3820 check_remalloced_chunk(av, victim, nb);
3821 return chunk2mem(victim);
3822 }
f65fd747
UD
3823 }
3824
fa8d436c
UD
3825 /*
3826 If a small request, check regular bin. Since these "smallbins"
3827 hold one size each, no searching within bins is necessary.
3828 (For a large request, we need to wait until unsorted chunks are
3829 processed to find best fit. But for small ones, fits are exact
3830 anyway, so we can check now, which is faster.)
3831 */
f65fd747 3832
fa8d436c
UD
3833 if (in_smallbin_range(nb)) {
3834 idx = smallbin_index(nb);
3835 bin = bin_at(av,idx);
7799b7b3 3836
fa8d436c
UD
3837 if ( (victim = last(bin)) != bin) {
3838 if (victim == 0) /* initialization check */
3839 malloc_consolidate(av);
3840 else {
3841 bck = victim->bk;
3842 set_inuse_bit_at_offset(victim, nb);
3843 bin->bk = bck;
3844 bck->fd = bin;
3845
3846 if (av != &main_arena)
3847 victim->size |= NON_MAIN_ARENA;
3848 check_malloced_chunk(av, victim, nb);
3849 return chunk2mem(victim);
3850 }
3851 }
f65fd747
UD
3852 }
3853
a9177ff5 3854 /*
fa8d436c
UD
3855 If this is a large request, consolidate fastbins before continuing.
3856 While it might look excessive to kill all fastbins before
3857 even seeing if there is space available, this avoids
3858 fragmentation problems normally associated with fastbins.
3859 Also, in practice, programs tend to have runs of either small or
a9177ff5 3860 large requests, but less often mixtures, so consolidation is not
fa8d436c
UD
3861 invoked all that often in most programs. And the programs that
3862 it is called frequently in otherwise tend to fragment.
3863 */
7799b7b3 3864
fa8d436c
UD
3865 else {
3866 idx = largebin_index(nb);
a9177ff5 3867 if (have_fastchunks(av))
fa8d436c 3868 malloc_consolidate(av);
7799b7b3 3869 }
f65fd747 3870
fa8d436c
UD
3871 /*
3872 Process recently freed or remaindered chunks, taking one only if
3873 it is an exact fit, or, if this is a small request, the chunk is the remainder from
3874 the most recent non-exact fit. Place other traversed chunks in
3875 bins. Note that this step is the only place in any routine where
3876 chunks are placed in bins.
3877
3878 The outer loop here is needed because we might not realize until
3879 near the end of malloc that we should have consolidated, so must
3880 do so and retry. This happens at most once, and only when we would
3881 otherwise need to expand memory to service a "small" request.
3882 */
a9177ff5
RM
3883
3884 for(;;) {
3885
fa8d436c
UD
3886 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3887 bck = victim->bk;
3888 size = chunksize(victim);
3889
a9177ff5 3890 /*
fa8d436c
UD
3891 If a small request, try to use last remainder if it is the
3892 only chunk in unsorted bin. This helps promote locality for
3893 runs of consecutive small requests. This is the only
3894 exception to best-fit, and applies only when there is
3895 no exact fit for a small chunk.
3896 */
3897
a9177ff5 3898 if (in_smallbin_range(nb) &&
fa8d436c
UD
3899 bck == unsorted_chunks(av) &&
3900 victim == av->last_remainder &&
3901 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
3902
3903 /* split and reattach remainder */
3904 remainder_size = size - nb;
3905 remainder = chunk_at_offset(victim, nb);
3906 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
a9177ff5 3907 av->last_remainder = remainder;
fa8d436c 3908 remainder->bk = remainder->fd = unsorted_chunks(av);
a9177ff5 3909
fa8d436c
UD
3910 set_head(victim, nb | PREV_INUSE |
3911 (av != &main_arena ? NON_MAIN_ARENA : 0));
3912 set_head(remainder, remainder_size | PREV_INUSE);
3913 set_foot(remainder, remainder_size);
a9177ff5 3914
fa8d436c
UD
3915 check_malloced_chunk(av, victim, nb);
3916 return chunk2mem(victim);
3917 }
f65fd747 3918
fa8d436c
UD
3919 /* remove from unsorted list */
3920 unsorted_chunks(av)->bk = bck;
3921 bck->fd = unsorted_chunks(av);
a9177ff5 3922
fa8d436c 3923 /* Take now instead of binning if exact fit */
a9177ff5 3924
fa8d436c
UD
3925 if (size == nb) {
3926 set_inuse_bit_at_offset(victim, size);
3927 if (av != &main_arena)
3928 victim->size |= NON_MAIN_ARENA;
3929 check_malloced_chunk(av, victim, nb);
3930 return chunk2mem(victim);
3931 }
a9177ff5 3932
fa8d436c 3933 /* place chunk in bin */
a9177ff5 3934
fa8d436c
UD
3935 if (in_smallbin_range(size)) {
3936 victim_index = smallbin_index(size);
3937 bck = bin_at(av, victim_index);
3938 fwd = bck->fd;
3939 }
3940 else {
3941 victim_index = largebin_index(size);
3942 bck = bin_at(av, victim_index);
3943 fwd = bck->fd;
3944
3945 /* maintain large bins in sorted order */
3946 if (fwd != bck) {
3947 /* Or with inuse bit to speed comparisons */
3948 size |= PREV_INUSE;
3949 /* if smaller than smallest, bypass loop below */
3950 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
3951 if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
3952 fwd = bck;
3953 bck = bck->bk;
3954 }
3955 else {
3956 assert((fwd->size & NON_MAIN_ARENA) == 0);
3957 while ((unsigned long)(size) < (unsigned long)(fwd->size)) {
3958 fwd = fwd->fd;
3959 assert((fwd->size & NON_MAIN_ARENA) == 0);
3960 }
3961 bck = fwd->bk;
3962 }
3963 }
3964 }
a9177ff5 3965
fa8d436c
UD
3966 mark_bin(av, victim_index);
3967 victim->bk = bck;
3968 victim->fd = fwd;
3969 fwd->bk = victim;
3970 bck->fd = victim;
3971 }
a9177ff5 3972
fa8d436c
UD
3973 /*
3974 If a large request, scan through the chunks of current bin in
3975 sorted order to find smallest that fits. This is the only step
3976 where an unbounded number of chunks might be scanned without doing
3977 anything useful with them. However the lists tend to be short.
3978 */
a9177ff5 3979
fa8d436c
UD
3980 if (!in_smallbin_range(nb)) {
3981 bin = bin_at(av, idx);
f65fd747 3982
fa8d436c
UD
3983 /* skip scan if empty or largest chunk is too small */
3984 if ((victim = last(bin)) != bin &&
3985 (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {
f65fd747 3986
a9177ff5 3987 while (((unsigned long)(size = chunksize(victim)) <
fa8d436c
UD
3988 (unsigned long)(nb)))
3989 victim = victim->bk;
f65fd747 3990
fa8d436c
UD
3991 remainder_size = size - nb;
3992 unlink(victim, bck, fwd);
a9177ff5 3993
fa8d436c
UD
3994 /* Exhaust */
3995 if (remainder_size < MINSIZE) {
3996 set_inuse_bit_at_offset(victim, size);
3997 if (av != &main_arena)
3998 victim->size |= NON_MAIN_ARENA;
3999 check_malloced_chunk(av, victim, nb);
4000 return chunk2mem(victim);
4001 }
4002 /* Split */
4003 else {
4004 remainder = chunk_at_offset(victim, nb);
4005 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4006 remainder->bk = remainder->fd = unsorted_chunks(av);
4007 set_head(victim, nb | PREV_INUSE |
4008 (av != &main_arena ? NON_MAIN_ARENA : 0));
4009 set_head(remainder, remainder_size | PREV_INUSE);
4010 set_foot(remainder, remainder_size);
4011 check_malloced_chunk(av, victim, nb);
4012 return chunk2mem(victim);
a9177ff5 4013 }
fa8d436c 4014 }
a9177ff5 4015 }
f65fd747 4016
fa8d436c
UD
4017 /*
4018 Search for a chunk by scanning bins, starting with next largest
4019 bin. This search is strictly by best-fit; i.e., the smallest
4020 (with ties going to approximately the least recently used) chunk
4021 that fits is selected.
a9177ff5 4022
fa8d436c
UD
4023 The bitmap avoids needing to check that most blocks are nonempty.
4024 The particular case of skipping all bins during warm-up phases
4025 when no chunks have been returned yet is faster than it might look.
4026 */
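  /*
    Small illustration of the binmap bookkeeping (assuming 32-bit map
    words, so BINMAPSHIFT == 5): bin index 70 lives in block 70 >> 5 == 2,
    and its bit within av->binmap[2] is 1 << (70 & 31) == 1 << 6.  Skipping
    an entirely empty block therefore costs one word test instead of 32
    per-bin checks.
  */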
a9177ff5 4027
fa8d436c
UD
4028 ++idx;
4029 bin = bin_at(av,idx);
4030 block = idx2block(idx);
4031 map = av->binmap[block];
4032 bit = idx2bit(idx);
a9177ff5 4033
fa8d436c
UD
4034 for (;;) {
4035
4036 /* Skip rest of block if there are no more set bits in this block. */
4037 if (bit > map || bit == 0) {
4038 do {
4039 if (++block >= BINMAPSIZE) /* out of bins */
4040 goto use_top;
4041 } while ( (map = av->binmap[block]) == 0);
4042
4043 bin = bin_at(av, (block << BINMAPSHIFT));
4044 bit = 1;
4045 }
a9177ff5 4046
fa8d436c
UD
4047 /* Advance to bin with set bit. There must be one. */
4048 while ((bit & map) == 0) {
4049 bin = next_bin(bin);
4050 bit <<= 1;
4051 assert(bit != 0);
4052 }
a9177ff5 4053
fa8d436c
UD
4054 /* Inspect the bin. It is likely to be non-empty */
4055 victim = last(bin);
a9177ff5 4056
fa8d436c
UD
4057 /* If a false alarm (empty bin), clear the bit. */
4058 if (victim == bin) {
4059 av->binmap[block] = map &= ~bit; /* Write through */
4060 bin = next_bin(bin);
4061 bit <<= 1;
4062 }
a9177ff5 4063
fa8d436c
UD
4064 else {
4065 size = chunksize(victim);
4066
4067 /* We know the first chunk in this bin is big enough to use. */
4068 assert((unsigned long)(size) >= (unsigned long)(nb));
4069
4070 remainder_size = size - nb;
a9177ff5 4071
fa8d436c
UD
4072 /* unlink */
4073 bck = victim->bk;
4074 bin->bk = bck;
4075 bck->fd = bin;
a9177ff5 4076
fa8d436c
UD
4077 /* Exhaust */
4078 if (remainder_size < MINSIZE) {
4079 set_inuse_bit_at_offset(victim, size);
4080 if (av != &main_arena)
4081 victim->size |= NON_MAIN_ARENA;
4082 check_malloced_chunk(av, victim, nb);
4083 return chunk2mem(victim);
4084 }
a9177ff5 4085
fa8d436c
UD
4086 /* Split */
4087 else {
4088 remainder = chunk_at_offset(victim, nb);
a9177ff5 4089
fa8d436c
UD
4090 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4091 remainder->bk = remainder->fd = unsorted_chunks(av);
4092 /* advertise as last remainder */
a9177ff5
RM
4093 if (in_smallbin_range(nb))
4094 av->last_remainder = remainder;
4095
fa8d436c
UD
4096 set_head(victim, nb | PREV_INUSE |
4097 (av != &main_arena ? NON_MAIN_ARENA : 0));
4098 set_head(remainder, remainder_size | PREV_INUSE);
4099 set_foot(remainder, remainder_size);
4100 check_malloced_chunk(av, victim, nb);
4101 return chunk2mem(victim);
4102 }
4103 }
4104 }
f65fd747 4105
a9177ff5 4106 use_top:
fa8d436c
UD
4107 /*
4108 If large enough, split off the chunk bordering the end of memory
4109 (held in av->top). Note that this is in accord with the best-fit
4110 search rule. In effect, av->top is treated as larger (and thus
4111 less well fitting) than any other available chunk since it can
4112 be extended to be as large as necessary (up to system
4113 limitations).
4114
4115 We require that av->top always exists (i.e., has size >=
4116 MINSIZE) after initialization, so if it would otherwise be
4117 exhausted by the current request, it is replenished. (The main
4118 reason for ensuring it exists is that we may need MINSIZE space
4119 to put in fenceposts in sysmalloc.)
4120 */
f65fd747 4121
fa8d436c
UD
4122 victim = av->top;
4123 size = chunksize(victim);
a9177ff5 4124
fa8d436c
UD
4125 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
4126 remainder_size = size - nb;
4127 remainder = chunk_at_offset(victim, nb);
4128 av->top = remainder;
4129 set_head(victim, nb | PREV_INUSE |
4130 (av != &main_arena ? NON_MAIN_ARENA : 0));
4131 set_head(remainder, remainder_size | PREV_INUSE);
f65fd747 4132
fa8d436c
UD
4133 check_malloced_chunk(av, victim, nb);
4134 return chunk2mem(victim);
4135 }
f65fd747 4136
fa8d436c
UD
4137 /*
4138 If there is space available in fastbins, consolidate and retry,
4139 to possibly avoid expanding memory. This can occur only if nb is
4140 in smallbin range so we didn't consolidate upon entry.
4141 */
f65fd747 4142
fa8d436c
UD
4143 else if (have_fastchunks(av)) {
4144 assert(in_smallbin_range(nb));
4145 malloc_consolidate(av);
4146 idx = smallbin_index(nb); /* restore original bin index */
4147 }
f65fd747 4148
a9177ff5
RM
4149 /*
4150 Otherwise, relay to handle system-dependent cases
fa8d436c 4151 */
a9177ff5
RM
4152 else
4153 return sYSMALLOc(nb, av);
fa8d436c
UD
4154 }
4155}
f65fd747 4156
fa8d436c
UD
4157/*
4158 ------------------------------ free ------------------------------
f65fd747
UD
4159*/
4160
f1c5213d 4161void
fa8d436c 4162_int_free(mstate av, Void_t* mem)
f65fd747 4163{
fa8d436c
UD
4164 mchunkptr p; /* chunk corresponding to mem */
4165 INTERNAL_SIZE_T size; /* its size */
4166 mfastbinptr* fb; /* associated fastbin */
4167 mchunkptr nextchunk; /* next contiguous chunk */
4168 INTERNAL_SIZE_T nextsize; /* its size */
4169 int nextinuse; /* true if nextchunk is used */
4170 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4171 mchunkptr bck; /* misc temp for linking */
4172 mchunkptr fwd; /* misc temp for linking */
4173
4174
4175 /* free(0) has no effect */
4176 if (mem != 0) {
4177 p = mem2chunk(mem);
4178 size = chunksize(p);
f65fd747 4179
9a3a9dd8
UD
4180 /* Little security check which won't hurt performance: the
4181 allocator never wraps around at the end of the address space.
4182 Therefore we can exclude some size values which might appear
4183 here by accident or by "design" from some intruder. */
5dfe6778
UD
4184 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0))
4185 {
6bf4302e 4186 malloc_printerr (check_action, "free(): invalid pointer", mem);
5dfe6778
UD
4187 return;
4188 }
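    /*
      What the test above rejects (illustration): on a 32-bit build,
      (uintptr_t) -size equals 0x100000000 - size, so the branch fires
      exactly when (uintptr_t) p + size would wrap past the end of the
      address space; a forged size of 0xfffffff0 for a pointer at
      0x08000000 is refused here instead of being used to walk off the
      end of memory.
    */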
9a3a9dd8 4189
fa8d436c 4190 check_inuse_chunk(av, p);
f65fd747 4191
fa8d436c
UD
4192 /*
4193 If eligible, place chunk on a fastbin so it can be found
4194 and used quickly in malloc.
4195 */
f65fd747 4196
fa8d436c 4197 if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
f65fd747 4198
fa8d436c 4199#if TRIM_FASTBINS
a9177ff5 4200 /*
fa8d436c
UD
4201 If TRIM_FASTBINS set, don't place chunks
4202 bordering top into fastbins
4203 */
4204 && (chunk_at_offset(p, size) != av->top)
f65fd747 4205#endif
fa8d436c 4206 ) {
f65fd747 4207
fa8d436c
UD
4208 set_fastchunks(av);
4209 fb = &(av->fastbins[fastbin_index(size)]);
9d0cdc0e
UD
4210 /* Another simple check: make sure the top of the bin is not the
4211 record we are going to add (i.e., double free). */
4212 if (__builtin_expect (*fb == p, 0))
4213 {
6bf4302e
UD
4214 double_free:
4215 malloc_printerr (check_action, "double free or corruption", mem);
9d0cdc0e
UD
4216 return;
4217 }
fa8d436c
UD
4218 p->fd = *fb;
4219 *fb = p;
4220 }
f65fd747 4221
fa8d436c
UD
4222 /*
4223 Consolidate other non-mmapped chunks as they arrive.
4224 */
f65fd747 4225
fa8d436c
UD
4226 else if (!chunk_is_mmapped(p)) {
4227 nextchunk = chunk_at_offset(p, size);
6bf4302e
UD
4228
4229 /* Lightweight tests: check whether the block is already the
4230 top block. */
4231 if (__builtin_expect (p == av->top, 0))
4232 goto double_free;
4233 /* Or whether the next chunk is beyond the boundaries of the arena. */
3e00177e
UD
4234 if (__builtin_expect (contiguous (av)
4235 && (char *) nextchunk
4236 >= ((char *) av->top + chunksize(av->top)), 0))
6bf4302e
UD
4237 goto double_free;
4238 /* Or whether the block is actually not marked used. */
4239 if (__builtin_expect (!prev_inuse(nextchunk), 0))
4240 goto double_free;
4241
fa8d436c
UD
4242 nextsize = chunksize(nextchunk);
4243 assert(nextsize > 0);
4244
4245 /* consolidate backward */
4246 if (!prev_inuse(p)) {
4247 prevsize = p->prev_size;
4248 size += prevsize;
4249 p = chunk_at_offset(p, -((long) prevsize));
4250 unlink(p, bck, fwd);
4251 }
f65fd747 4252
fa8d436c
UD
4253 if (nextchunk != av->top) {
4254 /* get and clear inuse bit */
4255 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4256
4257 /* consolidate forward */
4258 if (!nextinuse) {
4259 unlink(nextchunk, bck, fwd);
4260 size += nextsize;
4261 } else
4262 clear_inuse_bit_at_offset(nextchunk, 0);
4263
4264 /*
4265 Place the chunk in unsorted chunk list. Chunks are
4266 not placed into regular bins until after they have
4267 been given one chance to be used in malloc.
4268 */
4269
4270 bck = unsorted_chunks(av);
4271 fwd = bck->fd;
4272 p->bk = bck;
4273 p->fd = fwd;
4274 bck->fd = p;
4275 fwd->bk = p;
4276
4277 set_head(p, size | PREV_INUSE);
4278 set_foot(p, size);
a9177ff5 4279
fa8d436c
UD
4280 check_free_chunk(av, p);
4281 }
10dc2a90 4282
fa8d436c
UD
4283 /*
4284 If the chunk borders the current high end of memory,
4285 consolidate into top
4286 */
f65fd747 4287
fa8d436c
UD
4288 else {
4289 size += nextsize;
4290 set_head(p, size | PREV_INUSE);
4291 av->top = p;
4292 check_chunk(av, p);
4293 }
8a4b65b4 4294
fa8d436c
UD
4295 /*
4296 If freeing a large space, consolidate possibly-surrounding
4297 chunks. Then, if the total unused topmost memory exceeds trim
4298 threshold, ask malloc_trim to reduce top.
4299
4300 Unless max_fast is 0, we don't know if there are fastbins
4301 bordering top, so we cannot tell for sure whether threshold
4302 has been reached unless fastbins are consolidated. But we
4303 don't want to consolidate on each free. As a compromise,
4304 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4305 is reached.
4306 */
4307
a9177ff5
RM
4308 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4309 if (have_fastchunks(av))
fa8d436c
UD
4310 malloc_consolidate(av);
4311
4312 if (av == &main_arena) {
a9177ff5
RM
4313#ifndef MORECORE_CANNOT_TRIM
4314 if ((unsigned long)(chunksize(av->top)) >=
fa8d436c
UD
4315 (unsigned long)(mp_.trim_threshold))
4316 sYSTRIm(mp_.top_pad, av);
4317#endif
4318 } else {
4319 /* Always try heap_trim(), even if the top chunk is not
4320 large, because the corresponding heap might go away. */
4321 heap_info *heap = heap_for_ptr(top(av));
4322
4323 assert(heap->ar_ptr == av);
4324 heap_trim(heap, mp_.top_pad);
4325 }
4326 }
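      /*
        For scale (assuming the historical default M_TRIM_THRESHOLD of
        128*1024): once the main arena's unused top chunk grows past 128 KB
        the sYSTRIm call above returns the excess via sbrk, while non-main
        arenas go through heap_trim, which can unmap a whole sub-heap.
      */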
f65fd747 4327
fa8d436c
UD
4328 }
4329 /*
4330 If the chunk was allocated via mmap, release via munmap(). Note
4331 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4332 user must have overwritten memory. There's nothing we can do to
4333 catch this error unless MALLOC_DEBUG is set, in which case
4334 check_inuse_chunk (above) will have triggered error.
4335 */
10dc2a90 4336
fa8d436c
UD
4337 else {
4338#if HAVE_MMAP
4339 int ret;
4340 INTERNAL_SIZE_T offset = p->prev_size;
4341 mp_.n_mmaps--;
4342 mp_.mmapped_mem -= (size + offset);
4343 ret = munmap((char*)p - offset, size + offset);
4344 /* munmap returns non-zero on failure */
4345 assert(ret == 0);
4346#endif
4347 }
4348 }
10dc2a90
UD
4349}
4350
fa8d436c
UD
4351/*
4352 ------------------------- malloc_consolidate -------------------------
4353
4354 malloc_consolidate is a specialized version of free() that tears
4355 down chunks held in fastbins. Free itself cannot be used for this
4356 purpose since, among other things, it might place chunks back onto
4357 fastbins. So, instead, we need to use a minor variant of the same
4358 code.
a9177ff5 4359
fa8d436c
UD
4360 Also, because this routine needs to be called the first time through
4361 malloc anyway, it turns out to be the perfect place to trigger
4362 initialization code.
4363*/
4364
10dc2a90 4365#if __STD_C
fa8d436c 4366static void malloc_consolidate(mstate av)
10dc2a90 4367#else
fa8d436c 4368static void malloc_consolidate(av) mstate av;
10dc2a90
UD
4369#endif
4370{
fa8d436c
UD
4371 mfastbinptr* fb; /* current fastbin being consolidated */
4372 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4373 mchunkptr p; /* current chunk being consolidated */
4374 mchunkptr nextp; /* next chunk to consolidate */
4375 mchunkptr unsorted_bin; /* bin header */
4376 mchunkptr first_unsorted; /* chunk to link to */
4377
4378 /* These have same use as in free() */
4379 mchunkptr nextchunk;
4380 INTERNAL_SIZE_T size;
4381 INTERNAL_SIZE_T nextsize;
4382 INTERNAL_SIZE_T prevsize;
4383 int nextinuse;
4384 mchunkptr bck;
4385 mchunkptr fwd;
10dc2a90 4386
fa8d436c
UD
4387 /*
4388 If max_fast is 0, we know that av hasn't
4389 yet been initialized, in which case do so below
4390 */
10dc2a90 4391
fa8d436c
UD
4392 if (av->max_fast != 0) {
4393 clear_fastchunks(av);
10dc2a90 4394
fa8d436c 4395 unsorted_bin = unsorted_chunks(av);
10dc2a90 4396
fa8d436c
UD
4397 /*
4398 Remove each chunk from fast bin and consolidate it, placing it
4399 then in unsorted bin. Among other reasons for doing this,
4400 placing in unsorted bin avoids needing to calculate actual bins
4401 until malloc is sure that chunks aren't immediately going to be
4402 reused anyway.
4403 */
a9177ff5 4404
fa8d436c
UD
4405 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4406 fb = &(av->fastbins[0]);
4407 do {
4408 if ( (p = *fb) != 0) {
4409 *fb = 0;
a9177ff5 4410
fa8d436c
UD
4411 do {
4412 check_inuse_chunk(av, p);
4413 nextp = p->fd;
a9177ff5 4414
fa8d436c
UD
4415 /* Slightly streamlined version of consolidation code in free() */
4416 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4417 nextchunk = chunk_at_offset(p, size);
4418 nextsize = chunksize(nextchunk);
a9177ff5 4419
fa8d436c
UD
4420 if (!prev_inuse(p)) {
4421 prevsize = p->prev_size;
4422 size += prevsize;
4423 p = chunk_at_offset(p, -((long) prevsize));
4424 unlink(p, bck, fwd);
4425 }
a9177ff5 4426
fa8d436c
UD
4427 if (nextchunk != av->top) {
4428 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
a9177ff5 4429
fa8d436c
UD
4430 if (!nextinuse) {
4431 size += nextsize;
4432 unlink(nextchunk, bck, fwd);
4433 } else
4434 clear_inuse_bit_at_offset(nextchunk, 0);
a9177ff5 4435
fa8d436c
UD
4436 first_unsorted = unsorted_bin->fd;
4437 unsorted_bin->fd = p;
4438 first_unsorted->bk = p;
a9177ff5 4439
fa8d436c
UD
4440 set_head(p, size | PREV_INUSE);
4441 p->bk = unsorted_bin;
4442 p->fd = first_unsorted;
4443 set_foot(p, size);
4444 }
a9177ff5 4445
fa8d436c
UD
4446 else {
4447 size += nextsize;
4448 set_head(p, size | PREV_INUSE);
4449 av->top = p;
4450 }
a9177ff5 4451
fa8d436c 4452 } while ( (p = nextp) != 0);
a9177ff5 4453
fa8d436c
UD
4454 }
4455 } while (fb++ != maxfb);
4456 }
4457 else {
4458 malloc_init_state(av);
4459 check_malloc_state(av);
4460 }
4461}
10dc2a90 4462
fa8d436c
UD
4463/*
4464 ------------------------------ realloc ------------------------------
4465*/
f65fd747 4466
f1c5213d 4467Void_t*
fa8d436c
UD
4468_int_realloc(mstate av, Void_t* oldmem, size_t bytes)
4469{
4470 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 4471
fa8d436c
UD
4472 mchunkptr oldp; /* chunk corresponding to oldmem */
4473 INTERNAL_SIZE_T oldsize; /* its size */
f65fd747 4474
fa8d436c
UD
4475 mchunkptr newp; /* chunk to return */
4476 INTERNAL_SIZE_T newsize; /* its size */
4477 Void_t* newmem; /* corresponding user mem */
f65fd747 4478
fa8d436c 4479 mchunkptr next; /* next contiguous chunk after oldp */
f65fd747 4480
fa8d436c
UD
4481 mchunkptr remainder; /* extra space at end of newp */
4482 unsigned long remainder_size; /* its size */
f65fd747 4483
fa8d436c
UD
4484 mchunkptr bck; /* misc temp for linking */
4485 mchunkptr fwd; /* misc temp for linking */
2ed5fd9a 4486
fa8d436c
UD
4487 unsigned long copysize; /* bytes to copy */
4488 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
a9177ff5 4489 INTERNAL_SIZE_T* s; /* copy source */
fa8d436c 4490 INTERNAL_SIZE_T* d; /* copy destination */
f65fd747 4491
f65fd747 4492
fa8d436c
UD
4493#if REALLOC_ZERO_BYTES_FREES
4494 if (bytes == 0) {
4495 _int_free(av, oldmem);
4496 return 0;
4497 }
4498#endif
f65fd747 4499
fa8d436c
UD
4500 /* realloc of null is supposed to be same as malloc */
4501 if (oldmem == 0) return _int_malloc(av, bytes);
f65fd747 4502
fa8d436c 4503 checked_request2size(bytes, nb);
f65fd747 4504
fa8d436c
UD
4505 oldp = mem2chunk(oldmem);
4506 oldsize = chunksize(oldp);
f65fd747 4507
fa8d436c 4508 check_inuse_chunk(av, oldp);
f65fd747 4509
fa8d436c 4510 if (!chunk_is_mmapped(oldp)) {
f65fd747 4511
fa8d436c
UD
4512 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4513 /* already big enough; split below */
4514 newp = oldp;
4515 newsize = oldsize;
7799b7b3 4516 }
f65fd747 4517
fa8d436c
UD
4518 else {
4519 next = chunk_at_offset(oldp, oldsize);
4520
4521 /* Try to expand forward into top */
4522 if (next == av->top &&
4523 (unsigned long)(newsize = oldsize + chunksize(next)) >=
4524 (unsigned long)(nb + MINSIZE)) {
4525 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4526 av->top = chunk_at_offset(oldp, nb);
4527 set_head(av->top, (newsize - nb) | PREV_INUSE);
4528 check_inuse_chunk(av, oldp);
4529 return chunk2mem(oldp);
4530 }
a9177ff5 4531
fa8d436c 4532 /* Try to expand forward into next chunk; split off remainder below */
a9177ff5 4533 else if (next != av->top &&
fa8d436c
UD
4534 !inuse(next) &&
4535 (unsigned long)(newsize = oldsize + chunksize(next)) >=
4536 (unsigned long)(nb)) {
4537 newp = oldp;
4538 unlink(next, bck, fwd);
4539 }
f65fd747 4540
fa8d436c
UD
4541 /* allocate, copy, free */
4542 else {
4543 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4544 if (newmem == 0)
4545 return 0; /* propagate failure */
a9177ff5 4546
fa8d436c
UD
4547 newp = mem2chunk(newmem);
4548 newsize = chunksize(newp);
a9177ff5 4549
fa8d436c
UD
4550 /*
4551 Avoid copy if newp is next chunk after oldp.
4552 */
4553 if (newp == next) {
4554 newsize += oldsize;
4555 newp = oldp;
4556 }
4557 else {
4558 /*
4559 Unroll copy of <= 36 bytes (72 if 8byte sizes)
4560 We know that contents have an odd number of
4561 INTERNAL_SIZE_T-sized words; minimally 3.
4562 */
a9177ff5 4563
fa8d436c
UD
4564 copysize = oldsize - SIZE_SZ;
4565 s = (INTERNAL_SIZE_T*)(oldmem);
4566 d = (INTERNAL_SIZE_T*)(newmem);
4567 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4568 assert(ncopies >= 3);
a9177ff5 4569
fa8d436c
UD
4570 if (ncopies > 9)
4571 MALLOC_COPY(d, s, copysize);
a9177ff5 4572
fa8d436c
UD
4573 else {
4574 *(d+0) = *(s+0);
4575 *(d+1) = *(s+1);
4576 *(d+2) = *(s+2);
4577 if (ncopies > 4) {
4578 *(d+3) = *(s+3);
4579 *(d+4) = *(s+4);
4580 if (ncopies > 6) {
4581 *(d+5) = *(s+5);
4582 *(d+6) = *(s+6);
4583 if (ncopies > 8) {
4584 *(d+7) = *(s+7);
4585 *(d+8) = *(s+8);
4586 }
4587 }
4588 }
4589 }
a9177ff5 4590
fa8d436c
UD
4591 _int_free(av, oldmem);
4592 check_inuse_chunk(av, newp);
4593 return chunk2mem(newp);
4594 }
4595 }
f65fd747
UD
4596 }
4597
fa8d436c 4598 /* If possible, free extra space in old or extended chunk */
f65fd747 4599
fa8d436c 4600 assert((unsigned long)(newsize) >= (unsigned long)(nb));
f65fd747 4601
f65fd747 4602 remainder_size = newsize - nb;
f65fd747 4603
fa8d436c
UD
4604 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4605 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4606 set_inuse_bit_at_offset(newp, newsize);
4607 }
4608 else { /* split remainder */
4609 remainder = chunk_at_offset(newp, nb);
4610 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4611 set_head(remainder, remainder_size | PREV_INUSE |
4612 (av != &main_arena ? NON_MAIN_ARENA : 0));
4613 /* Mark remainder as inuse so free() won't complain */
4614 set_inuse_bit_at_offset(remainder, remainder_size);
a9177ff5 4615 _int_free(av, chunk2mem(remainder));
fa8d436c 4616 }
f65fd747 4617
fa8d436c
UD
4618 check_inuse_chunk(av, newp);
4619 return chunk2mem(newp);
4620 }
f65fd747 4621
fa8d436c
UD
4622 /*
4623 Handle mmap cases
4624 */
f65fd747 4625
fa8d436c
UD
4626 else {
4627#if HAVE_MMAP
f65fd747 4628
fa8d436c
UD
4629#if HAVE_MREMAP
4630 INTERNAL_SIZE_T offset = oldp->prev_size;
4631 size_t pagemask = mp_.pagesize - 1;
4632 char *cp;
4633 unsigned long sum;
a9177ff5 4634
fa8d436c
UD
4635 /* Note the extra SIZE_SZ overhead */
4636 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4637
4638 /* don't need to remap if the new size still fits the old mapping */
a9177ff5 4639 if (oldsize == newsize - offset)
fa8d436c
UD
4640 return oldmem;
4641
4642 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
a9177ff5 4643
fa8d436c
UD
4644 if (cp != MAP_FAILED) {
4645
4646 newp = (mchunkptr)(cp + offset);
4647 set_head(newp, (newsize - offset)|IS_MMAPPED);
a9177ff5 4648
fa8d436c
UD
4649 assert(aligned_OK(chunk2mem(newp)));
4650 assert((newp->prev_size == offset));
a9177ff5 4651
fa8d436c
UD
4652 /* update statistics */
4653 sum = mp_.mmapped_mem += newsize - oldsize;
a9177ff5 4654 if (sum > (unsigned long)(mp_.max_mmapped_mem))
fa8d436c
UD
4655 mp_.max_mmapped_mem = sum;
4656#ifdef NO_THREADS
4657 sum += main_arena.system_mem;
a9177ff5 4658 if (sum > (unsigned long)(mp_.max_total_mem))
fa8d436c
UD
4659 mp_.max_total_mem = sum;
4660#endif
a9177ff5 4661
fa8d436c
UD
4662 return chunk2mem(newp);
4663 }
f65fd747 4664#endif
10dc2a90 4665
fa8d436c 4666 /* Note the extra SIZE_SZ overhead. */
a9177ff5 4667 if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
fa8d436c
UD
4668 newmem = oldmem; /* do nothing */
4669 else {
4670 /* Must alloc, copy, free. */
4671 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4672 if (newmem != 0) {
4673 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
4674 _int_free(av, oldmem);
4675 }
4676 }
4677 return newmem;
10dc2a90 4678
a9177ff5 4679#else
fa8d436c
UD
4680 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
4681 check_malloc_state(av);
4682 MALLOC_FAILURE_ACTION;
4683 return 0;
a2b08ee5 4684#endif
10dc2a90 4685 }
fa8d436c
UD
4686}
4687
4688/*
4689 ------------------------------ memalign ------------------------------
4690*/
4691
f1c5213d 4692Void_t*
fa8d436c
UD
4693_int_memalign(mstate av, size_t alignment, size_t bytes)
4694{
4695 INTERNAL_SIZE_T nb; /* padded request size */
4696 char* m; /* memory returned by malloc call */
4697 mchunkptr p; /* corresponding chunk */
4698 char* brk; /* alignment point within p */
4699 mchunkptr newp; /* chunk to return */
4700 INTERNAL_SIZE_T newsize; /* its size */
4701 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4702 mchunkptr remainder; /* spare room at end to split off */
4703 unsigned long remainder_size; /* its size */
4704 INTERNAL_SIZE_T size;
f65fd747
UD
4705
4706 /* If need less alignment than we give anyway, just relay to malloc */
4707
fa8d436c 4708 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
f65fd747
UD
4709
4710 /* Otherwise, ensure that it is at least a minimum chunk size */
4711
4712 if (alignment < MINSIZE) alignment = MINSIZE;
4713
fa8d436c
UD
4714 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4715 if ((alignment & (alignment - 1)) != 0) {
4716 size_t a = MALLOC_ALIGNMENT * 2;
4717 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4718 alignment = a;
7799b7b3 4719 }
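  /*
    Example of the rounding above (assuming MALLOC_ALIGNMENT == 8): a
    request for alignment 24, which is not a power of two, starts with
    a == 16, doubles once to 32 >= 24, and so is served with an effective
    alignment of 32.
  */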
f65fd747 4720
fa8d436c
UD
4721 checked_request2size(bytes, nb);
4722
4723 /*
4724 Strategy: find a spot within that chunk that meets the alignment
4725 request, and then possibly free the leading and trailing space.
4726 */
4727
4728
4729 /* Call malloc with worst case padding to hit alignment. */
4730
4731 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4732
4733 if (m == 0) return 0; /* propagate failure */
4734
4735 p = mem2chunk(m);
4736
4737 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4738
f65fd747 4739 /*
fa8d436c
UD
4740 Find an aligned spot inside chunk. Since we need to give back
4741 leading space in a chunk of at least MINSIZE, if the first
4742 calculation places us at a spot with less than MINSIZE leader,
4743 we can move to the next aligned spot -- we've allocated enough
4744 total room so that this is always possible.
f65fd747
UD
4745 */
4746
fa8d436c
UD
4747 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
4748 -((signed long) alignment));
4749 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4750 brk += alignment;
f65fd747 4751
fa8d436c 4752 newp = (mchunkptr)brk;
f65fd747
UD
4753 leadsize = brk - (char*)(p);
4754 newsize = chunksize(p) - leadsize;
4755
fa8d436c
UD
4756 /* For mmapped chunks, just adjust offset */
4757 if (chunk_is_mmapped(p)) {
f65fd747
UD
4758 newp->prev_size = p->prev_size + leadsize;
4759 set_head(newp, newsize|IS_MMAPPED);
fa8d436c 4760 return chunk2mem(newp);
f65fd747 4761 }
f65fd747 4762
fa8d436c
UD
4763 /* Otherwise, give back leader, use the rest */
4764 set_head(newp, newsize | PREV_INUSE |
4765 (av != &main_arena ? NON_MAIN_ARENA : 0));
f65fd747 4766 set_inuse_bit_at_offset(newp, newsize);
fa8d436c
UD
4767 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4768 _int_free(av, chunk2mem(p));
f65fd747
UD
4769 p = newp;
4770
fa8d436c
UD
4771 assert (newsize >= nb &&
4772 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
f65fd747
UD
4773 }
4774
4775 /* Also give back spare room at the end */
fa8d436c
UD
4776 if (!chunk_is_mmapped(p)) {
4777 size = chunksize(p);
4778 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4779 remainder_size = size - nb;
4780 remainder = chunk_at_offset(p, nb);
4781 set_head(remainder, remainder_size | PREV_INUSE |
4782 (av != &main_arena ? NON_MAIN_ARENA : 0));
4783 set_head_size(p, nb);
4784 _int_free(av, chunk2mem(remainder));
4785 }
f65fd747
UD
4786 }
4787
fa8d436c
UD
4788 check_inuse_chunk(av, p);
4789 return chunk2mem(p);
f65fd747
UD
4790}
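/*
  Illustrative sketch (editorial addition): the rounding expression used
  above, (addr + alignment - 1) & -alignment, moves an address up to the
  next multiple of a power-of-two alignment.  For example, with
  alignment == 64, address 0x1009 rounds up to 0x1040, while 0x1040 is
  left unchanged.
*/
#if 0
#include <stdint.h>

static uintptr_t align_up(uintptr_t addr, uintptr_t alignment)
{
  /* alignment must be a power of two, as _int_memalign enforces. */
  return (addr + alignment - 1) & -alignment;
}
#endif /* 0 */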
4791
fa8d436c
UD
4792#if 0
4793/*
4794 ------------------------------ calloc ------------------------------
4795*/
4796
4797#if __STD_C
4798Void_t* cALLOc(size_t n_elements, size_t elem_size)
4799#else
4800Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
4801#endif
4802{
4803 mchunkptr p;
4804 unsigned long clearsize;
4805 unsigned long nclears;
4806 INTERNAL_SIZE_T* d;
4807
4808 Void_t* mem = mALLOc(n_elements * elem_size);
4809
4810 if (mem != 0) {
4811 p = mem2chunk(mem);
4812
4813#if MMAP_CLEARS
4814 if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
4815#endif
a9177ff5 4816 {
fa8d436c
UD
4817 /*
4818 Unroll clear of <= 36 bytes (72 if 8-byte sizes).
4819 We know that contents have an odd number of
4820 INTERNAL_SIZE_T-sized words; minimally 3.
4821 */
4822
4823 d = (INTERNAL_SIZE_T*)mem;
4824 clearsize = chunksize(p) - SIZE_SZ;
4825 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
4826 assert(nclears >= 3);
f65fd747 4827
fa8d436c
UD
4828 if (nclears > 9)
4829 MALLOC_ZERO(d, clearsize);
4830
4831 else {
4832 *(d+0) = 0;
4833 *(d+1) = 0;
4834 *(d+2) = 0;
4835 if (nclears > 4) {
4836 *(d+3) = 0;
4837 *(d+4) = 0;
4838 if (nclears > 6) {
4839 *(d+5) = 0;
4840 *(d+6) = 0;
4841 if (nclears > 8) {
4842 *(d+7) = 0;
4843 *(d+8) = 0;
4844 }
4845 }
4846 }
4847 }
4848 }
4849 }
4850 return mem;
4851}
4852#endif /* 0 */
f65fd747
UD
4853
4854/*
fa8d436c 4855 ------------------------- independent_calloc -------------------------
f65fd747
UD
4856*/
4857
f1c5213d 4858Void_t**
f65fd747 4859#if __STD_C
fa8d436c 4860_int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
f65fd747 4861#else
fa8d436c
UD
4862_int_icalloc(av, n_elements, elem_size, chunks)
4863mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
f65fd747
UD
4864#endif
4865{
fa8d436c
UD
4866 size_t sz = elem_size; /* serves as 1-element array */
4867 /* opts arg of 3 means all elements are the same size and should be cleared */
4868 return iALLOc(av, n_elements, &sz, 3, chunks);
f65fd747
UD
4869}
4870
4871/*
fa8d436c 4872 ------------------------- independent_comalloc -------------------------
f65fd747
UD
4873*/
4874
f1c5213d 4875Void_t**
f65fd747 4876#if __STD_C
fa8d436c 4877_int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
f65fd747 4878#else
fa8d436c
UD
4879_int_icomalloc(av, n_elements, sizes, chunks)
4880mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
f65fd747
UD
4881#endif
4882{
fa8d436c 4883 return iALLOc(av, n_elements, sizes, 0, chunks);
f65fd747
UD
4884}
4885
f65fd747 4886
fa8d436c
UD
4887/*
4888 ------------------------------ ialloc ------------------------------
4889 ialloc provides common support for independent_X routines, handling all of
4890 the combinations that can result.
f65fd747 4891
fa8d436c
UD
4892 The opts arg has:
4893 bit 0 set if all elements are same size (using sizes[0])
4894 bit 1 set if elements should be zeroed
f65fd747
UD
4895*/
4896
fa8d436c
UD
4897
4898static Void_t**
f65fd747 4899#if __STD_C
fa8d436c 4900iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
f65fd747 4901#else
fa8d436c
UD
4902iALLOc(av, n_elements, sizes, opts, chunks)
4903mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
f65fd747
UD
4904#endif
4905{
fa8d436c
UD
4906 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
4907 INTERNAL_SIZE_T contents_size; /* total size of elements */
4908 INTERNAL_SIZE_T array_size; /* request size of pointer array */
4909 Void_t* mem; /* malloced aggregate space */
4910 mchunkptr p; /* corresponding chunk */
4911 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
4912 Void_t** marray; /* either "chunks" or malloced ptr array */
4913 mchunkptr array_chunk; /* chunk for malloced ptr array */
4914 int mmx; /* to disable mmap */
a9177ff5 4915 INTERNAL_SIZE_T size;
fa8d436c
UD
4916 INTERNAL_SIZE_T size_flags;
4917 size_t i;
4918
4919 /* Ensure initialization/consolidation */
4920 if (have_fastchunks(av)) malloc_consolidate(av);
4921
4922 /* compute array length, if needed */
4923 if (chunks != 0) {
4924 if (n_elements == 0)
4925 return chunks; /* nothing to do */
4926 marray = chunks;
4927 array_size = 0;
4928 }
4929 else {
4930 /* if empty req, must still return chunk representing empty array */
a9177ff5 4931 if (n_elements == 0)
fa8d436c
UD
4932 return (Void_t**) _int_malloc(av, 0);
4933 marray = 0;
4934 array_size = request2size(n_elements * (sizeof(Void_t*)));
4935 }
f65fd747 4936
fa8d436c
UD
4937 /* compute total element size */
4938 if (opts & 0x1) { /* all-same-size */
4939 element_size = request2size(*sizes);
4940 contents_size = n_elements * element_size;
4941 }
4942 else { /* add up all the sizes */
4943 element_size = 0;
4944 contents_size = 0;
a9177ff5
RM
4945 for (i = 0; i != n_elements; ++i)
4946 contents_size += request2size(sizes[i]);
10dc2a90 4947 }
f65fd747 4948
fa8d436c
UD
4949 /* subtract out alignment bytes from total to minimize overallocation */
4950 size = contents_size + array_size - MALLOC_ALIGN_MASK;
a9177ff5
RM
4951
4952 /*
fa8d436c
UD
4953 Allocate the aggregate chunk.
4954 But first disable mmap so malloc won't use it, since
4955 we would not be able to later free/realloc space internal
4956 to a segregated mmap region.
4957 */
4958 mmx = mp_.n_mmaps_max; /* disable mmap */
4959 mp_.n_mmaps_max = 0;
4960 mem = _int_malloc(av, size);
4961 mp_.n_mmaps_max = mmx; /* reset mmap */
a9177ff5 4962 if (mem == 0)
f65fd747
UD
4963 return 0;
4964
fa8d436c 4965 p = mem2chunk(mem);
a9177ff5 4966 assert(!chunk_is_mmapped(p));
fa8d436c 4967 remainder_size = chunksize(p);
f65fd747 4968
fa8d436c
UD
4969 if (opts & 0x2) { /* optionally clear the elements */
4970 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
7799b7b3 4971 }
f65fd747 4972
fa8d436c 4973 size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
f65fd747 4974
fa8d436c
UD
4975 /* If not provided, allocate the pointer array as final part of chunk */
4976 if (marray == 0) {
4977 array_chunk = chunk_at_offset(p, contents_size);
4978 marray = (Void_t**) (chunk2mem(array_chunk));
4979 set_head(array_chunk, (remainder_size - contents_size) | size_flags);
4980 remainder_size = contents_size;
4981 }
f65fd747 4982
fa8d436c
UD
4983 /* split out elements */
4984 for (i = 0; ; ++i) {
4985 marray[i] = chunk2mem(p);
4986 if (i != n_elements-1) {
a9177ff5 4987 if (element_size != 0)
fa8d436c
UD
4988 size = element_size;
4989 else
a9177ff5 4990 size = request2size(sizes[i]);
fa8d436c
UD
4991 remainder_size -= size;
4992 set_head(p, size | size_flags);
4993 p = chunk_at_offset(p, size);
4994 }
4995 else { /* the final element absorbs any overallocation slop */
4996 set_head(p, remainder_size | size_flags);
4997 break;
4998 }
4999 }
f65fd747 5000
fa8d436c
UD
5001#if MALLOC_DEBUG
5002 if (marray != chunks) {
5003 /* final element must have exactly exhausted the chunk */
a9177ff5 5004 if (element_size != 0)
fa8d436c
UD
5005 assert(remainder_size == element_size);
5006 else
5007 assert(remainder_size == request2size(sizes[i]));
5008 check_inuse_chunk(av, mem2chunk(marray));
7799b7b3 5009 }
fa8d436c
UD
5010
5011 for (i = 0; i != n_elements; ++i)
5012 check_inuse_chunk(av, mem2chunk(marray[i]));
f65fd747
UD
5013#endif
5014
fa8d436c 5015 return marray;
f65fd747
UD
5016}
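/*
  Illustrative usage sketch (editorial addition): the independent_*
  interface above hands back an array of separately freeable pieces cut
  from one aggregate chunk.  Calling _int_icomalloc directly is only a
  sketch; a real caller would go through a public wrapper that takes the
  arena lock first.
*/
#if 0
static void icomalloc_example(void)
{
  size_t sizes[3] = { 16, 100, 4096 };
  Void_t **pieces = _int_icomalloc(&main_arena, 3, sizes, 0);
  if (pieces != 0) {
    /* pieces[0..2] behave like three separately malloc'ed blocks. */
    _int_free(&main_arena, pieces[0]);
    _int_free(&main_arena, pieces[1]);
    _int_free(&main_arena, pieces[2]);
    /* The pointer array itself was allocated as well (chunks == 0),
       so it is released last. */
    _int_free(&main_arena, (Void_t *) pieces);
  }
}
#endif /* 0 */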
5017
f65fd747 5018
fa8d436c
UD
5019/*
5020 ------------------------------ valloc ------------------------------
f65fd747
UD
5021*/
5022
f1c5213d 5023Void_t*
f65fd747 5024#if __STD_C
fa8d436c 5025_int_valloc(mstate av, size_t bytes)
f65fd747 5026#else
fa8d436c 5027_int_valloc(av, bytes) mstate av; size_t bytes;
f65fd747
UD
5028#endif
5029{
fa8d436c
UD
5030 /* Ensure initialization/consolidation */
5031 if (have_fastchunks(av)) malloc_consolidate(av);
5032 return _int_memalign(av, mp_.pagesize, bytes);
f65fd747 5033}
f65fd747
UD
5034
5035/*
fa8d436c 5036 ------------------------------ pvalloc ------------------------------
f65fd747
UD
5037*/
5038
fa8d436c 5039
f1c5213d 5040Void_t*
f65fd747 5041#if __STD_C
fa8d436c 5042_int_pvalloc(mstate av, size_t bytes)
f65fd747 5043#else
fa8d436c 5044_int_pvalloc(av, bytes) mstate av; size_t bytes;
f65fd747
UD
5045#endif
5046{
fa8d436c 5047 size_t pagesz;
f65fd747 5048
fa8d436c
UD
5049 /* Ensure initialization/consolidation */
5050 if (have_fastchunks(av)) malloc_consolidate(av);
5051 pagesz = mp_.pagesize;
5052 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
f65fd747 5053}
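/*
  Editorial note, for illustration: the rounding above,
  (bytes + pagesz - 1) & ~(pagesz - 1), pads the request to a whole
  number of pages before the page-aligned memalign call.  With a
  4096-byte page, a request of 5000 bytes becomes 8192 (two pages),
  while 4096 stays 4096.
*/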
a9177ff5 5054
f65fd747 5055
fa8d436c
UD
5056/*
5057 ------------------------------ malloc_trim ------------------------------
5058*/
8a4b65b4 5059
f65fd747 5060#if __STD_C
fa8d436c 5061int mTRIm(size_t pad)
f65fd747 5062#else
fa8d436c 5063int mTRIm(pad) size_t pad;
f65fd747
UD
5064#endif
5065{
fa8d436c 5066 mstate av = &main_arena; /* already locked */
f65fd747 5067
fa8d436c
UD
5068 /* Ensure initialization/consolidation */
5069 malloc_consolidate(av);
8a4b65b4 5070
a9177ff5 5071#ifndef MORECORE_CANNOT_TRIM
fa8d436c 5072 return sYSTRIm(pad, av);
8a4b65b4 5073#else
fa8d436c 5074 return 0;
f65fd747 5075#endif
f65fd747
UD
5076}
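/*
  Illustrative usage sketch (editorial addition): the exported name is
  malloc_trim (see the alias list at the end of the file).  A pad of 0
  asks for as much of the unused top of the main heap as possible to be
  returned to the system.
*/
#if 0
#include <malloc.h>

static void trim_example(void)
{
  /* Returns 1 if any memory was actually released, 0 otherwise. */
  int released = malloc_trim(0);
  (void) released;
}
#endif /* 0 */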
5077
f65fd747
UD
5078
5079/*
fa8d436c 5080 ------------------------- malloc_usable_size -------------------------
f65fd747
UD
5081*/
5082
5083#if __STD_C
fa8d436c 5084size_t mUSABLe(Void_t* mem)
f65fd747 5085#else
fa8d436c 5086size_t mUSABLe(mem) Void_t* mem;
f65fd747
UD
5087#endif
5088{
5089 mchunkptr p;
fa8d436c 5090 if (mem != 0) {
f65fd747 5091 p = mem2chunk(mem);
fa8d436c
UD
5092 if (chunk_is_mmapped(p))
5093 return chunksize(p) - 2*SIZE_SZ;
5094 else if (inuse(p))
f65fd747 5095 return chunksize(p) - SIZE_SZ;
f65fd747 5096 }
fa8d436c 5097 return 0;
f65fd747
UD
5098}
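/*
  Illustrative usage sketch (editorial addition): malloc_usable_size
  (the exported name for mUSABLe) reports the bytes actually usable in a
  block, which may exceed the requested size because of chunk rounding.
*/
#if 0
#include <assert.h>
#include <malloc.h>
#include <stdlib.h>

static void usable_size_example(void)
{
  void *p = malloc(100);
  if (p != 0) {
    /* At least the 100 bytes asked for, possibly a little more. */
    assert(malloc_usable_size(p) >= 100);
    free(p);
  }
}
#endif /* 0 */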
5099
fa8d436c
UD
5100/*
5101 ------------------------------ mallinfo ------------------------------
5102*/
f65fd747 5103
fa8d436c 5104struct mallinfo mALLINFo(mstate av)
f65fd747 5105{
fa8d436c 5106 struct mallinfo mi;
6dd67bd5 5107 size_t i;
f65fd747
UD
5108 mbinptr b;
5109 mchunkptr p;
f65fd747 5110 INTERNAL_SIZE_T avail;
fa8d436c
UD
5111 INTERNAL_SIZE_T fastavail;
5112 int nblocks;
5113 int nfastblocks;
f65fd747 5114
fa8d436c
UD
5115 /* Ensure initialization */
5116 if (av->top == 0) malloc_consolidate(av);
8a4b65b4 5117
fa8d436c 5118 check_malloc_state(av);
8a4b65b4 5119
fa8d436c
UD
5120 /* Account for top */
5121 avail = chunksize(av->top);
5122 nblocks = 1; /* top always exists */
f65fd747 5123
fa8d436c
UD
5124 /* traverse fastbins */
5125 nfastblocks = 0;
5126 fastavail = 0;
5127
5128 for (i = 0; i < NFASTBINS; ++i) {
5129 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5130 ++nfastblocks;
5131 fastavail += chunksize(p);
5132 }
5133 }
5134
5135 avail += fastavail;
f65fd747 5136
fa8d436c
UD
5137 /* traverse regular bins */
5138 for (i = 1; i < NBINS; ++i) {
5139 b = bin_at(av, i);
5140 for (p = last(b); p != b; p = p->bk) {
5141 ++nblocks;
5142 avail += chunksize(p);
5143 }
5144 }
f65fd747 5145
fa8d436c
UD
5146 mi.smblks = nfastblocks;
5147 mi.ordblks = nblocks;
5148 mi.fordblks = avail;
5149 mi.uordblks = av->system_mem - avail;
5150 mi.arena = av->system_mem;
5151 mi.hblks = mp_.n_mmaps;
5152 mi.hblkhd = mp_.mmapped_mem;
5153 mi.fsmblks = fastavail;
5154 mi.keepcost = chunksize(av->top);
5155 mi.usmblks = mp_.max_total_mem;
5156 return mi;
5157}
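/*
  Illustrative usage sketch (editorial addition): reading the totals
  computed above through the exported mallinfo wrapper.  Per the
  assignments in mALLINFo, arena is bytes obtained from the system,
  uordblks bytes in use, fordblks bytes free, and hblkhd bytes in
  mmapped regions.
*/
#if 0
#include <malloc.h>
#include <stdio.h>

static void mallinfo_example(void)
{
  struct mallinfo mi = mallinfo();
  printf("from system %d, in use %d, free %d, mmapped %d\n",
         mi.arena, mi.uordblks, mi.fordblks, mi.hblkhd);
}
#endif /* 0 */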
f65fd747 5158
fa8d436c
UD
5159/*
5160 ------------------------------ malloc_stats ------------------------------
f65fd747
UD
5161*/
5162
fa8d436c 5163void mSTATs()
f65fd747 5164{
8a4b65b4 5165 int i;
fa8d436c 5166 mstate ar_ptr;
8a4b65b4 5167 struct mallinfo mi;
fa8d436c 5168 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
8a4b65b4
UD
5169#if THREAD_STATS
5170 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
5171#endif
5172
a234e27d
UD
5173 if(__malloc_initialized < 0)
5174 ptmalloc_init ();
8dab36a1
UD
5175#ifdef _LIBC
5176 _IO_flockfile (stderr);
5177 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
5178 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5179#endif
fa8d436c
UD
5180 for (i=0, ar_ptr = &main_arena;; i++) {
5181 (void)mutex_lock(&ar_ptr->mutex);
5182 mi = mALLINFo(ar_ptr);
8a4b65b4
UD
5183 fprintf(stderr, "Arena %d:\n", i);
5184 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
5185 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
fa8d436c
UD
5186#if MALLOC_DEBUG > 1
5187 if (i > 0)
5188 dump_heap(heap_for_ptr(top(ar_ptr)));
5189#endif
8a4b65b4
UD
5190 system_b += mi.arena;
5191 in_use_b += mi.uordblks;
5192#if THREAD_STATS
5193 stat_lock_direct += ar_ptr->stat_lock_direct;
5194 stat_lock_loop += ar_ptr->stat_lock_loop;
5195 stat_lock_wait += ar_ptr->stat_lock_wait;
5196#endif
fa8d436c 5197 (void)mutex_unlock(&ar_ptr->mutex);
7e3be507
UD
5198 ar_ptr = ar_ptr->next;
5199 if(ar_ptr == &main_arena) break;
8a4b65b4 5200 }
7799b7b3 5201#if HAVE_MMAP
8a4b65b4 5202 fprintf(stderr, "Total (incl. mmap):\n");
7799b7b3
UD
5203#else
5204 fprintf(stderr, "Total:\n");
5205#endif
8a4b65b4
UD
5206 fprintf(stderr, "system bytes = %10u\n", system_b);
5207 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
5208#ifdef NO_THREADS
fa8d436c 5209 fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
8a4b65b4 5210#endif
f65fd747 5211#if HAVE_MMAP
fa8d436c
UD
5212 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
5213 fprintf(stderr, "max mmap bytes = %10lu\n",
5214 (unsigned long)mp_.max_mmapped_mem);
f65fd747
UD
5215#endif
5216#if THREAD_STATS
8a4b65b4 5217 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
f65fd747
UD
5218 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
5219 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
8a4b65b4
UD
5220 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
5221 fprintf(stderr, "locked total = %10ld\n",
5222 stat_lock_direct + stat_lock_loop + stat_lock_wait);
f65fd747 5223#endif
8dab36a1
UD
5224#ifdef _LIBC
5225 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
5226 _IO_funlockfile (stderr);
5227#endif
f65fd747
UD
5228}
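/*
  Editorial note: malloc_stats (the exported name for mSTATs) thus
  writes a short report to stderr: an "Arena N:" header with "system
  bytes" and "in use bytes" lines for each arena, followed by the
  totals, plus the optional mmap and lock statistics when the
  corresponding macros are enabled.
*/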
5229
f65fd747
UD
5230
5231/*
fa8d436c 5232 ------------------------------ mallopt ------------------------------
f65fd747
UD
5233*/
5234
5235#if __STD_C
5236int mALLOPt(int param_number, int value)
5237#else
5238int mALLOPt(param_number, value) int param_number; int value;
5239#endif
5240{
fa8d436c
UD
5241 mstate av = &main_arena;
5242 int res = 1;
f65fd747 5243
0cb71e02
UD
5244 if(__malloc_initialized < 0)
5245 ptmalloc_init ();
fa8d436c
UD
5246 (void)mutex_lock(&av->mutex);
5247 /* Ensure initialization/consolidation */
5248 malloc_consolidate(av);
2f6d1f1b 5249
fa8d436c
UD
5250 switch(param_number) {
5251 case M_MXFAST:
5252 if (value >= 0 && value <= MAX_FAST_SIZE) {
5253 set_max_fast(av, value);
5254 }
5255 else
5256 res = 0;
5257 break;
2f6d1f1b 5258
fa8d436c
UD
5259 case M_TRIM_THRESHOLD:
5260 mp_.trim_threshold = value;
5261 break;
2f6d1f1b 5262
fa8d436c
UD
5263 case M_TOP_PAD:
5264 mp_.top_pad = value;
5265 break;
2f6d1f1b 5266
fa8d436c
UD
5267 case M_MMAP_THRESHOLD:
5268#if USE_ARENAS
5269 /* Forbid setting the threshold too high. */
5270 if((unsigned long)value > HEAP_MAX_SIZE/2)
5271 res = 0;
5272 else
2f6d1f1b 5273#endif
fa8d436c
UD
5274 mp_.mmap_threshold = value;
5275 break;
2f6d1f1b 5276
fa8d436c
UD
5277 case M_MMAP_MAX:
5278#if !HAVE_MMAP
5279 if (value != 0)
5280 res = 0;
5281 else
9a51759b 5282#endif
fa8d436c
UD
5283 mp_.n_mmaps_max = value;
5284 break;
10dc2a90 5285
fa8d436c
UD
5286 case M_CHECK_ACTION:
5287 check_action = value;
5288 break;
b22fc5f5 5289 }
fa8d436c
UD
5290 (void)mutex_unlock(&av->mutex);
5291 return res;
b22fc5f5
UD
5292}
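/*
  Illustrative usage sketch (editorial addition): tuning through the
  exported mallopt wrapper.  The parameter numbers are the M_* macros
  from <malloc.h>; each call returns nonzero on success, matching the
  res value computed above.
*/
#if 0
#include <malloc.h>

static void mallopt_example(void)
{
  /* Serve requests of 128kB and larger with mmap instead of sbrk. */
  mallopt(M_MMAP_THRESHOLD, 128 * 1024);
  /* Only trim the top of the heap once a full 1MB is free there. */
  mallopt(M_TRIM_THRESHOLD, 1024 * 1024);
  /* Disable the fastbin cache entirely. */
  mallopt(M_MXFAST, 0);
}
#endif /* 0 */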
5293
10dc2a90 5294
a9177ff5 5295/*
fa8d436c
UD
5296 -------------------- Alternative MORECORE functions --------------------
5297*/
10dc2a90 5298
b22fc5f5 5299
fa8d436c
UD
5300/*
5301 General Requirements for MORECORE.
b22fc5f5 5302
fa8d436c 5303 The MORECORE function must have the following properties:
b22fc5f5 5304
fa8d436c 5305 If MORECORE_CONTIGUOUS is false:
10dc2a90 5306
fa8d436c
UD
5307 * MORECORE must allocate in multiples of pagesize. It will
5308 only be called with arguments that are multiples of pagesize.
10dc2a90 5309
a9177ff5 5310 * MORECORE(0) must return an address that is at least
fa8d436c 5311 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
10dc2a90 5312
fa8d436c 5313 else (i.e. If MORECORE_CONTIGUOUS is true):
10dc2a90 5314
fa8d436c
UD
5315 * Consecutive calls to MORECORE with positive arguments
5316 return increasing addresses, indicating that space has been
5317 contiguously extended.
10dc2a90 5318
fa8d436c
UD
5319 * MORECORE need not allocate in multiples of pagesize.
5320 Calls to MORECORE need not have args of multiples of pagesize.
10dc2a90 5321
fa8d436c 5322 * MORECORE need not page-align.
10dc2a90 5323
fa8d436c 5324 In either case:
10dc2a90 5325
fa8d436c
UD
5326 * MORECORE may allocate more memory than requested. (Or even less,
5327 but this will generally result in a malloc failure.)
10dc2a90 5328
fa8d436c
UD
5329 * MORECORE must not allocate memory when given argument zero, but
5330 instead return one past the end address of memory from previous
5331 nonzero call. This malloc does NOT call MORECORE(0)
5332 until at least one call with positive arguments is made, so
5333 the initial value returned is not important.
10dc2a90 5334
fa8d436c
UD
5335 * Even though consecutive calls to MORECORE need not return contiguous
5336 addresses, it must be OK for malloc'ed chunks to span multiple
5337 regions in those cases where they do happen to be contiguous.
10dc2a90 5338
fa8d436c
UD
5339 * MORECORE need not handle negative arguments -- it may instead
5340 just return MORECORE_FAILURE when given negative arguments.
5341 Negative arguments are always multiples of pagesize. MORECORE
5342 must not misinterpret negative args as large positive unsigned
5343 args. You can prevent all such calls from occurring by defining
5344 MORECORE_CANNOT_TRIM.
10dc2a90 5345
fa8d436c
UD
5346 There is some variation across systems about the type of the
5347 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5348 actually be size_t, because sbrk supports negative args, so it is
5349 normally the signed type of the same width as size_t (sometimes
5350 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5351 matter though. Internally, we use "long" as arguments, which should
5352 work across all reasonable possibilities.
ee74a442 5353
fa8d436c
UD
5354 Additionally, if MORECORE ever returns failure for a positive
5355 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
5356 system allocator. This is a useful backup strategy for systems with
5357 holes in address spaces -- in this case sbrk cannot contiguously
5358 expand the heap, but mmap may be able to map noncontiguous space.
7e3be507 5359
fa8d436c
UD
5360 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5361 a function that always returns MORECORE_FAILURE.
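  For example (an illustrative sketch, editorial addition), such an
  always-failing MORECORE could be as simple as:

    static void *failingMoreCore(int size)
    {
      return (void *) MORECORE_FAILURE;  // never extend; force mmap
    }

    #define MORECORE failingMoreCore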
2e65ca2b 5362
fa8d436c
UD
5363 If you are using this malloc with something other than sbrk (or its
5364 emulation) to supply memory regions, you probably want to set
5365 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5366 allocator kindly contributed for pre-OSX macOS. It uses virtually
5367 but not necessarily physically contiguous non-paged memory (locked
5368 in, present and won't get swapped out). You can use it by
5369 uncommenting this section, adding some #includes, and setting up the
5370 appropriate defines above:
7e3be507 5371
fa8d436c
UD
5372 #define MORECORE osMoreCore
5373 #define MORECORE_CONTIGUOUS 0
7e3be507 5374
fa8d436c
UD
5375 There is also a shutdown routine that should somehow be called for
5376 cleanup upon program exit.
7e3be507 5377
fa8d436c
UD
5378 #define MAX_POOL_ENTRIES 100
5379 #define MINIMUM_MORECORE_SIZE (64 * 1024)
5380 static int next_os_pool;
5381 void *our_os_pools[MAX_POOL_ENTRIES];
7e3be507 5382
fa8d436c
UD
5383 void *osMoreCore(int size)
5384 {
5385 void *ptr = 0;
5386 static void *sbrk_top = 0;
ca34d7a7 5387
fa8d436c
UD
5388 if (size > 0)
5389 {
5390 if (size < MINIMUM_MORECORE_SIZE)
5391 size = MINIMUM_MORECORE_SIZE;
5392 if (CurrentExecutionLevel() == kTaskLevel)
5393 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5394 if (ptr == 0)
5395 {
5396 return (void *) MORECORE_FAILURE;
5397 }
5398 // save ptrs so they can be freed during cleanup
5399 our_os_pools[next_os_pool] = ptr;
5400 next_os_pool++;
5401 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5402 sbrk_top = (char *) ptr + size;
5403 return ptr;
5404 }
5405 else if (size < 0)
5406 {
5407 // we don't currently support shrink behavior
5408 return (void *) MORECORE_FAILURE;
5409 }
5410 else
5411 {
5412 return sbrk_top;
431c33c0 5413 }
ca34d7a7 5414 }
ca34d7a7 5415
fa8d436c
UD
5416 // cleanup any allocated memory pools
5417 // called as last thing before shutting down driver
ca34d7a7 5418
fa8d436c 5419 void osCleanupMem(void)
ca34d7a7 5420 {
fa8d436c 5421 void **ptr;
ca34d7a7 5422
fa8d436c
UD
5423 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5424 if (*ptr)
5425 {
5426 PoolDeallocate(*ptr);
5427 *ptr = 0;
5428 }
5429 }
ee74a442 5430
fa8d436c 5431*/
f65fd747 5432
7e3be507 5433
3e030bd5
UD
5434/* Helper code. */
5435
5436static void
6bf4302e 5437malloc_printerr(int action, const char *str, void *ptr)
3e030bd5
UD
5438{
5439 if (action & 1)
5440 {
6bf4302e
UD
5441 /* output string will be ": ADDR ***\n" */
5442 static const char suffix[] = " ***\n";
5443 static const char prefix[] = ": 0x";
5444 char buf[sizeof (prefix) - 1 + sizeof (void *) * 2 + sizeof (suffix)];
5445 char *cp;
5446 if (action & 4)
5447 cp = memcpy (&buf[sizeof (buf) - 2], "\n", 2);
5448 else
5449 {
5450 cp = memcpy (&buf[sizeof (buf) - sizeof (suffix)], suffix,
5451 sizeof (suffix));
5452 cp = _itoa_word ((unsigned long int) ptr, cp, 16, 0);
5453 while (cp > &buf[sizeof (prefix) - 1])
5454 *--cp = '0';
5455 cp = memcpy (buf, prefix, sizeof (prefix) - 1);
5456 }
3e030bd5 5457
6bf4302e 5458 struct iovec iov[3];
a962d7dc
UD
5459 int n = 0;
5460 if ((action & 4) == 0)
5461 {
5462 iov[0].iov_base = (char *) "*** glibc detected *** ";
5463 iov[0].iov_len = strlen (iov[0].iov_base);
5464 ++n;
5465 }
5466 iov[n].iov_base = (char *) str;
5467 iov[n].iov_len = strlen (str);
5468 ++n;
5469 iov[n].iov_base = cp;
5470 iov[n].iov_len = &buf[sizeof (buf) - 1] - cp;
5471 ++n;
54915e9e
UD
5472 if (TEMP_FAILURE_RETRY (__writev (STDERR_FILENO, iov, n)) == -1
5473 && errno == EBADF)
5474 /* Standard error is not opened. Try using syslog. */
5475 syslog (LOG_ERR, "%s%s%s", (char *) iov[0].iov_base,
5476 (char *) iov[1].iov_base,
5477 n == 3 ? (const char *) iov[2].iov_base : "");
3e030bd5
UD
5478 }
5479 if (action & 2)
5480 abort ();
5481}
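/*
  Editorial note: when a report is requested (and bit 2 of ACTION does
  not suppress the address), the message assembled above reaches stderr
  in the familiar form

    *** glibc detected *** free(): invalid pointer: 0x0000000000601010 ***

  i.e. the fixed prefix, the caller-supplied string, and the offending
  address as zero-padded hex, with a fallback to syslog when stderr is
  not open.  The message text and address here are only examples.
*/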
5482
7e3be507 5483#ifdef _LIBC
b2bffca2 5484# include <sys/param.h>
fa8d436c 5485
a204dbb2
UD
5486/* We need a wrapper function for one of the additions of POSIX. */
5487int
5488__posix_memalign (void **memptr, size_t alignment, size_t size)
5489{
5490 void *mem;
e796f92f
UD
5491 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
5492 __const __malloc_ptr_t)) =
5493 __memalign_hook;
a204dbb2
UD
5494
5495 /* Test whether the ALIGNMENT argument is valid. It must be a power of
5496 two multiple of sizeof (void *). */
de02bd05
UD
5497 if (alignment % sizeof (void *) != 0
5498 || !powerof2 (alignment / sizeof (void *))
5499 || alignment == 0)
a204dbb2
UD
5500 return EINVAL;
5501
e796f92f
UD
5502 /* Call the hook here, so that the caller is posix_memalign's caller
5503 and not posix_memalign itself. */
5504 if (hook != NULL)
5505 mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
5506 else
aa420660 5507 mem = public_mEMALIGn (alignment, size);
a204dbb2 5508
fa8d436c
UD
5509 if (mem != NULL) {
5510 *memptr = mem;
5511 return 0;
5512 }
a204dbb2
UD
5513
5514 return ENOMEM;
5515}
5516weak_alias (__posix_memalign, posix_memalign)
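/*
  Illustrative usage sketch (editorial addition) for the POSIX entry
  point defined above: the alignment must be a power-of-two multiple of
  sizeof (void *), and the result is released with free.
*/
#if 0
#include <stdlib.h>

static int posix_memalign_example(void)
{
  void *buf;
  int err = posix_memalign(&buf, 64, 1024);  /* 64 is a valid alignment */
  if (err != 0)
    return err;     /* EINVAL or ENOMEM, as returned by the code above */
  /* buf is 64-byte aligned here. */
  free(buf);
  return 0;
}
#endif /* 0 */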
5517
eba19d2b
UD
5518strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5519strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5520strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5521strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5522strong_alias (__libc_memalign, __memalign)
5523weak_alias (__libc_memalign, memalign)
5524strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5525strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5526strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5527strong_alias (__libc_mallinfo, __mallinfo)
5528weak_alias (__libc_mallinfo, mallinfo)
5529strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
7e3be507
UD
5530
5531weak_alias (__malloc_stats, malloc_stats)
5532weak_alias (__malloc_usable_size, malloc_usable_size)
5533weak_alias (__malloc_trim, malloc_trim)
2f6d1f1b
UD
5534weak_alias (__malloc_get_state, malloc_get_state)
5535weak_alias (__malloc_set_state, malloc_set_state)
7e3be507 5536
fa8d436c 5537#endif /* _LIBC */
f65fd747 5538
fa8d436c 5539/* ------------------------------------------------------------
f65fd747
UD
5540History:
5541
fa8d436c 5542[see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
f65fd747
UD
5543
5544*/
fa8d436c
UD
5545/*
5546 * Local variables:
5547 * c-basic-offset: 2
5548 * End:
5549 */