56137dbc 1/* Malloc implementation for multiple threads without lock contention.
7d013a64 2 Copyright (C) 1996-2002,2003,2004,2005,2006 Free Software Foundation, Inc.
f65fd747 3 This file is part of the GNU C Library.
fa8d436c
UD
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
f65fd747
UD
6
7 The GNU C Library is free software; you can redistribute it and/or
cc7375ce
RM
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
fa8d436c 10 License, or (at your option) any later version.
f65fd747
UD
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
cc7375ce 15 Lesser General Public License for more details.
f65fd747 16
cc7375ce 17 You should have received a copy of the GNU Lesser General Public
fa8d436c
UD
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
f65fd747 21
fa8d436c
UD
22/*
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
25
26* Version ptmalloc2-20011215
fa8d436c
UD
27 based on:
28 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
f65fd747 29
fa8d436c
UD
30 Note: There may be an updated version of this malloc obtainable at
31 http://www.malloc.de/malloc/ptmalloc2.tar.gz
32 Check before installing!
f65fd747 33
fa8d436c 34* Quickstart
f65fd747 35
fa8d436c
UD
36 In order to compile this implementation, a Makefile is provided with
37 the ptmalloc2 distribution, which has pre-defined targets for some
38 popular systems (e.g. "make posix" for Posix threads). All that is
39 typically required with regard to compiler flags is the selection of
40 the thread package via defining one out of USE_PTHREADS, USE_THR or
41 USE_SPROC. Check the thread-m.h file for what effects this has.
42 Many/most systems will additionally require USE_TSD_DATA_HACK to be
43 defined, so this is the default for "make posix".
f65fd747
UD
44
45* Why use this malloc?
46
47 This is not the fastest, most space-conserving, most portable, or
48 most tunable malloc ever written. However it is among the fastest
49 while also being among the most space-conserving, portable and tunable.
50 Consistent balance across these factors results in a good general-purpose
fa8d436c
UD
51 allocator for malloc-intensive programs.
52
53 The main properties of the algorithms are:
54 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
55 with ties normally decided via FIFO (i.e. least recently used).
56 * For small (<= 64 bytes by default) requests, it is a caching
57 allocator, that maintains pools of quickly recycled chunks.
58 * In between, and for combinations of large and small requests, it does
59 the best it can trying to meet both goals at once.
60 * For very large requests (>= 128KB by default), it relies on system
61 memory mapping facilities, if supported.
62
63 For a longer but slightly out of date high-level description, see
64 http://gee.cs.oswego.edu/dl/html/malloc.html
65
66 You may already by default be using a C library containing a malloc
67 that is based on some version of this malloc (for example in
68 linux). You might still want to use the one in this file in order to
69 customize settings or to avoid overheads associated with library
70 versions.
71
72* Contents, described in more detail in "description of public routines" below.
73
74 Standard (ANSI/SVID/...) functions:
75 malloc(size_t n);
76 calloc(size_t n_elements, size_t element_size);
77 free(Void_t* p);
78 realloc(Void_t* p, size_t n);
79 memalign(size_t alignment, size_t n);
80 valloc(size_t n);
81 mallinfo()
82 mallopt(int parameter_number, int parameter_value)
83
84 Additional functions:
85 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
86 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
87 pvalloc(size_t n);
88 cfree(Void_t* p);
89 malloc_trim(size_t pad);
90 malloc_usable_size(Void_t* p);
91 malloc_stats();
f65fd747
UD
92
93* Vital statistics:
94
fa8d436c 95 Supported pointer representation: 4 or 8 bytes
a9177ff5 96 Supported size_t representation: 4 or 8 bytes
f65fd747 97 Note that size_t is allowed to be 4 bytes even if pointers are 8.
fa8d436c
UD
98 You can adjust this by defining INTERNAL_SIZE_T
99
100 Alignment: 2 * sizeof(size_t) (default)
101 (i.e., 8 byte alignment with 4byte size_t). This suffices for
102 nearly all current machines and C compilers. However, you can
103 define MALLOC_ALIGNMENT to be wider than this if necessary.
f65fd747 104
fa8d436c
UD
105 Minimum overhead per allocated chunk: 4 or 8 bytes
106 Each malloced chunk has a hidden word of overhead holding size
f65fd747
UD
107 and status information.
108
109 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
110 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
111
112 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
113 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
fa8d436c
UD
114 needed; 4 (8) for a trailing size field and 8 (16) bytes for
115 free list pointers. Thus, the minimum allocatable size is
116 16/24/32 bytes.
f65fd747
UD
117
118 Even a request for zero bytes (i.e., malloc(0)) returns a
119 pointer to something of the minimum allocatable size.
120
fa8d436c
UD
121 The maximum overhead wastage (i.e., number of extra bytes
122 allocated beyond those requested in malloc) is less than or equal
123 to the minimum size, except for requests >= mmap_threshold that
124 are serviced via mmap(), where the worst case wastage is 2 *
125 sizeof(size_t) bytes plus the remainder from a system page (the
126 minimal mmap unit); typically 4096 or 8192 bytes.
f65fd747 127
a9177ff5 128 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
fa8d436c
UD
129 8-byte size_t: 2^64 minus about two pages
130
131 It is assumed that (possibly signed) size_t values suffice to
f65fd747
UD
132 represent chunk sizes. `Possibly signed' is due to the fact
133 that `size_t' may be defined on a system as either a signed or
fa8d436c
UD
134 an unsigned type. The ISO C standard says that it must be
135 unsigned, but a few systems are known not to adhere to this.
136 Additionally, even when size_t is unsigned, sbrk (which is by
137 default used to obtain memory from system) accepts signed
138 arguments, and may not be able to handle size_t-wide arguments
139 with negative sign bit. Generally, values that would
140 appear as negative after accounting for overhead and alignment
141 are supported only via mmap(), which does not have this
142 limitation.
143
144 Requests for sizes outside the allowed range will perform an optional
145 failure action and then return null. (Requests may also
146 fail because a system is out of memory.)
147
148 Thread-safety: thread-safe unless NO_THREADS is defined
149
150 Compliance: I believe it is compliant with the 1997 Single Unix Specification
a9177ff5 151 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
fa8d436c 152 others as well.
f65fd747
UD
153
154* Synopsis of compile-time options:
155
156 People have reported using previous versions of this malloc on all
157 versions of Unix, sometimes by tweaking some of the defines
158 below. It has been tested most extensively on Solaris and
fa8d436c
UD
159 Linux. It is also reported to work on WIN32 platforms.
160 People also report using it in stand-alone embedded systems.
161
162 The implementation is in straight, hand-tuned ANSI C. It is not
163 at all modular. (Sorry!) It uses a lot of macros. To be at all
164 usable, this code should be compiled using an optimizing compiler
165 (for example gcc -O3) that can simplify expressions and control
166 paths. (FAQ: some macros import variables as arguments rather than
167 declare locals because people reported that some debuggers
168 otherwise get confused.)
169
170 OPTION DEFAULT VALUE
171
172 Compilation Environment options:
173
174 __STD_C derived from C compiler defines
175 WIN32 NOT defined
176 HAVE_MEMCPY defined
177 USE_MEMCPY 1 if HAVE_MEMCPY is defined
a9177ff5 178 HAVE_MMAP defined as 1
fa8d436c
UD
179 MMAP_CLEARS 1
180 HAVE_MREMAP 0 unless linux defined
181 USE_ARENAS the same as HAVE_MMAP
182 malloc_getpagesize derived from system #includes, or 4096 if not
183 HAVE_USR_INCLUDE_MALLOC_H NOT defined
184 LACKS_UNISTD_H NOT defined unless WIN32
185 LACKS_SYS_PARAM_H NOT defined unless WIN32
186 LACKS_SYS_MMAN_H NOT defined unless WIN32
187
188 Changing default word sizes:
189
190 INTERNAL_SIZE_T size_t
073f560e
UD
191 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
192 __alignof__ (long double))
fa8d436c
UD
193
194 Configuration and functionality options:
195
196 USE_DL_PREFIX NOT defined
197 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
198 USE_MALLOC_LOCK NOT defined
199 MALLOC_DEBUG NOT defined
200 REALLOC_ZERO_BYTES_FREES 1
201 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
202 TRIM_FASTBINS 0
203
204 Options for customizing MORECORE:
205
206 MORECORE sbrk
207 MORECORE_FAILURE -1
a9177ff5 208 MORECORE_CONTIGUOUS 1
fa8d436c
UD
209 MORECORE_CANNOT_TRIM NOT defined
210 MORECORE_CLEARS 1
a9177ff5 211 MMAP_AS_MORECORE_SIZE (1024 * 1024)
fa8d436c
UD
212
213 Tuning options that are also dynamically changeable via mallopt:
214
215 DEFAULT_MXFAST 64
216 DEFAULT_TRIM_THRESHOLD 128 * 1024
217 DEFAULT_TOP_PAD 0
218 DEFAULT_MMAP_THRESHOLD 128 * 1024
219 DEFAULT_MMAP_MAX 65536
220
221 There are several other #defined constants and macros that you
222 probably don't want to touch unless you are extending or adapting malloc. */
f65fd747
UD
223
224/*
fa8d436c
UD
225 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
226 compiler, or a C compiler sufficiently close to ANSI to get away
227 with it.
f65fd747
UD
228*/
229
f65fd747 230#ifndef __STD_C
fa8d436c 231#if defined(__STDC__) || defined(__cplusplus)
f65fd747
UD
232#define __STD_C 1
233#else
234#define __STD_C 0
a9177ff5 235#endif
f65fd747
UD
236#endif /*__STD_C*/
237
fa8d436c
UD
238
239/*
240 Void_t* is the pointer type that malloc should say it returns
241*/
242
f65fd747 243#ifndef Void_t
fa8d436c 244#if (__STD_C || defined(WIN32))
f65fd747
UD
245#define Void_t void
246#else
247#define Void_t char
248#endif
249#endif /*Void_t*/
250
251#if __STD_C
fa8d436c
UD
252#include <stddef.h> /* for size_t */
253#include <stdlib.h> /* for getenv(), abort() */
f65fd747 254#else
fa8d436c 255#include <sys/types.h>
f65fd747
UD
256#endif
257
3c6904fb
UD
258#include <malloc-machine.h>
259
c56da3a3
UD
260#ifdef _LIBC
261#include <stdio-common/_itoa.h>
e404fb16 262#include <bits/wordsize.h>
c56da3a3
UD
263#endif
264
f65fd747
UD
265#ifdef __cplusplus
266extern "C" {
267#endif
268
fa8d436c 269/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
f65fd747 270
fa8d436c 271/* #define LACKS_UNISTD_H */
f65fd747 272
fa8d436c
UD
273#ifndef LACKS_UNISTD_H
274#include <unistd.h>
275#endif
f65fd747 276
fa8d436c
UD
277/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
278
279/* #define LACKS_SYS_PARAM_H */
280
281
282#include <stdio.h> /* needed for malloc_stats */
283#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
f65fd747 284
5d78bb43
UD
285/* For uintptr_t. */
286#include <stdint.h>
f65fd747 287
3e030bd5
UD
288/* For va_arg, va_start, va_end. */
289#include <stdarg.h>
290
6bf4302e
UD
291/* For writev and struct iovec. */
292#include <sys/uio.h>
c0f62c56 293/* For syslog. */
54915e9e 294#include <sys/syslog.h>
6bf4302e 295
c0f62c56
UD
296/* For various dynamic linking things. */
297#include <dlfcn.h>
298
299
fa8d436c
UD
300/*
301 Debugging:
302
303 Because freed chunks may be overwritten with bookkeeping fields, this
304 malloc will often die when freed memory is overwritten by user
305 programs. This can be very effective (albeit in an annoying way)
306 in helping track down dangling pointers.
307
308 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
309 enabled that will catch more memory errors. You probably won't be
310 able to make much sense of the actual assertion errors, but they
311 should help you locate incorrectly overwritten memory. The checking
312 is fairly extensive, and will slow down execution
313 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
314 will attempt to check every non-mmapped allocated and free chunk in
315 the course of computing the summaries. (By nature, mmapped regions
316 cannot be checked very much automatically.)
317
318 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
319 this code. The assertions in the check routines spell out in more
320 detail the assumptions and invariants underlying the algorithms.
321
322 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
323 checking that all accesses to malloced memory stay within their
324 bounds. However, there are several add-ons and adaptations of this
325 or other mallocs available that do this.
f65fd747
UD
326*/
327
328#if MALLOC_DEBUG
329#include <assert.h>
330#else
57449fa3 331#undef assert
f65fd747
UD
332#define assert(x) ((void)0)
333#endif
334
335
336/*
337 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
fa8d436c
UD
338 of chunk sizes.
339
340 The default version is the same as size_t.
341
342 While not strictly necessary, it is best to define this as an
343 unsigned type, even if size_t is a signed type. This may avoid some
344 artificial size limitations on some systems.
345
346 On a 64-bit machine, you may be able to reduce malloc overhead by
347 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
348 expense of not being able to handle more than 2^32 of malloced
349 space. If this limitation is acceptable, you are encouraged to set
350 this unless you are on a platform requiring 16byte alignments. In
351 this case the alignment requirements turn out to negate any
352 potential advantages of decreasing size_t word size.
353
354 Implementors: Beware of the possible combinations of:
355 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
356 and might be the same width as int or as long
357 - size_t might have different width and signedness as INTERNAL_SIZE_T
358 - int and long might be 32 or 64 bits, and might be the same width
359 To deal with this, most comparisons and difference computations
360 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
361 aware of the fact that casting an unsigned int to a wider long does
362 not sign-extend. (This also makes checking for negative numbers
363 awkward.) Some of these casts result in harmless compiler warnings
364 on some systems.
f65fd747
UD
365*/
366
367#ifndef INTERNAL_SIZE_T
368#define INTERNAL_SIZE_T size_t
369#endif
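/*
  As an illustration of the casting convention described above (this is
  not code used by the allocator itself, just a sketch), a comparison
  between two INTERNAL_SIZE_T values is best written like this:

    // Hypothetical helper, shown only for the recommended casting style.
    static int size_at_least(INTERNAL_SIZE_T have, INTERNAL_SIZE_T want)
    {
      // Casting both sides to unsigned long avoids signed/unsigned
      // surprises and makes 32-vs-64 bit mixes behave predictably.
      return (unsigned long)(have) >= (unsigned long)(want);
    }
*/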
370
fa8d436c
UD
371/* The corresponding word size */
372#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
373
374
375/*
376 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
377 It must be a power of two at least 2 * SIZE_SZ, even on machines
378 for which smaller alignments would suffice. It may be defined as
379 larger than this though. Note however that code and data structures
380 are optimized for the case of 8-byte alignment.
381*/
382
383
384#ifndef MALLOC_ALIGNMENT
7d013a64
RM
385/* XXX This is the correct definition. It differs from 2*SIZE_SZ only on
386 powerpc32. For the time being, changing this is causing more
387 compatibility problems due to malloc_get_state/malloc_set_state than
388 will returning blocks not adequately aligned for long double objects
16a10468
RM
389 under -mlong-double-128.
390
073f560e
UD
391#define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
392 ? __alignof__ (long double) : 2 * SIZE_SZ)
7d013a64
RM
393*/
394#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
fa8d436c
UD
395#endif
396
397/* The corresponding bit mask value */
398#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
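/*
  The mask supports the usual round-up-to-alignment idiom.  A minimal
  sketch (the request2size computation used later in this file works the
  same way but also folds in the SIZE_SZ overhead and the minimum size):

    // Round sz up to the next multiple of MALLOC_ALIGNMENT.
    #define EXAMPLE_ALIGN_UP(sz) \
      (((sz) + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

  With the default 8-byte alignment, EXAMPLE_ALIGN_UP(13) is 16.
*/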
399
400
401
402/*
403 REALLOC_ZERO_BYTES_FREES should be set if a call to
404 realloc with zero bytes should be the same as a call to free.
405 This is required by the C standard. Otherwise, since this malloc
406 returns a unique pointer for malloc(0), so does realloc(p, 0).
407*/
408
409#ifndef REALLOC_ZERO_BYTES_FREES
410#define REALLOC_ZERO_BYTES_FREES 1
411#endif
412
413/*
414 TRIM_FASTBINS controls whether free() of a very small chunk can
415 immediately lead to trimming. Setting to true (1) can reduce memory
416 footprint, but will almost always slow down programs that use a lot
417 of small chunks.
418
419 Define this only if you are willing to give up some speed to more
420 aggressively reduce system-level memory footprint when releasing
421 memory in programs that use many small chunks. You can get
422 essentially the same effect by setting MXFAST to 0, but this can
423 lead to even greater slowdowns in programs using many small chunks.
424 TRIM_FASTBINS is an in-between compile-time option, that disables
425 only those chunks bordering topmost memory from being placed in
426 fastbins.
427*/
428
429#ifndef TRIM_FASTBINS
430#define TRIM_FASTBINS 0
431#endif
432
433
f65fd747 434/*
fa8d436c 435 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
a9177ff5 436 This is necessary when you only want to use this malloc in one part
fa8d436c
UD
437 of a program, using your regular system malloc elsewhere.
438*/
439
440/* #define USE_DL_PREFIX */
441
442
a9177ff5 443/*
fa8d436c
UD
444 Two-phase name translation.
445 All of the actual routines are given mangled names.
446 When wrappers are used, they become the public callable versions.
447 When DL_PREFIX is used, the callable names are prefixed.
f65fd747
UD
448*/
449
fa8d436c
UD
450#ifdef USE_DL_PREFIX
451#define public_cALLOc dlcalloc
452#define public_fREe dlfree
453#define public_cFREe dlcfree
454#define public_mALLOc dlmalloc
455#define public_mEMALIGn dlmemalign
456#define public_rEALLOc dlrealloc
457#define public_vALLOc dlvalloc
458#define public_pVALLOc dlpvalloc
459#define public_mALLINFo dlmallinfo
460#define public_mALLOPt dlmallopt
461#define public_mTRIm dlmalloc_trim
462#define public_mSTATs dlmalloc_stats
463#define public_mUSABLe dlmalloc_usable_size
464#define public_iCALLOc dlindependent_calloc
465#define public_iCOMALLOc dlindependent_comalloc
466#define public_gET_STATe dlget_state
467#define public_sET_STATe dlset_state
468#else /* USE_DL_PREFIX */
469#ifdef _LIBC
470
471/* Special defines for the GNU C library. */
472#define public_cALLOc __libc_calloc
473#define public_fREe __libc_free
474#define public_cFREe __libc_cfree
475#define public_mALLOc __libc_malloc
476#define public_mEMALIGn __libc_memalign
477#define public_rEALLOc __libc_realloc
478#define public_vALLOc __libc_valloc
479#define public_pVALLOc __libc_pvalloc
480#define public_mALLINFo __libc_mallinfo
481#define public_mALLOPt __libc_mallopt
482#define public_mTRIm __malloc_trim
483#define public_mSTATs __malloc_stats
484#define public_mUSABLe __malloc_usable_size
485#define public_iCALLOc __libc_independent_calloc
486#define public_iCOMALLOc __libc_independent_comalloc
487#define public_gET_STATe __malloc_get_state
488#define public_sET_STATe __malloc_set_state
489#define malloc_getpagesize __getpagesize()
490#define open __open
491#define mmap __mmap
492#define munmap __munmap
493#define mremap __mremap
494#define mprotect __mprotect
495#define MORECORE (*__morecore)
496#define MORECORE_FAILURE 0
497
498Void_t * __default_morecore (ptrdiff_t);
499Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
f65fd747 500
fa8d436c
UD
501#else /* !_LIBC */
502#define public_cALLOc calloc
503#define public_fREe free
504#define public_cFREe cfree
505#define public_mALLOc malloc
506#define public_mEMALIGn memalign
507#define public_rEALLOc realloc
508#define public_vALLOc valloc
509#define public_pVALLOc pvalloc
510#define public_mALLINFo mallinfo
511#define public_mALLOPt mallopt
512#define public_mTRIm malloc_trim
513#define public_mSTATs malloc_stats
514#define public_mUSABLe malloc_usable_size
515#define public_iCALLOc independent_calloc
516#define public_iCOMALLOc independent_comalloc
517#define public_gET_STATe malloc_get_state
518#define public_sET_STATe malloc_set_state
519#endif /* _LIBC */
520#endif /* USE_DL_PREFIX */
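/*
  For example, a program built with -DUSE_DL_PREFIX can route one
  subsystem through this allocator while the rest of the program keeps
  the system malloc.  A sketch (subsystem_buffer is hypothetical):

    #include <stddef.h>

    void* dlmalloc(size_t);     // provided by this file under USE_DL_PREFIX
    void  dlfree(void*);

    void* subsystem_buffer(size_t n)
    {
      void* p = dlmalloc(n);    // served by ptmalloc2
      // ... other code continues to call the regular malloc()/free() ...
      return p;                 // later released with dlfree(p)
    }
*/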
f65fd747 521
d9af917d
UD
522#ifndef _LIBC
523#define __builtin_expect(expr, val) (expr)
3ba06713
UD
524
525#define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
d9af917d 526#endif
f65fd747
UD
527
528/*
529 HAVE_MEMCPY should be defined if you are not otherwise using
530 ANSI STD C, but still have memcpy and memset in your C library
531 and want to use them in calloc and realloc. Otherwise simple
fa8d436c 532 macro versions are defined below.
f65fd747
UD
533
534 USE_MEMCPY should be defined as 1 if you actually want to
535 have memset and memcpy called. People report that the macro
fa8d436c 536 versions are faster than libc versions on some systems.
a9177ff5 537
fa8d436c
UD
538 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
539 (of <= 36 bytes) are manually unrolled in realloc and calloc.
f65fd747
UD
540*/
541
fa8d436c 542#define HAVE_MEMCPY
f65fd747
UD
543
544#ifndef USE_MEMCPY
545#ifdef HAVE_MEMCPY
546#define USE_MEMCPY 1
547#else
548#define USE_MEMCPY 0
549#endif
550#endif
551
fa8d436c 552
f65fd747
UD
553#if (__STD_C || defined(HAVE_MEMCPY))
554
c2afe833
RM
555#ifdef _LIBC
556# include <string.h>
557#else
fa8d436c
UD
558#ifdef WIN32
559/* On Win32 memset and memcpy are already declared in windows.h */
560#else
f65fd747
UD
561#if __STD_C
562void* memset(void*, int, size_t);
563void* memcpy(void*, const void*, size_t);
564#else
565Void_t* memset();
566Void_t* memcpy();
fa8d436c 567#endif
f65fd747
UD
568#endif
569#endif
c2afe833 570#endif
f65fd747 571
fa8d436c
UD
572/*
573 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
574 malloc fails to be able to return memory, either because memory is
575 exhausted or because of illegal arguments.
a9177ff5
RM
576
577 By default, sets errno if running on STD_C platform, else does nothing.
fa8d436c 578*/
09f5e163 579
fa8d436c
UD
580#ifndef MALLOC_FAILURE_ACTION
581#if __STD_C
582#define MALLOC_FAILURE_ACTION \
583 errno = ENOMEM;
f65fd747 584
fa8d436c
UD
585#else
586#define MALLOC_FAILURE_ACTION
587#endif
588#endif
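/*
  MALLOC_FAILURE_ACTION can be overridden at compile time.  A minimal
  sketch that sets errno and also emits a diagnostic (the message text is
  purely illustrative):

    #include <errno.h>
    #include <unistd.h>

    #define MALLOC_FAILURE_ACTION                        \
      do {                                               \
        errno = ENOMEM;                                  \
        write(2, "malloc: out of memory\n", 22);         \
      } while (0)
*/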
f65fd747 589
fa8d436c
UD
590/*
591 MORECORE-related declarations. By default, rely on sbrk
592*/
09f5e163 593
f65fd747 594
fa8d436c
UD
595#ifdef LACKS_UNISTD_H
596#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
597#if __STD_C
598extern Void_t* sbrk(ptrdiff_t);
599#else
600extern Void_t* sbrk();
601#endif
602#endif
603#endif
f65fd747 604
fa8d436c
UD
605/*
606 MORECORE is the name of the routine to call to obtain more memory
607 from the system. See below for general guidance on writing
608 alternative MORECORE functions, as well as a version for WIN32 and a
609 sample version for pre-OSX macos.
610*/
f65fd747 611
fa8d436c
UD
612#ifndef MORECORE
613#define MORECORE sbrk
614#endif
f65fd747 615
fa8d436c
UD
616/*
617 MORECORE_FAILURE is the value returned upon failure of MORECORE
618 as well as mmap. Since it cannot be an otherwise valid memory address,
619 and must reflect values of standard sys calls, you probably ought not
620 try to redefine it.
621*/
09f5e163 622
fa8d436c
UD
623#ifndef MORECORE_FAILURE
624#define MORECORE_FAILURE (-1)
625#endif
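/*
  As mentioned above, MORECORE can be replaced with a custom routine.  A
  minimal sketch backed by a fixed static region (all names here are
  hypothetical; this file itself just uses sbrk).  It must hand out
  contiguous increments and return MORECORE_FAILURE when it cannot:

    #include <stddef.h>   // size_t, ptrdiff_t

    static char   example_arena[4 * 1024 * 1024];
    static size_t example_brk;               // bytes handed out so far

    void* example_morecore(ptrdiff_t increment)
    {
      void* p;
      if (increment < 0)                     // trimming not supported here
        return (void*) MORECORE_FAILURE;
      if (example_brk + (size_t) increment > sizeof example_arena)
        return (void*) MORECORE_FAILURE;
      p = example_arena + example_brk;
      example_brk += (size_t) increment;
      return p;
    }

  Such a version would be selected with -DMORECORE=example_morecore and,
  because it rejects negative arguments, -DMORECORE_CANNOT_TRIM.
*/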
626
627/*
628 If MORECORE_CONTIGUOUS is true, take advantage of fact that
629 consecutive calls to MORECORE with positive arguments always return
630 contiguous increasing addresses. This is true of unix sbrk. Even
631 if not defined, when regions happen to be contiguous, malloc will
632 permit allocations spanning regions obtained from different
633 calls. But defining this when applicable enables some stronger
634 consistency checks and space efficiencies.
635*/
f65fd747 636
fa8d436c
UD
637#ifndef MORECORE_CONTIGUOUS
638#define MORECORE_CONTIGUOUS 1
f65fd747
UD
639#endif
640
fa8d436c
UD
641/*
642 Define MORECORE_CANNOT_TRIM if your version of MORECORE
643 cannot release space back to the system when given negative
644 arguments. This is generally necessary only if you are using
645 a hand-crafted MORECORE function that cannot handle negative arguments.
646*/
647
648/* #define MORECORE_CANNOT_TRIM */
f65fd747 649
fa8d436c
UD
650/* MORECORE_CLEARS (default 1)
651 The degree to which the routine mapped to MORECORE zeroes out
652 memory: never (0), only for newly allocated space (1) or always
653 (2). The distinction between (1) and (2) is necessary because on
654 some systems, if the application first decrements and then
655 increments the break value, the contents of the reallocated space
656 are unspecified.
657*/
658
659#ifndef MORECORE_CLEARS
660#define MORECORE_CLEARS 1
7cabd57c
UD
661#endif
662
fa8d436c 663
f65fd747 664/*
fa8d436c
UD
665 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
666 allocate very large blocks. These will be returned to the
667 operating system immediately after a free(). Also, if mmap
668 is available, it is used as a backup strategy in cases where
669 MORECORE fails to provide space from system.
670
671 This malloc is best tuned to work with mmap for large requests.
672 If you do not have mmap, operations involving very large chunks (1MB
673 or so) may be slower than you'd like.
f65fd747
UD
674*/
675
676#ifndef HAVE_MMAP
fa8d436c
UD
677#define HAVE_MMAP 1
678
a9177ff5 679/*
fa8d436c
UD
680 Standard unix mmap using /dev/zero clears memory so calloc doesn't
681 need to.
682*/
683
684#ifndef MMAP_CLEARS
685#define MMAP_CLEARS 1
686#endif
687
688#else /* no mmap */
689#ifndef MMAP_CLEARS
690#define MMAP_CLEARS 0
691#endif
692#endif
693
694
a9177ff5 695/*
fa8d436c
UD
696 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
697 sbrk fails, and mmap is used as a backup (which is done only if
698 HAVE_MMAP). The value must be a multiple of page size. This
699 backup strategy generally applies only when systems have "holes" in
700 address space, so sbrk cannot perform contiguous expansion, but
701 there is still space available on system. On systems for which
702 this is known to be useful (i.e. most linux kernels), this occurs
703 only when programs allocate huge amounts of memory. Between this,
704 and the fact that mmap regions tend to be limited, the size should
705 be large, to avoid too many mmap calls and thus avoid running out
706 of kernel resources.
707*/
708
709#ifndef MMAP_AS_MORECORE_SIZE
710#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
f65fd747
UD
711#endif
712
713/*
714 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
715 large blocks. This is currently only possible on Linux with
716 kernel versions newer than 1.3.77.
717*/
718
719#ifndef HAVE_MREMAP
fa8d436c
UD
720#ifdef linux
721#define HAVE_MREMAP 1
722#else
723#define HAVE_MREMAP 0
f65fd747
UD
724#endif
725
fa8d436c
UD
726#endif /* HAVE_MMAP */
727
e9b3e3c5
UD
728/* Define USE_ARENAS to enable support for multiple `arenas'. These
729 are allocated using mmap(), are necessary for threads and
730 occasionally useful to overcome address space limitations affecting
731 sbrk(). */
732
733#ifndef USE_ARENAS
734#define USE_ARENAS HAVE_MMAP
735#endif
736
f65fd747
UD
737
738/*
fa8d436c
UD
739 The system page size. To the extent possible, this malloc manages
740 memory from the system in page-size units. Note that this value is
741 cached during initialization into a field of malloc_state. So even
742 if malloc_getpagesize is a function, it is only called once.
743
744 The following mechanics for getpagesize were adapted from bsd/gnu
745 getpagesize.h. If none of the system-probes here apply, a value of
746 4096 is used, which should be OK: If they don't apply, then using
747 the actual value probably doesn't impact performance.
f65fd747
UD
748*/
749
fa8d436c 750
f65fd747 751#ifndef malloc_getpagesize
fa8d436c
UD
752
753#ifndef LACKS_UNISTD_H
754# include <unistd.h>
755#endif
756
f65fd747
UD
757# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
758# ifndef _SC_PAGE_SIZE
759# define _SC_PAGE_SIZE _SC_PAGESIZE
760# endif
761# endif
fa8d436c 762
f65fd747
UD
763# ifdef _SC_PAGE_SIZE
764# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
765# else
766# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
767 extern size_t getpagesize();
768# define malloc_getpagesize getpagesize()
769# else
fa8d436c 770# ifdef WIN32 /* use supplied emulation of getpagesize */
a9177ff5 771# define malloc_getpagesize getpagesize()
f65fd747 772# else
fa8d436c
UD
773# ifndef LACKS_SYS_PARAM_H
774# include <sys/param.h>
775# endif
776# ifdef EXEC_PAGESIZE
777# define malloc_getpagesize EXEC_PAGESIZE
f65fd747 778# else
fa8d436c
UD
779# ifdef NBPG
780# ifndef CLSIZE
781# define malloc_getpagesize NBPG
782# else
783# define malloc_getpagesize (NBPG * CLSIZE)
784# endif
f65fd747 785# else
fa8d436c
UD
786# ifdef NBPC
787# define malloc_getpagesize NBPC
f65fd747 788# else
fa8d436c
UD
789# ifdef PAGESIZE
790# define malloc_getpagesize PAGESIZE
791# else /* just guess */
a9177ff5 792# define malloc_getpagesize (4096)
fa8d436c 793# endif
f65fd747
UD
794# endif
795# endif
796# endif
797# endif
798# endif
799# endif
800#endif
801
f65fd747 802/*
f65fd747 803 This version of malloc supports the standard SVID/XPG mallinfo
fa8d436c
UD
804 routine that returns a struct containing usage properties and
805 statistics. It should work on any SVID/XPG compliant system that has
806 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
807 install such a thing yourself, cut out the preliminary declarations
808 as described above and below and save them in a malloc.h file. But
809 there's no compelling reason to bother to do this.)
f65fd747
UD
810
811 The main declaration needed is the mallinfo struct that is returned
812 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
fa8d436c
UD
813 bunch of fields that are not even meaningful in this version of
814 malloc. These fields are instead filled by mallinfo() with
815 other numbers that might be of interest.
f65fd747
UD
816
817 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
818 /usr/include/malloc.h file that includes a declaration of struct
819 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
820 version is declared below. These must be precisely the same for
fa8d436c
UD
821 mallinfo() to work. The original SVID version of this struct,
822 defined on most systems with mallinfo, declares all fields as
823 ints. But some others define as unsigned long. If your system
824 defines the fields using a type of different width than listed here,
825 you must #include your system version and #define
826 HAVE_USR_INCLUDE_MALLOC_H.
f65fd747
UD
827*/
828
829/* #define HAVE_USR_INCLUDE_MALLOC_H */
830
fa8d436c
UD
831#ifdef HAVE_USR_INCLUDE_MALLOC_H
832#include "/usr/include/malloc.h"
f65fd747
UD
833#endif
834
f65fd747 835
fa8d436c 836/* ---------- description of public routines ------------ */
f65fd747
UD
837
838/*
fa8d436c
UD
839 malloc(size_t n)
840 Returns a pointer to a newly allocated chunk of at least n bytes, or null
841 if no space is available. Additionally, on failure, errno is
842 set to ENOMEM on ANSI C systems.
843
844 If n is zero, malloc returns a minimum-sized chunk. (The minimum
845 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
846 systems.) On most systems, size_t is an unsigned type, so calls
847 with negative arguments are interpreted as requests for huge amounts
848 of space, which will often fail. The maximum supported value of n
849 differs across systems, but is in all cases less than the maximum
850 representable value of a size_t.
f65fd747 851*/
fa8d436c
UD
852#if __STD_C
853Void_t* public_mALLOc(size_t);
854#else
855Void_t* public_mALLOc();
856#endif
aa420660
UD
857#ifdef libc_hidden_proto
858libc_hidden_proto (public_mALLOc)
859#endif
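/*
  A minimal caller-side sketch of the contract above: check for a null
  return and expect errno to be ENOMEM on failure (on ANSI systems):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    char* make_buffer(size_t n)
    {
      char* p = malloc(n);     // n == 0 still yields a valid, minimum-sized chunk
      if (p == NULL)
        fprintf(stderr, "malloc(%lu): %s\n", (unsigned long) n, strerror(errno));
      return p;
    }
*/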
f65fd747 860
fa8d436c
UD
861/*
862 free(Void_t* p)
863 Releases the chunk of memory pointed to by p, that had been previously
864 allocated using malloc or a related routine such as realloc.
865 It has no effect if p is null. It can have arbitrary (i.e., bad!)
866 effects if p has already been freed.
867
868 Unless disabled (using mallopt), freeing very large spaces will
869 when possible, automatically trigger operations that give
870 back unused memory to the system, thus reducing program footprint.
871*/
872#if __STD_C
873void public_fREe(Void_t*);
874#else
875void public_fREe();
876#endif
aa420660
UD
877#ifdef libc_hidden_proto
878libc_hidden_proto (public_fREe)
879#endif
f65fd747 880
fa8d436c
UD
881/*
882 calloc(size_t n_elements, size_t element_size);
883 Returns a pointer to n_elements * element_size bytes, with all locations
884 set to zero.
885*/
886#if __STD_C
887Void_t* public_cALLOc(size_t, size_t);
888#else
889Void_t* public_cALLOc();
f65fd747
UD
890#endif
891
892/*
fa8d436c
UD
893 realloc(Void_t* p, size_t n)
894 Returns a pointer to a chunk of size n that contains the same data
895 as does chunk p up to the minimum of (n, p's size) bytes, or null
a9177ff5 896 if no space is available.
f65fd747 897
fa8d436c
UD
898 The returned pointer may or may not be the same as p. The algorithm
899 prefers extending p when possible, otherwise it employs the
900 equivalent of a malloc-copy-free sequence.
f65fd747 901
a9177ff5 902 If p is null, realloc is equivalent to malloc.
f65fd747 903
fa8d436c
UD
904 If space is not available, realloc returns null, errno is set (if on
905 ANSI) and p is NOT freed.
f65fd747 906
fa8d436c
UD
907 If n is for fewer bytes than already held by p, the newly unused
908 space is lopped off and freed if possible. Unless the #define
909 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
910 zero (re)allocates a minimum-sized chunk.
f65fd747 911
fa8d436c
UD
912 Large chunks that were internally obtained via mmap will always
913 be reallocated using malloc-copy-free sequences unless
914 the system supports MREMAP (currently only linux).
f65fd747 915
fa8d436c
UD
916 The old unix realloc convention of allowing the last-free'd chunk
917 to be used as an argument to realloc is not supported.
f65fd747 918*/
fa8d436c
UD
919#if __STD_C
920Void_t* public_rEALLOc(Void_t*, size_t);
921#else
922Void_t* public_rEALLOc();
923#endif
aa420660
UD
924#ifdef libc_hidden_proto
925libc_hidden_proto (public_rEALLOc)
926#endif
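/*
  Because a failed realloc leaves p allocated (see above), the usual
  caller pattern keeps the old pointer until the new one is known to be
  valid.  A sketch:

    #include <stdlib.h>

    int grow_buffer(char** bufp, size_t new_size)
    {
      char* tmp = realloc(*bufp, new_size);
      if (tmp == NULL)           // *bufp is still valid and still owned
        return -1;
      *bufp = tmp;
      return 0;
    }
*/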
f65fd747 927
fa8d436c
UD
928/*
929 memalign(size_t alignment, size_t n);
930 Returns a pointer to a newly allocated chunk of n bytes, aligned
931 in accord with the alignment argument.
932
933 The alignment argument should be a power of two. If the argument is
934 not a power of two, the nearest greater power is used.
935 8-byte alignment is guaranteed by normal malloc calls, so don't
936 bother calling memalign with an argument of 8 or less.
937
938 Overreliance on memalign is a sure way to fragment space.
939*/
940#if __STD_C
941Void_t* public_mEMALIGn(size_t, size_t);
942#else
943Void_t* public_mEMALIGn();
f65fd747 944#endif
aa420660
UD
945#ifdef libc_hidden_proto
946libc_hidden_proto (public_mEMALIGn)
947#endif
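/*
  For example, to obtain a 64-byte aligned block (64 being a power of
  two, as required above) and release it with ordinary free().  This
  sketch assumes the glibc-style declaration of memalign in <malloc.h>:

    #include <malloc.h>
    #include <stdlib.h>

    void* make_cacheline_block(size_t n)
    {
      void* p = memalign(64, n); // alignments of 8 or less gain nothing
      return p;                  // free(p) releases it as usual
    }
*/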
f65fd747
UD
948
949/*
fa8d436c
UD
950 valloc(size_t n);
951 Equivalent to memalign(pagesize, n), where pagesize is the page
952 size of the system. If the pagesize is unknown, 4096 is used.
953*/
954#if __STD_C
955Void_t* public_vALLOc(size_t);
956#else
957Void_t* public_vALLOc();
958#endif
959
f65fd747 960
f65fd747 961
fa8d436c
UD
962/*
963 mallopt(int parameter_number, int parameter_value)
964 Sets tunable parameters. The format is to provide a
965 (parameter-number, parameter-value) pair. mallopt then sets the
966 corresponding parameter to the argument value if it can (i.e., so
967 long as the value is meaningful), and returns 1 if successful else
968 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
969 normally defined in malloc.h. Only one of these (M_MXFAST) is used
970 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
971 so setting them has no effect. But this malloc also supports four
972 other options in mallopt. See below for details. Briefly, supported
973 parameters are as follows (listed defaults are for "typical"
974 configurations).
975
976 Symbol param # default allowed param values
977 M_MXFAST 1 64 0-80 (0 disables fastbins)
978 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
a9177ff5 979 M_TOP_PAD -2 0 any
fa8d436c
UD
980 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
981 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
982*/
983#if __STD_C
984int public_mALLOPt(int, int);
985#else
986int public_mALLOPt();
987#endif
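/*
  A sketch using the parameters tabulated above; each call returns 1 on
  success and 0 if the value was rejected (assumes the usual <malloc.h>
  definitions of the M_* constants):

    #include <malloc.h>

    void tune_allocator(void)
    {
      if (mallopt(M_MXFAST, 0) == 0)
        ;                                // value was rejected
      mallopt(M_TOP_PAD, 64 * 1024);     // pad each sbrk request
      mallopt(M_MMAP_MAX, 0);            // never service requests via mmap
    }
*/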
988
989
990/*
991 mallinfo()
992 Returns (by copy) a struct containing various summary statistics:
993
a9177ff5
RM
994 arena: current total non-mmapped bytes allocated from system
995 ordblks: the number of free chunks
fa8d436c
UD
996 smblks: the number of fastbin blocks (i.e., small chunks that
997 have been freed but not reused or consolidated)
a9177ff5
RM
998 hblks: current number of mmapped regions
999 hblkhd: total bytes held in mmapped regions
fa8d436c
UD
1000 usmblks: the maximum total allocated space. This will be greater
1001 than current total if trimming has occurred.
a9177ff5 1002 fsmblks: total bytes held in fastbin blocks
fa8d436c 1003 uordblks: current total allocated space (normal or mmapped)
a9177ff5 1004 fordblks: total free space
fa8d436c
UD
1005 keepcost: the maximum number of bytes that could ideally be released
1006 back to system via malloc_trim. ("ideally" means that
1007 it ignores page restrictions etc.)
1008
1009 Because these fields are ints, but internal bookkeeping may
a9177ff5 1010 be kept as longs, the reported values may wrap around zero and
fa8d436c
UD
1011 thus be inaccurate.
1012*/
1013#if __STD_C
1014struct mallinfo public_mALLINFo(void);
1015#else
1016struct mallinfo public_mALLINFo();
1017#endif
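/*
  For example, to report a few of the fields described above after a
  workload (the fields are plain ints, as noted):

    #include <malloc.h>
    #include <stdio.h>

    void report_heap(void)
    {
      struct mallinfo mi = mallinfo();
      printf("arena=%d uordblks=%d fordblks=%d keepcost=%d\n",
             mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
    }
*/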
f65fd747 1018
88764ae2 1019#ifndef _LIBC
fa8d436c
UD
1020/*
1021 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1022
1023 independent_calloc is similar to calloc, but instead of returning a
1024 single cleared space, it returns an array of pointers to n_elements
1025 independent elements that can hold contents of size elem_size, each
1026 of which starts out cleared, and can be independently freed,
1027 realloc'ed etc. The elements are guaranteed to be adjacently
1028 allocated (this is not guaranteed to occur with multiple callocs or
1029 mallocs), which may also improve cache locality in some
1030 applications.
1031
1032 The "chunks" argument is optional (i.e., may be null, which is
1033 probably the most typical usage). If it is null, the returned array
1034 is itself dynamically allocated and should also be freed when it is
1035 no longer needed. Otherwise, the chunks array must be of at least
1036 n_elements in length. It is filled in with the pointers to the
1037 chunks.
1038
1039 In either case, independent_calloc returns this pointer array, or
1040 null if the allocation failed. If n_elements is zero and "chunks"
1041 is null, it returns a chunk representing an array with zero elements
1042 (which should be freed if not wanted).
1043
1044 Each element must be individually freed when it is no longer
1045 needed. If you'd like to instead be able to free all at once, you
1046 should instead use regular calloc and assign pointers into this
1047 space to represent elements. (In this case though, you cannot
1048 independently free elements.)
a9177ff5 1049
fa8d436c
UD
1050 independent_calloc simplifies and speeds up implementations of many
1051 kinds of pools. It may also be useful when constructing large data
1052 structures that initially have a fixed number of fixed-sized nodes,
1053 but the number is not known at compile time, and some of the nodes
1054 may later need to be freed. For example:
1055
1056 struct Node { int item; struct Node* next; };
a9177ff5 1057
fa8d436c
UD
1058 struct Node* build_list() {
1059 struct Node** pool; int i;
1060 int n = read_number_of_nodes_needed();
1061 if (n <= 0) return 0;
1062 pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
a9177ff5
RM
1063 if (pool == 0) die();
1064 // organize into a linked list...
fa8d436c 1065 struct Node* first = pool[0];
a9177ff5 1066 for (i = 0; i < n-1; ++i)
fa8d436c
UD
1067 pool[i]->next = pool[i+1];
1068 free(pool); // Can now free the array (or not, if it is needed later)
1069 return first;
1070 }
1071*/
1072#if __STD_C
1073Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1074#else
1075Void_t** public_iCALLOc();
1076#endif
f65fd747 1077
fa8d436c
UD
1078/*
1079 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1080
1081 independent_comalloc allocates, all at once, a set of n_elements
1082 chunks with sizes indicated in the "sizes" array. It returns
1083 an array of pointers to these elements, each of which can be
1084 independently freed, realloc'ed etc. The elements are guaranteed to
1085 be adjacently allocated (this is not guaranteed to occur with
1086 multiple callocs or mallocs), which may also improve cache locality
1087 in some applications.
1088
1089 The "chunks" argument is optional (i.e., may be null). If it is null
1090 the returned array is itself dynamically allocated and should also
1091 be freed when it is no longer needed. Otherwise, the chunks array
1092 must be of at least n_elements in length. It is filled in with the
1093 pointers to the chunks.
1094
1095 In either case, independent_comalloc returns this pointer array, or
1096 null if the allocation failed. If n_elements is zero and chunks is
1097 null, it returns a chunk representing an array with zero elements
1098 (which should be freed if not wanted).
a9177ff5 1099
fa8d436c
UD
1100 Each element must be individually freed when it is no longer
1101 needed. If you'd like to instead be able to free all at once, you
1102 should instead use a single regular malloc, and assign pointers at
a9177ff5 1103 particular offsets in the aggregate space. (In this case though, you
fa8d436c
UD
1104 cannot independently free elements.)
1105
1106 independent_comalloc differs from independent_calloc in that each
1107 element may have a different size, and also that it does not
1108 automatically clear elements.
1109
1110 independent_comalloc can be used to speed up allocation in cases
1111 where several structs or objects must always be allocated at the
1112 same time. For example:
1113
1114 struct Head { ... }
1115 struct Foot { ... }
1116
1117 void send_message(char* msg) {
1118 int msglen = strlen(msg);
1119 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1120 void* chunks[3];
1121 if (independent_comalloc(3, sizes, chunks) == 0)
1122 die();
1123 struct Head* head = (struct Head*)(chunks[0]);
1124 char* body = (char*)(chunks[1]);
1125 struct Foot* foot = (struct Foot*)(chunks[2]);
1126 // ...
1127 }
f65fd747 1128
fa8d436c
UD
1129 In general though, independent_comalloc is worth using only for
1130 larger values of n_elements. For small values, you probably won't
1131 detect enough difference from a series of malloc calls to bother.
f65fd747 1132
fa8d436c
UD
1133 Overuse of independent_comalloc can increase overall memory usage,
1134 since it cannot reuse existing noncontiguous small chunks that
1135 might be available for some of the elements.
f65fd747 1136*/
fa8d436c
UD
1137#if __STD_C
1138Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1139#else
1140Void_t** public_iCOMALLOc();
1141#endif
f65fd747 1142
88764ae2
UD
1143#endif /* _LIBC */
1144
f65fd747 1145
fa8d436c
UD
1146/*
1147 pvalloc(size_t n);
1148 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1149 round up n to nearest pagesize.
1150 */
1151#if __STD_C
1152Void_t* public_pVALLOc(size_t);
1153#else
1154Void_t* public_pVALLOc();
1155#endif
f65fd747 1156
fa8d436c
UD
1157/*
1158 cfree(Void_t* p);
1159 Equivalent to free(p).
1160
1161 cfree is needed/defined on some systems that pair it with calloc,
a9177ff5 1162 for odd historical reasons (such as: cfree is used in example
fa8d436c
UD
1163 code in the first edition of K&R).
1164*/
1165#if __STD_C
1166void public_cFREe(Void_t*);
f65fd747 1167#else
fa8d436c
UD
1168void public_cFREe();
1169#endif
1170
1171/*
1172 malloc_trim(size_t pad);
1173
1174 If possible, gives memory back to the system (via negative
1175 arguments to sbrk) if there is unused memory at the `high' end of
1176 the malloc pool. You can call this after freeing large blocks of
1177 memory to potentially reduce the system-level memory requirements
1178 of a program. However, it cannot guarantee to reduce memory. Under
1179 some allocation patterns, some large free blocks of memory will be
1180 locked between two used chunks, so they cannot be given back to
1181 the system.
a9177ff5 1182
fa8d436c
UD
1183 The `pad' argument to malloc_trim represents the amount of free
1184 trailing space to leave untrimmed. If this argument is zero,
1185 only the minimum amount of memory to maintain internal data
1186 structures will be left (one page or less). Non-zero arguments
1187 can be supplied to maintain enough trailing space to service
1188 future expected allocations without having to re-obtain memory
1189 from the system.
a9177ff5 1190
fa8d436c
UD
1191 Malloc_trim returns 1 if it actually released any memory, else 0.
1192 On systems that do not support "negative sbrks", it will always
1193 return 0.
1194*/
1195#if __STD_C
1196int public_mTRIm(size_t);
1197#else
1198int public_mTRIm();
1199#endif
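/*
  A sketch of the usage described above: after freeing large blocks, ask
  the allocator to give trailing free memory back to the system, keeping
  no extra pad:

    #include <malloc.h>
    #include <stdlib.h>

    void release_after_big_free(void* big)
    {
      free(big);
      if (malloc_trim(0) == 0)
        ;                        // nothing releasable, or no negative sbrk
    }
*/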
1200
1201/*
1202 malloc_usable_size(Void_t* p);
1203
1204 Returns the number of bytes you can actually use in
1205 an allocated chunk, which may be more than you requested (although
1206 often not) due to alignment and minimum size constraints.
1207 You can use this many bytes without worrying about
1208 overwriting other allocated objects. This is not a particularly great
1209 programming practice. malloc_usable_size can be more useful in
1210 debugging and assertions, for example:
1211
1212 p = malloc(n);
1213 assert(malloc_usable_size(p) >= 256);
1214
1215*/
1216#if __STD_C
1217size_t public_mUSABLe(Void_t*);
1218#else
1219size_t public_mUSABLe();
f65fd747 1220#endif
fa8d436c
UD
1221
1222/*
1223 malloc_stats();
1224 Prints on stderr the amount of space obtained from the system (both
1225 via sbrk and mmap), the maximum amount (which may be more than
1226 current if malloc_trim and/or munmap got called), and the current
1227 number of bytes allocated via malloc (or realloc, etc) but not yet
1228 freed. Note that this is the number of bytes allocated, not the
1229 number requested. It will be larger than the number requested
1230 because of alignment and bookkeeping overhead. Because it includes
1231 alignment wastage as being in use, this figure may be greater than
1232 zero even when no user-level chunks are allocated.
1233
1234 The reported current and maximum system memory can be inaccurate if
1235 a program makes other calls to system memory allocation functions
1236 (normally sbrk) outside of malloc.
1237
1238 malloc_stats prints only the most commonly interesting statistics.
1239 More information can be obtained by calling mallinfo.
1240
1241*/
1242#if __STD_C
1243void public_mSTATs(void);
1244#else
1245void public_mSTATs();
f65fd747
UD
1246#endif
1247
f7ddf3d3
UD
1248/*
1249 malloc_get_state(void);
1250
1251 Returns the state of all malloc variables in an opaque data
1252 structure.
1253*/
1254#if __STD_C
1255Void_t* public_gET_STATe(void);
1256#else
1257Void_t* public_gET_STATe();
1258#endif
1259
1260/*
1261 malloc_set_state(Void_t* state);
1262
1263 Restore the state of all malloc variables from data obtained with
1264 malloc_get_state().
1265*/
1266#if __STD_C
1267int public_sET_STATe(Void_t*);
1268#else
1269int public_sET_STATe();
1270#endif
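/*
  A sketch of how the pair is meant to be used (assuming, as in glibc,
  that <malloc.h> declares both and that the returned block is itself
  heap-allocated):

    #include <malloc.h>

    static void* malloc_snapshot;

    void save_allocator_state(void)
    {
      malloc_snapshot = malloc_get_state();
    }

    int restore_allocator_state(void)
    {
      if (malloc_snapshot == NULL)
        return -1;
      return malloc_set_state(malloc_snapshot);   // 0 on success
    }
*/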
1271
1272#ifdef _LIBC
1273/*
1274 posix_memalign(void **memptr, size_t alignment, size_t size);
1275
1276 POSIX wrapper like memalign(), checking for validity of size.
1277*/
1278int __posix_memalign(void **, size_t, size_t);
1279#endif
1280
fa8d436c
UD
1281/* mallopt tuning options */
1282
f65fd747 1283/*
fa8d436c
UD
1284 M_MXFAST is the maximum request size used for "fastbins", special bins
1285 that hold returned chunks without consolidating their spaces. This
1286 enables future requests for chunks of the same size to be handled
1287 very quickly, but can increase fragmentation, and thus increase the
1288 overall memory footprint of a program.
1289
1290 This malloc manages fastbins very conservatively yet still
1291 efficiently, so fragmentation is rarely a problem for values less
1292 than or equal to the default. The maximum supported value of MXFAST
1293 is 80. You wouldn't want it any higher than this anyway. Fastbins
1294 are designed especially for use with many small structs, objects or
1295 strings -- the default handles structs/objects/arrays with sizes up
1296 to 8 4byte fields, or small strings representing words, tokens,
1297 etc. Using fastbins for larger objects normally worsens
1298 fragmentation without improving speed.
1299
1300 M_MXFAST is set in REQUEST size units. It is internally used in
1301 chunksize units, which adds padding and alignment. You can reduce
1302 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1303 algorithm to be a closer approximation of fifo-best-fit in all cases,
1304 not just for larger requests, but will generally cause it to be
1305 slower.
f65fd747
UD
1306*/
1307
1308
fa8d436c
UD
1309/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1310#ifndef M_MXFAST
a9177ff5 1311#define M_MXFAST 1
fa8d436c 1312#endif
f65fd747 1313
fa8d436c
UD
1314#ifndef DEFAULT_MXFAST
1315#define DEFAULT_MXFAST 64
10dc2a90
UD
1316#endif
1317
10dc2a90 1318
fa8d436c
UD
1319/*
1320 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1321 to keep before releasing via malloc_trim in free().
1322
1323 Automatic trimming is mainly useful in long-lived programs.
1324 Because trimming via sbrk can be slow on some systems, and can
1325 sometimes be wasteful (in cases where programs immediately
1326 afterward allocate more large chunks) the value should be high
1327 enough so that your overall system performance would improve by
1328 releasing this much memory.
1329
1330 The trim threshold and the mmap control parameters (see below)
1331 can be traded off with one another. Trimming and mmapping are
1332 two different ways of releasing unused memory back to the
1333 system. Between these two, it is often possible to keep
1334 system-level demands of a long-lived program down to a bare
1335 minimum. For example, in one test suite of sessions measuring
1336 the XF86 X server on Linux, using a trim threshold of 128K and a
1337 mmap threshold of 192K led to near-minimal long term resource
1338 consumption.
1339
1340 If you are using this malloc in a long-lived program, it should
1341 pay to experiment with these values. As a rough guide, you
1342 might set it to a value close to the average size of a process
1343 (program) running on your system. Releasing this much memory
1344 would allow such a process to run in memory. Generally, it's
1345 worth it to tune for trimming rather than memory mapping when a
1346 program undergoes phases where several large chunks are
1347 allocated and released in ways that can reuse each other's
1348 storage, perhaps mixed with phases where there are no such
1349 chunks at all. And in well-behaved long-lived programs,
1350 controlling release of large blocks via trimming versus mapping
1351 is usually faster.
1352
1353 However, in most programs, these parameters serve mainly as
1354 protection against the system-level effects of carrying around
1355 massive amounts of unneeded memory. Since frequent calls to
1356 sbrk, mmap, and munmap otherwise degrade performance, the default
1357 parameters are set to relatively high values that serve only as
1358 safeguards.
1359
1360 The trim value must be greater than page size to have any useful
a9177ff5 1361 effect. To disable trimming completely, you can set it to
fa8d436c
UD
1362 (unsigned long)(-1)
1363
1364 Trim settings interact with fastbin (MXFAST) settings: Unless
1365 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1366 freeing a chunk with size less than or equal to MXFAST. Trimming is
1367 instead delayed until subsequent freeing of larger chunks. However,
1368 you can still force an attempted trim by calling malloc_trim.
1369
1370 Also, trimming is not generally possible in cases where
1371 the main arena is obtained via mmap.
1372
1373 Note that the trick some people use of mallocing a huge space and
1374 then freeing it at program startup, in an attempt to reserve system
1375 memory, doesn't have the intended effect under automatic trimming,
1376 since that memory will immediately be returned to the system.
1377*/
1378
1379#define M_TRIM_THRESHOLD -1
1380
1381#ifndef DEFAULT_TRIM_THRESHOLD
1382#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1383#endif
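/*
  As noted above, trimming can be switched off entirely by pushing the
  threshold to its maximum value.  A sketch via mallopt:

    #include <malloc.h>

    void disable_trimming(void)
    {
      mallopt(M_TRIM_THRESHOLD, -1);   // i.e. (unsigned long)(-1): never trim
    }
*/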
1384
1385/*
1386 M_TOP_PAD is the amount of extra `padding' space to allocate or
1387 retain whenever sbrk is called. It is used in two ways internally:
1388
1389 * When sbrk is called to extend the top of the arena to satisfy
1390 a new malloc request, this much padding is added to the sbrk
1391 request.
1392
1393 * When malloc_trim is called automatically from free(),
1394 it is used as the `pad' argument.
1395
1396 In both cases, the actual amount of padding is rounded
1397 so that the end of the arena is always a system page boundary.
1398
1399 The main reason for using padding is to avoid calling sbrk so
1400 often. Having even a small pad greatly reduces the likelihood
1401 that nearly every malloc request during program start-up (or
1402 after trimming) will invoke sbrk, which needlessly wastes
1403 time.
1404
1405 Automatic rounding-up to page-size units is normally sufficient
1406 to avoid measurable overhead, so the default is 0. However, in
1407 systems where sbrk is relatively slow, it can pay to increase
1408 this value, at the expense of carrying around more memory than
1409 the program needs.
1410*/
10dc2a90 1411
fa8d436c 1412#define M_TOP_PAD -2
10dc2a90 1413
fa8d436c
UD
1414#ifndef DEFAULT_TOP_PAD
1415#define DEFAULT_TOP_PAD (0)
1416#endif
f65fd747 1417
1d05c2fb
UD
1418/*
1419 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
1420 adjusted MMAP_THRESHOLD.
1421*/
1422
1423#ifndef DEFAULT_MMAP_THRESHOLD_MIN
1424#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
1425#endif
1426
1427#ifndef DEFAULT_MMAP_THRESHOLD_MAX
e404fb16
UD
1428 /* For 32-bit platforms we cannot increase the maximum mmap
1429 threshold much because it is also the minimum value for the
bd2c2341
UD
1430 maximum heap size and its alignment. Going above 512k (i.e., 1M
1431 for new heaps) wastes too much address space. */
e404fb16 1432# if __WORDSIZE == 32
bd2c2341 1433# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
e404fb16 1434# else
bd2c2341 1435# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
e404fb16 1436# endif
1d05c2fb
UD
1437#endif
1438
fa8d436c
UD
1439/*
1440 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1441 to service a request. Requests of at least this size that cannot
1442 be allocated using already-existing space will be serviced via mmap.
1443 (If enough normal freed space already exists it is used instead.)
1444
1445 Using mmap segregates relatively large chunks of memory so that
1446 they can be individually obtained and released from the host
1447 system. A request serviced through mmap is never reused by any
1448 other request (at least not directly; the system may just so
1449 happen to remap successive requests to the same locations).
1450
1451 Segregating space in this way has the benefits that:
1452
a9177ff5
RM
1453 1. Mmapped space can ALWAYS be individually released back
1454 to the system, which helps keep the system level memory
1455 demands of a long-lived program low.
fa8d436c
UD
1456 2. Mapped memory can never become `locked' between
1457 other chunks, as can happen with normally allocated chunks, which
1458 means that even trimming via malloc_trim would not release them.
1459 3. On some systems with "holes" in address spaces, mmap can obtain
1460 memory that sbrk cannot.
1461
1462 However, it has the disadvantages that:
1463
1464 1. The space cannot be reclaimed, consolidated, and then
1465 used to service later requests, as happens with normal chunks.
1466 2. It can lead to more wastage because of mmap page alignment
1467 requirements.
1468 3. It causes malloc performance to be more dependent on host
1469 system memory management support routines which may vary in
1470 implementation quality and may impose arbitrary
1471 limitations. Generally, servicing a request via normal
1472 malloc steps is faster than going through a system's mmap.
1473
1474 The advantages of mmap nearly always outweigh disadvantages for
1475 "large" chunks, but the value of "large" varies across systems. The
1476 default is an empirically derived value that works well in most
1477 systems.
1d05c2fb
UD
1478
1479
1480 Update in 2006:
1481 The above was written in 2001. Since then the world has changed a lot.
1482 Memory got bigger. Applications got bigger. The virtual address space
1483 layout in 32-bit Linux changed.
1484
1485 In the new situation, brk() and mmap space is shared and there are no
1486 artificial limits on brk size imposed by the kernel. What is more,
1487 applications have started using transient allocations larger than the
1488 128Kb that was imagined in 2001.
1489
1490 The price for mmap is also high now; each time glibc mmaps from the
1491 kernel, the kernel is forced to zero out the memory it gives to the
1492 application. Zeroing memory is expensive and eats a lot of cache and
1493 memory bandwidth. This has nothing to do with the efficiency of the
1494 virtual memory system; with mmap the kernel simply has no choice but
1495 to zero.
1496
1497 In 2001, the kernel had a maximum size for brk() which was about 800
1498 megabytes on 32-bit x86; at that point brk() would hit the first
1499 mmapped shared libraries and couldn't expand anymore. With current 2.6
1500 kernels, the VA space layout is different and brk() and mmap
1501 both can span the entire heap at will.
1502
1503 Rather than using a static threshold for the brk/mmap tradeoff,
1504 we are now using a simple dynamic one. The goal is still to avoid
1505 fragmentation. The old goals we kept are
1506 1) try to get the long lived large allocations to use mmap()
1507 2) really large allocations should always use mmap()
1508 and we're adding now:
1509 3) transient allocations should use brk() to avoid forcing the kernel
1510 to zero memory over and over again
1511
1512 The implementation works with a sliding threshold, which is by default
1513 limited to go between 128Kb and DEFAULT_MMAP_THRESHOLD_MAX (512Kb on 32-bit, 32Mb on 64-bit machines) and starts
1514 out at 128Kb as per the 2001 default.
1515
1516 This allows us to satisfy requirement 1) under the assumption that long
1517 lived allocations are made early in the process' lifespan, before it has
1518 started doing dynamic allocations of the same size (which will
1519 increase the threshold).
1520
1521 The upper bound on the threshold satisfies requirement 2).
1522
1523 The threshold goes up in value when the application frees memory that was
1524 allocated with the mmap allocator. The idea is that once the application
1525 starts freeing memory of a certain size, it's highly probable that this is
1526 a size the application uses for transient allocations. This estimator
1527 is there to satisfy the new third requirement.
1528
f65fd747
UD
1529*/
1530
fa8d436c 1531#define M_MMAP_THRESHOLD -3
f65fd747 1532
fa8d436c 1533#ifndef DEFAULT_MMAP_THRESHOLD
1d05c2fb 1534#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
fa8d436c
UD
1535#endif
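
/*
  Editor's sketch (not part of the original source): the sliding threshold
  described above is adjusted when an mmapped chunk is freed, and only while
  the user has not pinned the threshold via mallopt.  Roughly (mp_ is the
  malloc_par instance defined further below):
*/
#if 0
static void
maybe_raise_mmap_threshold(size_t freed_chunk_size)
{
  if (!mp_.no_dyn_threshold &&
      freed_chunk_size > mp_.mmap_threshold &&
      freed_chunk_size <= DEFAULT_MMAP_THRESHOLD_MAX)
    /* The application frees blocks of this size; treat that size as
       transient and serve future requests of it from brk() instead. */
    mp_.mmap_threshold = freed_chunk_size;
}
#endif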
1536
1537/*
1538 M_MMAP_MAX is the maximum number of requests to simultaneously
1539 service using mmap. This parameter exists because
1540 some systems have a limited number of internal tables for
1541 use by mmap, and using more than a few of them may degrade
1542 performance.
1543
1544 The default is set to a value that serves only as a safeguard.
1545 Setting to 0 disables use of mmap for servicing large requests. If
1546 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1547 to non-zero values in mallopt will fail.
1548*/
f65fd747 1549
fa8d436c
UD
1550#define M_MMAP_MAX -4
1551
1552#ifndef DEFAULT_MMAP_MAX
1553#if HAVE_MMAP
1554#define DEFAULT_MMAP_MAX (65536)
1555#else
1556#define DEFAULT_MMAP_MAX (0)
1557#endif
f65fd747
UD
1558#endif
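
/*
  Editor's usage sketch (not part of the original source): all four of the
  tunables above can be adjusted at run time through mallopt(), declared in
  <malloc.h>.  Note that explicitly setting M_MMAP_THRESHOLD turns off the
  dynamic threshold adjustment described earlier.
*/
#if 0
static void
tune_malloc_parameters(void)
{
  mallopt(M_TRIM_THRESHOLD, 256 * 1024);  /* trim top when 256Kb can be returned */
  mallopt(M_TOP_PAD,        64 * 1024);   /* ask sbrk for 64Kb of extra padding  */
  mallopt(M_MMAP_THRESHOLD, 1024 * 1024); /* mmap requests of 1Mb and larger     */
  mallopt(M_MMAP_MAX,       1024);        /* cap concurrent mmapped regions      */
}
#endif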
1559
fa8d436c 1560#ifdef __cplusplus
3c6904fb 1561} /* end of extern "C" */
fa8d436c 1562#endif
f65fd747 1563
100351c3 1564#include <malloc.h>
f65fd747 1565
fa8d436c
UD
1566#ifndef BOUNDED_N
1567#define BOUNDED_N(ptr, sz) (ptr)
1568#endif
1569#ifndef RETURN_ADDRESS
1570#define RETURN_ADDRESS(X_) (NULL)
9ae6fc54 1571#endif
431c33c0
UD
1572
1573/* On some platforms we can compile internal, not exported functions better.
1574 Let the environment provide a macro and define it to be empty if it
1575 is not available. */
1576#ifndef internal_function
1577# define internal_function
1578#endif
1579
fa8d436c
UD
1580/* Forward declarations. */
1581struct malloc_chunk;
1582typedef struct malloc_chunk* mchunkptr;
431c33c0 1583
fa8d436c 1584/* Internal routines. */
f65fd747 1585
fa8d436c 1586#if __STD_C
f65fd747 1587
f1c5213d
RM
1588Void_t* _int_malloc(mstate, size_t);
1589void _int_free(mstate, Void_t*);
1590Void_t* _int_realloc(mstate, Void_t*, size_t);
1591Void_t* _int_memalign(mstate, size_t, size_t);
1592Void_t* _int_valloc(mstate, size_t);
fa8d436c
UD
1593static Void_t* _int_pvalloc(mstate, size_t);
1594/*static Void_t* cALLOc(size_t, size_t);*/
88764ae2 1595#ifndef _LIBC
fa8d436c
UD
1596static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1597static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
88764ae2 1598#endif
fa8d436c
UD
1599static int mTRIm(size_t);
1600static size_t mUSABLe(Void_t*);
1601static void mSTATs(void);
1602static int mALLOPt(int, int);
1603static struct mallinfo mALLINFo(mstate);
6bf4302e 1604static void malloc_printerr(int action, const char *str, void *ptr);
fa8d436c
UD
1605
1606static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1607static int internal_function top_check(void);
1608static void internal_function munmap_chunk(mchunkptr p);
a9177ff5 1609#if HAVE_MREMAP
fa8d436c 1610static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
a9177ff5 1611#endif
fa8d436c
UD
1612
1613static Void_t* malloc_check(size_t sz, const Void_t *caller);
1614static void free_check(Void_t* mem, const Void_t *caller);
1615static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1616 const Void_t *caller);
1617static Void_t* memalign_check(size_t alignment, size_t bytes,
1618 const Void_t *caller);
1619#ifndef NO_THREADS
fde89ad0
RM
1620# ifdef _LIBC
1621# if USE___THREAD || (defined USE_TLS && !defined SHARED)
1622 /* These routines are never needed in this configuration. */
1623# define NO_STARTER
1624# endif
1625# endif
1626# ifdef NO_STARTER
1627# undef NO_STARTER
1628# else
fa8d436c 1629static Void_t* malloc_starter(size_t sz, const Void_t *caller);
fde89ad0 1630static Void_t* memalign_starter(size_t aln, size_t sz, const Void_t *caller);
fa8d436c 1631static void free_starter(Void_t* mem, const Void_t *caller);
fde89ad0 1632# endif
fa8d436c
UD
1633static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1634static void free_atfork(Void_t* mem, const Void_t *caller);
1635#endif
f65fd747 1636
fa8d436c 1637#else
f65fd747 1638
fa8d436c
UD
1639Void_t* _int_malloc();
1640void _int_free();
1641Void_t* _int_realloc();
1642Void_t* _int_memalign();
1643Void_t* _int_valloc();
1644Void_t* _int_pvalloc();
1645/*static Void_t* cALLOc();*/
1646static Void_t** _int_icalloc();
1647static Void_t** _int_icomalloc();
1648static int mTRIm();
1649static size_t mUSABLe();
1650static void mSTATs();
1651static int mALLOPt();
1652static struct mallinfo mALLINFo();
f65fd747 1653
fa8d436c 1654#endif
f65fd747 1655
f65fd747 1656
f65fd747 1657
f65fd747 1658
fa8d436c 1659/* ------------- Optional versions of memcopy ---------------- */
f65fd747 1660
a1648746 1661
fa8d436c 1662#if USE_MEMCPY
a1648746 1663
a9177ff5 1664/*
fa8d436c
UD
1665 Note: memcpy is ONLY invoked with non-overlapping regions,
1666 so the (usually slower) memmove is not needed.
1667*/
a1648746 1668
fa8d436c
UD
1669#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1670#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
f65fd747 1671
fa8d436c 1672#else /* !USE_MEMCPY */
f65fd747 1673
fa8d436c 1674/* Use Duff's device for good zeroing/copying performance. */
f65fd747 1675
fa8d436c
UD
1676#define MALLOC_ZERO(charp, nbytes) \
1677do { \
1678 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1679 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1680 long mcn; \
1681 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1682 switch (mctmp) { \
1683 case 0: for(;;) { *mzp++ = 0; \
1684 case 7: *mzp++ = 0; \
1685 case 6: *mzp++ = 0; \
1686 case 5: *mzp++ = 0; \
1687 case 4: *mzp++ = 0; \
1688 case 3: *mzp++ = 0; \
1689 case 2: *mzp++ = 0; \
1690 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1691 } \
1692} while(0)
f65fd747 1693
fa8d436c
UD
1694#define MALLOC_COPY(dest,src,nbytes) \
1695do { \
1696 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1697 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1698 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1699 long mcn; \
1700 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1701 switch (mctmp) { \
1702 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1703 case 7: *mcdst++ = *mcsrc++; \
1704 case 6: *mcdst++ = *mcsrc++; \
1705 case 5: *mcdst++ = *mcsrc++; \
1706 case 4: *mcdst++ = *mcsrc++; \
1707 case 3: *mcdst++ = *mcsrc++; \
1708 case 2: *mcdst++ = *mcsrc++; \
1709 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1710 } \
1711} while(0)
f65fd747 1712
f65fd747
UD
1713#endif
1714
fa8d436c 1715/* ------------------ MMAP support ------------------ */
f65fd747 1716
f65fd747 1717
fa8d436c 1718#if HAVE_MMAP
f65fd747 1719
fa8d436c
UD
1720#include <fcntl.h>
1721#ifndef LACKS_SYS_MMAN_H
1722#include <sys/mman.h>
1723#endif
f65fd747 1724
fa8d436c
UD
1725#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1726# define MAP_ANONYMOUS MAP_ANON
1727#endif
1728#if !defined(MAP_FAILED)
1729# define MAP_FAILED ((char*)-1)
1730#endif
f65fd747 1731
fa8d436c
UD
1732#ifndef MAP_NORESERVE
1733# ifdef MAP_AUTORESRV
1734# define MAP_NORESERVE MAP_AUTORESRV
1735# else
1736# define MAP_NORESERVE 0
1737# endif
f65fd747
UD
1738#endif
1739
a9177ff5
RM
1740/*
1741 Nearly all versions of mmap support MAP_ANONYMOUS,
fa8d436c
UD
1742 so the following is unlikely to be needed, but is
1743 supplied just in case.
1744*/
f65fd747 1745
fa8d436c 1746#ifndef MAP_ANONYMOUS
f65fd747 1747
fa8d436c 1748static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
2f6d1f1b 1749
fa8d436c
UD
1750#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1751 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1752 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1753 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
f65fd747 1754
fa8d436c 1755#else
f65fd747 1756
fa8d436c
UD
1757#define MMAP(addr, size, prot, flags) \
1758 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
f65fd747 1759
e9b3e3c5 1760#endif
f65fd747
UD
1761
1762
fa8d436c
UD
1763#endif /* HAVE_MMAP */
1764
1765
f65fd747 1766/*
fa8d436c 1767 ----------------------- Chunk representations -----------------------
f65fd747
UD
1768*/
1769
1770
fa8d436c
UD
1771/*
1772 This struct declaration is misleading (but accurate and necessary).
1773 It declares a "view" into memory allowing access to necessary
1774 fields at known offsets from a given base. See explanation below.
1775*/
1776
1777struct malloc_chunk {
1778
1779 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1780 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1781
1782 struct malloc_chunk* fd; /* double links -- used only if free. */
f65fd747
UD
1783 struct malloc_chunk* bk;
1784};
1785
f65fd747
UD
1786
1787/*
f65fd747
UD
1788 malloc_chunk details:
1789
1790 (The following includes lightly edited explanations by Colin Plumb.)
1791
1792 Chunks of memory are maintained using a `boundary tag' method as
1793 described in e.g., Knuth or Standish. (See the paper by Paul
1794 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1795 survey of such techniques.) Sizes of free chunks are stored both
1796 in the front of each chunk and at the end. This makes
1797 consolidating fragmented chunks into bigger chunks very fast. The
1798 size fields also hold bits representing whether chunks are free or
1799 in use.
1800
1801 An allocated chunk looks like this:
1802
1803
1804 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1805 | Size of previous chunk, if allocated | |
1806 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
8088488d 1807 | Size of chunk, in bytes |M|P|
f65fd747
UD
1808 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1809 | User data starts here... .
1810 . .
9ea9af19 1811 . (malloc_usable_size() bytes) .
f65fd747
UD
1812 . |
1813nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1814 | Size of chunk |
1815 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1816
1817
1818 Where "chunk" is the front of the chunk for the purpose of most of
1819 the malloc code, but "mem" is the pointer that is returned to the
1820 user. "Nextchunk" is the beginning of the next contiguous chunk.
1821
fa8d436c 1822 Chunks always begin on even word boundaries, so the mem portion
f65fd747 1823 (which is returned to the user) is also on an even word boundary, and
fa8d436c 1824 thus at least double-word aligned.
f65fd747
UD
1825
1826 Free chunks are stored in circular doubly-linked lists, and look like this:
1827
1828 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1829 | Size of previous chunk |
1830 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1831 `head:' | Size of chunk, in bytes |P|
1832 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1833 | Forward pointer to next chunk in list |
1834 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1835 | Back pointer to previous chunk in list |
1836 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1837 | Unused space (may be 0 bytes long) .
1838 . .
1839 . |
1840nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1841 `foot:' | Size of chunk, in bytes |
1842 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1843
1844 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1845 chunk size (which is always a multiple of two words), is an in-use
1846 bit for the *previous* chunk. If that bit is *clear*, then the
1847 word before the current chunk size contains the previous chunk
1848 size, and can be used to find the front of the previous chunk.
fa8d436c
UD
1849 The very first chunk allocated always has this bit set,
1850 preventing access to non-existent (or non-owned) memory. If
1851 prev_inuse is set for any given chunk, then you CANNOT determine
1852 the size of the previous chunk, and might even get a memory
1853 addressing fault when trying to do so.
f65fd747
UD
1854
1855 Note that the `foot' of the current chunk is actually represented
fa8d436c
UD
1856 as the prev_size of the NEXT chunk. This makes it easier to
1857 deal with alignments etc but can be very confusing when trying
1858 to extend or adapt this code.
f65fd747
UD
1859
1860 The two exceptions to all this are
1861
fa8d436c
UD
1862 1. The special chunk `top' doesn't bother using the
1863 trailing size field since there is no next contiguous chunk
1864 that would have to index off it. After initialization, `top'
1865 is forced to always exist. If it would become less than
1866 MINSIZE bytes long, it is replenished.
f65fd747
UD
1867
1868 2. Chunks allocated via mmap, which have the second-lowest-order
8088488d 1869 bit M (IS_MMAPPED) set in their size fields. Because they are
fa8d436c 1870 allocated one-by-one, each must contain its own trailing size field.
f65fd747
UD
1871
1872*/
1873
1874/*
fa8d436c
UD
1875 ---------- Size and alignment checks and conversions ----------
1876*/
f65fd747 1877
fa8d436c 1878/* conversion from malloc headers to user pointers, and back */
f65fd747 1879
fa8d436c
UD
1880#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1881#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
f65fd747 1882
fa8d436c
UD
1883/* The smallest possible chunk */
1884#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
f65fd747 1885
fa8d436c 1886/* The smallest size we can malloc is an aligned minimal chunk */
f65fd747 1887
fa8d436c
UD
1888#define MINSIZE \
1889 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
f65fd747 1890
fa8d436c 1891/* Check if m has acceptable alignment */
f65fd747 1892
073f560e
UD
1893#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1894
1895#define misaligned_chunk(p) \
1896 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1897 & MALLOC_ALIGN_MASK)
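
#if 0
/* Editor's illustration (not part of the original source): the user pointer
   is exactly 2*SIZE_SZ bytes past the chunk header, skipping the prev_size
   and size fields, and the two conversion macros are exact inverses. */
static void
example_chunk2mem(mchunkptr p)
{
  Void_t* mem = chunk2mem(p);
  assert((char*)mem == (char*)p + 2*SIZE_SZ);
  assert(mem2chunk(mem) == p);
}
#endif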
f65fd747 1898
f65fd747 1899
a9177ff5 1900/*
fa8d436c
UD
1901 Check if a request is so large that it would wrap around zero when
1902 padded and aligned. To simplify some other code, the bound is made
1903 low enough so that adding MINSIZE will also not wrap around zero.
1904*/
f65fd747 1905
fa8d436c
UD
1906#define REQUEST_OUT_OF_RANGE(req) \
1907 ((unsigned long)(req) >= \
a9177ff5 1908 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
f65fd747 1909
fa8d436c 1910/* pad request bytes into a usable size -- internal version */
f65fd747 1911
fa8d436c
UD
1912#define request2size(req) \
1913 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1914 MINSIZE : \
1915 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
f65fd747 1916
fa8d436c 1917/* Same, except also perform argument check */
f65fd747 1918
fa8d436c
UD
1919#define checked_request2size(req, sz) \
1920 if (REQUEST_OUT_OF_RANGE(req)) { \
1921 MALLOC_FAILURE_ACTION; \
1922 return 0; \
1923 } \
a9177ff5 1924 (sz) = request2size(req);
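
#if 0
/* Editor's worked examples (not part of the original source), assuming
   SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8, i.e. a typical 32-bit build. */
static void
example_request2size(void)
{
  assert(request2size(20) == 24);      /* (20 + 4 + 7) & ~7               */
  assert(request2size(24) == 32);      /* overhead pushes it up one step  */
  assert(request2size(1) == MINSIZE);  /* tiny requests pad up to MINSIZE */
}
#endif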
f65fd747
UD
1925
1926/*
fa8d436c 1927 --------------- Physical chunk operations ---------------
f65fd747
UD
1928*/
1929
10dc2a90 1930
fa8d436c
UD
1931/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1932#define PREV_INUSE 0x1
f65fd747 1933
fa8d436c
UD
1934/* extract inuse bit of previous chunk */
1935#define prev_inuse(p) ((p)->size & PREV_INUSE)
f65fd747 1936
f65fd747 1937
fa8d436c
UD
1938/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1939#define IS_MMAPPED 0x2
f65fd747 1940
fa8d436c
UD
1941/* check for mmap()'ed chunk */
1942#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
f65fd747 1943
f65fd747 1944
fa8d436c
UD
1945/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1946 from a non-main arena. This is only set immediately before handing
1947 the chunk to the user, if necessary. */
1948#define NON_MAIN_ARENA 0x4
f65fd747 1949
fa8d436c
UD
1950/* check for chunk from non-main arena */
1951#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
f65fd747
UD
1952
1953
a9177ff5
RM
1954/*
1955 Bits to mask off when extracting size
f65fd747 1956
fa8d436c
UD
1957 Note: IS_MMAPPED is intentionally not masked off from size field in
1958 macros for which mmapped chunks should never be seen. This should
1959 cause helpful core dumps to occur if it is tried by accident by
1960 people extending or adapting this malloc.
f65fd747 1961*/
fa8d436c 1962#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
f65fd747 1963
fa8d436c
UD
1964/* Get size, ignoring use bits */
1965#define chunksize(p) ((p)->size & ~(SIZE_BITS))
f65fd747 1966
f65fd747 1967
fa8d436c
UD
1968/* Ptr to next physical malloc_chunk. */
1969#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
f65fd747 1970
fa8d436c
UD
1971/* Ptr to previous physical malloc_chunk */
1972#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
f65fd747 1973
fa8d436c
UD
1974/* Treat space at ptr + offset as a chunk */
1975#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1976
1977/* extract p's inuse bit */
1978#define inuse(p)\
1979((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
f65fd747 1980
fa8d436c
UD
1981/* set/clear chunk as being inuse without otherwise disturbing */
1982#define set_inuse(p)\
1983((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
f65fd747 1984
fa8d436c
UD
1985#define clear_inuse(p)\
1986((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
f65fd747
UD
1987
1988
fa8d436c
UD
1989/* check/set/clear inuse bits in known places */
1990#define inuse_bit_at_offset(p, s)\
1991 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
f65fd747 1992
fa8d436c
UD
1993#define set_inuse_bit_at_offset(p, s)\
1994 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
f65fd747 1995
fa8d436c
UD
1996#define clear_inuse_bit_at_offset(p, s)\
1997 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
f65fd747 1998
f65fd747 1999
fa8d436c
UD
2000/* Set size at head, without disturbing its use bit */
2001#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
f65fd747 2002
fa8d436c
UD
2003/* Set size/use field */
2004#define set_head(p, s) ((p)->size = (s))
f65fd747 2005
fa8d436c
UD
2006/* Set size at footer (only when chunk is not in use) */
2007#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
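
#if 0
/* Editor's illustration (not part of the original source): the low three
   bits of the size field are flags, and a chunk's own inuse bit lives in
   the header of the NEXT physical chunk. */
static void
example_chunk_bits(mchunkptr p)
{
  unsigned long sz = chunksize(p);         /* size with flag bits masked off */
  mchunkptr nxt = chunk_at_offset(p, sz);  /* same address as next_chunk(p)  */

  /* Whether p itself is in use is recorded in the next chunk's header;
     p's own PREV_INUSE bit describes the chunk physically before it. */
  assert(inuse(p) == (nxt->size & PREV_INUSE));
  assert(next_chunk(p) == nxt);
}
#endif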
f65fd747
UD
2008
2009
fa8d436c
UD
2010/*
2011 -------------------- Internal data structures --------------------
2012
2013 All internal state is held in an instance of malloc_state defined
2014 below. There are no other static variables, except in two optional
a9177ff5
RM
2015 cases:
2016 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
fa8d436c
UD
2017 * If HAVE_MMAP is true, but mmap doesn't support
2018 MAP_ANONYMOUS, a dummy file descriptor for mmap.
2019
2020 Beware of lots of tricks that minimize the total bookkeeping space
2021 requirements. The result is a little over 1K bytes (for 4-byte
2022 pointers and size_t).
2023*/
f65fd747
UD
2024
2025/*
fa8d436c
UD
2026 Bins
2027
2028 An array of bin headers for free chunks. Each bin is doubly
2029 linked. The bins are approximately proportionally (log) spaced.
2030 There are a lot of these bins (128). This may look excessive, but
2031 works very well in practice. Most bins hold sizes that are
2032 unusual as malloc request sizes, but are more usual for fragments
2033 and consolidated sets of chunks, which is what these bins hold, so
2034 they can be found quickly. All procedures maintain the invariant
2035 that no consolidated chunk physically borders another one, so each
2036 chunk in a list is known to be preceded and followed by either
2037 inuse chunks or the ends of memory.
2038
2039 Chunks in bins are kept in size order, with ties going to the
2040 approximately least recently used chunk. Ordering isn't needed
2041 for the small bins, which all contain the same-sized chunks, but
2042 facilitates best-fit allocation for larger chunks. These lists
2043 are just sequential. Keeping them in order almost never requires
2044 enough traversal to warrant using fancier ordered data
a9177ff5 2045 structures.
fa8d436c
UD
2046
2047 Chunks of the same size are linked with the most
2048 recently freed at the front, and allocations are taken from the
2049 back. This results in LRU (FIFO) allocation order, which tends
2050 to give each chunk an equal opportunity to be consolidated with
2051 adjacent freed chunks, resulting in larger free chunks and less
2052 fragmentation.
2053
2054 To simplify use in double-linked lists, each bin header acts
2055 as a malloc_chunk. This avoids special-casing for headers.
2056 But to conserve space and improve locality, we allocate
2057 only the fd/bk pointers of bins, and then use repositioning tricks
a9177ff5 2058 to treat these as the fields of a malloc_chunk*.
f65fd747
UD
2059*/
2060
fa8d436c 2061typedef struct malloc_chunk* mbinptr;
f65fd747 2062
fa8d436c 2063/* addressing -- note that bin_at(0) does not exist */
41999a1a
UD
2064#define bin_at(m, i) \
2065 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
2066 - offsetof (struct malloc_chunk, fd))
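
#if 0
/* Editor's illustration (not part of the original source) of the
   repositioning trick: bin i only owns the two pointers bins[2*i-2] and
   bins[2*i-1] in the malloc_state defined further below; the fake chunk's
   prev_size/size fields alias the previous bin and must never be written. */
static void
example_bin_at(mstate m)
{
  mbinptr b = bin_at(m, 2);        /* the first small bin */
  assert(&b->fd == &m->bins[2]);
  assert(&b->bk == &m->bins[3]);
}
#endif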
f65fd747 2067
fa8d436c
UD
2068/* analog of ++bin */
2069#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
f65fd747 2070
fa8d436c
UD
2071/* Reminders about list directionality within bins */
2072#define first(b) ((b)->fd)
2073#define last(b) ((b)->bk)
f65fd747 2074
fa8d436c
UD
2075/* Take a chunk off a bin list */
2076#define unlink(P, BK, FD) { \
2077 FD = P->fd; \
2078 BK = P->bk; \
3e030bd5 2079 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
6bf4302e
UD
2080 malloc_printerr (check_action, "corrupted double-linked list", P); \
2081 else { \
2082 FD->bk = BK; \
2083 BK->fd = FD; \
2084 } \
fa8d436c 2085}
f65fd747 2086
fa8d436c
UD
2087/*
2088 Indexing
2089
2090 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
2091 8 bytes apart. Larger bins are approximately logarithmically spaced:
f65fd747 2092
fa8d436c
UD
2093 64 bins of size 8
2094 32 bins of size 64
2095 16 bins of size 512
2096 8 bins of size 4096
2097 4 bins of size 32768
2098 2 bins of size 262144
2099 1 bin of size what's left
f65fd747 2100
fa8d436c
UD
2101 There is actually a little bit of slop in the numbers in bin_index
2102 for the sake of speed. This makes no difference elsewhere.
f65fd747 2103
fa8d436c
UD
2104 The bins top out around 1MB because we expect to service large
2105 requests via mmap.
2106*/
f65fd747 2107
fa8d436c
UD
2108#define NBINS 128
2109#define NSMALLBINS 64
2110#define SMALLBIN_WIDTH 8
2111#define MIN_LARGE_SIZE 512
f65fd747 2112
fa8d436c
UD
2113#define in_smallbin_range(sz) \
2114 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
f65fd747 2115
fa8d436c 2116#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
f65fd747 2117
fa8d436c
UD
2118#define largebin_index(sz) \
2119(((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
2120 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2121 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2122 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2123 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2124 126)
f65fd747 2125
fa8d436c
UD
2126#define bin_index(sz) \
2127 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
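
#if 0
/* Editor's worked examples (not part of the original source); the arguments
   are chunk sizes (including overhead), not request sizes. */
static void
example_bin_index(void)
{
  assert(bin_index(16) == 2);      /* smallest real chunk: small bin 16>>3 */
  assert(bin_index(504) == 63);    /* last small bin below MIN_LARGE_SIZE  */
  assert(bin_index(512) == 64);    /* first large bin: 56 + (512 >> 6)     */
  assert(bin_index(3000) == 96);   /* 91 + (3000 >> 9)                     */
}
#endif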
f65fd747 2128
f65fd747
UD
2129
2130/*
fa8d436c
UD
2131 Unsorted chunks
2132
2133 All remainders from chunk splits, as well as all returned chunks,
2134 are first placed in the "unsorted" bin. They are then placed
2135 in regular bins after malloc gives them ONE chance to be used before
2136 binning. So, basically, the unsorted_chunks list acts as a queue,
2137 with chunks being placed on it in free (and malloc_consolidate),
2138 and taken off (to be either used or placed in bins) in malloc.
2139
2140 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
2141 does not have to be taken into account in size comparisons.
f65fd747
UD
2142*/
2143
fa8d436c
UD
2144/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2145#define unsorted_chunks(M) (bin_at(M, 1))
f65fd747 2146
fa8d436c
UD
2147/*
2148 Top
2149
2150 The top-most available chunk (i.e., the one bordering the end of
2151 available memory) is treated specially. It is never included in
2152 any bin, is used only if no other chunk is available, and is
2153 released back to the system if it is very large (see
2154 M_TRIM_THRESHOLD). Because top initially
2155 points to its own bin with initial zero size, thus forcing
2156 extension on the first malloc request, we avoid having any special
2157 code in malloc to check whether it even exists yet. But we still
2158 need to do so when getting memory from the system, so we make
2159 initial_top treat the bin as a legal but unusable chunk during the
2160 interval between initialization and the first call to
2161 sYSMALLOc. (This is somewhat delicate, since it relies on
2162 the 2 preceding words to be zero during this interval as well.)
2163*/
f65fd747 2164
fa8d436c
UD
2165/* Conveniently, the unsorted bin can be used as dummy top on first call */
2166#define initial_top(M) (unsorted_chunks(M))
f65fd747 2167
fa8d436c
UD
2168/*
2169 Binmap
f65fd747 2170
fa8d436c
UD
2171 To help compensate for the large number of bins, a one-level index
2172 structure is used for bin-by-bin searching. `binmap' is a
2173 bitvector recording whether bins are definitely empty so they can
2174 be skipped over during traversals. The bits are NOT always
2175 cleared as soon as bins are empty, but instead only
2176 when they are noticed to be empty during traversal in malloc.
2177*/
f65fd747 2178
fa8d436c
UD
2179/* Conservatively use 32 bits per map word, even on a 64-bit system */
2180#define BINMAPSHIFT 5
2181#define BITSPERMAP (1U << BINMAPSHIFT)
2182#define BINMAPSIZE (NBINS / BITSPERMAP)
f65fd747 2183
fa8d436c
UD
2184#define idx2block(i) ((i) >> BINMAPSHIFT)
2185#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
f65fd747 2186
fa8d436c
UD
2187#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2188#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2189#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
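
#if 0
/* Editor's illustration (not part of the original source): with NBINS == 128
   and 32 bits per map word, the binmap is 4 words; bin i maps to word i>>5
   and bit i&31 within that word. */
static void
example_binmap(void)
{
  assert(BINMAPSIZE == 4);
  assert(idx2block(70) == 2);
  assert(idx2bit(70) == (1U << 6));
}
#endif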
f65fd747 2190
fa8d436c
UD
2191/*
2192 Fastbins
2193
2194 An array of lists holding recently freed small chunks. Fastbins
2195 are not doubly linked. It is faster to single-link them, and
2196 since chunks are never removed from the middles of these lists,
2197 double linking is not necessary. Also, unlike regular bins, they
2198 are not even processed in FIFO order (they use faster LIFO) since
2199 ordering doesn't much matter in the transient contexts in which
2200 fastbins are normally used.
2201
2202 Chunks in fastbins keep their inuse bit set, so they cannot
2203 be consolidated with other free chunks. malloc_consolidate
2204 releases all chunks in fastbins and consolidates them with
a9177ff5 2205 other free chunks.
fa8d436c 2206*/
f65fd747 2207
fa8d436c 2208typedef struct malloc_chunk* mfastbinptr;
f65fd747 2209
fa8d436c
UD
2210/* offset 2 to use otherwise unindexable first 2 bins */
2211#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
f65fd747 2212
fa8d436c
UD
2213/* The maximum fastbin request size we support */
2214#define MAX_FAST_SIZE 80
f65fd747 2215
fa8d436c 2216#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
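
#if 0
/* Editor's worked examples (not part of the original source), assuming
   SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8 (a typical 32-bit build). */
static void
example_fastbin_index(void)
{
  assert(fastbin_index(16) == 0);   /* the smallest possible chunk */
  assert(fastbin_index(24) == 1);
  /* MAX_FAST_SIZE == 80 pads to an 88-byte chunk, giving 10 fastbins. */
  assert(fastbin_index(request2size(MAX_FAST_SIZE)) == 9);
  assert(NFASTBINS == 10);
}
#endif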
f65fd747
UD
2217
2218/*
fa8d436c
UD
2219 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2220 that triggers automatic consolidation of possibly-surrounding
2221 fastbin chunks. This is a heuristic, so the exact value should not
2222 matter too much. It is defined at half the default trim threshold as a
2223 compromise heuristic to only attempt consolidation if it is likely
2224 to lead to trimming. However, it is not dynamically tunable, since
a9177ff5 2225 consolidation reduces fragmentation surrounding large chunks even
fa8d436c 2226 if trimming is not used.
f65fd747
UD
2227*/
2228
fa8d436c 2229#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
f65fd747
UD
2230
2231/*
a9177ff5 2232 Since the lowest 2 bits in max_fast don't matter in size comparisons,
fa8d436c 2233 they are used as flags.
f65fd747
UD
2234*/
2235
fa8d436c
UD
2236/*
2237 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2238 some fastbin chunks. It is set true on entering a chunk into any
2239 fastbin, and cleared only in malloc_consolidate.
f65fd747 2240
fa8d436c
UD
2241 The truth value is inverted so that have_fastchunks will be true
2242 upon startup (since statics are zero-filled), simplifying
2243 initialization checks.
2244*/
f65fd747 2245
fa8d436c 2246#define FASTCHUNKS_BIT (1U)
f65fd747 2247
9bf248c6
UD
2248#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
2249#define clear_fastchunks(M) ((M)->flags |= FASTCHUNKS_BIT)
2250#define set_fastchunks(M) ((M)->flags &= ~FASTCHUNKS_BIT)
f65fd747
UD
2251
2252/*
fa8d436c
UD
2253 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2254 regions. Otherwise, contiguity is exploited in merging together,
2255 when possible, results from consecutive MORECORE calls.
f65fd747 2256
fa8d436c
UD
2257 The initial value comes from MORECORE_CONTIGUOUS, but is
2258 changed dynamically if mmap is ever used as an sbrk substitute.
f65fd747
UD
2259*/
2260
fa8d436c 2261#define NONCONTIGUOUS_BIT (2U)
f65fd747 2262
9bf248c6
UD
2263#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
2264#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
2265#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
2266#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
f65fd747 2267
a9177ff5
RM
2268/*
2269 Set value of max_fast.
fa8d436c
UD
2270 Use impossibly small value if 0.
2271 Precondition: there are no existing fastbin chunks.
2272 Setting the value clears fastchunk bit but preserves noncontiguous bit.
f65fd747
UD
2273*/
2274
9bf248c6
UD
2275#define set_max_fast(s) \
2276 global_max_fast = ((s) == 0)? SMALLBIN_WIDTH: request2size(s)
2277#define get_max_fast() global_max_fast
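
#if 0
/* Editor's illustration (not part of the original source), again assuming
   SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8.  This is roughly what
   mallopt(M_MXFAST, n) boils down to. */
static void
example_set_max_fast(void)
{
  set_max_fast(64);                          /* requests up to 64 bytes ...    */
  assert(get_max_fast() == 72);              /* ... i.e. chunks up to 72 bytes */

  set_max_fast(0);                           /* SMALLBIN_WIDTH < MINSIZE, so   */
  assert(get_max_fast() == SMALLBIN_WIDTH);  /* no chunk can ever qualify      */
}
#endif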
f65fd747 2278
f65fd747
UD
2279
2280/*
fa8d436c 2281 ----------- Internal state representation and initialization -----------
f65fd747
UD
2282*/
2283
fa8d436c
UD
2284struct malloc_state {
2285 /* Serialize access. */
2286 mutex_t mutex;
9bf248c6
UD
2287
2288 /* Flags (formerly in max_fast). */
2289 int flags;
f65fd747 2290
4f27c496 2291#if THREAD_STATS
fa8d436c
UD
2292 /* Statistics for locking. Only used if THREAD_STATS is defined. */
2293 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
4f27c496 2294#endif
f65fd747 2295
fa8d436c
UD
2296 /* Fastbins */
2297 mfastbinptr fastbins[NFASTBINS];
f65fd747 2298
fa8d436c
UD
2299 /* Base of the topmost chunk -- not otherwise kept in a bin */
2300 mchunkptr top;
f65fd747 2301
fa8d436c
UD
2302 /* The remainder from the most recent split of a small request */
2303 mchunkptr last_remainder;
f65fd747 2304
fa8d436c 2305 /* Normal bins packed as described above */
41999a1a 2306 mchunkptr bins[NBINS * 2 - 2];
f65fd747 2307
fa8d436c
UD
2308 /* Bitmap of bins */
2309 unsigned int binmap[BINMAPSIZE];
f65fd747 2310
fa8d436c
UD
2311 /* Linked list */
2312 struct malloc_state *next;
f65fd747 2313
fa8d436c
UD
2314 /* Memory allocated from the system in this arena. */
2315 INTERNAL_SIZE_T system_mem;
2316 INTERNAL_SIZE_T max_system_mem;
2317};
f65fd747 2318
fa8d436c
UD
2319struct malloc_par {
2320 /* Tunable parameters */
2321 unsigned long trim_threshold;
2322 INTERNAL_SIZE_T top_pad;
2323 INTERNAL_SIZE_T mmap_threshold;
2324
2325 /* Memory map support */
2326 int n_mmaps;
2327 int n_mmaps_max;
2328 int max_n_mmaps;
1d05c2fb
UD
2329 /* the mmap_threshold is dynamic, until the user sets
2330 it manually, at which point we need to disable any
2331 dynamic behavior. */
2332 int no_dyn_threshold;
fa8d436c
UD
2333
2334 /* Cache malloc_getpagesize */
a9177ff5 2335 unsigned int pagesize;
fa8d436c
UD
2336
2337 /* Statistics */
2338 INTERNAL_SIZE_T mmapped_mem;
2339 /*INTERNAL_SIZE_T sbrked_mem;*/
2340 /*INTERNAL_SIZE_T max_sbrked_mem;*/
2341 INTERNAL_SIZE_T max_mmapped_mem;
2342 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
2343
2344 /* First address handed out by MORECORE/sbrk. */
2345 char* sbrk_base;
2346};
f65fd747 2347
fa8d436c
UD
2348/* There are several instances of this struct ("arenas") in this
2349 malloc. If you are adapting this malloc in a way that does NOT use
2350 a static or mmapped malloc_state, you MUST explicitly zero-fill it
2351 before using. This malloc relies on the property that malloc_state
2352 is initialized to all zeroes (as is true of C statics). */
f65fd747 2353
fa8d436c 2354static struct malloc_state main_arena;
f65fd747 2355
fa8d436c 2356/* There is only one instance of the malloc parameters. */
f65fd747 2357
fa8d436c 2358static struct malloc_par mp_;
f65fd747 2359
9bf248c6
UD
2360
2361/* Maximum size of memory handled in fastbins. */
2362static INTERNAL_SIZE_T global_max_fast;
2363
fa8d436c
UD
2364/*
2365 Initialize a malloc_state struct.
f65fd747 2366
fa8d436c
UD
2367 This is called only from within malloc_consolidate, which needs
2368 to be called in the same contexts anyway. It is never called directly
2369 outside of malloc_consolidate because some optimizing compilers try
2370 to inline it at all call points, which turns out not to be an
2371 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2372*/
f65fd747 2373
fa8d436c
UD
2374#if __STD_C
2375static void malloc_init_state(mstate av)
2376#else
2377static void malloc_init_state(av) mstate av;
2378#endif
2379{
2380 int i;
2381 mbinptr bin;
a9177ff5 2382
fa8d436c 2383 /* Establish circular links for normal bins */
a9177ff5 2384 for (i = 1; i < NBINS; ++i) {
fa8d436c
UD
2385 bin = bin_at(av,i);
2386 bin->fd = bin->bk = bin;
2387 }
f65fd747 2388
fa8d436c
UD
2389#if MORECORE_CONTIGUOUS
2390 if (av != &main_arena)
2391#endif
2392 set_noncontiguous(av);
9bf248c6
UD
2393 if (av == &main_arena)
2394 set_max_fast(DEFAULT_MXFAST);
2395 av->flags |= FASTCHUNKS_BIT;
f65fd747 2396
fa8d436c
UD
2397 av->top = initial_top(av);
2398}
e9b3e3c5 2399
a9177ff5 2400/*
fa8d436c
UD
2401 Other internal utilities operating on mstates
2402*/
f65fd747 2403
fa8d436c
UD
2404#if __STD_C
2405static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2406static int sYSTRIm(size_t, mstate);
2407static void malloc_consolidate(mstate);
88764ae2 2408#ifndef _LIBC
fa8d436c 2409static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
88764ae2 2410#endif
831372e7 2411#else
fa8d436c
UD
2412static Void_t* sYSMALLOc();
2413static int sYSTRIm();
2414static void malloc_consolidate();
2415static Void_t** iALLOc();
831372e7 2416#endif
7e3be507 2417
404d4cef
RM
2418
2419/* -------------- Early definitions for debugging hooks ---------------- */
2420
2421/* Define and initialize the hook variables. These weak definitions must
2422 appear before any use of the variables in a function (arena.c uses one). */
2423#ifndef weak_variable
2424#ifndef _LIBC
2425#define weak_variable /**/
2426#else
2427/* In GNU libc we want the hook variables to be weak definitions to
2428 avoid a problem with Emacs. */
2429#define weak_variable weak_function
2430#endif
2431#endif
2432
2433/* Forward declarations. */
2434static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
2435 const __malloc_ptr_t caller));
2436static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
2437 const __malloc_ptr_t caller));
2438static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
2439 const __malloc_ptr_t caller));
2440
06d6611a
UD
2441void weak_variable (*__malloc_initialize_hook) (void) = NULL;
2442void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
2443 const __malloc_ptr_t) = NULL;
404d4cef 2444__malloc_ptr_t weak_variable (*__malloc_hook)
06d6611a 2445 (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
404d4cef 2446__malloc_ptr_t weak_variable (*__realloc_hook)
06d6611a 2447 (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
404d4cef
RM
2448 = realloc_hook_ini;
2449__malloc_ptr_t weak_variable (*__memalign_hook)
06d6611a 2450 (size_t __alignment, size_t __size, const __malloc_ptr_t)
404d4cef 2451 = memalign_hook_ini;
06d6611a 2452void weak_variable (*__after_morecore_hook) (void) = NULL;
404d4cef
RM
2453
2454
3e030bd5
UD
2455/* ---------------- Error behavior ------------------------------------ */
2456
2457#ifndef DEFAULT_CHECK_ACTION
2458#define DEFAULT_CHECK_ACTION 3
2459#endif
2460
2461static int check_action = DEFAULT_CHECK_ACTION;
2462
2463
854278df
UD
2464/* ------------------ Testing support ----------------------------------*/
2465
2466static int perturb_byte;
2467
2468#define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
2469#define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
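
#if 0
/* Editor's usage sketch (not part of the original source): perturb_byte is
   set through mallopt(M_PERTURB, value), declared in <malloc.h>.  With a
   non-zero value, newly allocated bytes are filled with value ^ 0xff and
   bytes being freed with value, which helps flush out reads of
   uninitialized or already-freed memory during testing. */
static void
example_perturb(void)
{
  mallopt(M_PERTURB, 0xaa);
  char *p = malloc(16);   /* contents now 0x55 rather than indeterminate */
  free(p);                /* bytes overwritten with 0xaa before reuse    */
}
#endif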
2470
2471
fa8d436c
UD
2472/* ------------------- Support for multiple arenas -------------------- */
2473#include "arena.c"
f65fd747 2474
fa8d436c
UD
2475/*
2476 Debugging support
f65fd747 2477
fa8d436c
UD
2478 These routines make a number of assertions about the states
2479 of data structures that should be true at all times. If any
2480 are not true, it's very likely that a user program has somehow
2481 trashed memory. (It's also possible that there is a coding error
2482 in malloc, in which case please report it!)
2483*/
ee74a442 2484
fa8d436c 2485#if ! MALLOC_DEBUG
d8f00d46 2486
fa8d436c
UD
2487#define check_chunk(A,P)
2488#define check_free_chunk(A,P)
2489#define check_inuse_chunk(A,P)
2490#define check_remalloced_chunk(A,P,N)
2491#define check_malloced_chunk(A,P,N)
2492#define check_malloc_state(A)
d8f00d46 2493
fa8d436c 2494#else
ca34d7a7 2495
fa8d436c
UD
2496#define check_chunk(A,P) do_check_chunk(A,P)
2497#define check_free_chunk(A,P) do_check_free_chunk(A,P)
2498#define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2499#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2500#define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2501#define check_malloc_state(A) do_check_malloc_state(A)
ca34d7a7 2502
fa8d436c
UD
2503/*
2504 Properties of all chunks
2505*/
ca34d7a7 2506
fa8d436c
UD
2507#if __STD_C
2508static void do_check_chunk(mstate av, mchunkptr p)
2509#else
2510static void do_check_chunk(av, p) mstate av; mchunkptr p;
ca34d7a7 2511#endif
ca34d7a7 2512{
fa8d436c
UD
2513 unsigned long sz = chunksize(p);
2514 /* min and max possible addresses assuming contiguous allocation */
2515 char* max_address = (char*)(av->top) + chunksize(av->top);
2516 char* min_address = max_address - av->system_mem;
2517
2518 if (!chunk_is_mmapped(p)) {
a9177ff5 2519
fa8d436c
UD
2520 /* Has legal address ... */
2521 if (p != av->top) {
2522 if (contiguous(av)) {
2523 assert(((char*)p) >= min_address);
2524 assert(((char*)p + sz) <= ((char*)(av->top)));
2525 }
2526 }
2527 else {
2528 /* top size is always at least MINSIZE */
2529 assert((unsigned long)(sz) >= MINSIZE);
2530 /* top predecessor always marked inuse */
2531 assert(prev_inuse(p));
2532 }
a9177ff5 2533
ca34d7a7 2534 }
fa8d436c
UD
2535 else {
2536#if HAVE_MMAP
2537 /* address is outside main heap */
2538 if (contiguous(av) && av->top != initial_top(av)) {
2539 assert(((char*)p) < min_address || ((char*)p) > max_address);
2540 }
2541 /* chunk is page-aligned */
2542 assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
2543 /* mem is aligned */
2544 assert(aligned_OK(chunk2mem(p)));
2545#else
2546 /* force an appropriate assert violation if debug set */
2547 assert(!chunk_is_mmapped(p));
eb406346 2548#endif
eb406346 2549 }
eb406346
UD
2550}
2551
fa8d436c
UD
2552/*
2553 Properties of free chunks
2554*/
ee74a442 2555
fa8d436c
UD
2556#if __STD_C
2557static void do_check_free_chunk(mstate av, mchunkptr p)
2558#else
2559static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
10dc2a90 2560#endif
67c94753 2561{
fa8d436c
UD
2562 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2563 mchunkptr next = chunk_at_offset(p, sz);
67c94753 2564
fa8d436c 2565 do_check_chunk(av, p);
67c94753 2566
fa8d436c
UD
2567 /* Chunk must claim to be free ... */
2568 assert(!inuse(p));
2569 assert (!chunk_is_mmapped(p));
67c94753 2570
fa8d436c
UD
2571 /* Unless a special marker, must have OK fields */
2572 if ((unsigned long)(sz) >= MINSIZE)
2573 {
2574 assert((sz & MALLOC_ALIGN_MASK) == 0);
2575 assert(aligned_OK(chunk2mem(p)));
2576 /* ... matching footer field */
2577 assert(next->prev_size == sz);
2578 /* ... and is fully consolidated */
2579 assert(prev_inuse(p));
2580 assert (next == av->top || inuse(next));
2581
2582 /* ... and has minimally sane links */
2583 assert(p->fd->bk == p);
2584 assert(p->bk->fd == p);
2585 }
2586 else /* markers are always of size SIZE_SZ */
2587 assert(sz == SIZE_SZ);
67c94753 2588}
67c94753 2589
fa8d436c
UD
2590/*
2591 Properties of inuse chunks
2592*/
2593
2594#if __STD_C
2595static void do_check_inuse_chunk(mstate av, mchunkptr p)
f65fd747 2596#else
fa8d436c 2597static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
f65fd747
UD
2598#endif
2599{
fa8d436c 2600 mchunkptr next;
f65fd747 2601
fa8d436c 2602 do_check_chunk(av, p);
f65fd747 2603
fa8d436c
UD
2604 if (chunk_is_mmapped(p))
2605 return; /* mmapped chunks have no next/prev */
ca34d7a7 2606
fa8d436c
UD
2607 /* Check whether it claims to be in use ... */
2608 assert(inuse(p));
10dc2a90 2609
fa8d436c 2610 next = next_chunk(p);
10dc2a90 2611
fa8d436c
UD
2612 /* ... and is surrounded by OK chunks.
2613 Since more things can be checked with free chunks than inuse ones,
2614 if an inuse chunk borders them and debug is on, it's worth doing them.
2615 */
2616 if (!prev_inuse(p)) {
2617 /* Note that we cannot even look at prev unless it is not inuse */
2618 mchunkptr prv = prev_chunk(p);
2619 assert(next_chunk(prv) == p);
2620 do_check_free_chunk(av, prv);
2621 }
2622
2623 if (next == av->top) {
2624 assert(prev_inuse(next));
2625 assert(chunksize(next) >= MINSIZE);
2626 }
2627 else if (!inuse(next))
2628 do_check_free_chunk(av, next);
10dc2a90
UD
2629}
2630
fa8d436c
UD
2631/*
2632 Properties of chunks recycled from fastbins
2633*/
2634
10dc2a90 2635#if __STD_C
fa8d436c 2636static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2637#else
fa8d436c
UD
2638static void do_check_remalloced_chunk(av, p, s)
2639mstate av; mchunkptr p; INTERNAL_SIZE_T s;
a2b08ee5 2640#endif
10dc2a90 2641{
fa8d436c
UD
2642 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2643
2644 if (!chunk_is_mmapped(p)) {
2645 assert(av == arena_for_chunk(p));
2646 if (chunk_non_main_arena(p))
2647 assert(av != &main_arena);
2648 else
2649 assert(av == &main_arena);
2650 }
2651
2652 do_check_inuse_chunk(av, p);
2653
2654 /* Legal size ... */
2655 assert((sz & MALLOC_ALIGN_MASK) == 0);
2656 assert((unsigned long)(sz) >= MINSIZE);
2657 /* ... and alignment */
2658 assert(aligned_OK(chunk2mem(p)));
2659 /* chunk is less than MINSIZE more than request */
2660 assert((long)(sz) - (long)(s) >= 0);
2661 assert((long)(sz) - (long)(s + MINSIZE) < 0);
10dc2a90
UD
2662}
2663
fa8d436c
UD
2664/*
2665 Properties of nonrecycled chunks at the point they are malloced
2666*/
2667
10dc2a90 2668#if __STD_C
fa8d436c 2669static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2670#else
fa8d436c
UD
2671static void do_check_malloced_chunk(av, p, s)
2672mstate av; mchunkptr p; INTERNAL_SIZE_T s;
a2b08ee5 2673#endif
10dc2a90 2674{
fa8d436c
UD
2675 /* same as recycled case ... */
2676 do_check_remalloced_chunk(av, p, s);
10dc2a90 2677
fa8d436c
UD
2678 /*
2679 ... plus, must obey implementation invariant that prev_inuse is
2680 always true of any allocated chunk; i.e., that each allocated
2681 chunk borders either a previously allocated and still in-use
2682 chunk, or the base of its memory arena. This is ensured
2683 by making all allocations from the `lowest' part of any found
2684 chunk. This does not necessarily hold however for chunks
2685 recycled via fastbins.
2686 */
10dc2a90 2687
fa8d436c
UD
2688 assert(prev_inuse(p));
2689}
10dc2a90 2690
f65fd747 2691
fa8d436c
UD
2692/*
2693 Properties of malloc_state.
f65fd747 2694
fa8d436c
UD
2695 This may be useful for debugging malloc, as well as detecting user
2696 programmer errors that somehow write into malloc_state.
f65fd747 2697
fa8d436c
UD
2698 If you are extending or experimenting with this malloc, you can
2699 probably figure out how to hack this routine to print out or
2700 display chunk addresses, sizes, bins, and other instrumentation.
2701*/
f65fd747 2702
fa8d436c
UD
2703static void do_check_malloc_state(mstate av)
2704{
2705 int i;
2706 mchunkptr p;
2707 mchunkptr q;
2708 mbinptr b;
2709 unsigned int binbit;
2710 int empty;
2711 unsigned int idx;
2712 INTERNAL_SIZE_T size;
2713 unsigned long total = 0;
2714 int max_fast_bin;
f65fd747 2715
fa8d436c
UD
2716 /* internal size_t must be no wider than pointer type */
2717 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
f65fd747 2718
fa8d436c
UD
2719 /* alignment is a power of 2 */
2720 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
f65fd747 2721
fa8d436c
UD
2722 /* cannot run remaining checks until fully initialized */
2723 if (av->top == 0 || av->top == initial_top(av))
2724 return;
f65fd747 2725
fa8d436c
UD
2726 /* pagesize is a power of 2 */
2727 assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
f65fd747 2728
fa8d436c
UD
2729 /* A contiguous main_arena is consistent with sbrk_base. */
2730 if (av == &main_arena && contiguous(av))
2731 assert((char*)mp_.sbrk_base + av->system_mem ==
2732 (char*)av->top + chunksize(av->top));
2733
2734 /* properties of fastbins */
2735
2736 /* max_fast is in allowed range */
9bf248c6 2737 assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
fa8d436c 2738
9bf248c6 2739 max_fast_bin = fastbin_index(get_max_fast ());
fa8d436c
UD
2740
2741 for (i = 0; i < NFASTBINS; ++i) {
2742 p = av->fastbins[i];
2743
2744 /* all bins past max_fast are empty */
2745 if (i > max_fast_bin)
2746 assert(p == 0);
2747
2748 while (p != 0) {
2749 /* each chunk claims to be inuse */
2750 do_check_inuse_chunk(av, p);
2751 total += chunksize(p);
2752 /* chunk belongs in this bin */
2753 assert(fastbin_index(chunksize(p)) == i);
2754 p = p->fd;
2755 }
2756 }
2757
2758 if (total != 0)
2759 assert(have_fastchunks(av));
2760 else if (!have_fastchunks(av))
2761 assert(total == 0);
2762
2763 /* check normal bins */
2764 for (i = 1; i < NBINS; ++i) {
2765 b = bin_at(av,i);
2766
2767 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2768 if (i >= 2) {
2769 binbit = get_binmap(av,i);
2770 empty = last(b) == b;
2771 if (!binbit)
2772 assert(empty);
2773 else if (!empty)
2774 assert(binbit);
2775 }
2776
2777 for (p = last(b); p != b; p = p->bk) {
2778 /* each chunk claims to be free */
2779 do_check_free_chunk(av, p);
2780 size = chunksize(p);
2781 total += size;
2782 if (i >= 2) {
2783 /* chunk belongs in bin */
2784 idx = bin_index(size);
2785 assert(idx == i);
2786 /* lists are sorted */
a9177ff5 2787 assert(p->bk == b ||
fa8d436c
UD
2788 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2789 }
2790 /* chunk is followed by a legal chain of inuse chunks */
2791 for (q = next_chunk(p);
a9177ff5 2792 (q != av->top && inuse(q) &&
fa8d436c
UD
2793 (unsigned long)(chunksize(q)) >= MINSIZE);
2794 q = next_chunk(q))
2795 do_check_inuse_chunk(av, q);
2796 }
2797 }
f65fd747 2798
fa8d436c
UD
2799 /* top chunk is OK */
2800 check_chunk(av, av->top);
2801
2802 /* sanity checks for statistics */
2803
2804#ifdef NO_THREADS
2805 assert(total <= (unsigned long)(mp_.max_total_mem));
2806 assert(mp_.n_mmaps >= 0);
f65fd747 2807#endif
fa8d436c
UD
2808 assert(mp_.n_mmaps <= mp_.n_mmaps_max);
2809 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2810
2811 assert((unsigned long)(av->system_mem) <=
2812 (unsigned long)(av->max_system_mem));
f65fd747 2813
fa8d436c
UD
2814 assert((unsigned long)(mp_.mmapped_mem) <=
2815 (unsigned long)(mp_.max_mmapped_mem));
2816
2817#ifdef NO_THREADS
2818 assert((unsigned long)(mp_.max_total_mem) >=
2819 (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
dfd2257a 2820#endif
fa8d436c
UD
2821}
2822#endif
2823
2824
2825/* ----------------- Support for debugging hooks -------------------- */
2826#include "hooks.c"
2827
2828
2829/* ----------- Routines dealing with system allocation -------------- */
2830
2831/*
2832 sysmalloc handles malloc cases requiring more memory from the system.
2833 On entry, it is assumed that av->top does not have enough
2834 space to service request for nb bytes, thus requiring that av->top
2835 be extended or replaced.
2836*/
2837
f65fd747 2838#if __STD_C
fa8d436c 2839static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
f65fd747 2840#else
fa8d436c 2841static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
f65fd747
UD
2842#endif
2843{
fa8d436c
UD
2844 mchunkptr old_top; /* incoming value of av->top */
2845 INTERNAL_SIZE_T old_size; /* its size */
2846 char* old_end; /* its end address */
f65fd747 2847
fa8d436c
UD
2848 long size; /* arg to first MORECORE or mmap call */
2849 char* brk; /* return value from MORECORE */
f65fd747 2850
fa8d436c
UD
2851 long correction; /* arg to 2nd MORECORE call */
2852 char* snd_brk; /* 2nd return val */
f65fd747 2853
fa8d436c
UD
2854 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2855 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2856 char* aligned_brk; /* aligned offset into brk */
f65fd747 2857
fa8d436c
UD
2858 mchunkptr p; /* the allocated/returned chunk */
2859 mchunkptr remainder; /* remainder from allocation */
2860 unsigned long remainder_size; /* its size */
2861
2862 unsigned long sum; /* for updating stats */
2863
2864 size_t pagemask = mp_.pagesize - 1;
7463d5cb 2865 bool tried_mmap = false;
fa8d436c
UD
2866
2867
2868#if HAVE_MMAP
2869
2870 /*
2871 If have mmap, and the request size meets the mmap threshold, and
2872 the system supports mmap, and there are few enough currently
2873 allocated mmapped regions, try to directly map this request
2874 rather than expanding top.
2875 */
f65fd747 2876
fa8d436c
UD
2877 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2878 (mp_.n_mmaps < mp_.n_mmaps_max)) {
f65fd747 2879
fa8d436c
UD
2880 char* mm; /* return value from mmap call*/
2881
e404fb16 2882 try_mmap:
fa8d436c
UD
2883 /*
2884 Round up size to nearest page. For mmapped chunks, the overhead
2885 is one SIZE_SZ unit larger than for normal chunks, because there
2886 is no following chunk whose prev_size field could be used.
2887 */
2888 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
7463d5cb 2889 tried_mmap = true;
fa8d436c
UD
2890
2891 /* Don't try if size wraps around 0 */
2892 if ((unsigned long)(size) > (unsigned long)(nb)) {
2893
2894 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
a9177ff5 2895
fa8d436c 2896 if (mm != MAP_FAILED) {
a9177ff5 2897
fa8d436c
UD
2898 /*
2899 The offset to the start of the mmapped region is stored
2900 in the prev_size field of the chunk. This allows us to adjust
a9177ff5 2901 returned start address to meet alignment requirements here
fa8d436c
UD
2902 and in memalign(), and still be able to compute proper
2903 address argument for later munmap in free() and realloc().
2904 */
a9177ff5 2905
fa8d436c
UD
2906 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2907 if (front_misalign > 0) {
2908 correction = MALLOC_ALIGNMENT - front_misalign;
2909 p = (mchunkptr)(mm + correction);
2910 p->prev_size = correction;
2911 set_head(p, (size - correction) |IS_MMAPPED);
2912 }
2913 else {
2914 p = (mchunkptr)mm;
2915 set_head(p, size|IS_MMAPPED);
2916 }
a9177ff5 2917
fa8d436c 2918 /* update statistics */
a9177ff5
RM
2919
2920 if (++mp_.n_mmaps > mp_.max_n_mmaps)
fa8d436c 2921 mp_.max_n_mmaps = mp_.n_mmaps;
a9177ff5 2922
fa8d436c 2923 sum = mp_.mmapped_mem += size;
a9177ff5 2924 if (sum > (unsigned long)(mp_.max_mmapped_mem))
fa8d436c 2925 mp_.max_mmapped_mem = sum;
8a4b65b4 2926#ifdef NO_THREADS
fa8d436c 2927 sum += av->system_mem;
a9177ff5 2928 if (sum > (unsigned long)(mp_.max_total_mem))
fa8d436c 2929 mp_.max_total_mem = sum;
8a4b65b4 2930#endif
fa8d436c
UD
2931
2932 check_chunk(av, p);
a9177ff5 2933
fa8d436c
UD
2934 return chunk2mem(p);
2935 }
2936 }
2937 }
2938#endif
2939
2940 /* Record incoming configuration of top */
2941
2942 old_top = av->top;
2943 old_size = chunksize(old_top);
2944 old_end = (char*)(chunk_at_offset(old_top, old_size));
2945
a9177ff5 2946 brk = snd_brk = (char*)(MORECORE_FAILURE);
fa8d436c 2947
a9177ff5 2948 /*
fa8d436c
UD
2949 If not the first time through, we require old_size to be
2950 at least MINSIZE and to have prev_inuse set.
2951 */
2952
a9177ff5 2953 assert((old_top == initial_top(av) && old_size == 0) ||
fa8d436c
UD
2954 ((unsigned long) (old_size) >= MINSIZE &&
2955 prev_inuse(old_top) &&
2956 ((unsigned long)old_end & pagemask) == 0));
2957
2958 /* Precondition: not enough current space to satisfy nb request */
2959 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2960
2961 /* Precondition: all fastbins are consolidated */
2962 assert(!have_fastchunks(av));
2963
2964
2965 if (av != &main_arena) {
2966
2967 heap_info *old_heap, *heap;
2968 size_t old_heap_size;
2969
2970 /* First try to extend the current heap. */
2971 old_heap = heap_for_ptr(old_top);
2972 old_heap_size = old_heap->size;
a334319f 2973 if (grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
fa8d436c
UD
2974 av->system_mem += old_heap->size - old_heap_size;
2975 arena_mem += old_heap->size - old_heap_size;
2976#if 0
2977 if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
2978 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2979#endif
2980 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2981 | PREV_INUSE);
e6ac0e78
UD
2982 }
2983 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
2984 /* Use a newly allocated heap. */
2985 heap->ar_ptr = av;
2986 heap->prev = old_heap;
2987 av->system_mem += heap->size;
2988 arena_mem += heap->size;
fa8d436c 2989#if 0
e6ac0e78
UD
2990 if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
2991 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
fa8d436c 2992#endif
fa8d436c
UD
2993 /* Set up the new top. */
2994 top(av) = chunk_at_offset(heap, sizeof(*heap));
2995 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2996
2997 /* Setup fencepost and free the old top chunk. */
2998 /* The fencepost takes at least MINSIZE bytes, because it might
2999 become the top chunk again later. Note that a footer is set
3000 up, too, although the chunk is marked in use. */
3001 old_size -= MINSIZE;
3002 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
3003 if (old_size >= MINSIZE) {
3004 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
3005 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
3006 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
3007 _int_free(av, chunk2mem(old_top));
3008 } else {
3009 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
3010 set_foot(old_top, (old_size + 2*SIZE_SZ));
3011 }
3012 }
7463d5cb 3013 else if (!tried_mmap)
e404fb16
UD
3014 /* We can at least try to use mmap memory. */
3015 goto try_mmap;
fa8d436c
UD
3016
3017 } else { /* av == main_arena */
3018
3019
3020 /* Request enough space for nb + pad + overhead */
3021
3022 size = nb + mp_.top_pad + MINSIZE;
3023
3024 /*
3025 If contiguous, we can subtract out existing space that we hope to
3026 combine with new space. We add it back later only if
3027 we don't actually get contiguous space.
3028 */
3029
3030 if (contiguous(av))
3031 size -= old_size;
3032
3033 /*
3034 Round to a multiple of page size.
3035 If MORECORE is not contiguous, this ensures that we only call it
3036 with whole-page arguments. And if MORECORE is contiguous and
3037 this is not the first time through, this preserves page-alignment of
3038 previous calls. Otherwise, we correct to page-align below.
3039 */
3040
3041 size = (size + pagemask) & ~pagemask;
3042
3043 /*
3044 Don't try to call MORECORE if argument is so big as to appear
3045 negative. Note that since mmap takes size_t arg, it may succeed
3046 below even if we cannot call MORECORE.
3047 */
3048
a9177ff5 3049 if (size > 0)
fa8d436c
UD
3050 brk = (char*)(MORECORE(size));
3051
3052 if (brk != (char*)(MORECORE_FAILURE)) {
3053 /* Call the `morecore' hook if necessary. */
3054 if (__after_morecore_hook)
3055 (*__after_morecore_hook) ();
3056 } else {
3057 /*
3058 If we have mmap, try using it as a backup when MORECORE fails or
3059 cannot be used. This is worth doing on systems that have "holes" in
3060 address space, so sbrk cannot extend to give contiguous space, but
3061 space is available elsewhere. Note that we ignore mmap max count
3062 and threshold limits, since the space will not be used as a
3063 segregated mmap region.
3064 */
3065
3066#if HAVE_MMAP
3067 /* Cannot merge with old top, so add its size back in */
3068 if (contiguous(av))
3069 size = (size + old_size + pagemask) & ~pagemask;
3070
3071 /* If we are relying on mmap as backup, then use larger units */
3072 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
3073 size = MMAP_AS_MORECORE_SIZE;
3074
3075 /* Don't try if size wraps around 0 */
3076 if ((unsigned long)(size) > (unsigned long)(nb)) {
3077
75bfdfc7 3078 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
a9177ff5 3079
75bfdfc7 3080 if (mbrk != MAP_FAILED) {
a9177ff5 3081
fa8d436c 3082 /* We do not need, and cannot use, another sbrk call to find end */
75bfdfc7 3083 brk = mbrk;
fa8d436c 3084 snd_brk = brk + size;
a9177ff5
RM
3085
3086 /*
3087 Record that we no longer have a contiguous sbrk region.
fa8d436c
UD
3088 After the first time mmap is used as backup, we do not
3089 ever rely on contiguous space since this could incorrectly
3090 bridge regions.
3091 */
3092 set_noncontiguous(av);
3093 }
3094 }
3095#endif
3096 }
3097
3098 if (brk != (char*)(MORECORE_FAILURE)) {
3099 if (mp_.sbrk_base == 0)
3100 mp_.sbrk_base = brk;
3101 av->system_mem += size;
3102
3103 /*
3104 If MORECORE extends previous space, we can likewise extend top size.
3105 */
a9177ff5 3106
fa8d436c
UD
3107 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
3108 set_head(old_top, (size + old_size) | PREV_INUSE);
3109
886d5973 3110 else if (contiguous(av) && old_size && brk < old_end) {
fa8d436c
UD
3111 /* Oops! Someone else killed our space.. Can't touch anything. */
3112 assert(0);
3113 }
3114
3115 /*
3116 Otherwise, make adjustments:
a9177ff5 3117
fa8d436c
UD
3118 * If the first time through or noncontiguous, we need to call sbrk
3119 just to find out where the end of memory lies.
3120
3121 * We need to ensure that all returned chunks from malloc will meet
3122 MALLOC_ALIGNMENT
3123
3124 * If there was an intervening foreign sbrk, we need to adjust sbrk
3125 request size to account for fact that we will not be able to
3126 combine new space with existing space in old_top.
3127
3128 * Almost all systems internally allocate whole pages at a time, in
3129 which case we might as well use the whole last page of request.
3130 So we allocate enough more memory to hit a page boundary now,
3131 which in turn causes future contiguous calls to page-align.
3132 */
a9177ff5 3133
fa8d436c 3134 else {
fa8d436c
UD
3135 front_misalign = 0;
3136 end_misalign = 0;
3137 correction = 0;
3138 aligned_brk = brk;
a9177ff5 3139
fa8d436c 3140 /* handle contiguous cases */
a9177ff5
RM
3141 if (contiguous(av)) {
3142
0cb71e02
UD
3143 /* Count foreign sbrk as system_mem. */
3144 if (old_size)
3145 av->system_mem += brk - old_end;
3146
fa8d436c
UD
3147 /* Guarantee alignment of first new chunk made from this space */
3148
3149 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3150 if (front_misalign > 0) {
3151
3152 /*
3153 Skip over some bytes to arrive at an aligned position.
3154 We don't need to specially mark these wasted front bytes.
3155 They will never be accessed anyway because
3156 prev_inuse of av->top (and any chunk created from its start)
3157 is always true after initialization.
3158 */
3159
3160 correction = MALLOC_ALIGNMENT - front_misalign;
3161 aligned_brk += correction;
3162 }
a9177ff5 3163
fa8d436c
UD
3164 /*
3165 If this isn't adjacent to existing space, then we will not
3166 be able to merge with old_top space, so must add to 2nd request.
3167 */
a9177ff5 3168
fa8d436c 3169 correction += old_size;
a9177ff5 3170
fa8d436c
UD
3171 /* Extend the end address to hit a page boundary */
3172 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3173 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
a9177ff5 3174
fa8d436c
UD
3175 assert(correction >= 0);
3176 snd_brk = (char*)(MORECORE(correction));
a9177ff5 3177
fa8d436c
UD
3178 /*
3179 If we can't allocate the correction, try to at least find out the current
3180 brk. It might be enough to proceed without failing.
a9177ff5 3181
fa8d436c
UD
3182 Note that if second sbrk did NOT fail, we assume that space
3183 is contiguous with first sbrk. This is a safe assumption unless
3184 program is multithreaded but doesn't use locks and a foreign sbrk
3185 occurred between our first and second calls.
3186 */
a9177ff5 3187
fa8d436c
UD
3188 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3189 correction = 0;
3190 snd_brk = (char*)(MORECORE(0));
3191 } else
3192 /* Call the `morecore' hook if necessary. */
3193 if (__after_morecore_hook)
3194 (*__after_morecore_hook) ();
3195 }
a9177ff5 3196
fa8d436c 3197 /* handle non-contiguous cases */
a9177ff5 3198 else {
fa8d436c
UD
3199 /* MORECORE/mmap must correctly align */
3200 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
a9177ff5 3201
fa8d436c
UD
3202 /* Find out current end of memory */
3203 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3204 snd_brk = (char*)(MORECORE(0));
3205 }
3206 }
a9177ff5 3207
fa8d436c
UD
3208 /* Adjust top based on results of second sbrk */
3209 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3210 av->top = (mchunkptr)aligned_brk;
3211 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3212 av->system_mem += correction;
a9177ff5 3213
fa8d436c
UD
3214 /*
3215 If not the first time through, we either have a
3216 gap due to foreign sbrk or a non-contiguous region. Insert a
3217 double fencepost at old_top to prevent consolidation with space
3218 we don't own. These fenceposts are artificial chunks that are
3219 marked as inuse and are in any case too small to use. We need
3220 two to make sizes and alignments work out.
3221 */
a9177ff5 3222
fa8d436c 3223 if (old_size != 0) {
a9177ff5 3224 /*
fa8d436c
UD
3225 Shrink old_top to insert fenceposts, keeping size a
3226 multiple of MALLOC_ALIGNMENT. We know there is at least
3227 enough space in old_top to do this.
3228 */
3229 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3230 set_head(old_top, old_size | PREV_INUSE);
a9177ff5 3231
fa8d436c
UD
3232 /*
3233 Note that the following assignments completely overwrite
3234 old_top when old_size was previously MINSIZE. This is
3235 intentional. We need the fencepost, even if old_top otherwise gets
3236 lost.
3237 */
3238 chunk_at_offset(old_top, old_size )->size =
3239 (2*SIZE_SZ)|PREV_INUSE;
3240
3241 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
3242 (2*SIZE_SZ)|PREV_INUSE;
3243
3244 /* If possible, release the rest. */
3245 if (old_size >= MINSIZE) {
3246 _int_free(av, chunk2mem(old_top));
3247 }
3248
3249 }
3250 }
3251 }
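/*
  Worked example of the double fencepost above (illustrative only;
  assumes SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MINSIZE == 32): with
  an old top chunk of 208 bytes,

    old_size = (208 - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK = 176

  so the two fenceposts sit at offsets 176 and 192 from old_top, each
  advertising a size of 2*SIZE_SZ == 16 and marked PREV_INUSE, and the
  remaining 176-byte chunk (>= MINSIZE) is handed to _int_free().
*/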
a9177ff5 3252
fa8d436c
UD
3253 /* Update statistics */
3254#ifdef NO_THREADS
3255 sum = av->system_mem + mp_.mmapped_mem;
3256 if (sum > (unsigned long)(mp_.max_total_mem))
3257 mp_.max_total_mem = sum;
3258#endif
3259
3260 }
3261
3262 } /* if (av != &main_arena) */
3263
3264 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
3265 av->max_system_mem = av->system_mem;
3266 check_malloc_state(av);
a9177ff5 3267
fa8d436c
UD
3268 /* finally, do the allocation */
3269 p = av->top;
3270 size = chunksize(p);
3271
3272 /* check that one of the above allocation paths succeeded */
3273 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3274 remainder_size = size - nb;
3275 remainder = chunk_at_offset(p, nb);
3276 av->top = remainder;
3277 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
3278 set_head(remainder, remainder_size | PREV_INUSE);
3279 check_malloced_chunk(av, p, nb);
3280 return chunk2mem(p);
3281 }
3282
3283 /* catch all failure paths */
3284 MALLOC_FAILURE_ACTION;
3285 return 0;
3286}
3287
3288
3289/*
3290 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3291 to the system (via negative arguments to sbrk) if there is unused
3292 memory at the `high' end of the malloc pool. It is called
3293 automatically by free() when top space exceeds the trim
3294 threshold. It is also called by the public malloc_trim routine. It
3295 returns 1 if it actually released any memory, else 0.
3296*/
3297
3298#if __STD_C
3299static int sYSTRIm(size_t pad, mstate av)
3300#else
3301static int sYSTRIm(pad, av) size_t pad; mstate av;
3302#endif
3303{
3304 long top_size; /* Amount of top-most memory */
3305 long extra; /* Amount to release */
3306 long released; /* Amount actually released */
3307 char* current_brk; /* address returned by pre-check sbrk call */
3308 char* new_brk; /* address returned by post-check sbrk call */
3309 size_t pagesz;
3310
3311 pagesz = mp_.pagesize;
3312 top_size = chunksize(av->top);
a9177ff5 3313
fa8d436c
UD
3314 /* Release in pagesize units, keeping at least one page */
3315 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
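/*
  Worked example (illustrative; assumes a 4096-byte page, pad == 0 and
  MINSIZE == 32): with 40992 bytes in av->top,

    extra = ((40992 - 0 - 32 + 4095) / 4096 - 1) * 4096 = 9 * 4096 = 36864

  so 36864 bytes can be returned to the system while one full page plus
  the MINSIZE slack stays behind in top.
*/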
a9177ff5 3316
fa8d436c 3317 if (extra > 0) {
a9177ff5 3318
fa8d436c
UD
3319 /*
3320 Only proceed if end of memory is where we last set it.
3321 This avoids problems if there were foreign sbrk calls.
3322 */
3323 current_brk = (char*)(MORECORE(0));
3324 if (current_brk == (char*)(av->top) + top_size) {
a9177ff5 3325
fa8d436c
UD
3326 /*
3327 Attempt to release memory. We ignore MORECORE return value,
3328 and instead call again to find out where new end of memory is.
3329 This avoids problems if the first call releases less than we asked,
3330 or if a failure somehow altered the brk value. (We could still
3331 encounter problems if it altered brk in some very bad way,
3332 but the only thing we can do is adjust anyway, which will cause
3333 some downstream failure.)
3334 */
a9177ff5 3335
fa8d436c
UD
3336 MORECORE(-extra);
3337 /* Call the `morecore' hook if necessary. */
3338 if (__after_morecore_hook)
3339 (*__after_morecore_hook) ();
3340 new_brk = (char*)(MORECORE(0));
a9177ff5 3341
fa8d436c
UD
3342 if (new_brk != (char*)MORECORE_FAILURE) {
3343 released = (long)(current_brk - new_brk);
a9177ff5 3344
fa8d436c
UD
3345 if (released != 0) {
3346 /* Success. Adjust top. */
3347 av->system_mem -= released;
3348 set_head(av->top, (top_size - released) | PREV_INUSE);
3349 check_malloc_state(av);
3350 return 1;
3351 }
3352 }
3353 }
3354 }
3355 return 0;
f65fd747
UD
3356}
3357
fa8d436c
UD
3358#ifdef HAVE_MMAP
3359
431c33c0
UD
3360static void
3361internal_function
f65fd747 3362#if __STD_C
431c33c0 3363munmap_chunk(mchunkptr p)
f65fd747 3364#else
431c33c0 3365munmap_chunk(p) mchunkptr p;
f65fd747
UD
3366#endif
3367{
3368 INTERNAL_SIZE_T size = chunksize(p);
f65fd747
UD
3369
3370 assert (chunk_is_mmapped(p));
fa8d436c
UD
3371#if 0
3372 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3373 assert((mp_.n_mmaps > 0));
3374#endif
8e635611
UD
3375
3376 uintptr_t block = (uintptr_t) p - p->prev_size;
3377 size_t total_size = p->prev_size + size;
3378 /* Unfortunately we have to do the compiler's job by hand here. Normally
3379 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3380 page size. But gcc does not recognize the optimization possibility
3381 (at the moment, at least) so we combine the two values into one before
3382 the bit test. */
3383 if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
3384 {
3385 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
3386 chunk2mem (p));
3387 return;
3388 }
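/*
  Illustration (not from the original code): the combined test above is
  exactly equivalent to checking the two values separately, because an
  OR only has all low bits clear when both operands do:

    (block & (pagesize-1)) == 0 && (total_size & (pagesize-1)) == 0
      <=>  ((block | total_size) & (pagesize-1)) == 0
*/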
f65fd747 3389
fa8d436c 3390 mp_.n_mmaps--;
8e635611 3391 mp_.mmapped_mem -= total_size;
f65fd747 3392
2182b1ea 3393 int ret __attribute__ ((unused)) = munmap((char *)block, total_size);
f65fd747
UD
3394
3395 /* munmap returns non-zero on failure */
3396 assert(ret == 0);
3397}
3398
3399#if HAVE_MREMAP
3400
431c33c0
UD
3401static mchunkptr
3402internal_function
f65fd747 3403#if __STD_C
431c33c0 3404mremap_chunk(mchunkptr p, size_t new_size)
f65fd747 3405#else
431c33c0 3406mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
f65fd747
UD
3407#endif
3408{
fa8d436c 3409 size_t page_mask = mp_.pagesize - 1;
f65fd747
UD
3410 INTERNAL_SIZE_T offset = p->prev_size;
3411 INTERNAL_SIZE_T size = chunksize(p);
3412 char *cp;
3413
3414 assert (chunk_is_mmapped(p));
fa8d436c
UD
3415#if 0
3416 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3417 assert((mp_.n_mmaps > 0));
3418#endif
3419 assert(((size + offset) & (mp_.pagesize-1)) == 0);
f65fd747
UD
3420
3421 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3422 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
3423
3424 cp = (char *)mremap((char *)p - offset, size + offset, new_size,
3425 MREMAP_MAYMOVE);
3426
431c33c0 3427 if (cp == MAP_FAILED) return 0;
f65fd747
UD
3428
3429 p = (mchunkptr)(cp + offset);
3430
3431 assert(aligned_OK(chunk2mem(p)));
3432
3433 assert((p->prev_size == offset));
3434 set_head(p, (new_size - offset)|IS_MMAPPED);
3435
fa8d436c
UD
3436 mp_.mmapped_mem -= size + offset;
3437 mp_.mmapped_mem += new_size;
3438 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
3439 mp_.max_mmapped_mem = mp_.mmapped_mem;
8a4b65b4 3440#ifdef NO_THREADS
fa8d436c
UD
3441 if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
3442 mp_.max_total_mem)
3443 mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
8a4b65b4 3444#endif
f65fd747
UD
3445 return p;
3446}
3447
3448#endif /* HAVE_MREMAP */
3449
3450#endif /* HAVE_MMAP */
3451
fa8d436c 3452/*------------------------ Public wrappers. --------------------------------*/
f65fd747 3453
fa8d436c
UD
3454Void_t*
3455public_mALLOc(size_t bytes)
3456{
3457 mstate ar_ptr;
3458 Void_t *victim;
f65fd747 3459
06d6611a 3460 __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook;
fa8d436c
UD
3461 if (hook != NULL)
3462 return (*hook)(bytes, RETURN_ADDRESS (0));
f65fd747 3463
fa8d436c
UD
3464 arena_get(ar_ptr, bytes);
3465 if(!ar_ptr)
f65fd747 3466 return 0;
fa8d436c
UD
3467 victim = _int_malloc(ar_ptr, bytes);
3468 if(!victim) {
3469 /* Maybe the failure is due to running out of mmapped areas. */
3470 if(ar_ptr != &main_arena) {
3471 (void)mutex_unlock(&ar_ptr->mutex);
3472 (void)mutex_lock(&main_arena.mutex);
3473 victim = _int_malloc(&main_arena, bytes);
3474 (void)mutex_unlock(&main_arena.mutex);
3475 } else {
3476#if USE_ARENAS
3477 /* ... or sbrk() has failed and there is still a chance to mmap() */
3478 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3479 (void)mutex_unlock(&main_arena.mutex);
3480 if(ar_ptr) {
3481 victim = _int_malloc(ar_ptr, bytes);
3482 (void)mutex_unlock(&ar_ptr->mutex);
3483 }
3484#endif
60f0e64b 3485 }
fa8d436c
UD
3486 } else
3487 (void)mutex_unlock(&ar_ptr->mutex);
3488 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
3489 ar_ptr == arena_for_chunk(mem2chunk(victim)));
3490 return victim;
f65fd747 3491}
aa420660
UD
3492#ifdef libc_hidden_def
3493libc_hidden_def(public_mALLOc)
3494#endif
f65fd747 3495
fa8d436c
UD
3496void
3497public_fREe(Void_t* mem)
f65fd747 3498{
fa8d436c
UD
3499 mstate ar_ptr;
3500 mchunkptr p; /* chunk corresponding to mem */
3501
06d6611a 3502 void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook;
fa8d436c
UD
3503 if (hook != NULL) {
3504 (*hook)(mem, RETURN_ADDRESS (0));
3505 return;
f65fd747 3506 }
f65fd747 3507
fa8d436c
UD
3508 if (mem == 0) /* free(0) has no effect */
3509 return;
f65fd747 3510
fa8d436c 3511 p = mem2chunk(mem);
f65fd747 3512
fa8d436c
UD
3513#if HAVE_MMAP
3514 if (chunk_is_mmapped(p)) /* release mmapped memory. */
3515 {
1d05c2fb
UD
3516 /* see if the dynamic brk/mmap threshold needs adjusting */
3517 if (!mp_.no_dyn_threshold
3518 && p->size > mp_.mmap_threshold
3519 && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
3520 {
3521 mp_.mmap_threshold = chunksize (p);
3522 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3523 }
fa8d436c
UD
3524 munmap_chunk(p);
3525 return;
8a4b65b4 3526 }
f65fd747 3527#endif
f65fd747 3528
fa8d436c
UD
3529 ar_ptr = arena_for_chunk(p);
3530#if THREAD_STATS
3531 if(!mutex_trylock(&ar_ptr->mutex))
3532 ++(ar_ptr->stat_lock_direct);
3533 else {
3534 (void)mutex_lock(&ar_ptr->mutex);
3535 ++(ar_ptr->stat_lock_wait);
f65fd747 3536 }
f65fd747 3537#else
fa8d436c 3538 (void)mutex_lock(&ar_ptr->mutex);
f65fd747 3539#endif
fa8d436c
UD
3540 _int_free(ar_ptr, mem);
3541 (void)mutex_unlock(&ar_ptr->mutex);
f65fd747 3542}
aa420660
UD
3543#ifdef libc_hidden_def
3544libc_hidden_def (public_fREe)
3545#endif
f65fd747 3546
fa8d436c
UD
3547Void_t*
3548public_rEALLOc(Void_t* oldmem, size_t bytes)
f65fd747 3549{
fa8d436c
UD
3550 mstate ar_ptr;
3551 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 3552
fa8d436c
UD
3553 mchunkptr oldp; /* chunk corresponding to oldmem */
3554 INTERNAL_SIZE_T oldsize; /* its size */
8a4b65b4 3555
fa8d436c 3556 Void_t* newp; /* chunk to return */
f65fd747 3557
06d6611a 3558 __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
fa8d436c
UD
3559 __realloc_hook;
3560 if (hook != NULL)
3561 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
f65fd747 3562
fa8d436c
UD
3563#if REALLOC_ZERO_BYTES_FREES
3564 if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
f65fd747 3565#endif
f65fd747 3566
fa8d436c
UD
3567 /* realloc of null is supposed to be same as malloc */
3568 if (oldmem == 0) return public_mALLOc(bytes);
f65fd747 3569
fa8d436c
UD
3570 oldp = mem2chunk(oldmem);
3571 oldsize = chunksize(oldp);
f65fd747 3572
dc165f7b
UD
3573 /* Little security check which won't hurt performance: the
3574 allocator never wraps around at the end of the address space.
3575 Therefore we can exclude some size values which might appear
3576 here by accident or by "design" from some intruder. */
3577 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
073f560e 3578 || __builtin_expect (misaligned_chunk (oldp), 0))
dc165f7b
UD
3579 {
3580 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
3581 return NULL;
3582 }
3583
fa8d436c 3584 checked_request2size(bytes, nb);
f65fd747 3585
fa8d436c
UD
3586#if HAVE_MMAP
3587 if (chunk_is_mmapped(oldp))
3588 {
3589 Void_t* newmem;
f65fd747 3590
fa8d436c
UD
3591#if HAVE_MREMAP
3592 newp = mremap_chunk(oldp, nb);
3593 if(newp) return chunk2mem(newp);
f65fd747 3594#endif
fa8d436c
UD
3595 /* Note the extra SIZE_SZ overhead. */
3596 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3597 /* Must alloc, copy, free. */
3598 newmem = public_mALLOc(bytes);
3599 if (newmem == 0) return 0; /* propagate failure */
3600 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3601 munmap_chunk(oldp);
3602 return newmem;
3603 }
dfd2257a 3604#endif
fa8d436c
UD
3605
3606 ar_ptr = arena_for_chunk(oldp);
3607#if THREAD_STATS
3608 if(!mutex_trylock(&ar_ptr->mutex))
3609 ++(ar_ptr->stat_lock_direct);
3610 else {
3611 (void)mutex_lock(&ar_ptr->mutex);
3612 ++(ar_ptr->stat_lock_wait);
3613 }
f65fd747 3614#else
fa8d436c 3615 (void)mutex_lock(&ar_ptr->mutex);
f65fd747 3616#endif
f65fd747 3617
fa8d436c
UD
3618#ifndef NO_THREADS
3619 /* As in malloc(), remember this arena for the next allocation. */
3620 tsd_setspecific(arena_key, (Void_t *)ar_ptr);
f65fd747
UD
3621#endif
3622
fa8d436c 3623 newp = _int_realloc(ar_ptr, oldmem, bytes);
f65fd747 3624
fa8d436c
UD
3625 (void)mutex_unlock(&ar_ptr->mutex);
3626 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3627 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3628 return newp;
3629}
aa420660
UD
3630#ifdef libc_hidden_def
3631libc_hidden_def (public_rEALLOc)
3632#endif
f65fd747 3633
fa8d436c
UD
3634Void_t*
3635public_mEMALIGn(size_t alignment, size_t bytes)
3636{
3637 mstate ar_ptr;
3638 Void_t *p;
f65fd747 3639
fa8d436c
UD
3640 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3641 __const __malloc_ptr_t)) =
3642 __memalign_hook;
3643 if (hook != NULL)
3644 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
f65fd747 3645
fa8d436c
UD
3646 /* If need less alignment than we give anyway, just relay to malloc */
3647 if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
1228ed5c 3648
fa8d436c
UD
3649 /* Otherwise, ensure that it is at least a minimum chunk size */
3650 if (alignment < MINSIZE) alignment = MINSIZE;
f65fd747 3651
fa8d436c
UD
3652 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3653 if(!ar_ptr)
3654 return 0;
3655 p = _int_memalign(ar_ptr, alignment, bytes);
3656 (void)mutex_unlock(&ar_ptr->mutex);
3657 if(!p) {
3658 /* Maybe the failure is due to running out of mmapped areas. */
3659 if(ar_ptr != &main_arena) {
3660 (void)mutex_lock(&main_arena.mutex);
3661 p = _int_memalign(&main_arena, alignment, bytes);
3662 (void)mutex_unlock(&main_arena.mutex);
f65fd747 3663 } else {
e9b3e3c5 3664#if USE_ARENAS
fa8d436c
UD
3665 /* ... or sbrk() has failed and there is still a chance to mmap() */
3666 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3667 if(ar_ptr) {
3668 p = _int_memalign(ar_ptr, alignment, bytes);
3669 (void)mutex_unlock(&ar_ptr->mutex);
3670 }
e9b3e3c5 3671#endif
f65fd747
UD
3672 }
3673 }
fa8d436c
UD
3674 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3675 ar_ptr == arena_for_chunk(mem2chunk(p)));
3676 return p;
f65fd747 3677}
aa420660
UD
3678#ifdef libc_hidden_def
3679libc_hidden_def (public_mEMALIGn)
3680#endif
f65fd747 3681
fa8d436c
UD
3682Void_t*
3683public_vALLOc(size_t bytes)
3684{
3685 mstate ar_ptr;
3686 Void_t *p;
f65fd747 3687
fa8d436c
UD
3688 if(__malloc_initialized < 0)
3689 ptmalloc_init ();
8088488d
UD
3690
3691 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3692 __const __malloc_ptr_t)) =
3693 __memalign_hook;
3694 if (hook != NULL)
3695 return (*hook)(mp_.pagesize, bytes, RETURN_ADDRESS (0));
3696
fa8d436c
UD
3697 arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
3698 if(!ar_ptr)
3699 return 0;
3700 p = _int_valloc(ar_ptr, bytes);
3701 (void)mutex_unlock(&ar_ptr->mutex);
3702 return p;
3703}
f65fd747 3704
fa8d436c
UD
3705Void_t*
3706public_pVALLOc(size_t bytes)
3707{
3708 mstate ar_ptr;
3709 Void_t *p;
f65fd747 3710
fa8d436c
UD
3711 if(__malloc_initialized < 0)
3712 ptmalloc_init ();
8088488d
UD
3713
3714 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3715 __const __malloc_ptr_t)) =
3716 __memalign_hook;
3717 if (hook != NULL)
3718 return (*hook)(mp_.pagesize,
3719 (bytes + mp_.pagesize - 1) & ~(mp_.pagesize - 1),
3720 RETURN_ADDRESS (0));
3721
fa8d436c
UD
3722 arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
3723 p = _int_pvalloc(ar_ptr, bytes);
3724 (void)mutex_unlock(&ar_ptr->mutex);
3725 return p;
3726}
f65fd747 3727
fa8d436c
UD
3728Void_t*
3729public_cALLOc(size_t n, size_t elem_size)
f65fd747 3730{
fa8d436c
UD
3731 mstate av;
3732 mchunkptr oldtop, p;
0950889b 3733 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
fa8d436c
UD
3734 Void_t* mem;
3735 unsigned long clearsize;
3736 unsigned long nclears;
3737 INTERNAL_SIZE_T* d;
6c6bb055 3738 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
fa8d436c 3739 __malloc_hook;
0950889b
UD
3740
3741 /* size_t is unsigned so the behavior on overflow is defined. */
3742 bytes = n * elem_size;
d9af917d
UD
3743#define HALF_INTERNAL_SIZE_T \
3744 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3745 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
0be405c2 3746 if (elem_size != 0 && bytes / elem_size != n) {
d9af917d
UD
3747 MALLOC_FAILURE_ACTION;
3748 return 0;
3749 }
0950889b
UD
3750 }
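/*
  Sketch of the overflow screen above (illustrative; assumes a 32-bit
  INTERNAL_SIZE_T, so HALF_INTERNAL_SIZE_T == 1 << 16 == 65536):

    n = 65536, elem_size = 65536:  bytes wraps to 0,
                                   bytes / elem_size == 0 != n  -> fail
    n = 70000, elem_size = 2:      (n | elem_size) >= 65536, but
                                   bytes / elem_size == n       -> ok

  The cheap (n | elem_size) test skips the division entirely whenever
  both factors are too small for the product to overflow.
*/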
3751
6c6bb055 3752 if (hook != NULL) {
0950889b 3753 sz = bytes;
fa8d436c
UD
3754 mem = (*hook)(sz, RETURN_ADDRESS (0));
3755 if(mem == 0)
3756 return 0;
3757#ifdef HAVE_MEMCPY
3758 return memset(mem, 0, sz);
a2b08ee5 3759#else
fa8d436c
UD
3760 while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3761 return mem;
a2b08ee5 3762#endif
10dc2a90 3763 }
10dc2a90 3764
0950889b 3765 sz = bytes;
fa8d436c
UD
3766
3767 arena_get(av, sz);
3768 if(!av)
f65fd747 3769 return 0;
fa8d436c
UD
3770
3771 /* Check if we hand out the top chunk, in which case there may be no
3772 need to clear. */
3773#if MORECORE_CLEARS
3774 oldtop = top(av);
3775 oldtopsize = chunksize(top(av));
3776#if MORECORE_CLEARS < 2
3777 /* Only newly allocated memory is guaranteed to be cleared. */
3778 if (av == &main_arena &&
3779 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3780 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3781#endif
3782#endif
3783 mem = _int_malloc(av, sz);
3784
3785 /* Only clearing follows, so we can unlock early. */
3786 (void)mutex_unlock(&av->mutex);
3787
3788 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3789 av == arena_for_chunk(mem2chunk(mem)));
3790
3791 if (mem == 0) {
7799b7b3 3792 /* Maybe the failure is due to running out of mmapped areas. */
fa8d436c 3793 if(av != &main_arena) {
7799b7b3 3794 (void)mutex_lock(&main_arena.mutex);
fa8d436c 3795 mem = _int_malloc(&main_arena, sz);
7799b7b3 3796 (void)mutex_unlock(&main_arena.mutex);
e9b3e3c5
UD
3797 } else {
3798#if USE_ARENAS
3799 /* ... or sbrk() has failed and there is still a chance to mmap() */
fa8d436c
UD
3800 (void)mutex_lock(&main_arena.mutex);
3801 av = arena_get2(av->next ? av : 0, sz);
e9b3e3c5 3802 (void)mutex_unlock(&main_arena.mutex);
fa8d436c
UD
3803 if(av) {
3804 mem = _int_malloc(av, sz);
3805 (void)mutex_unlock(&av->mutex);
e9b3e3c5
UD
3806 }
3807#endif
7799b7b3 3808 }
fa8d436c
UD
3809 if (mem == 0) return 0;
3810 }
3811 p = mem2chunk(mem);
f65fd747 3812
fa8d436c
UD
3813 /* Two optional cases in which clearing is not necessary */
3814#if HAVE_MMAP
9ea9af19
UD
3815 if (chunk_is_mmapped (p))
3816 {
3817 if (__builtin_expect (perturb_byte, 0))
3818 MALLOC_ZERO (mem, sz);
3819 return mem;
3820 }
f65fd747 3821#endif
f65fd747 3822
fa8d436c 3823 csz = chunksize(p);
f65fd747 3824
fa8d436c 3825#if MORECORE_CLEARS
56137dbc 3826 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
fa8d436c
UD
3827 /* clear only the bytes from non-freshly-sbrked memory */
3828 csz = oldtopsize;
f65fd747 3829 }
fa8d436c 3830#endif
f65fd747 3831
fa8d436c
UD
3832 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3833 contents have an odd number of INTERNAL_SIZE_T-sized words;
3834 minimally 3. */
3835 d = (INTERNAL_SIZE_T*)mem;
3836 clearsize = csz - SIZE_SZ;
3837 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3838 assert(nclears >= 3);
f65fd747 3839
fa8d436c
UD
3840 if (nclears > 9)
3841 MALLOC_ZERO(d, clearsize);
f65fd747 3842
fa8d436c
UD
3843 else {
3844 *(d+0) = 0;
3845 *(d+1) = 0;
3846 *(d+2) = 0;
3847 if (nclears > 4) {
3848 *(d+3) = 0;
3849 *(d+4) = 0;
3850 if (nclears > 6) {
3851 *(d+5) = 0;
3852 *(d+6) = 0;
3853 if (nclears > 8) {
3854 *(d+7) = 0;
3855 *(d+8) = 0;
3856 }
f65fd747
UD
3857 }
3858 }
f65fd747
UD
3859 }
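/*
  Why nclears is odd and at least 3 (illustrative reasoning; assumes the
  usual MALLOC_ALIGNMENT == 2*SIZE_SZ and MINSIZE == 4*SIZE_SZ): every
  chunk size csz is a multiple of 2*SIZE_SZ, so

    clearsize = csz - SIZE_SZ = (2*k - 1) * SIZE_SZ   for some k >= 2
    nclears   = clearsize / SIZE_SZ = 2*k - 1         i.e. odd and >= 3

  which is what the assert above and the unrolled stores rely on.
*/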
3860
fa8d436c
UD
3861 return mem;
3862}
f65fd747 3863
88764ae2
UD
3864#ifndef _LIBC
3865
fa8d436c
UD
3866Void_t**
3867public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
3868{
3869 mstate ar_ptr;
3870 Void_t** m;
f65fd747 3871
fa8d436c
UD
3872 arena_get(ar_ptr, n*elem_size);
3873 if(!ar_ptr)
3874 return 0;
f65fd747 3875
fa8d436c
UD
3876 m = _int_icalloc(ar_ptr, n, elem_size, chunks);
3877 (void)mutex_unlock(&ar_ptr->mutex);
3878 return m;
3879}
f65fd747 3880
fa8d436c
UD
3881Void_t**
3882public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
3883{
3884 mstate ar_ptr;
3885 Void_t** m;
f65fd747 3886
fa8d436c
UD
3887 arena_get(ar_ptr, 0);
3888 if(!ar_ptr)
3889 return 0;
f65fd747 3890
fa8d436c
UD
3891 m = _int_icomalloc(ar_ptr, n, sizes, chunks);
3892 (void)mutex_unlock(&ar_ptr->mutex);
3893 return m;
3894}
f65fd747 3895
fa8d436c
UD
3896void
3897public_cFREe(Void_t* m)
3898{
3899 public_fREe(m);
3900}
f65fd747 3901
fa8d436c 3902#endif /* _LIBC */
f65fd747 3903
fa8d436c
UD
3904int
3905public_mTRIm(size_t s)
3906{
3907 int result;
f65fd747 3908
88764ae2
UD
3909 if(__malloc_initialized < 0)
3910 ptmalloc_init ();
fa8d436c
UD
3911 (void)mutex_lock(&main_arena.mutex);
3912 result = mTRIm(s);
3913 (void)mutex_unlock(&main_arena.mutex);
3914 return result;
3915}
f65fd747 3916
fa8d436c
UD
3917size_t
3918public_mUSABLe(Void_t* m)
3919{
3920 size_t result;
f65fd747 3921
fa8d436c
UD
3922 result = mUSABLe(m);
3923 return result;
3924}
f65fd747 3925
fa8d436c
UD
3926void
3927public_mSTATs()
3928{
3929 mSTATs();
3930}
f65fd747 3931
fa8d436c
UD
3932struct mallinfo public_mALLINFo()
3933{
3934 struct mallinfo m;
f65fd747 3935
6a00759b
UD
3936 if(__malloc_initialized < 0)
3937 ptmalloc_init ();
fa8d436c
UD
3938 (void)mutex_lock(&main_arena.mutex);
3939 m = mALLINFo(&main_arena);
3940 (void)mutex_unlock(&main_arena.mutex);
3941 return m;
f65fd747
UD
3942}
3943
fa8d436c
UD
3944int
3945public_mALLOPt(int p, int v)
3946{
3947 int result;
3948 result = mALLOPt(p, v);
3949 return result;
3950}
f65fd747
UD
3951
3952/*
fa8d436c 3953 ------------------------------ malloc ------------------------------
f65fd747
UD
3954*/
3955
f1c5213d 3956Void_t*
fa8d436c 3957_int_malloc(mstate av, size_t bytes)
f65fd747 3958{
fa8d436c
UD
3959 INTERNAL_SIZE_T nb; /* normalized request size */
3960 unsigned int idx; /* associated bin index */
3961 mbinptr bin; /* associated bin */
3962 mfastbinptr* fb; /* associated fastbin */
f65fd747 3963
fa8d436c
UD
3964 mchunkptr victim; /* inspected/selected chunk */
3965 INTERNAL_SIZE_T size; /* its size */
3966 int victim_index; /* its bin index */
f65fd747 3967
fa8d436c
UD
3968 mchunkptr remainder; /* remainder from a split */
3969 unsigned long remainder_size; /* its size */
8a4b65b4 3970
fa8d436c
UD
3971 unsigned int block; /* bit map traverser */
3972 unsigned int bit; /* bit map traverser */
3973 unsigned int map; /* current word of binmap */
8a4b65b4 3974
fa8d436c
UD
3975 mchunkptr fwd; /* misc temp for linking */
3976 mchunkptr bck; /* misc temp for linking */
8a4b65b4 3977
fa8d436c
UD
3978 /*
3979 Convert request size to internal form by adding SIZE_SZ bytes
3980 overhead plus possibly more to obtain necessary alignment and/or
3981 to obtain a size of at least MINSIZE, the smallest allocatable
3982 size. Also, checked_request2size traps (returning 0) request sizes
3983 that are so large that they wrap around zero when padded and
3984 aligned.
3985 */
f65fd747 3986
fa8d436c 3987 checked_request2size(bytes, nb);
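/*
  Illustrative expansion (a sketch; assumes the common 64-bit layout with
  SIZE_SZ == 8, MALLOC_ALIGN_MASK == 15 and MINSIZE == 32): the rounding
  performed by checked_request2size is essentially

    nb = (bytes + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)
           ? MINSIZE
           : (bytes + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;

  so a request of 100 bytes becomes nb == 112, and a request of 5 bytes
  becomes nb == MINSIZE == 32.
*/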
f65fd747 3988
fa8d436c
UD
3989 /*
3990 If the size qualifies as a fastbin, first check corresponding bin.
3991 This code is safe to execute even if av is not yet initialized, so we
3992 can try it without checking, which saves some time on this fast path.
3993 */
f65fd747 3994
9bf248c6 3995 if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
6cce6540
UD
3996 long int idx = fastbin_index(nb);
3997 fb = &(av->fastbins[idx]);
fa8d436c 3998 if ( (victim = *fb) != 0) {
6cce6540
UD
3999 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
4000 malloc_printerr (check_action, "malloc(): memory corruption (fast)",
4001 chunk2mem (victim));
fa8d436c
UD
4002 *fb = victim->fd;
4003 check_remalloced_chunk(av, victim, nb);
854278df
UD
4004 void *p = chunk2mem(victim);
4005 if (__builtin_expect (perturb_byte, 0))
4006 alloc_perturb (p, bytes);
4007 return p;
fa8d436c 4008 }
f65fd747
UD
4009 }
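/*
  Minimal sketch of the fastbin fast path above (illustrative; the real
  code additionally validates the bin index of the chunk it removes and
  applies the optional perturbation):

    fb = &av->fastbins[fastbin_index(nb)];       head of a LIFO list
    if ((victim = *fb) != 0) {
      *fb = victim->fd;                          O(1) unlink, no splitting
      return chunk2mem(victim);
    }
*/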
4010
fa8d436c
UD
4011 /*
4012 If a small request, check regular bin. Since these "smallbins"
4013 hold one size each, no searching within bins is necessary.
4014 (For a large request, we need to wait until unsorted chunks are
4015 processed to find best fit. But for small ones, fits are exact
4016 anyway, so we can check now, which is faster.)
4017 */
f65fd747 4018
fa8d436c
UD
4019 if (in_smallbin_range(nb)) {
4020 idx = smallbin_index(nb);
4021 bin = bin_at(av,idx);
7799b7b3 4022
fa8d436c
UD
4023 if ( (victim = last(bin)) != bin) {
4024 if (victim == 0) /* initialization check */
4025 malloc_consolidate(av);
4026 else {
4027 bck = victim->bk;
4028 set_inuse_bit_at_offset(victim, nb);
4029 bin->bk = bck;
4030 bck->fd = bin;
4031
4032 if (av != &main_arena)
4033 victim->size |= NON_MAIN_ARENA;
4034 check_malloced_chunk(av, victim, nb);
854278df
UD
4035 void *p = chunk2mem(victim);
4036 if (__builtin_expect (perturb_byte, 0))
4037 alloc_perturb (p, bytes);
4038 return p;
fa8d436c
UD
4039 }
4040 }
f65fd747
UD
4041 }
4042
a9177ff5 4043 /*
fa8d436c
UD
4044 If this is a large request, consolidate fastbins before continuing.
4045 While it might look excessive to kill all fastbins before
4046 even seeing if there is space available, this avoids
4047 fragmentation problems normally associated with fastbins.
4048 Also, in practice, programs tend to have runs of either small or
a9177ff5 4049 large requests, but less often mixtures, so consolidation is not
fa8d436c
UD
4050 invoked all that often in most programs. And the programs in which
4051 it is called frequently would otherwise tend to fragment.
4052 */
7799b7b3 4053
fa8d436c
UD
4054 else {
4055 idx = largebin_index(nb);
a9177ff5 4056 if (have_fastchunks(av))
fa8d436c 4057 malloc_consolidate(av);
7799b7b3 4058 }
f65fd747 4059
fa8d436c
UD
4060 /*
4061 Process recently freed or remaindered chunks, taking one only if
4062 it is an exact fit, or, if this is a small request, the chunk is the remainder from
4063 the most recent non-exact fit. Place other traversed chunks in
4064 bins. Note that this step is the only place in any routine where
4065 chunks are placed in bins.
4066
4067 The outer loop here is needed because we might not realize until
4068 near the end of malloc that we should have consolidated, so must
4069 do so and retry. This happens at most once, and only when we would
4070 otherwise need to expand memory to service a "small" request.
4071 */
a9177ff5
RM
4072
4073 for(;;) {
4074
72320021
UD
4075 int iters = 0;
4076 bool any_larger = false;
fa8d436c
UD
4077 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4078 bck = victim->bk;
6cce6540
UD
4079 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
4080 || __builtin_expect (victim->size > av->system_mem, 0))
4081 malloc_printerr (check_action, "malloc(): memory corruption",
4082 chunk2mem (victim));
fa8d436c
UD
4083 size = chunksize(victim);
4084
a9177ff5 4085 /*
fa8d436c
UD
4086 If a small request, try to use last remainder if it is the
4087 only chunk in unsorted bin. This helps promote locality for
4088 runs of consecutive small requests. This is the only
4089 exception to best-fit, and applies only when there is
4090 no exact fit for a small chunk.
4091 */
4092
a9177ff5 4093 if (in_smallbin_range(nb) &&
fa8d436c
UD
4094 bck == unsorted_chunks(av) &&
4095 victim == av->last_remainder &&
4096 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4097
4098 /* split and reattach remainder */
4099 remainder_size = size - nb;
4100 remainder = chunk_at_offset(victim, nb);
4101 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
a9177ff5 4102 av->last_remainder = remainder;
fa8d436c 4103 remainder->bk = remainder->fd = unsorted_chunks(av);
a9177ff5 4104
fa8d436c
UD
4105 set_head(victim, nb | PREV_INUSE |
4106 (av != &main_arena ? NON_MAIN_ARENA : 0));
4107 set_head(remainder, remainder_size | PREV_INUSE);
4108 set_foot(remainder, remainder_size);
a9177ff5 4109
fa8d436c 4110 check_malloced_chunk(av, victim, nb);
854278df
UD
4111 void *p = chunk2mem(victim);
4112 if (__builtin_expect (perturb_byte, 0))
4113 alloc_perturb (p, bytes);
4114 return p;
fa8d436c 4115 }
f65fd747 4116
fa8d436c
UD
4117 /* remove from unsorted list */
4118 unsorted_chunks(av)->bk = bck;
4119 bck->fd = unsorted_chunks(av);
a9177ff5 4120
fa8d436c 4121 /* Take now instead of binning if exact fit */
a9177ff5 4122
fa8d436c
UD
4123 if (size == nb) {
4124 set_inuse_bit_at_offset(victim, size);
4125 if (av != &main_arena)
4126 victim->size |= NON_MAIN_ARENA;
4127 check_malloced_chunk(av, victim, nb);
854278df
UD
4128 void *p = chunk2mem(victim);
4129 if (__builtin_expect (perturb_byte, 0))
4130 alloc_perturb (p, bytes);
4131 return p;
fa8d436c 4132 }
a9177ff5 4133
fa8d436c 4134 /* place chunk in bin */
a9177ff5 4135
fa8d436c
UD
4136 if (in_smallbin_range(size)) {
4137 victim_index = smallbin_index(size);
4138 bck = bin_at(av, victim_index);
4139 fwd = bck->fd;
4140 }
4141 else {
4142 victim_index = largebin_index(size);
4143 bck = bin_at(av, victim_index);
4144 fwd = bck->fd;
4145
4146 /* maintain large bins in sorted order */
4147 if (fwd != bck) {
4148 /* Or with inuse bit to speed comparisons */
4149 size |= PREV_INUSE;
4150 /* if smaller than smallest, bypass loop below */
4151 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
4152 if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
4153 fwd = bck;
4154 bck = bck->bk;
4155 }
4156 else {
4157 assert((fwd->size & NON_MAIN_ARENA) == 0);
4158 while ((unsigned long)(size) < (unsigned long)(fwd->size)) {
4159 fwd = fwd->fd;
4160 assert((fwd->size & NON_MAIN_ARENA) == 0);
4161 }
4162 bck = fwd->bk;
4163 }
4164 }
4165 }
a9177ff5 4166
fa8d436c
UD
4167 mark_bin(av, victim_index);
4168 victim->bk = bck;
4169 victim->fd = fwd;
4170 fwd->bk = victim;
4171 bck->fd = victim;
3997b7c4 4172
41999a1a 4173 if (size >= nb + MINSIZE)
3997b7c4
UD
4174 any_larger = true;
4175#define MAX_ITERS 10000
4176 if (++iters >= MAX_ITERS)
4177 break;
fa8d436c 4178 }
a9177ff5 4179
fa8d436c
UD
4180 /*
4181 If a large request, scan through the chunks of current bin in
4182 sorted order to find smallest that fits. This is the only step
4183 where an unbounded number of chunks might be scanned without doing
4184 anything useful with them. However the lists tend to be short.
4185 */
a9177ff5 4186
fa8d436c
UD
4187 if (!in_smallbin_range(nb)) {
4188 bin = bin_at(av, idx);
f65fd747 4189
fa8d436c
UD
4190 /* skip scan if empty or largest chunk is too small */
4191 if ((victim = last(bin)) != bin &&
4192 (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {
f65fd747 4193
a9177ff5 4194 while (((unsigned long)(size = chunksize(victim)) <
fa8d436c
UD
4195 (unsigned long)(nb)))
4196 victim = victim->bk;
f65fd747 4197
fa8d436c
UD
4198 remainder_size = size - nb;
4199 unlink(victim, bck, fwd);
a9177ff5 4200
fa8d436c
UD
4201 /* Exhaust */
4202 if (remainder_size < MINSIZE) {
4203 set_inuse_bit_at_offset(victim, size);
4204 if (av != &main_arena)
4205 victim->size |= NON_MAIN_ARENA;
fa8d436c
UD
4206 }
4207 /* Split */
4208 else {
4209 remainder = chunk_at_offset(victim, nb);
a334319f
UD
4210 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4211 remainder->bk = remainder->fd = unsorted_chunks(av);
fa8d436c
UD
4212 set_head(victim, nb | PREV_INUSE |
4213 (av != &main_arena ? NON_MAIN_ARENA : 0));
4214 set_head(remainder, remainder_size | PREV_INUSE);
4215 set_foot(remainder, remainder_size);
a9177ff5 4216 }
854278df
UD
4217 check_malloced_chunk(av, victim, nb);
4218 void *p = chunk2mem(victim);
4219 if (__builtin_expect (perturb_byte, 0))
4220 alloc_perturb (p, bytes);
4221 return p;
fa8d436c 4222 }
a9177ff5 4223 }
f65fd747 4224
fa8d436c
UD
4225 /*
4226 Search for a chunk by scanning bins, starting with next largest
4227 bin. This search is strictly by best-fit; i.e., the smallest
4228 (with ties going to approximately the least recently used) chunk
4229 that fits is selected.
a9177ff5 4230
fa8d436c
UD
4231 The bitmap avoids needing to check that most blocks are nonempty.
4232 The particular case of skipping all bins during warm-up phases
4233 when no chunks have been returned yet is faster than it might look.
4234 */
a9177ff5 4235
fa8d436c
UD
4236 ++idx;
4237 bin = bin_at(av,idx);
4238 block = idx2block(idx);
4239 map = av->binmap[block];
4240 bit = idx2bit(idx);
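/*
  Worked example (illustrative; assumes BINMAPSHIFT == 5, i.e. 32 bins
  per binmap word): for bin index 70,

    block = 70 >> 5        = 2
    bit   = 1 << (70 & 31) = 1 << 6

  so the loop below starts at bit 6 of av->binmap[2]; binmap words that
  are entirely zero are skipped 32 bins at a time.
*/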
a9177ff5 4241
fa8d436c
UD
4242 for (;;) {
4243
4244 /* Skip rest of block if there are no more set bits in this block. */
4245 if (bit > map || bit == 0) {
4246 do {
4247 if (++block >= BINMAPSIZE) /* out of bins */
4248 goto use_top;
4249 } while ( (map = av->binmap[block]) == 0);
4250
4251 bin = bin_at(av, (block << BINMAPSHIFT));
4252 bit = 1;
4253 }
a9177ff5 4254
fa8d436c
UD
4255 /* Advance to bin with set bit. There must be one. */
4256 while ((bit & map) == 0) {
4257 bin = next_bin(bin);
4258 bit <<= 1;
4259 assert(bit != 0);
4260 }
a9177ff5 4261
fa8d436c
UD
4262 /* Inspect the bin. It is likely to be non-empty */
4263 victim = last(bin);
a9177ff5 4264
fa8d436c
UD
4265 /* If a false alarm (empty bin), clear the bit. */
4266 if (victim == bin) {
4267 av->binmap[block] = map &= ~bit; /* Write through */
4268 bin = next_bin(bin);
4269 bit <<= 1;
4270 }
a9177ff5 4271
fa8d436c
UD
4272 else {
4273 size = chunksize(victim);
4274
4275 /* We know the first chunk in this bin is big enough to use. */
4276 assert((unsigned long)(size) >= (unsigned long)(nb));
4277
4278 remainder_size = size - nb;
a9177ff5 4279
fa8d436c
UD
4280 /* unlink */
4281 bck = victim->bk;
4282 bin->bk = bck;
4283 bck->fd = bin;
a9177ff5 4284
fa8d436c
UD
4285 /* Exhaust */
4286 if (remainder_size < MINSIZE) {
4287 set_inuse_bit_at_offset(victim, size);
4288 if (av != &main_arena)
4289 victim->size |= NON_MAIN_ARENA;
fa8d436c 4290 }
a9177ff5 4291
fa8d436c
UD
4292 /* Split */
4293 else {
4294 remainder = chunk_at_offset(victim, nb);
a9177ff5 4295
41999a1a
UD
4296 /* We cannot assume the unsorted list is empty and therefore
4297 have to perform a complete insert here. */
4298 bck = unsorted_chunks(av);
4299 fwd = bck->fd;
4300 remainder->bk = bck;
4301 remainder->fd = fwd;
4302 bck->fd = remainder;
4303 fwd->bk = remainder;
4304
fa8d436c 4305 /* advertise as last remainder */
a9177ff5
RM
4306 if (in_smallbin_range(nb))
4307 av->last_remainder = remainder;
4308
fa8d436c
UD
4309 set_head(victim, nb | PREV_INUSE |
4310 (av != &main_arena ? NON_MAIN_ARENA : 0));
4311 set_head(remainder, remainder_size | PREV_INUSE);
4312 set_foot(remainder, remainder_size);
fa8d436c 4313 }
854278df
UD
4314 check_malloced_chunk(av, victim, nb);
4315 void *p = chunk2mem(victim);
4316 if (__builtin_expect (perturb_byte, 0))
4317 alloc_perturb (p, bytes);
4318 return p;
fa8d436c
UD
4319 }
4320 }
f65fd747 4321
a9177ff5 4322 use_top:
fa8d436c
UD
4323 /*
4324 If large enough, split off the chunk bordering the end of memory
4325 (held in av->top). Note that this is in accord with the best-fit
4326 search rule. In effect, av->top is treated as larger (and thus
4327 less well fitting) than any other available chunk since it can
4328 be extended to be as large as necessary (up to system
4329 limitations).
4330
4331 We require that av->top always exists (i.e., has size >=
4332 MINSIZE) after initialization, so if it would otherwise be
4333 exhausted by the current request, it is replenished. (The main
4334 reason for ensuring it exists is that we may need MINSIZE space
4335 to put in fenceposts in sysmalloc.)
4336 */
f65fd747 4337
fa8d436c
UD
4338 victim = av->top;
4339 size = chunksize(victim);
a9177ff5 4340
fa8d436c
UD
4341 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
4342 remainder_size = size - nb;
4343 remainder = chunk_at_offset(victim, nb);
4344 av->top = remainder;
4345 set_head(victim, nb | PREV_INUSE |
4346 (av != &main_arena ? NON_MAIN_ARENA : 0));
4347 set_head(remainder, remainder_size | PREV_INUSE);
f65fd747 4348
fa8d436c 4349 check_malloced_chunk(av, victim, nb);
854278df
UD
4350 void *p = chunk2mem(victim);
4351 if (__builtin_expect (perturb_byte, 0))
4352 alloc_perturb (p, bytes);
4353 return p;
fa8d436c 4354 }
f65fd747 4355
fa8d436c
UD
4356 /*
4357 If there is space available in fastbins, consolidate and retry,
4358 to possibly avoid expanding memory. This can occur only if nb is
4359 in smallbin range so we didn't consolidate upon entry.
4360 */
f65fd747 4361
fa8d436c
UD
4362 else if (have_fastchunks(av)) {
4363 assert(in_smallbin_range(nb));
4364 malloc_consolidate(av);
4365 idx = smallbin_index(nb); /* restore original bin index */
4366 }
f65fd747 4367
a9177ff5
RM
4368 /*
4369 Otherwise, relay to handle system-dependent cases
fa8d436c 4370 */
854278df
UD
4371 else {
4372 void *p = sYSMALLOc(nb, av);
4373 if (__builtin_expect (perturb_byte, 0))
4374 alloc_perturb (p, bytes);
4375 return p;
4376 }
fa8d436c
UD
4377 }
4378}
f65fd747 4379
fa8d436c
UD
4380/*
4381 ------------------------------ free ------------------------------
f65fd747
UD
4382*/
4383
f1c5213d 4384void
fa8d436c 4385_int_free(mstate av, Void_t* mem)
f65fd747 4386{
fa8d436c
UD
4387 mchunkptr p; /* chunk corresponding to mem */
4388 INTERNAL_SIZE_T size; /* its size */
4389 mfastbinptr* fb; /* associated fastbin */
4390 mchunkptr nextchunk; /* next contiguous chunk */
4391 INTERNAL_SIZE_T nextsize; /* its size */
4392 int nextinuse; /* true if nextchunk is used */
4393 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4394 mchunkptr bck; /* misc temp for linking */
4395 mchunkptr fwd; /* misc temp for linking */
4396
37fa1953 4397 const char *errstr = NULL;
f65fd747 4398
37fa1953
UD
4399 p = mem2chunk(mem);
4400 size = chunksize(p);
f65fd747 4401
37fa1953
UD
4402 /* Little security check which won't hurt performance: the
4403 allocator never wraps around at the end of the address space.
4404 Therefore we can exclude some size values which might appear
4405 here by accident or by "design" from some intruder. */
dc165f7b 4406 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
073f560e 4407 || __builtin_expect (misaligned_chunk (p), 0))
37fa1953
UD
4408 {
4409 errstr = "free(): invalid pointer";
4410 errout:
4411 malloc_printerr (check_action, errstr, mem);
4412 return;
fa8d436c 4413 }
bf589066
UD
4414 /* We know that each chunk is at least MINSIZE bytes in size. */
4415 if (__builtin_expect (size < MINSIZE, 0))
4416 {
4417 errstr = "free(): invalid size";
4418 goto errout;
4419 }
f65fd747 4420
37fa1953 4421 check_inuse_chunk(av, p);
f65fd747 4422
37fa1953
UD
4423 /*
4424 If eligible, place chunk on a fastbin so it can be found
4425 and used quickly in malloc.
4426 */
6bf4302e 4427
9bf248c6 4428 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
6bf4302e 4429
37fa1953
UD
4430#if TRIM_FASTBINS
4431 /*
4432 If TRIM_FASTBINS set, don't place chunks
4433 bordering top into fastbins
4434 */
4435 && (chunk_at_offset(p, size) != av->top)
4436#endif
4437 ) {
fa8d436c 4438
893e6098
UD
4439 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
4440 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4441 >= av->system_mem, 0))
4442 {
76761b63 4443 errstr = "free(): invalid next size (fast)";
893e6098
UD
4444 goto errout;
4445 }
4446
37fa1953
UD
4447 set_fastchunks(av);
4448 fb = &(av->fastbins[fastbin_index(size)]);
4449 /* Another simple check: make sure the top of the bin is not the
4450 record we are going to add (i.e., double free). */
4451 if (__builtin_expect (*fb == p, 0))
4452 {
4453 errstr = "double free or corruption (fasttop)";
4454 goto errout;
fa8d436c 4455 }
854278df
UD
4456
4457 if (__builtin_expect (perturb_byte, 0))
4458 free_perturb (mem, size - SIZE_SZ);
4459
37fa1953
UD
4460 p->fd = *fb;
4461 *fb = p;
4462 }
f65fd747 4463
37fa1953
UD
4464 /*
4465 Consolidate other non-mmapped chunks as they arrive.
4466 */
fa8d436c 4467
37fa1953
UD
4468 else if (!chunk_is_mmapped(p)) {
4469 nextchunk = chunk_at_offset(p, size);
fa8d436c 4470
37fa1953
UD
4471 /* Lightweight tests: check whether the block is already the
4472 top block. */
4473 if (__builtin_expect (p == av->top, 0))
4474 {
4475 errstr = "double free or corruption (top)";
4476 goto errout;
4477 }
4478 /* Or whether the next chunk is beyond the boundaries of the arena. */
4479 if (__builtin_expect (contiguous (av)
4480 && (char *) nextchunk
4481 >= ((char *) av->top + chunksize(av->top)), 0))
4482 {
4483 errstr = "double free or corruption (out)";
4484 goto errout;
4485 }
4486 /* Or whether the block is actually not marked used. */
4487 if (__builtin_expect (!prev_inuse(nextchunk), 0))
4488 {
4489 errstr = "double free or corruption (!prev)";
4490 goto errout;
4491 }
fa8d436c 4492
37fa1953 4493 nextsize = chunksize(nextchunk);
893e6098
UD
4494 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
4495 || __builtin_expect (nextsize >= av->system_mem, 0))
4496 {
76761b63 4497 errstr = "free(): invalid next size (normal)";
893e6098
UD
4498 goto errout;
4499 }
fa8d436c 4500
854278df
UD
4501 if (__builtin_expect (perturb_byte, 0))
4502 free_perturb (mem, size - SIZE_SZ);
4503
37fa1953
UD
4504 /* consolidate backward */
4505 if (!prev_inuse(p)) {
4506 prevsize = p->prev_size;
4507 size += prevsize;
4508 p = chunk_at_offset(p, -((long) prevsize));
4509 unlink(p, bck, fwd);
4510 }
a9177ff5 4511
37fa1953
UD
4512 if (nextchunk != av->top) {
4513 /* get and clear inuse bit */
4514 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4515
4516 /* consolidate forward */
4517 if (!nextinuse) {
4518 unlink(nextchunk, bck, fwd);
4519 size += nextsize;
4520 } else
4521 clear_inuse_bit_at_offset(nextchunk, 0);
10dc2a90 4522
fa8d436c 4523 /*
37fa1953
UD
4524 Place the chunk in unsorted chunk list. Chunks are
4525 not placed into regular bins until after they have
4526 been given one chance to be used in malloc.
fa8d436c 4527 */
f65fd747 4528
37fa1953
UD
4529 bck = unsorted_chunks(av);
4530 fwd = bck->fd;
4531 p->bk = bck;
4532 p->fd = fwd;
4533 bck->fd = p;
4534 fwd->bk = p;
8a4b65b4 4535
37fa1953
UD
4536 set_head(p, size | PREV_INUSE);
4537 set_foot(p, size);
4538
4539 check_free_chunk(av, p);
4540 }
4541
4542 /*
4543 If the chunk borders the current high end of memory,
4544 consolidate into top
4545 */
4546
4547 else {
4548 size += nextsize;
4549 set_head(p, size | PREV_INUSE);
4550 av->top = p;
4551 check_chunk(av, p);
4552 }
4553
4554 /*
4555 If freeing a large space, consolidate possibly-surrounding
4556 chunks. Then, if the total unused topmost memory exceeds trim
4557 threshold, ask malloc_trim to reduce top.
4558
4559 Unless max_fast is 0, we don't know if there are fastbins
4560 bordering top, so we cannot tell for sure whether threshold
4561 has been reached unless fastbins are consolidated. But we
4562 don't want to consolidate on each free. As a compromise,
4563 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4564 is reached.
4565 */
fa8d436c 4566
37fa1953
UD
4567 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4568 if (have_fastchunks(av))
4569 malloc_consolidate(av);
fa8d436c 4570
37fa1953 4571 if (av == &main_arena) {
a9177ff5 4572#ifndef MORECORE_CANNOT_TRIM
37fa1953
UD
4573 if ((unsigned long)(chunksize(av->top)) >=
4574 (unsigned long)(mp_.trim_threshold))
4575 sYSTRIm(mp_.top_pad, av);
fa8d436c 4576#endif
37fa1953
UD
4577 } else {
4578 /* Always try heap_trim(), even if the top chunk is not
4579 large, because the corresponding heap might go away. */
4580 heap_info *heap = heap_for_ptr(top(av));
fa8d436c 4581
37fa1953
UD
4582 assert(heap->ar_ptr == av);
4583 heap_trim(heap, mp_.top_pad);
fa8d436c 4584 }
fa8d436c 4585 }
10dc2a90 4586
37fa1953
UD
4587 }
4588 /*
4589 If the chunk was allocated via mmap, release via munmap(). Note
4590 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4591 user must have overwritten memory. There's nothing we can do to
4592 catch this error unless MALLOC_DEBUG is set, in which case
4593 check_inuse_chunk (above) will have triggered error.
4594 */
4595
4596 else {
fa8d436c 4597#if HAVE_MMAP
c120d94d 4598 munmap_chunk (p);
fa8d436c 4599#endif
fa8d436c 4600 }
10dc2a90
UD
4601}
4602
fa8d436c
UD
4603/*
4604 ------------------------- malloc_consolidate -------------------------
4605
4606 malloc_consolidate is a specialized version of free() that tears
4607 down chunks held in fastbins. Free itself cannot be used for this
4608 purpose since, among other things, it might place chunks back onto
4609 fastbins. So, instead, we need to use a minor variant of the same
4610 code.
a9177ff5 4611
fa8d436c
UD
4612 Also, because this routine needs to be called the first time through
4613 malloc anyway, it turns out to be the perfect place to trigger
4614 initialization code.
4615*/
4616
10dc2a90 4617#if __STD_C
fa8d436c 4618static void malloc_consolidate(mstate av)
10dc2a90 4619#else
fa8d436c 4620static void malloc_consolidate(av) mstate av;
10dc2a90
UD
4621#endif
4622{
fa8d436c
UD
4623 mfastbinptr* fb; /* current fastbin being consolidated */
4624 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4625 mchunkptr p; /* current chunk being consolidated */
4626 mchunkptr nextp; /* next chunk to consolidate */
4627 mchunkptr unsorted_bin; /* bin header */
4628 mchunkptr first_unsorted; /* chunk to link to */
4629
4630 /* These have same use as in free() */
4631 mchunkptr nextchunk;
4632 INTERNAL_SIZE_T size;
4633 INTERNAL_SIZE_T nextsize;
4634 INTERNAL_SIZE_T prevsize;
4635 int nextinuse;
4636 mchunkptr bck;
4637 mchunkptr fwd;
10dc2a90 4638
fa8d436c
UD
4639 /*
4640 If max_fast is 0, we know that av hasn't
4641 yet been initialized, in which case do so below
4642 */
10dc2a90 4643
9bf248c6 4644 if (get_max_fast () != 0) {
fa8d436c 4645 clear_fastchunks(av);
10dc2a90 4646
fa8d436c 4647 unsorted_bin = unsorted_chunks(av);
10dc2a90 4648
fa8d436c
UD
4649 /*
4650 Remove each chunk from fast bin and consolidate it, placing it
4651 then in unsorted bin. Among other reasons for doing this,
4652 placing in unsorted bin avoids needing to calculate actual bins
4653 until malloc is sure that chunks aren't immediately going to be
4654 reused anyway.
4655 */
a9177ff5 4656
9bf248c6 4657 maxfb = &(av->fastbins[fastbin_index(get_max_fast ())]);
fa8d436c
UD
4658 fb = &(av->fastbins[0]);
4659 do {
4660 if ( (p = *fb) != 0) {
4661 *fb = 0;
a9177ff5 4662
fa8d436c
UD
4663 do {
4664 check_inuse_chunk(av, p);
4665 nextp = p->fd;
a9177ff5 4666
fa8d436c
UD
4667 /* Slightly streamlined version of consolidation code in free() */
4668 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4669 nextchunk = chunk_at_offset(p, size);
4670 nextsize = chunksize(nextchunk);
a9177ff5 4671
fa8d436c
UD
4672 if (!prev_inuse(p)) {
4673 prevsize = p->prev_size;
4674 size += prevsize;
4675 p = chunk_at_offset(p, -((long) prevsize));
4676 unlink(p, bck, fwd);
4677 }
a9177ff5 4678
fa8d436c
UD
4679 if (nextchunk != av->top) {
4680 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
a9177ff5 4681
fa8d436c
UD
4682 if (!nextinuse) {
4683 size += nextsize;
4684 unlink(nextchunk, bck, fwd);
4685 } else
4686 clear_inuse_bit_at_offset(nextchunk, 0);
a9177ff5 4687
fa8d436c
UD
4688 first_unsorted = unsorted_bin->fd;
4689 unsorted_bin->fd = p;
4690 first_unsorted->bk = p;
a9177ff5 4691
fa8d436c
UD
4692 set_head(p, size | PREV_INUSE);
4693 p->bk = unsorted_bin;
4694 p->fd = first_unsorted;
4695 set_foot(p, size);
4696 }
a9177ff5 4697
fa8d436c
UD
4698 else {
4699 size += nextsize;
4700 set_head(p, size | PREV_INUSE);
4701 av->top = p;
4702 }
a9177ff5 4703
fa8d436c 4704 } while ( (p = nextp) != 0);
a9177ff5 4705
fa8d436c
UD
4706 }
4707 } while (fb++ != maxfb);
4708 }
4709 else {
4710 malloc_init_state(av);
4711 check_malloc_state(av);
4712 }
4713}
10dc2a90 4714
fa8d436c
UD
4715/*
4716 ------------------------------ realloc ------------------------------
4717*/
f65fd747 4718
f1c5213d 4719Void_t*
fa8d436c
UD
4720_int_realloc(mstate av, Void_t* oldmem, size_t bytes)
4721{
4722 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 4723
fa8d436c
UD
4724 mchunkptr oldp; /* chunk corresponding to oldmem */
4725 INTERNAL_SIZE_T oldsize; /* its size */
f65fd747 4726
fa8d436c
UD
4727 mchunkptr newp; /* chunk to return */
4728 INTERNAL_SIZE_T newsize; /* its size */
4729 Void_t* newmem; /* corresponding user mem */
f65fd747 4730
fa8d436c 4731 mchunkptr next; /* next contiguous chunk after oldp */
f65fd747 4732
fa8d436c
UD
4733 mchunkptr remainder; /* extra space at end of newp */
4734 unsigned long remainder_size; /* its size */
f65fd747 4735
fa8d436c
UD
4736 mchunkptr bck; /* misc temp for linking */
4737 mchunkptr fwd; /* misc temp for linking */
2ed5fd9a 4738
fa8d436c
UD
4739 unsigned long copysize; /* bytes to copy */
4740 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
a9177ff5 4741 INTERNAL_SIZE_T* s; /* copy source */
fa8d436c 4742 INTERNAL_SIZE_T* d; /* copy destination */
f65fd747 4743
76761b63 4744 const char *errstr = NULL;
f65fd747 4745
f65fd747 4746
fa8d436c 4747 checked_request2size(bytes, nb);
f65fd747 4748
fa8d436c
UD
4749 oldp = mem2chunk(oldmem);
4750 oldsize = chunksize(oldp);
f65fd747 4751
76761b63 4752 /* Simple tests for old block integrity. */
073f560e 4753 if (__builtin_expect (misaligned_chunk (oldp), 0))
76761b63
UD
4754 {
4755 errstr = "realloc(): invalid pointer";
4756 errout:
4757 malloc_printerr (check_action, errstr, oldmem);
4758 return NULL;
4759 }
4760 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4761 || __builtin_expect (oldsize >= av->system_mem, 0))
4762 {
4b04154d 4763 errstr = "realloc(): invalid old size";
76761b63
UD
4764 goto errout;
4765 }
4766
fa8d436c 4767 check_inuse_chunk(av, oldp);
f65fd747 4768
fa8d436c 4769 if (!chunk_is_mmapped(oldp)) {
f65fd747 4770
76761b63
UD
4771 next = chunk_at_offset(oldp, oldsize);
4772 INTERNAL_SIZE_T nextsize = chunksize(next);
4773 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4774 || __builtin_expect (nextsize >= av->system_mem, 0))
4775 {
4776 errstr = "realloc(): invalid next size";
4777 goto errout;
4778 }
4779
fa8d436c
UD
4780 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4781 /* already big enough; split below */
4782 newp = oldp;
4783 newsize = oldsize;
7799b7b3 4784 }
f65fd747 4785
fa8d436c 4786 else {
fa8d436c
UD
4787 /* Try to expand forward into top */
4788 if (next == av->top &&
76761b63 4789 (unsigned long)(newsize = oldsize + nextsize) >=
fa8d436c
UD
4790 (unsigned long)(nb + MINSIZE)) {
4791 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4792 av->top = chunk_at_offset(oldp, nb);
4793 set_head(av->top, (newsize - nb) | PREV_INUSE);
4794 check_inuse_chunk(av, oldp);
4795 return chunk2mem(oldp);
4796 }
a9177ff5 4797
fa8d436c 4798 /* Try to expand forward into next chunk; split off remainder below */
a9177ff5 4799 else if (next != av->top &&
fa8d436c 4800 !inuse(next) &&
76761b63 4801 (unsigned long)(newsize = oldsize + nextsize) >=
fa8d436c
UD
4802 (unsigned long)(nb)) {
4803 newp = oldp;
4804 unlink(next, bck, fwd);
4805 }
f65fd747 4806
fa8d436c
UD
4807 /* allocate, copy, free */
4808 else {
4809 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4810 if (newmem == 0)
4811 return 0; /* propagate failure */
a9177ff5 4812
fa8d436c
UD
4813 newp = mem2chunk(newmem);
4814 newsize = chunksize(newp);
a9177ff5 4815
fa8d436c
UD
4816 /*
4817 Avoid copy if newp is next chunk after oldp.
4818 */
4819 if (newp == next) {
4820 newsize += oldsize;
4821 newp = oldp;
4822 }
4823 else {
4824 /*
4825 Unroll copy of <= 36 bytes (72 if 8-byte sizes).
4826 We know that contents have an odd number of
4827 INTERNAL_SIZE_T-sized words; minimally 3.
4828 */
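	/*
	  Why odd and at least 3 (on the usual configurations where
	  MALLOC_ALIGNMENT == 2 * SIZE_SZ): oldsize is a multiple of
	  MALLOC_ALIGNMENT and at least MINSIZE, so oldsize / SIZE_SZ is
	  an even number >= 4, and ncopies = (oldsize - SIZE_SZ) / SIZE_SZ
	  is therefore an odd number >= 3, matching the assert below.
	*/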
a9177ff5 4829
fa8d436c
UD
4830 copysize = oldsize - SIZE_SZ;
4831 s = (INTERNAL_SIZE_T*)(oldmem);
4832 d = (INTERNAL_SIZE_T*)(newmem);
4833 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4834 assert(ncopies >= 3);
a9177ff5 4835
fa8d436c
UD
4836 if (ncopies > 9)
4837 MALLOC_COPY(d, s, copysize);
a9177ff5 4838
fa8d436c
UD
4839 else {
4840 *(d+0) = *(s+0);
4841 *(d+1) = *(s+1);
4842 *(d+2) = *(s+2);
4843 if (ncopies > 4) {
4844 *(d+3) = *(s+3);
4845 *(d+4) = *(s+4);
4846 if (ncopies > 6) {
4847 *(d+5) = *(s+5);
4848 *(d+6) = *(s+6);
4849 if (ncopies > 8) {
4850 *(d+7) = *(s+7);
4851 *(d+8) = *(s+8);
4852 }
4853 }
4854 }
4855 }
a9177ff5 4856
fa8d436c
UD
4857 _int_free(av, oldmem);
4858 check_inuse_chunk(av, newp);
4859 return chunk2mem(newp);
4860 }
4861 }
f65fd747
UD
4862 }
4863
fa8d436c 4864 /* If possible, free extra space in old or extended chunk */
f65fd747 4865
fa8d436c 4866 assert((unsigned long)(newsize) >= (unsigned long)(nb));
f65fd747 4867
f65fd747 4868 remainder_size = newsize - nb;
f65fd747 4869
fa8d436c
UD
4870 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4871 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4872 set_inuse_bit_at_offset(newp, newsize);
4873 }
4874 else { /* split remainder */
4875 remainder = chunk_at_offset(newp, nb);
4876 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4877 set_head(remainder, remainder_size | PREV_INUSE |
4878 (av != &main_arena ? NON_MAIN_ARENA : 0));
4879 /* Mark remainder as inuse so free() won't complain */
4880 set_inuse_bit_at_offset(remainder, remainder_size);
a9177ff5 4881 _int_free(av, chunk2mem(remainder));
fa8d436c 4882 }
f65fd747 4883
fa8d436c
UD
4884 check_inuse_chunk(av, newp);
4885 return chunk2mem(newp);
4886 }
f65fd747 4887
fa8d436c
UD
4888 /*
4889 Handle mmap cases
4890 */
f65fd747 4891
fa8d436c
UD
4892 else {
4893#if HAVE_MMAP
f65fd747 4894
fa8d436c
UD
4895#if HAVE_MREMAP
4896 INTERNAL_SIZE_T offset = oldp->prev_size;
4897 size_t pagemask = mp_.pagesize - 1;
4898 char *cp;
4899 unsigned long sum;
a9177ff5 4900
fa8d436c
UD
4901 /* Note the extra SIZE_SZ overhead */
4902 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4903
4904 /* no need to remap if the page-rounded size is unchanged */
a9177ff5 4905 if (oldsize == newsize - offset)
fa8d436c
UD
4906 return oldmem;
4907
4908 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
a9177ff5 4909
fa8d436c
UD
4910 if (cp != MAP_FAILED) {
4911
4912 newp = (mchunkptr)(cp + offset);
4913 set_head(newp, (newsize - offset)|IS_MMAPPED);
a9177ff5 4914
fa8d436c
UD
4915 assert(aligned_OK(chunk2mem(newp)));
4916 assert((newp->prev_size == offset));
a9177ff5 4917
fa8d436c
UD
4918 /* update statistics */
4919 sum = mp_.mmapped_mem += newsize - oldsize;
a9177ff5 4920 if (sum > (unsigned long)(mp_.max_mmapped_mem))
fa8d436c
UD
4921 mp_.max_mmapped_mem = sum;
4922#ifdef NO_THREADS
4923 sum += main_arena.system_mem;
a9177ff5 4924 if (sum > (unsigned long)(mp_.max_total_mem))
fa8d436c
UD
4925 mp_.max_total_mem = sum;
4926#endif
a9177ff5 4927
fa8d436c
UD
4928 return chunk2mem(newp);
4929 }
f65fd747 4930#endif
10dc2a90 4931
fa8d436c 4932 /* Note the extra SIZE_SZ overhead. */
a9177ff5 4933 if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
fa8d436c
UD
4934 newmem = oldmem; /* do nothing */
4935 else {
4936 /* Must alloc, copy, free. */
4937 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4938 if (newmem != 0) {
4939 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
4940 _int_free(av, oldmem);
4941 }
4942 }
4943 return newmem;
10dc2a90 4944
a9177ff5 4945#else
fa8d436c
UD
4946 /* If !HAVE_MMAP, but chunk_is_mmapped, the user must have overwritten memory */
4947 check_malloc_state(av);
4948 MALLOC_FAILURE_ACTION;
4949 return 0;
a2b08ee5 4950#endif
10dc2a90 4951 }
fa8d436c
UD
4952}
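/*
  Usage sketch (hypothetical helper, standard public realloc): whichever of
  the paths above is taken -- reuse in place, expansion into top or a free
  successor, or allocate/copy/free -- the caller must always adopt the
  returned pointer; the old block remains valid only when realloc fails.
*/
#if 0
#include <stdlib.h>

static char *append_byte (char *buf, size_t *len, char c)
{
  char *nbuf = realloc (buf, *len + 1);
  if (nbuf == NULL)
    return NULL;        /* old buffer unchanged, still owned by the caller */
  nbuf[*len] = c;
  *len += 1;
  return nbuf;
}
#endif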
4953
4954/*
4955 ------------------------------ memalign ------------------------------
4956*/
4957
f1c5213d 4958Void_t*
fa8d436c
UD
4959_int_memalign(mstate av, size_t alignment, size_t bytes)
4960{
4961 INTERNAL_SIZE_T nb; /* padded request size */
4962 char* m; /* memory returned by malloc call */
4963 mchunkptr p; /* corresponding chunk */
4964 char* brk; /* alignment point within p */
4965 mchunkptr newp; /* chunk to return */
4966 INTERNAL_SIZE_T newsize; /* its size */
4967 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4968 mchunkptr remainder; /* spare room at end to split off */
4969 unsigned long remainder_size; /* its size */
4970 INTERNAL_SIZE_T size;
f65fd747
UD
4971
4972 /* If less alignment is needed than we provide anyway, just relay to malloc */
4973
fa8d436c 4974 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
f65fd747
UD
4975
4976 /* Otherwise, ensure that it is at least a minimum chunk size */
4977
4978 if (alignment < MINSIZE) alignment = MINSIZE;
4979
fa8d436c
UD
4980 /* Make sure alignment is a power of 2 (in case MINSIZE is not). */
4981 if ((alignment & (alignment - 1)) != 0) {
4982 size_t a = MALLOC_ALIGNMENT * 2;
4983 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4984 alignment = a;
7799b7b3 4985 }
f65fd747 4986
fa8d436c
UD
4987 checked_request2size(bytes, nb);
4988
4989 /*
4990 Strategy: allocate an oversized chunk, find a spot within it that meets
4991 the alignment request, and then possibly free the leading and trailing space.
4992 */
4993
4994
4995 /* Call malloc with worst case padding to hit alignment. */
4996
4997 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4998
4999 if (m == 0) return 0; /* propagate failure */
5000
5001 p = mem2chunk(m);
5002
5003 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
5004
f65fd747 5005 /*
fa8d436c
UD
5006 Find an aligned spot inside chunk. Since we need to give back
5007 leading space in a chunk of at least MINSIZE, if the first
5008 calculation places us at a spot with less than MINSIZE leader,
5009 we can move to the next aligned spot -- we've allocated enough
5010 total room so that this is always possible.
f65fd747
UD
5011 */
5012
fa8d436c
UD
5013 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
5014 -((signed long) alignment));
5015 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
5016 brk += alignment;
f65fd747 5017
fa8d436c 5018 newp = (mchunkptr)brk;
f65fd747
UD
5019 leadsize = brk - (char*)(p);
5020 newsize = chunksize(p) - leadsize;
5021
fa8d436c
UD
5022 /* For mmapped chunks, just adjust offset */
5023 if (chunk_is_mmapped(p)) {
f65fd747
UD
5024 newp->prev_size = p->prev_size + leadsize;
5025 set_head(newp, newsize|IS_MMAPPED);
fa8d436c 5026 return chunk2mem(newp);
f65fd747 5027 }
f65fd747 5028
fa8d436c
UD
5029 /* Otherwise, give back leader, use the rest */
5030 set_head(newp, newsize | PREV_INUSE |
5031 (av != &main_arena ? NON_MAIN_ARENA : 0));
f65fd747 5032 set_inuse_bit_at_offset(newp, newsize);
fa8d436c
UD
5033 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5034 _int_free(av, chunk2mem(p));
f65fd747
UD
5035 p = newp;
5036
fa8d436c
UD
5037 assert (newsize >= nb &&
5038 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
f65fd747
UD
5039 }
5040
5041 /* Also give back spare room at the end */
fa8d436c
UD
5042 if (!chunk_is_mmapped(p)) {
5043 size = chunksize(p);
5044 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
5045 remainder_size = size - nb;
5046 remainder = chunk_at_offset(p, nb);
5047 set_head(remainder, remainder_size | PREV_INUSE |
5048 (av != &main_arena ? NON_MAIN_ARENA : 0));
5049 set_head_size(p, nb);
5050 _int_free(av, chunk2mem(remainder));
5051 }
f65fd747
UD
5052 }
5053
fa8d436c
UD
5054 check_inuse_chunk(av, p);
5055 return chunk2mem(p);
f65fd747
UD
5056}
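/*
  The core of the alignment search above is the rounding step applied to the
  malloc'ed address; for a power-of-two alignment it is equivalent to the
  sketch below (hypothetical helper, shown only for illustration).
*/
#if 0
#include <stdint.h>

static uintptr_t align_up (uintptr_t addr, uintptr_t alignment)
{
  /* Same effect as the "& -((signed long) alignment)" mask used above.  */
  return (addr + alignment - 1) & ~(alignment - 1);
}
#endif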
5057
fa8d436c
UD
5058#if 0
5059/*
5060 ------------------------------ calloc ------------------------------
5061*/
5062
5063#if __STD_C
5064Void_t* cALLOc(size_t n_elements, size_t elem_size)
5065#else
5066Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5067#endif
5068{
5069 mchunkptr p;
5070 unsigned long clearsize;
5071 unsigned long nclears;
5072 INTERNAL_SIZE_T* d;
5073
5074 Void_t* mem = mALLOc(n_elements * elem_size);
5075
5076 if (mem != 0) {
5077 p = mem2chunk(mem);
5078
5079#if MMAP_CLEARS
5080 if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
5081#endif
a9177ff5 5082 {
fa8d436c
UD
5083 /*
5084 Unroll clear of <= 36 bytes (72 if 8-byte sizes).
5085 We know that contents have an odd number of
5086 INTERNAL_SIZE_T-sized words; minimally 3.
5087 */
5088
5089 d = (INTERNAL_SIZE_T*)mem;
5090 clearsize = chunksize(p) - SIZE_SZ;
5091 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5092 assert(nclears >= 3);
f65fd747 5093
fa8d436c
UD
5094 if (nclears > 9)
5095 MALLOC_ZERO(d, clearsize);
5096
5097 else {
5098 *(d+0) = 0;
5099 *(d+1) = 0;
5100 *(d+2) = 0;
5101 if (nclears > 4) {
5102 *(d+3) = 0;
5103 *(d+4) = 0;
5104 if (nclears > 6) {
5105 *(d+5) = 0;
5106 *(d+6) = 0;
5107 if (nclears > 8) {
5108 *(d+7) = 0;
5109 *(d+8) = 0;
5110 }
5111 }
5112 }
5113 }
5114 }
5115 }
5116 return mem;
5117}
5118#endif /* 0 */
f65fd747 5119
88764ae2 5120#ifndef _LIBC
f65fd747 5121/*
fa8d436c 5122 ------------------------- independent_calloc -------------------------
f65fd747
UD
5123*/
5124
f1c5213d 5125Void_t**
f65fd747 5126#if __STD_C
fa8d436c 5127_int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
f65fd747 5128#else
fa8d436c
UD
5129_int_icalloc(av, n_elements, elem_size, chunks)
5130mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
f65fd747
UD
5131#endif
5132{
fa8d436c
UD
5133 size_t sz = elem_size; /* serves as 1-element array */
5134 /* opts arg of 3 means all elements are same size, and should be cleared */
5135 return iALLOc(av, n_elements, &sz, 3, chunks);
f65fd747
UD
5136}
5137
5138/*
fa8d436c 5139 ------------------------- independent_comalloc -------------------------
f65fd747
UD
5140*/
5141
f1c5213d 5142Void_t**
f65fd747 5143#if __STD_C
fa8d436c 5144_int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
f65fd747 5145#else
fa8d436c
UD
5146_int_icomalloc(av, n_elements, sizes, chunks)
5147mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
f65fd747
UD
5148#endif
5149{
fa8d436c 5150 return iALLOc(av, n_elements, sizes, 0, chunks);
f65fd747
UD
5151}
5152
f65fd747 5153
fa8d436c
UD
5154/*
5155 ------------------------------ ialloc ------------------------------
5156 ialloc provides common support for independent_X routines, handling all of
5157 the combinations that can result.
f65fd747 5158
fa8d436c
UD
5159 The opts arg has:
5160 bit 0 set if all elements are same size (using sizes[0])
5161 bit 1 set if elements should be zeroed
f65fd747
UD
5162*/
5163
fa8d436c
UD
5164
5165static Void_t**
f65fd747 5166#if __STD_C
fa8d436c 5167iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
f65fd747 5168#else
fa8d436c
UD
5169iALLOc(av, n_elements, sizes, opts, chunks)
5170mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
f65fd747
UD
5171#endif
5172{
fa8d436c
UD
5173 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
5174 INTERNAL_SIZE_T contents_size; /* total size of elements */
5175 INTERNAL_SIZE_T array_size; /* request size of pointer array */
5176 Void_t* mem; /* malloced aggregate space */
5177 mchunkptr p; /* corresponding chunk */
5178 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
5179 Void_t** marray; /* either "chunks" or malloced ptr array */
5180 mchunkptr array_chunk; /* chunk for malloced ptr array */
5181 int mmx; /* to disable mmap */
a9177ff5 5182 INTERNAL_SIZE_T size;
fa8d436c
UD
5183 INTERNAL_SIZE_T size_flags;
5184 size_t i;
5185
5186 /* Ensure initialization/consolidation */
5187 if (have_fastchunks(av)) malloc_consolidate(av);
5188
5189 /* compute array length, if needed */
5190 if (chunks != 0) {
5191 if (n_elements == 0)
5192 return chunks; /* nothing to do */
5193 marray = chunks;
5194 array_size = 0;
5195 }
5196 else {
5197 /* if empty req, must still return chunk representing empty array */
a9177ff5 5198 if (n_elements == 0)
fa8d436c
UD
5199 return (Void_t**) _int_malloc(av, 0);
5200 marray = 0;
5201 array_size = request2size(n_elements * (sizeof(Void_t*)));
5202 }
f65fd747 5203
fa8d436c
UD
5204 /* compute total element size */
5205 if (opts & 0x1) { /* all-same-size */
5206 element_size = request2size(*sizes);
5207 contents_size = n_elements * element_size;
5208 }
5209 else { /* add up all the sizes */
5210 element_size = 0;
5211 contents_size = 0;
a9177ff5
RM
5212 for (i = 0; i != n_elements; ++i)
5213 contents_size += request2size(sizes[i]);
10dc2a90 5214 }
f65fd747 5215
fa8d436c
UD
5216 /* subtract out alignment bytes from total to minimize overallocation */
5217 size = contents_size + array_size - MALLOC_ALIGN_MASK;
a9177ff5
RM
5218
5219 /*
fa8d436c
UD
5220 Allocate the aggregate chunk.
5221 But first disable mmap so malloc won't use it, since
5222 we would not be able to later free/realloc space internal
5223 to a segregated mmap region.
5224 */
5225 mmx = mp_.n_mmaps_max; /* disable mmap */
5226 mp_.n_mmaps_max = 0;
5227 mem = _int_malloc(av, size);
5228 mp_.n_mmaps_max = mmx; /* reset mmap */
a9177ff5 5229 if (mem == 0)
f65fd747
UD
5230 return 0;
5231
fa8d436c 5232 p = mem2chunk(mem);
a9177ff5 5233 assert(!chunk_is_mmapped(p));
fa8d436c 5234 remainder_size = chunksize(p);
f65fd747 5235
fa8d436c
UD
5236 if (opts & 0x2) { /* optionally clear the elements */
5237 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
7799b7b3 5238 }
f65fd747 5239
fa8d436c 5240 size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
f65fd747 5241
fa8d436c
UD
5242 /* If not provided, allocate the pointer array as final part of chunk */
5243 if (marray == 0) {
5244 array_chunk = chunk_at_offset(p, contents_size);
5245 marray = (Void_t**) (chunk2mem(array_chunk));
5246 set_head(array_chunk, (remainder_size - contents_size) | size_flags);
5247 remainder_size = contents_size;
5248 }
f65fd747 5249
fa8d436c
UD
5250 /* split out elements */
5251 for (i = 0; ; ++i) {
5252 marray[i] = chunk2mem(p);
5253 if (i != n_elements-1) {
a9177ff5 5254 if (element_size != 0)
fa8d436c
UD
5255 size = element_size;
5256 else
a9177ff5 5257 size = request2size(sizes[i]);
fa8d436c
UD
5258 remainder_size -= size;
5259 set_head(p, size | size_flags);
5260 p = chunk_at_offset(p, size);
5261 }
5262 else { /* the final element absorbs any overallocation slop */
5263 set_head(p, remainder_size | size_flags);
5264 break;
5265 }
5266 }
f65fd747 5267
fa8d436c
UD
5268#if MALLOC_DEBUG
5269 if (marray != chunks) {
5270 /* final element must have exactly exhausted chunk */
a9177ff5 5271 if (element_size != 0)
fa8d436c
UD
5272 assert(remainder_size == element_size);
5273 else
5274 assert(remainder_size == request2size(sizes[i]));
5275 check_inuse_chunk(av, mem2chunk(marray));
7799b7b3 5276 }
fa8d436c
UD
5277
5278 for (i = 0; i != n_elements; ++i)
5279 check_inuse_chunk(av, mem2chunk(marray[i]));
f65fd747
UD
5280#endif
5281
fa8d436c 5282 return marray;
f65fd747 5283}
88764ae2 5284#endif /* _LIBC */
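/*
  Sketch of the two wrappers above (only built when the independent_*
  support is compiled in, i.e. outside _LIBC): _int_icalloc requests n
  identical, zeroed elements (opts == 3), while _int_icomalloc takes an
  explicit sizes[] array and leaves the memory uninitialized (opts == 0).
  The helper name and the sizes are made up for the example.
*/
#if 0
static void carve_example (mstate av)
{
  Void_t *chunks[3];
  size_t sizes[3] = { 16, 40, 8 };

  /* Three zero-filled 32-byte elements, pointers stored into chunks[].  */
  Void_t **same = _int_icalloc (av, 3, 32, chunks);

  /* Three differently sized elements; the pointer array is allocated
     as the final part of the aggregate chunk.  */
  Void_t **each = _int_icomalloc (av, 3, sizes, 0);

  (void) same; (void) each;
}
#endif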
f65fd747 5285
f65fd747 5286
fa8d436c
UD
5287/*
5288 ------------------------------ valloc ------------------------------
f65fd747
UD
5289*/
5290
f1c5213d 5291Void_t*
f65fd747 5292#if __STD_C
fa8d436c 5293_int_valloc(mstate av, size_t bytes)
f65fd747 5294#else
fa8d436c 5295_int_valloc(av, bytes) mstate av; size_t bytes;
f65fd747
UD
5296#endif
5297{
fa8d436c
UD
5298 /* Ensure initialization/consolidation */
5299 if (have_fastchunks(av)) malloc_consolidate(av);
5300 return _int_memalign(av, mp_.pagesize, bytes);
f65fd747 5301}
f65fd747
UD
5302
5303/*
fa8d436c 5304 ------------------------------ pvalloc ------------------------------
f65fd747
UD
5305*/
5306
fa8d436c 5307
f1c5213d 5308Void_t*
f65fd747 5309#if __STD_C
fa8d436c 5310_int_pvalloc(mstate av, size_t bytes)
f65fd747 5311#else
fa8d436c 5312_int_pvalloc(av, bytes) mstate av; size_t bytes;
f65fd747
UD
5313#endif
5314{
fa8d436c 5315 size_t pagesz;
f65fd747 5316
fa8d436c
UD
5317 /* Ensure initialization/consolidation */
5318 if (have_fastchunks(av)) malloc_consolidate(av);
5319 pagesz = mp_.pagesize;
5320 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
f65fd747 5321}
a9177ff5 5322
f65fd747 5323
fa8d436c
UD
5324/*
5325 ------------------------------ malloc_trim ------------------------------
5326*/
8a4b65b4 5327
f65fd747 5328#if __STD_C
fa8d436c 5329int mTRIm(size_t pad)
f65fd747 5330#else
fa8d436c 5331int mTRIm(pad) size_t pad;
f65fd747
UD
5332#endif
5333{
fa8d436c 5334 mstate av = &main_arena; /* already locked */
f65fd747 5335
fa8d436c
UD
5336 /* Ensure initialization/consolidation */
5337 malloc_consolidate(av);
8a4b65b4 5338
a9177ff5 5339#ifndef MORECORE_CANNOT_TRIM
fa8d436c 5340 return sYSTRIm(pad, av);
8a4b65b4 5341#else
fa8d436c 5342 return 0;
f65fd747 5343#endif
f65fd747
UD
5344}
5345
f65fd747
UD
5346
5347/*
fa8d436c 5348 ------------------------- malloc_usable_size -------------------------
f65fd747
UD
5349*/
5350
5351#if __STD_C
fa8d436c 5352size_t mUSABLe(Void_t* mem)
f65fd747 5353#else
fa8d436c 5354size_t mUSABLe(mem) Void_t* mem;
f65fd747
UD
5355#endif
5356{
5357 mchunkptr p;
fa8d436c 5358 if (mem != 0) {
f65fd747 5359 p = mem2chunk(mem);
fa8d436c
UD
5360 if (chunk_is_mmapped(p))
5361 return chunksize(p) - 2*SIZE_SZ;
5362 else if (inuse(p))
f65fd747 5363 return chunksize(p) - SIZE_SZ;
f65fd747 5364 }
fa8d436c 5365 return 0;
f65fd747
UD
5366}
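/*
  Usage sketch for the public malloc_usable_size() alias defined at the end
  of this file (the helper is hypothetical): the value reported for a live
  chunk is at least the size that was requested, and the whole reported
  range may be written by the caller.
*/
#if 0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>

static void usable_size_example (void)
{
  char *p = malloc (100);
  if (p != NULL)
    {
      size_t usable = malloc_usable_size (p);   /* >= 100 */
      memset (p, 0, usable);                    /* the slack is usable too */
      free (p);
    }
}
#endif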
5367
fa8d436c
UD
5368/*
5369 ------------------------------ mallinfo ------------------------------
5370*/
f65fd747 5371
fa8d436c 5372struct mallinfo mALLINFo(mstate av)
f65fd747 5373{
fa8d436c 5374 struct mallinfo mi;
6dd67bd5 5375 size_t i;
f65fd747
UD
5376 mbinptr b;
5377 mchunkptr p;
f65fd747 5378 INTERNAL_SIZE_T avail;
fa8d436c
UD
5379 INTERNAL_SIZE_T fastavail;
5380 int nblocks;
5381 int nfastblocks;
f65fd747 5382
fa8d436c
UD
5383 /* Ensure initialization */
5384 if (av->top == 0) malloc_consolidate(av);
8a4b65b4 5385
fa8d436c 5386 check_malloc_state(av);
8a4b65b4 5387
fa8d436c
UD
5388 /* Account for top */
5389 avail = chunksize(av->top);
5390 nblocks = 1; /* top always exists */
f65fd747 5391
fa8d436c
UD
5392 /* traverse fastbins */
5393 nfastblocks = 0;
5394 fastavail = 0;
5395
5396 for (i = 0; i < NFASTBINS; ++i) {
5397 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5398 ++nfastblocks;
5399 fastavail += chunksize(p);
5400 }
5401 }
5402
5403 avail += fastavail;
f65fd747 5404
fa8d436c
UD
5405 /* traverse regular bins */
5406 for (i = 1; i < NBINS; ++i) {
5407 b = bin_at(av, i);
5408 for (p = last(b); p != b; p = p->bk) {
5409 ++nblocks;
5410 avail += chunksize(p);
5411 }
5412 }
f65fd747 5413
fa8d436c
UD
5414 mi.smblks = nfastblocks;
5415 mi.ordblks = nblocks;
5416 mi.fordblks = avail;
5417 mi.uordblks = av->system_mem - avail;
5418 mi.arena = av->system_mem;
5419 mi.hblks = mp_.n_mmaps;
5420 mi.hblkhd = mp_.mmapped_mem;
5421 mi.fsmblks = fastavail;
5422 mi.keepcost = chunksize(av->top);
5423 mi.usmblks = mp_.max_total_mem;
5424 return mi;
5425}
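/*
  Reading back the accounting computed above through the public mallinfo()
  wrapper (main arena only; the helper is hypothetical).  The field comments
  follow the assignments in mALLINFo.
*/
#if 0
#include <malloc.h>
#include <stdio.h>

static void print_arena_usage (void)
{
  struct mallinfo mi = mallinfo ();
  printf ("arena (system) bytes : %d\n", mi.arena);     /* av->system_mem */
  printf ("in-use bytes         : %d\n", mi.uordblks);  /* system_mem - avail */
  printf ("free bytes           : %d\n", mi.fordblks);  /* avail, incl. fastbins */
  printf ("trimmable (top) bytes: %d\n", mi.keepcost);  /* chunksize(av->top) */
}
#endif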
f65fd747 5426
fa8d436c
UD
5427/*
5428 ------------------------------ malloc_stats ------------------------------
f65fd747
UD
5429*/
5430
fa8d436c 5431void mSTATs()
f65fd747 5432{
8a4b65b4 5433 int i;
fa8d436c 5434 mstate ar_ptr;
8a4b65b4 5435 struct mallinfo mi;
fa8d436c 5436 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
8a4b65b4
UD
5437#if THREAD_STATS
5438 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
5439#endif
5440
a234e27d
UD
5441 if(__malloc_initialized < 0)
5442 ptmalloc_init ();
8dab36a1
UD
5443#ifdef _LIBC
5444 _IO_flockfile (stderr);
5445 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
5446 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5447#endif
fa8d436c
UD
5448 for (i=0, ar_ptr = &main_arena;; i++) {
5449 (void)mutex_lock(&ar_ptr->mutex);
5450 mi = mALLINFo(ar_ptr);
8a4b65b4
UD
5451 fprintf(stderr, "Arena %d:\n", i);
5452 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
5453 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
fa8d436c
UD
5454#if MALLOC_DEBUG > 1
5455 if (i > 0)
5456 dump_heap(heap_for_ptr(top(ar_ptr)));
5457#endif
8a4b65b4
UD
5458 system_b += mi.arena;
5459 in_use_b += mi.uordblks;
5460#if THREAD_STATS
5461 stat_lock_direct += ar_ptr->stat_lock_direct;
5462 stat_lock_loop += ar_ptr->stat_lock_loop;
5463 stat_lock_wait += ar_ptr->stat_lock_wait;
5464#endif
fa8d436c 5465 (void)mutex_unlock(&ar_ptr->mutex);
7e3be507
UD
5466 ar_ptr = ar_ptr->next;
5467 if(ar_ptr == &main_arena) break;
8a4b65b4 5468 }
7799b7b3 5469#if HAVE_MMAP
8a4b65b4 5470 fprintf(stderr, "Total (incl. mmap):\n");
7799b7b3
UD
5471#else
5472 fprintf(stderr, "Total:\n");
5473#endif
8a4b65b4
UD
5474 fprintf(stderr, "system bytes = %10u\n", system_b);
5475 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
5476#ifdef NO_THREADS
fa8d436c 5477 fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
8a4b65b4 5478#endif
f65fd747 5479#if HAVE_MMAP
fa8d436c
UD
5480 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
5481 fprintf(stderr, "max mmap bytes = %10lu\n",
5482 (unsigned long)mp_.max_mmapped_mem);
f65fd747
UD
5483#endif
5484#if THREAD_STATS
8a4b65b4 5485 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
f65fd747
UD
5486 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
5487 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
8a4b65b4
UD
5488 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
5489 fprintf(stderr, "locked total = %10ld\n",
5490 stat_lock_direct + stat_lock_loop + stat_lock_wait);
f65fd747 5491#endif
8dab36a1
UD
5492#ifdef _LIBC
5493 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
5494 _IO_funlockfile (stderr);
5495#endif
f65fd747
UD
5496}
5497
f65fd747
UD
5498
5499/*
fa8d436c 5500 ------------------------------ mallopt ------------------------------
f65fd747
UD
5501*/
5502
5503#if __STD_C
5504int mALLOPt(int param_number, int value)
5505#else
5506int mALLOPt(param_number, value) int param_number; int value;
5507#endif
5508{
fa8d436c
UD
5509 mstate av = &main_arena;
5510 int res = 1;
f65fd747 5511
0cb71e02
UD
5512 if(__malloc_initialized < 0)
5513 ptmalloc_init ();
fa8d436c
UD
5514 (void)mutex_lock(&av->mutex);
5515 /* Ensure initialization/consolidation */
5516 malloc_consolidate(av);
2f6d1f1b 5517
fa8d436c
UD
5518 switch(param_number) {
5519 case M_MXFAST:
5520 if (value >= 0 && value <= MAX_FAST_SIZE) {
9bf248c6 5521 set_max_fast(value);
fa8d436c
UD
5522 }
5523 else
5524 res = 0;
5525 break;
2f6d1f1b 5526
fa8d436c
UD
5527 case M_TRIM_THRESHOLD:
5528 mp_.trim_threshold = value;
1d05c2fb 5529 mp_.no_dyn_threshold = 1;
fa8d436c 5530 break;
2f6d1f1b 5531
fa8d436c
UD
5532 case M_TOP_PAD:
5533 mp_.top_pad = value;
1d05c2fb 5534 mp_.no_dyn_threshold = 1;
fa8d436c 5535 break;
2f6d1f1b 5536
fa8d436c
UD
5537 case M_MMAP_THRESHOLD:
5538#if USE_ARENAS
5539 /* Forbid setting the threshold too high. */
5540 if((unsigned long)value > HEAP_MAX_SIZE/2)
5541 res = 0;
5542 else
2f6d1f1b 5543#endif
fa8d436c 5544 mp_.mmap_threshold = value;
1d05c2fb 5545 mp_.no_dyn_threshold = 1;
fa8d436c 5546 break;
2f6d1f1b 5547
fa8d436c
UD
5548 case M_MMAP_MAX:
5549#if !HAVE_MMAP
5550 if (value != 0)
5551 res = 0;
5552 else
9a51759b 5553#endif
fa8d436c 5554 mp_.n_mmaps_max = value;
1d05c2fb 5555 mp_.no_dyn_threshold = 1;
fa8d436c 5556 break;
10dc2a90 5557
fa8d436c
UD
5558 case M_CHECK_ACTION:
5559 check_action = value;
5560 break;
854278df
UD
5561
5562 case M_PERTURB:
5563 perturb_byte = value;
5564 break;
b22fc5f5 5565 }
fa8d436c
UD
5566 (void)mutex_unlock(&av->mutex);
5567 return res;
b22fc5f5
UD
5568}
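/*
  Exercising the tunables handled in the switch above from application code
  (hypothetical helper).  Each call returns 1 on success and 0 if the value
  was rejected; each of the four shown also sets mp_.no_dyn_threshold,
  disabling the dynamic mmap/trim threshold adjustment.
*/
#if 0
#include <malloc.h>

static void tune_malloc (void)
{
  mallopt (M_TRIM_THRESHOLD, 64 * 1024);    /* trim top beyond 64 KB */
  mallopt (M_TOP_PAD, 0);                   /* no extra pad when growing top */
  mallopt (M_MMAP_THRESHOLD, 256 * 1024);   /* mmap requests of 256 KB and up */
  mallopt (M_MMAP_MAX, 1024);               /* at most 1024 mmapped regions */
}
#endif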
5569
10dc2a90 5570
a9177ff5 5571/*
fa8d436c
UD
5572 -------------------- Alternative MORECORE functions --------------------
5573*/
10dc2a90 5574
b22fc5f5 5575
fa8d436c
UD
5576/*
5577 General Requirements for MORECORE.
b22fc5f5 5578
fa8d436c 5579 The MORECORE function must have the following properties:
b22fc5f5 5580
fa8d436c 5581 If MORECORE_CONTIGUOUS is false:
10dc2a90 5582
fa8d436c
UD
5583 * MORECORE must allocate in multiples of pagesize. It will
5584 only be called with arguments that are multiples of pagesize.
10dc2a90 5585
a9177ff5 5586 * MORECORE(0) must return an address that is at least
fa8d436c 5587 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
10dc2a90 5588
fa8d436c 5589 else (i.e. If MORECORE_CONTIGUOUS is true):
10dc2a90 5590
fa8d436c
UD
5591 * Consecutive calls to MORECORE with positive arguments
5592 return increasing addresses, indicating that space has been
5593 contiguously extended.
10dc2a90 5594
fa8d436c
UD
5595 * MORECORE need not allocate in multiples of pagesize.
5596 Calls to MORECORE need not have args of multiples of pagesize.
10dc2a90 5597
fa8d436c 5598 * MORECORE need not page-align.
10dc2a90 5599
fa8d436c 5600 In either case:
10dc2a90 5601
fa8d436c
UD
5602 * MORECORE may allocate more memory than requested. (Or even less,
5603 but this will generally result in a malloc failure.)
10dc2a90 5604
fa8d436c
UD
5605 * MORECORE must not allocate memory when given argument zero, but
5606 instead return the address one past the end of the memory obtained from
5607 the previous nonzero call. This malloc does NOT call MORECORE(0)
5608 until at least one call with positive arguments is made, so
5609 the initial value returned is not important.
10dc2a90 5610
fa8d436c
UD
5611 * Even though consecutive calls to MORECORE need not return contiguous
5612 addresses, it must be OK for malloc'ed chunks to span multiple
5613 regions in those cases where they do happen to be contiguous.
10dc2a90 5614
fa8d436c
UD
5615 * MORECORE need not handle negative arguments -- it may instead
5616 just return MORECORE_FAILURE for them. Negative arguments are
5617 always multiples of pagesize. MORECORE must not misinterpret
5618 negative args as large positive unsigned args. You can suppress
5619 all such calls from even occurring by defining
5620 MORECORE_CANNOT_TRIM.
10dc2a90 5621
fa8d436c
UD
5622 There is some variation across systems about the type of the
5623 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5624 actually be size_t, because sbrk supports negative args, so it is
5625 normally the signed type of the same width as size_t (sometimes
5626 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5627 matter though. Internally, we use "long" as arguments, which should
5628 work across all reasonable possibilities.
ee74a442 5629
fa8d436c
UD
5630 Additionally, if MORECORE ever returns failure for a positive
5631 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
5632 system allocator. This is a useful backup strategy for systems with
5633 holes in address spaces -- in this case sbrk cannot contiguously
5634 expand the heap, but mmap may be able to map noncontiguous space.
7e3be507 5635
fa8d436c
UD
5636 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5637 a function that always returns MORECORE_FAILURE.
2e65ca2b 5638
fa8d436c
UD
5639 If you are using this malloc with something other than sbrk (or its
5640 emulation) to supply memory regions, you probably want to set
5641 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5642 allocator kindly contributed for pre-OSX macOS. It uses virtually
5643 but not necessarily physically contiguous non-paged memory (locked
5644 in, present and won't get swapped out). You can use it by
5645 uncommenting this section, adding some #includes, and setting up the
5646 appropriate defines above:
7e3be507 5647
fa8d436c
UD
5648 #define MORECORE osMoreCore
5649 #define MORECORE_CONTIGUOUS 0
7e3be507 5650
fa8d436c
UD
5651 There is also a shutdown routine that should somehow be called for
5652 cleanup upon program exit.
7e3be507 5653
fa8d436c
UD
5654 #define MAX_POOL_ENTRIES 100
5655 #define MINIMUM_MORECORE_SIZE (64 * 1024)
5656 static int next_os_pool;
5657 void *our_os_pools[MAX_POOL_ENTRIES];
7e3be507 5658
fa8d436c
UD
5659 void *osMoreCore(int size)
5660 {
5661 void *ptr = 0;
5662 static void *sbrk_top = 0;
ca34d7a7 5663
fa8d436c
UD
5664 if (size > 0)
5665 {
5666 if (size < MINIMUM_MORECORE_SIZE)
5667 size = MINIMUM_MORECORE_SIZE;
5668 if (CurrentExecutionLevel() == kTaskLevel)
5669 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5670 if (ptr == 0)
5671 {
5672 return (void *) MORECORE_FAILURE;
5673 }
5674 // save ptrs so they can be freed during cleanup
5675 our_os_pools[next_os_pool] = ptr;
5676 next_os_pool++;
5677 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5678 sbrk_top = (char *) ptr + size;
5679 return ptr;
5680 }
5681 else if (size < 0)
5682 {
5683 // we don't currently support shrink behavior
5684 return (void *) MORECORE_FAILURE;
5685 }
5686 else
5687 {
5688 return sbrk_top;
431c33c0 5689 }
ca34d7a7 5690 }
ca34d7a7 5691
fa8d436c
UD
5692 // cleanup any allocated memory pools
5693 // called as last thing before shutting down driver
ca34d7a7 5694
fa8d436c 5695 void osCleanupMem(void)
ca34d7a7 5696 {
fa8d436c 5697 void **ptr;
ca34d7a7 5698
fa8d436c
UD
5699 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5700 if (*ptr)
5701 {
5702 PoolDeallocate(*ptr);
5703 *ptr = 0;
5704 }
5705 }
ee74a442 5706
fa8d436c 5707*/
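/*
  As noted above, mmap can be made the only source of memory by defining
  MORECORE as a function that always fails.  A minimal sketch of that
  compile-time configuration (the function name is made up):
*/
#if 0
#include <stddef.h>

static void *failing_morecore (ptrdiff_t increment)
{
  (void) increment;                   /* never supplies memory */
  return (void *) MORECORE_FAILURE;   /* forces fallback to mmap */
}

#define MORECORE failing_morecore
#endif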
f65fd747 5708
7e3be507 5709
3e030bd5
UD
5710/* Helper code. */
5711
ae7f5313
UD
5712extern char **__libc_argv attribute_hidden;
5713
3e030bd5 5714static void
6bf4302e 5715malloc_printerr(int action, const char *str, void *ptr)
3e030bd5 5716{
553cc5f9
UD
5717 if ((action & 5) == 5)
5718 __libc_message (action & 2, "%s\n", str);
5719 else if (action & 1)
3e030bd5 5720 {
a9055cab 5721 char buf[2 * sizeof (uintptr_t) + 1];
3e030bd5 5722
a9055cab
UD
5723 buf[sizeof (buf) - 1] = '\0';
5724 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
5725 while (cp > buf)
5726 *--cp = '0';
5727
5728 __libc_message (action & 2,
553cc5f9 5729 "*** glibc detected *** %s: %s: 0x%s ***\n",
ae7f5313 5730 __libc_argv[0] ?: "<unknown>", str, cp);
3e030bd5 5731 }
a9055cab 5732 else if (action & 2)
3e030bd5
UD
5733 abort ();
5734}
5735
7e3be507 5736#ifdef _LIBC
b2bffca2 5737# include <sys/param.h>
fa8d436c 5738
a204dbb2
UD
5739/* We need a wrapper function for one of the additions of POSIX. */
5740int
5741__posix_memalign (void **memptr, size_t alignment, size_t size)
5742{
5743 void *mem;
e796f92f
UD
5744 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
5745 __const __malloc_ptr_t)) =
5746 __memalign_hook;
a204dbb2
UD
5747
5748 /* Test whether the ALIGNMENT argument is valid. It must be a power
5749 of two multiple of sizeof (void *). */
de02bd05
UD
5750 if (alignment % sizeof (void *) != 0
5751 || !powerof2 (alignment / sizeof (void *)) != 0
5752 || alignment == 0)
a204dbb2
UD
5753 return EINVAL;
5754
e796f92f
UD
5755 /* Call the hook here, so that the caller recorded via RETURN_ADDRESS
5756 is posix_memalign's caller and not posix_memalign itself. */
5757 if (hook != NULL)
5758 mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
5759 else
aa420660 5760 mem = public_mEMALIGn (alignment, size);
a204dbb2 5761
fa8d436c
UD
5762 if (mem != NULL) {
5763 *memptr = mem;
5764 return 0;
5765 }
a204dbb2
UD
5766
5767 return ENOMEM;
5768}
5769weak_alias (__posix_memalign, posix_memalign)
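/*
  Usage sketch for the wrapper above (hypothetical helper): the alignment
  must be a power-of-two multiple of sizeof (void *); otherwise EINVAL is
  returned and *memptr is left untouched.
*/
#if 0
#include <stdlib.h>

static void *aligned_buffer (size_t size)
{
  void *mem;
  /* 64 is a power-of-two multiple of sizeof (void *) on both 32- and
     64-bit configurations.  */
  if (posix_memalign (&mem, 64, size) != 0)
    return NULL;
  return mem;
}
#endif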
5770
eba19d2b
UD
5771strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5772strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5773strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5774strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5775strong_alias (__libc_memalign, __memalign)
5776weak_alias (__libc_memalign, memalign)
5777strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5778strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5779strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5780strong_alias (__libc_mallinfo, __mallinfo)
5781weak_alias (__libc_mallinfo, mallinfo)
5782strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
7e3be507
UD
5783
5784weak_alias (__malloc_stats, malloc_stats)
5785weak_alias (__malloc_usable_size, malloc_usable_size)
5786weak_alias (__malloc_trim, malloc_trim)
2f6d1f1b
UD
5787weak_alias (__malloc_get_state, malloc_get_state)
5788weak_alias (__malloc_set_state, malloc_set_state)
7e3be507 5789
fa8d436c 5790#endif /* _LIBC */
f65fd747 5791
fa8d436c 5792/* ------------------------------------------------------------
f65fd747
UD
5793History:
5794
fa8d436c 5795[see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
f65fd747
UD
5796
5797*/
fa8d436c
UD
5798/*
5799 * Local variables:
5800 * c-basic-offset: 2
5801 * End:
5802 */