56137dbc 1/* Malloc implementation for multiple threads without lock contention.
568035b7 2 Copyright (C) 1996-2013 Free Software Foundation, Inc.
f65fd747 3 This file is part of the GNU C Library.
fa8d436c
UD
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
f65fd747
UD
6
7 The GNU C Library is free software; you can redistribute it and/or
cc7375ce
RM
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
fa8d436c 10 License, or (at your option) any later version.
f65fd747
UD
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
cc7375ce 15 Lesser General Public License for more details.
f65fd747 16
cc7375ce 17 You should have received a copy of the GNU Lesser General Public
59ba27a6
PE
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <http://www.gnu.org/licenses/>. */
f65fd747 20
fa8d436c
UD
21/*
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24
da2d2fb6
UD
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
28
fa8d436c 29* Version ptmalloc2-20011215
fa8d436c
UD
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
f65fd747 32
fa8d436c 33* Quickstart
f65fd747 34
fa8d436c
UD
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
f65fd747
UD
43
44* Why use this malloc?
45
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
fa8d436c
UD
50 allocator for malloc-intensive programs.
51
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
61
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
64
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
70
71* Contents, described in more detail in "description of public routines" below.
72
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
22a89187
UD
76 free(void* p);
77 realloc(void* p, size_t n);
fa8d436c
UD
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
82
83 Additional functions:
22a89187
UD
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
fa8d436c 86 pvalloc(size_t n);
22a89187 87 cfree(void* p);
fa8d436c 88 malloc_trim(size_t pad);
22a89187 89 malloc_usable_size(void* p);
fa8d436c 90 malloc_stats();
f65fd747
UD
91
92* Vital statistics:
93
fa8d436c 94 Supported pointer representation: 4 or 8 bytes
a9177ff5 95 Supported size_t representation: 4 or 8 bytes
f65fd747 96 Note that size_t is allowed to be 4 bytes even if pointers are 8.
fa8d436c
UD
97 You can adjust this by defining INTERNAL_SIZE_T
98
99 Alignment: 2 * sizeof(size_t) (default)
100 (i.e., 8-byte alignment with 4-byte size_t). This suffices for
101 nearly all current machines and C compilers. However, you can
102 define MALLOC_ALIGNMENT to be wider than this if necessary.
f65fd747 103
fa8d436c
UD
104 Minimum overhead per allocated chunk: 4 or 8 bytes
105 Each malloced chunk has a hidden word of overhead holding size
f65fd747
UD
106 and status information.
107
108 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
72f90263 109 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
f65fd747
UD
110
111 When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
112 ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
fa8d436c
UD
113 needed; 4 (8) for a trailing size field and 8 (16) bytes for
114 free list pointers. Thus, the minimum allocatable size is
115 16/24/32 bytes.
f65fd747
UD
116
117 Even a request for zero bytes (i.e., malloc(0)) returns a
118 pointer to something of the minimum allocatable size.
119
fa8d436c
UD
120 The maximum overhead wastage (i.e., the number of extra bytes
121 allocated beyond those requested in malloc) is less than or equal
122 to the minimum size, except for requests >= mmap_threshold that
123 are serviced via mmap(), where the worst case wastage is 2 *
124 sizeof(size_t) bytes plus the remainder from a system page (the
125 minimal mmap unit); typically 4096 or 8192 bytes.
f65fd747 126
a9177ff5 127 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
72f90263 128 8-byte size_t: 2^64 minus about two pages
fa8d436c
UD
129
130 It is assumed that (possibly signed) size_t values suffice to
f65fd747
UD
131 represent chunk sizes. `Possibly signed' is due to the fact
132 that `size_t' may be defined on a system as either a signed or
fa8d436c
UD
133 an unsigned type. The ISO C standard says that it must be
134 unsigned, but a few systems are known not to adhere to this.
135 Additionally, even when size_t is unsigned, sbrk (which is by
136 default used to obtain memory from system) accepts signed
137 arguments, and may not be able to handle size_t-wide arguments
138 with negative sign bit. Generally, values that would
139 appear as negative after accounting for overhead and alignment
140 are supported only via mmap(), which does not have this
141 limitation.
142
143 Requests for sizes outside the allowed range will perform an optional
144 failure action and then return null. (Requests may also
145 fail because a system is out of memory.)
146
22a89187 147 Thread-safety: thread-safe
fa8d436c
UD
148
149 Compliance: I believe it is compliant with the 1997 Single Unix Specification
2b0fba75 150 Also SVID/XPG, ANSI C, and probably others as well.
f65fd747
UD
151
152* Synopsis of compile-time options:
153
154 People have reported using previous versions of this malloc on all
155 versions of Unix, sometimes by tweaking some of the defines
22a89187 156 below. It has been tested most extensively on Solaris and Linux.
fa8d436c
UD
157 People also report using it in stand-alone embedded systems.
158
159 The implementation is in straight, hand-tuned ANSI C. It is not
160 at all modular. (Sorry!) It uses a lot of macros. To be at all
161 usable, this code should be compiled using an optimizing compiler
162 (for example gcc -O3) that can simplify expressions and control
163 paths. (FAQ: some macros import variables as arguments rather than
164 declare locals because people reported that some debuggers
165 otherwise get confused.)
166
167 OPTION DEFAULT VALUE
168
169 Compilation Environment options:
170
2a26ef3a 171 HAVE_MREMAP 0
fa8d436c
UD
172
173 Changing default word sizes:
174
175 INTERNAL_SIZE_T size_t
073f560e
UD
176 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
177 __alignof__ (long double))
fa8d436c
UD
178
179 Configuration and functionality options:
180
fa8d436c
UD
181 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
182 USE_MALLOC_LOCK NOT defined
183 MALLOC_DEBUG NOT defined
184 REALLOC_ZERO_BYTES_FREES 1
fa8d436c
UD
185 TRIM_FASTBINS 0
186
187 Options for customizing MORECORE:
188
189 MORECORE sbrk
190 MORECORE_FAILURE -1
a9177ff5 191 MORECORE_CONTIGUOUS 1
fa8d436c
UD
192 MORECORE_CANNOT_TRIM NOT defined
193 MORECORE_CLEARS 1
a9177ff5 194 MMAP_AS_MORECORE_SIZE (1024 * 1024)
fa8d436c
UD
195
196 Tuning options that are also dynamically changeable via mallopt:
197
425ce2ed 198 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
fa8d436c
UD
199 DEFAULT_TRIM_THRESHOLD 128 * 1024
200 DEFAULT_TOP_PAD 0
201 DEFAULT_MMAP_THRESHOLD 128 * 1024
202 DEFAULT_MMAP_MAX 65536
203
204 There are several other #defined constants and macros that you
205 probably don't want to touch unless you are extending or adapting malloc. */
f65fd747
UD
206
207/*
22a89187 208 void* is the pointer type that malloc should say it returns
f65fd747
UD
209*/
210
22a89187
UD
211#ifndef void
212#define void void
213#endif /*void*/
f65fd747 214
fa8d436c
UD
215#include <stddef.h> /* for size_t */
216#include <stdlib.h> /* for getenv(), abort() */
2a26ef3a 217#include <unistd.h> /* for __libc_enable_secure */
f65fd747 218
3c6904fb 219#include <malloc-machine.h>
2a26ef3a 220#include <malloc-sysdep.h>
3c6904fb 221
425ce2ed 222#include <atomic.h>
eb96ffb0 223#include <_itoa.h>
e404fb16 224#include <bits/wordsize.h>
425ce2ed 225#include <sys/sysinfo.h>
c56da3a3 226
02d46fc4
UD
227#include <ldsodefs.h>
228
fa8d436c 229#include <unistd.h>
fa8d436c 230#include <stdio.h> /* needed for malloc_stats */
8e58439c 231#include <errno.h>
f65fd747 232
66274218
AJ
233#include <shlib-compat.h>
234
5d78bb43
UD
235/* For uintptr_t. */
236#include <stdint.h>
f65fd747 237
3e030bd5
UD
238/* For va_arg, va_start, va_end. */
239#include <stdarg.h>
240
c0f62c56 241
fa8d436c
UD
242/*
243 Debugging:
244
245 Because freed chunks may be overwritten with bookkeeping fields, this
246 malloc will often die when freed memory is overwritten by user
247 programs. This can be very effective (albeit in an annoying way)
248 in helping track down dangling pointers.
249
250 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
251 enabled that will catch more memory errors. You probably won't be
252 able to make much sense of the actual assertion errors, but they
253 should help you locate incorrectly overwritten memory. The checking
254 is fairly extensive, and will slow down execution
255 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
256 will attempt to check every non-mmapped allocated and free chunk in
257 the course of computing the summaries. (By nature, mmapped regions
258 cannot be checked very much automatically.)
259
260 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
261 this code. The assertions in the check routines spell out in more
262 detail the assumptions and invariants underlying the algorithms.
263
264 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
265 checking that all accesses to malloced memory stay within their
266 bounds. However, there are several add-ons and adaptations of this
267 or other mallocs available that do this.
f65fd747
UD
268*/
269
72f90263
UD
270#ifdef NDEBUG
271# define assert(expr) ((void) 0)
272#else
273# define assert(expr) \
274 ((expr) \
275 ? ((void) 0) \
276 : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))
277
278extern const char *__progname;
279
280static void
281__malloc_assert (const char *assertion, const char *file, unsigned int line,
282 const char *function)
283{
284 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
285 __progname, __progname[0] ? ": " : "",
286 file, line,
287 function ? function : "", function ? ": " : "",
288 assertion);
289 fflush (stderr);
290 abort ();
291}
292#endif
f65fd747
UD
293
294
295/*
296 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
fa8d436c
UD
297 of chunk sizes.
298
299 The default version is the same as size_t.
300
301 While not strictly necessary, it is best to define this as an
302 unsigned type, even if size_t is a signed type. This may avoid some
303 artificial size limitations on some systems.
304
305 On a 64-bit machine, you may be able to reduce malloc overhead by
306 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
307 expense of not being able to handle more than 2^32 of malloced
308 space. If this limitation is acceptable, you are encouraged to set
309 this unless you are on a platform requiring 16-byte alignments. In
310 this case the alignment requirements turn out to negate any
311 potential advantages of decreasing size_t word size.
312
313 Implementors: Beware of the possible combinations of:
314 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
315 and might be the same width as int or as long
316 - size_t might have different width and signedness as INTERNAL_SIZE_T
317 - int and long might be 32 or 64 bits, and might be the same width
318 To deal with this, most comparisons and difference computations
319 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
320 aware of the fact that casting an unsigned int to a wider long does
321 not sign-extend. (This also makes checking for negative numbers
322 awkward.) Some of these casts result in harmless compiler warnings
323 on some systems.
f65fd747
UD
324*/
325
326#ifndef INTERNAL_SIZE_T
327#define INTERNAL_SIZE_T size_t
328#endif
329
fa8d436c
UD
330/* The corresponding word size */
331#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
332
333
334/*
335 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
336 It must be a power of two at least 2 * SIZE_SZ, even on machines
337 for which smaller alignments would suffice. It may be defined as
338 larger than this though. Note however that code and data structures
339 are optimized for the case of 8-byte alignment.
340*/
341
342
343#ifndef MALLOC_ALIGNMENT
cbc00a03
RM
344# if !SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_16)
345/* This is the correct definition when there is no past ABI to constrain it.
346
347 Among configurations with a past ABI constraint, it differs from
348 2*SIZE_SZ only on powerpc32. For the time being, changing this is
349 causing more compatibility problems due to malloc_get_state and
350 malloc_set_state than will returning blocks not adequately aligned for
351 long double objects under -mlong-double-128. */
352
353# define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
354 ? __alignof__ (long double) : 2 * SIZE_SZ)
355# else
356# define MALLOC_ALIGNMENT (2 * SIZE_SZ)
357# endif
fa8d436c
UD
358#endif
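/*
  Worked out for two common cases (an illustration, not an exhaustive
  list): on 64-bit x86, 2 * SIZE_SZ and __alignof__ (long double) are
  both 16, so MALLOC_ALIGNMENT is 16; on 32-bit x86 it stays at
  2 * SIZE_SZ == 8.  powerpc32 with 16-byte long double is the case
  where the two definitions above actually differ.
*/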
359
360/* The corresponding bit mask value */
361#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
362
363
364
365/*
366 REALLOC_ZERO_BYTES_FREES should be set if a call to
367 realloc with zero bytes should be the same as a call to free.
368 This is required by the C standard. Otherwise, since this malloc
369 returns a unique pointer for malloc(0), so does realloc(p, 0).
370*/
371
372#ifndef REALLOC_ZERO_BYTES_FREES
373#define REALLOC_ZERO_BYTES_FREES 1
374#endif
375
376/*
377 TRIM_FASTBINS controls whether free() of a very small chunk can
378 immediately lead to trimming. Setting to true (1) can reduce memory
379 footprint, but will almost always slow down programs that use a lot
380 of small chunks.
381
382 Define this only if you are willing to give up some speed to more
383 aggressively reduce system-level memory footprint when releasing
384 memory in programs that use many small chunks. You can get
385 essentially the same effect by setting MXFAST to 0, but this can
386 lead to even greater slowdowns in programs using many small chunks.
387 TRIM_FASTBINS is an in-between compile-time option, that disables
388 only those chunks bordering topmost memory from being placed in
389 fastbins.
390*/
391
392#ifndef TRIM_FASTBINS
393#define TRIM_FASTBINS 0
394#endif
395
396
3b49edc0 397/* Definition for getting more memory from the OS. */
fa8d436c
UD
398#define MORECORE (*__morecore)
399#define MORECORE_FAILURE 0
22a89187
UD
400void * __default_morecore (ptrdiff_t);
401void *(*__morecore)(ptrdiff_t) = __default_morecore;
f65fd747 402
f65fd747 403
22a89187 404#include <string.h>
f65fd747 405
df77455c
UD
406
407/* Force a value to be in a register and stop the compiler referring
408 to the source (mostly memory location) again. */
409#define force_reg(val) \
410 ({ __typeof (val) _v; asm ("" : "=r" (_v) : "0" (val)); _v; })
411
412
fa8d436c
UD
413/*
414 MORECORE-related declarations. By default, rely on sbrk
415*/
09f5e163 416
f65fd747 417
fa8d436c
UD
418/*
419 MORECORE is the name of the routine to call to obtain more memory
420 from the system. See below for general guidance on writing
421 alternative MORECORE functions, as well as a version for WIN32 and a
422 sample version for pre-OSX macos.
423*/
f65fd747 424
fa8d436c
UD
425#ifndef MORECORE
426#define MORECORE sbrk
427#endif
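/*
  Sketch of a hand-crafted MORECORE replacement, as mentioned in the
  guidance above (purely illustrative; my_morecore and my_pool are
  hypothetical names).  It must take a ptrdiff_t increment and return
  MORECORE_FAILURE when it cannot supply memory; a version like this,
  which cannot release memory, would also define MORECORE_CANNOT_TRIM.

    static char my_pool[1024 * 1024];
    static size_t my_top;

    void *my_morecore (ptrdiff_t increment)
    {
      if (increment < 0 || my_top + increment > sizeof (my_pool))
        return (void *) MORECORE_FAILURE;
      void *p = my_pool + my_top;      // current end of the pool
      my_top += increment;             // advance the simulated break
      return p;
    }
*/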
f65fd747 428
fa8d436c
UD
429/*
430 MORECORE_FAILURE is the value returned upon failure of MORECORE
431 as well as mmap. Since it cannot be an otherwise valid memory address,
432 and must reflect values of standard sys calls, you probably ought not
433 try to redefine it.
434*/
09f5e163 435
fa8d436c
UD
436#ifndef MORECORE_FAILURE
437#define MORECORE_FAILURE (-1)
438#endif
439
440/*
441 If MORECORE_CONTIGUOUS is true, take advantage of fact that
442 consecutive calls to MORECORE with positive arguments always return
443 contiguous increasing addresses. This is true of unix sbrk. Even
444 if not defined, when regions happen to be contiguous, malloc will
445 permit allocations spanning regions obtained from different
446 calls. But defining this when applicable enables some stronger
447 consistency checks and space efficiencies.
448*/
f65fd747 449
fa8d436c
UD
450#ifndef MORECORE_CONTIGUOUS
451#define MORECORE_CONTIGUOUS 1
f65fd747
UD
452#endif
453
fa8d436c
UD
454/*
455 Define MORECORE_CANNOT_TRIM if your version of MORECORE
456 cannot release space back to the system when given negative
457 arguments. This is generally necessary only if you are using
458 a hand-crafted MORECORE function that cannot handle negative arguments.
459*/
460
461/* #define MORECORE_CANNOT_TRIM */
f65fd747 462
fa8d436c
UD
463/* MORECORE_CLEARS (default 1)
464 The degree to which the routine mapped to MORECORE zeroes out
465 memory: never (0), only for newly allocated space (1) or always
466 (2). The distinction between (1) and (2) is necessary because on
467 some systems, if the application first decrements and then
468 increments the break value, the contents of the reallocated space
469 are unspecified.
470*/
471
472#ifndef MORECORE_CLEARS
473#define MORECORE_CLEARS 1
7cabd57c
UD
474#endif
475
fa8d436c 476
a9177ff5 477/*
fa8d436c 478 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
22a89187
UD
479 sbrk fails, and mmap is used as a backup. The value must be a
480 multiple of page size. This backup strategy generally applies only
481 when systems have "holes" in address space, so sbrk cannot perform
482 contiguous expansion, but there is still space available on system.
483 On systems for which this is known to be useful (i.e. most linux
484 kernels), this occurs only when programs allocate huge amounts of
485 memory. Between this, and the fact that mmap regions tend to be
486 limited, the size should be large, to avoid too many mmap calls and
487 thus avoid running out of kernel resources. */
fa8d436c
UD
488
489#ifndef MMAP_AS_MORECORE_SIZE
490#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
f65fd747
UD
491#endif
492
493/*
494 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
2a26ef3a 495 large blocks.
f65fd747
UD
496*/
497
498#ifndef HAVE_MREMAP
fa8d436c 499#define HAVE_MREMAP 0
f65fd747
UD
500#endif
501
f65fd747 502
f65fd747 503/*
f65fd747 504 This version of malloc supports the standard SVID/XPG mallinfo
fa8d436c
UD
505 routine that returns a struct containing usage properties and
506 statistics. It should work on any SVID/XPG compliant system that has
507 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
508 install such a thing yourself, cut out the preliminary declarations
509 as described above and below and save them in a malloc.h file. But
510 there's no compelling reason to bother to do this.)
f65fd747
UD
511
512 The main declaration needed is the mallinfo struct that is returned
513 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
fa8d436c
UD
514 bunch of fields that are not even meaningful in this version of
515 malloc. These fields are instead filled by mallinfo() with
516 other numbers that might be of interest.
f65fd747
UD
517*/
518
f65fd747 519
fa8d436c 520/* ---------- description of public routines ------------ */
f65fd747
UD
521
522/*
fa8d436c
UD
523 malloc(size_t n)
524 Returns a pointer to a newly allocated chunk of at least n bytes, or null
525 if no space is available. Additionally, on failure, errno is
526 set to ENOMEM on ANSI C systems.
527
528 If n is zero, malloc returns a minimum-sized chunk. (The minimum
529 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
530 systems.) On most systems, size_t is an unsigned type, so calls
531 with negative arguments are interpreted as requests for huge amounts
532 of space, which will often fail. The maximum supported value of n
533 differs across systems, but is in all cases less than the maximum
534 representable value of a size_t.
f65fd747 535*/
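/*
  Minimal usage sketch of the behavior described above (illustrative
  only): a failed allocation returns null and sets errno to ENOMEM,
  so callers are expected to check the result.

    void *buf = malloc (1024);
    if (buf == NULL)
      perror ("malloc");               // errno == ENOMEM on failure
    else
      free (buf);
*/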
3b49edc0
UD
536void* __libc_malloc(size_t);
537libc_hidden_proto (__libc_malloc)
f65fd747 538
fa8d436c 539/*
22a89187 540 free(void* p)
fa8d436c
UD
541 Releases the chunk of memory pointed to by p, that had been previously
542 allocated using malloc or a related routine such as realloc.
543 It has no effect if p is null. It can have arbitrary (i.e., bad!)
544 effects if p has already been freed.
545
546 Unless disabled (using mallopt), freeing very large spaces will
547 when possible, automatically trigger operations that give
548 back unused memory to the system, thus reducing program footprint.
549*/
3b49edc0
UD
550void __libc_free(void*);
551libc_hidden_proto (__libc_free)
f65fd747 552
fa8d436c
UD
553/*
554 calloc(size_t n_elements, size_t element_size);
555 Returns a pointer to n_elements * element_size bytes, with all locations
556 set to zero.
557*/
3b49edc0 558void* __libc_calloc(size_t, size_t);
f65fd747
UD
559
560/*
22a89187 561 realloc(void* p, size_t n)
fa8d436c
UD
562 Returns a pointer to a chunk of size n that contains the same data
563 as does chunk p up to the minimum of (n, p's size) bytes, or null
a9177ff5 564 if no space is available.
f65fd747 565
fa8d436c
UD
566 The returned pointer may or may not be the same as p. The algorithm
567 prefers extending p when possible, otherwise it employs the
568 equivalent of a malloc-copy-free sequence.
f65fd747 569
a9177ff5 570 If p is null, realloc is equivalent to malloc.
f65fd747 571
fa8d436c
UD
572 If space is not available, realloc returns null, errno is set (if on
573 ANSI) and p is NOT freed.
f65fd747 574
fa8d436c
UD
575 If n is for fewer bytes than already held by p, the newly unused
576 space is lopped off and freed if possible. Unless the #define
577 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
578 zero (re)allocates a minimum-sized chunk.
f65fd747 579
fa8d436c
UD
580 Large chunks that were internally obtained via mmap will always
581 be reallocated using malloc-copy-free sequences unless
582 the system supports MREMAP (currently only linux).
f65fd747 583
fa8d436c
UD
584 The old unix realloc convention of allowing the last-free'd chunk
585 to be used as an argument to realloc is not supported.
f65fd747 586*/
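/*
  Sketch of the calling pattern implied above (illustrative only):
  because a failed realloc leaves p allocated, the result is assigned
  to a temporary first.

    char *tmp = realloc (p, 200);
    if (tmp != NULL)
      p = tmp;                         // grown (possibly moved)
    else
      free (p);                        // p was NOT freed by realloc
*/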
3b49edc0
UD
587void* __libc_realloc(void*, size_t);
588libc_hidden_proto (__libc_realloc)
f65fd747 589
fa8d436c
UD
590/*
591 memalign(size_t alignment, size_t n);
592 Returns a pointer to a newly allocated chunk of n bytes, aligned
593 in accord with the alignment argument.
594
595 The alignment argument should be a power of two. If the argument is
596 not a power of two, the nearest greater power is used.
597 8-byte alignment is guaranteed by normal malloc calls, so don't
598 bother calling memalign with an argument of 8 or less.
599
600 Overreliance on memalign is a sure way to fragment space.
601*/
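/*
  Illustrative call (a sketch, not part of the original comment):
  the alignment should be a power of two, and anything <= 8 is
  already guaranteed by plain malloc.

    void *p = memalign (64, 1000);     // 64-byte aligned block
    // on success ((uintptr_t) p % 64) == 0; on failure p == NULL
*/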
3b49edc0
UD
602void* __libc_memalign(size_t, size_t);
603libc_hidden_proto (__libc_memalign)
f65fd747
UD
604
605/*
fa8d436c
UD
606 valloc(size_t n);
607 Equivalent to memalign(pagesize, n), where pagesize is the page
608 size of the system. If the pagesize is unknown, 4096 is used.
609*/
3b49edc0 610void* __libc_valloc(size_t);
fa8d436c 611
f65fd747 612
f65fd747 613
fa8d436c
UD
614/*
615 mallopt(int parameter_number, int parameter_value)
616 Sets tunable parameters. The format is to provide a
617 (parameter-number, parameter-value) pair. mallopt then sets the
618 corresponding parameter to the argument value if it can (i.e., so
619 long as the value is meaningful), and returns 1 if successful else
620 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
621 normally defined in malloc.h. Only one of these (M_MXFAST) is used
622 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
623 so setting them has no effect. But this malloc also supports four
624 other options in mallopt. See below for details. Briefly, supported
625 parameters are as follows (listed defaults are for "typical"
626 configurations).
627
628 Symbol param # default allowed param values
629 M_MXFAST 1 64 0-80 (0 disables fastbins)
630 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
a9177ff5 631 M_TOP_PAD -2 0 any
fa8d436c
UD
632 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
633 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
634*/
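/*
  Hedged usage sketch based on the table above (the values shown are
  examples, not recommendations):

    mallopt (M_MXFAST, 0);                   // disable fastbins
    mallopt (M_TRIM_THRESHOLD, 64 * 1024);   // trim more eagerly
    // each call returns 1 on success, 0 if the value was rejected
*/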
3b49edc0
UD
635int __libc_mallopt(int, int);
636libc_hidden_proto (__libc_mallopt)
fa8d436c
UD
637
638
639/*
640 mallinfo()
641 Returns (by copy) a struct containing various summary statistics:
642
a9177ff5
RM
643 arena: current total non-mmapped bytes allocated from system
644 ordblks: the number of free chunks
fa8d436c 645 smblks: the number of fastbin blocks (i.e., small chunks that
72f90263 646 have been freed but not yet reused or consolidated)
a9177ff5
RM
647 hblks: current number of mmapped regions
648 hblkhd: total bytes held in mmapped regions
fa8d436c 649 usmblks: the maximum total allocated space. This will be greater
72f90263 650 than current total if trimming has occurred.
a9177ff5 651 fsmblks: total bytes held in fastbin blocks
fa8d436c 652 uordblks: current total allocated space (normal or mmapped)
a9177ff5 653 fordblks: total free space
fa8d436c 654 keepcost: the maximum number of bytes that could ideally be released
72f90263
UD
655 back to system via malloc_trim. ("ideally" means that
656 it ignores page restrictions etc.)
fa8d436c
UD
657
658 Because these fields are ints, but internal bookkeeping may
a9177ff5 659 be kept as longs, the reported values may wrap around zero and
fa8d436c
UD
660 thus be inaccurate.
661*/
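/*
  Reading a few of the fields described above (illustrative sketch,
  using the public mallinfo() from <malloc.h> and <stdio.h>):

    struct mallinfo mi = mallinfo ();
    printf ("arena bytes: %d\n", mi.arena);
    printf ("free chunks: %d\n", mi.ordblks);
    printf ("free bytes:  %d\n", mi.fordblks);
*/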
3b49edc0 662struct mallinfo __libc_mallinfo(void);
88764ae2 663
f65fd747 664
fa8d436c
UD
665/*
666 pvalloc(size_t n);
667 Equivalent to valloc(minimum-page-that-holds(n)), that is,
668 round up n to nearest pagesize.
669 */
3b49edc0 670void* __libc_pvalloc(size_t);
fa8d436c
UD
671
672/*
673 malloc_trim(size_t pad);
674
675 If possible, gives memory back to the system (via negative
676 arguments to sbrk) if there is unused memory at the `high' end of
677 the malloc pool. You can call this after freeing large blocks of
678 memory to potentially reduce the system-level memory requirements
679 of a program. However, it cannot guarantee to reduce memory. Under
680 some allocation patterns, some large free blocks of memory will be
681 locked between two used chunks, so they cannot be given back to
682 the system.
a9177ff5 683
fa8d436c
UD
684 The `pad' argument to malloc_trim represents the amount of free
685 trailing space to leave untrimmed. If this argument is zero,
686 only the minimum amount of memory to maintain internal data
687 structures will be left (one page or less). Non-zero arguments
688 can be supplied to maintain enough trailing space to service
689 future expected allocations without having to re-obtain memory
690 from the system.
a9177ff5 691
fa8d436c
UD
692 Malloc_trim returns 1 if it actually released any memory, else 0.
693 On systems that do not support "negative sbrks", it will always
c958a6a4 694 return 0.
fa8d436c 695*/
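/*
  Illustrative call (a sketch): after freeing large blocks, a pad of
  zero asks for the most aggressive trim.

    if (malloc_trim (0))
      puts ("some memory was returned to the system");
*/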
3b49edc0 696int __malloc_trim(size_t);
fa8d436c
UD
697
698/*
22a89187 699 malloc_usable_size(void* p);
fa8d436c
UD
700
701 Returns the number of bytes you can actually use in
702 an allocated chunk, which may be more than you requested (although
703 often not) due to alignment and minimum size constraints.
704 You can use this many bytes without worrying about
705 overwriting other allocated objects. This is not a particularly great
706 programming practice. malloc_usable_size can be more useful in
707 debugging and assertions, for example:
708
709 p = malloc(n);
710 assert(malloc_usable_size(p) >= 256);
711
712*/
3b49edc0 713size_t __malloc_usable_size(void*);
fa8d436c
UD
714
715/*
716 malloc_stats();
717 Prints on stderr the amount of space obtained from the system (both
718 via sbrk and mmap), the maximum amount (which may be more than
719 current if malloc_trim and/or munmap got called), and the current
720 number of bytes allocated via malloc (or realloc, etc) but not yet
721 freed. Note that this is the number of bytes allocated, not the
722 number requested. It will be larger than the number requested
723 because of alignment and bookkeeping overhead. Because it includes
724 alignment wastage as being in use, this figure may be greater than
725 zero even when no user-level chunks are allocated.
726
727 The reported current and maximum system memory can be inaccurate if
728 a program makes other calls to system memory allocation functions
729 (normally sbrk) outside of malloc.
730
731 malloc_stats prints only the most commonly interesting statistics.
732 More information can be obtained by calling mallinfo.
733
734*/
3b49edc0 735void __malloc_stats(void);
f65fd747 736
f7ddf3d3
UD
737/*
738 malloc_get_state(void);
739
740 Returns the state of all malloc variables in an opaque data
741 structure.
742*/
3b49edc0 743void* __malloc_get_state(void);
f7ddf3d3
UD
744
745/*
22a89187 746 malloc_set_state(void* state);
f7ddf3d3
UD
747
748 Restore the state of all malloc variables from data obtained with
749 malloc_get_state().
750*/
3b49edc0 751int __malloc_set_state(void*);
f7ddf3d3 752
f7ddf3d3
UD
753/*
754 posix_memalign(void **memptr, size_t alignment, size_t size);
755
756 POSIX wrapper like memalign(), checking for validity of size.
757*/
758int __posix_memalign(void **, size_t, size_t);
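/*
  Hedged sketch of the POSIX-level interface documented above: the
  wrapper returns 0 on success or an error number (it does not set
  errno), and the alignment must be a power-of-two multiple of
  sizeof (void *).

    void *p = NULL;
    if (posix_memalign (&p, 64, 4096) == 0)
      free (p);
*/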
f7ddf3d3 759
fa8d436c
UD
760/* mallopt tuning options */
761
f65fd747 762/*
fa8d436c
UD
763 M_MXFAST is the maximum request size used for "fastbins", special bins
764 that hold returned chunks without consolidating their spaces. This
765 enables future requests for chunks of the same size to be handled
766 very quickly, but can increase fragmentation, and thus increase the
767 overall memory footprint of a program.
768
769 This malloc manages fastbins very conservatively yet still
770 efficiently, so fragmentation is rarely a problem for values less
771 than or equal to the default. The maximum supported value of MXFAST
772 is 80. You wouldn't want it any higher than this anyway. Fastbins
773 are designed especially for use with many small structs, objects or
774 strings -- the default handles structs/objects/arrays with sizes up
775 to eight 4-byte fields, or small strings representing words, tokens,
776 etc. Using fastbins for larger objects normally worsens
777 fragmentation without improving speed.
778
779 M_MXFAST is set in REQUEST size units. It is internally used in
780 chunksize units, which adds padding and alignment. You can reduce
781 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
782 algorithm to be a closer approximation of fifo-best-fit in all cases,
783 not just for larger requests, but will generally cause it to be
784 slower.
f65fd747
UD
785*/
786
787
fa8d436c
UD
788/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
789#ifndef M_MXFAST
a9177ff5 790#define M_MXFAST 1
fa8d436c 791#endif
f65fd747 792
fa8d436c 793#ifndef DEFAULT_MXFAST
425ce2ed 794#define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
10dc2a90
UD
795#endif
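/*
  Worked out (matching the table in the header comment above): with
  4-byte size_t this is 64 * 4 / 4 == 64 bytes, with 8-byte size_t
  64 * 8 / 4 == 128 bytes as the default fastbin request limit.
*/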
796
10dc2a90 797
fa8d436c
UD
798/*
799 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
800 to keep before releasing via malloc_trim in free().
801
802 Automatic trimming is mainly useful in long-lived programs.
803 Because trimming via sbrk can be slow on some systems, and can
804 sometimes be wasteful (in cases where programs immediately
805 afterward allocate more large chunks) the value should be high
806 enough so that your overall system performance would improve by
807 releasing this much memory.
808
809 The trim threshold and the mmap control parameters (see below)
810 can be traded off with one another. Trimming and mmapping are
811 two different ways of releasing unused memory back to the
812 system. Between these two, it is often possible to keep
813 system-level demands of a long-lived program down to a bare
814 minimum. For example, in one test suite of sessions measuring
815 the XF86 X server on Linux, using a trim threshold of 128K and a
816 mmap threshold of 192K led to near-minimal long term resource
817 consumption.
818
819 If you are using this malloc in a long-lived program, it should
820 pay to experiment with these values. As a rough guide, you
821 might set to a value close to the average size of a process
822 (program) running on your system. Releasing this much memory
823 would allow such a process to run in memory. Generally, it's
824 worth it to tune for trimming rather than memory mapping when a
825 program undergoes phases where several large chunks are
826 allocated and released in ways that can reuse each other's
827 storage, perhaps mixed with phases where there are no such
828 chunks at all. And in well-behaved long-lived programs,
829 controlling release of large blocks via trimming versus mapping
830 is usually faster.
831
832 However, in most programs, these parameters serve mainly as
833 protection against the system-level effects of carrying around
834 massive amounts of unneeded memory. Since frequent calls to
835 sbrk, mmap, and munmap otherwise degrade performance, the default
836 parameters are set to relatively high values that serve only as
837 safeguards.
838
839 The trim value must be greater than page size to have any useful
a9177ff5 840 effect. To disable trimming completely, you can set to
fa8d436c
UD
841 (unsigned long)(-1)
842
843 Trim settings interact with fastbin (MXFAST) settings: Unless
844 TRIM_FASTBINS is defined, automatic trimming never takes place upon
845 freeing a chunk with size less than or equal to MXFAST. Trimming is
846 instead delayed until subsequent freeing of larger chunks. However,
847 you can still force an attempted trim by calling malloc_trim.
848
849 Also, trimming is not generally possible in cases where
850 the main arena is obtained via mmap.
851
852 Note that the trick some people use of mallocing a huge space and
853 then freeing it at program startup, in an attempt to reserve system
854 memory, doesn't have the intended effect under automatic trimming,
855 since that memory will immediately be returned to the system.
856*/
857
858#define M_TRIM_THRESHOLD -1
859
860#ifndef DEFAULT_TRIM_THRESHOLD
861#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
862#endif
863
864/*
865 M_TOP_PAD is the amount of extra `padding' space to allocate or
866 retain whenever sbrk is called. It is used in two ways internally:
867
868 * When sbrk is called to extend the top of the arena to satisfy
869 a new malloc request, this much padding is added to the sbrk
870 request.
871
872 * When malloc_trim is called automatically from free(),
873 it is used as the `pad' argument.
874
875 In both cases, the actual amount of padding is rounded
876 so that the end of the arena is always a system page boundary.
877
878 The main reason for using padding is to avoid calling sbrk so
879 often. Having even a small pad greatly reduces the likelihood
880 that nearly every malloc request during program start-up (or
881 after trimming) will invoke sbrk, which needlessly wastes
882 time.
883
884 Automatic rounding-up to page-size units is normally sufficient
885 to avoid measurable overhead, so the default is 0. However, in
886 systems where sbrk is relatively slow, it can pay to increase
887 this value, at the expense of carrying around more memory than
888 the program needs.
889*/
10dc2a90 890
fa8d436c 891#define M_TOP_PAD -2
10dc2a90 892
fa8d436c
UD
893#ifndef DEFAULT_TOP_PAD
894#define DEFAULT_TOP_PAD (0)
895#endif
f65fd747 896
1d05c2fb
UD
897/*
898 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
899 adjusted MMAP_THRESHOLD.
900*/
901
902#ifndef DEFAULT_MMAP_THRESHOLD_MIN
903#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
904#endif
905
906#ifndef DEFAULT_MMAP_THRESHOLD_MAX
e404fb16
UD
907 /* For 32-bit platforms we cannot increase the maximum mmap
908 threshold much because it is also the minimum value for the
bd2c2341
UD
909 maximum heap size and its alignment. Going above 512k (i.e., 1M
910 for new heaps) wastes too much address space. */
e404fb16 911# if __WORDSIZE == 32
bd2c2341 912# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
e404fb16 913# else
bd2c2341 914# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
e404fb16 915# endif
1d05c2fb
UD
916#endif
917
fa8d436c
UD
918/*
919 M_MMAP_THRESHOLD is the request size threshold for using mmap()
920 to service a request. Requests of at least this size that cannot
921 be allocated using already-existing space will be serviced via mmap.
922 (If enough normal freed space already exists it is used instead.)
923
924 Using mmap segregates relatively large chunks of memory so that
925 they can be individually obtained and released from the host
926 system. A request serviced through mmap is never reused by any
927 other request (at least not directly; the system may just so
928 happen to remap successive requests to the same locations).
929
930 Segregating space in this way has the benefits that:
931
a9177ff5
RM
932 1. Mmapped space can ALWAYS be individually released back
933 to the system, which helps keep the system level memory
934 demands of a long-lived program low.
fa8d436c
UD
935 2. Mapped memory can never become `locked' between
936 other chunks, as can happen with normally allocated chunks, which
937 means that even trimming via malloc_trim would not release them.
938 3. On some systems with "holes" in address spaces, mmap can obtain
939 memory that sbrk cannot.
940
941 However, it has the disadvantages that:
942
943 1. The space cannot be reclaimed, consolidated, and then
944 used to service later requests, as happens with normal chunks.
945 2. It can lead to more wastage because of mmap page alignment
946 requirements
947 3. It causes malloc performance to be more dependent on host
948 system memory management support routines which may vary in
949 implementation quality and may impose arbitrary
950 limitations. Generally, servicing a request via normal
951 malloc steps is faster than going through a system's mmap.
952
953 The advantages of mmap nearly always outweigh disadvantages for
954 "large" chunks, but the value of "large" varies across systems. The
955 default is an empirically derived value that works well in most
956 systems.
1d05c2fb
UD
957
958
959 Update in 2006:
960 The above was written in 2001. Since then the world has changed a lot.
961 Memory got bigger. Applications got bigger. The virtual address space
962 layout in 32 bit linux changed.
963
964 In the new situation, brk() and mmap space is shared and there are no
965 artificial limits on brk size imposed by the kernel. What is more,
966 applications have started using transient allocations larger than the
967 128Kb that was imagined in 2001.
968
969 The price for mmap is also high now; each time glibc mmaps from the
970 kernel, the kernel is forced to zero out the memory it gives to the
971 application. Zeroing memory is expensive and eats a lot of cache and
972 memory bandwidth. This has nothing to do with the efficiency of the
973 virtual memory system, by doing mmap the kernel just has no choice but
974 to zero.
975
976 In 2001, the kernel had a maximum size for brk() which was about 800
977 megabytes on 32 bit x86; at that point brk() would hit the first
978 mmapped shared libraries and couldn't expand anymore. With current 2.6
979 kernels, the VA space layout is different and brk() and mmap
980 both can span the entire heap at will.
981
982 Rather than using a static threshold for the brk/mmap tradeoff,
983 we are now using a simple dynamic one. The goal is still to avoid
984 fragmentation. The old goals we kept are
985 1) try to get the long lived large allocations to use mmap()
986 2) really large allocations should always use mmap()
987 and we're adding now:
988 3) transient allocations should use brk() to avoid forcing the kernel
989 to zero memory over and over again
990
991 The implementation works with a sliding threshold, which is by default
992 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
993 out at 128Kb as per the 2001 default.
994
995 This allows us to satisfy requirement 1) under the assumption that long
996 lived allocations are made early in the process' lifespan, before it has
997 started doing dynamic allocations of the same size (which will
998 increase the threshold).
999
1000 The upper bound on the threshold satisfies requirement 2).
1001
1002 The threshold goes up in value when the application frees memory that was
1003 allocated with the mmap allocator. The idea is that once the application
1004 starts freeing memory of a certain size, it's highly probable that this is
1005 a size the application uses for transient allocations. This estimator
1006 is there to satisfy the new third requirement.
1007
f65fd747
UD
1008*/
1009
fa8d436c 1010#define M_MMAP_THRESHOLD -3
f65fd747 1011
fa8d436c 1012#ifndef DEFAULT_MMAP_THRESHOLD
1d05c2fb 1013#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
fa8d436c
UD
1014#endif
1015
1016/*
1017 M_MMAP_MAX is the maximum number of requests to simultaneously
1018 service using mmap. This parameter exists because
1019 some systems have a limited number of internal tables for
1020 use by mmap, and using more than a few of them may degrade
1021 performance.
1022
1023 The default is set to a value that serves only as a safeguard.
22a89187 1024 Setting to 0 disables use of mmap for servicing large requests.
fa8d436c 1025*/
f65fd747 1026
fa8d436c
UD
1027#define M_MMAP_MAX -4
1028
1029#ifndef DEFAULT_MMAP_MAX
fa8d436c 1030#define DEFAULT_MMAP_MAX (65536)
f65fd747
UD
1031#endif
1032
100351c3 1033#include <malloc.h>
f65fd747 1034
fa8d436c
UD
1035#ifndef RETURN_ADDRESS
1036#define RETURN_ADDRESS(X_) (NULL)
9ae6fc54 1037#endif
431c33c0
UD
1038
1039/* On some platforms we can compile internal, not exported functions better.
1040 Let the environment provide a macro and define it to be empty if it
1041 is not available. */
1042#ifndef internal_function
1043# define internal_function
1044#endif
1045
fa8d436c
UD
1046/* Forward declarations. */
1047struct malloc_chunk;
1048typedef struct malloc_chunk* mchunkptr;
431c33c0 1049
fa8d436c 1050/* Internal routines. */
f65fd747 1051
22a89187 1052static void* _int_malloc(mstate, size_t);
425ce2ed 1053static void _int_free(mstate, mchunkptr, int);
22a89187 1054static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
6e4b2107 1055 INTERNAL_SIZE_T);
22a89187
UD
1056static void* _int_memalign(mstate, size_t, size_t);
1057static void* _int_valloc(mstate, size_t);
1058static void* _int_pvalloc(mstate, size_t);
6bf4302e 1059static void malloc_printerr(int action, const char *str, void *ptr);
fa8d436c 1060
22a89187 1061static void* internal_function mem2mem_check(void *p, size_t sz);
fa8d436c
UD
1062static int internal_function top_check(void);
1063static void internal_function munmap_chunk(mchunkptr p);
a9177ff5 1064#if HAVE_MREMAP
fa8d436c 1065static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
a9177ff5 1066#endif
fa8d436c 1067
22a89187
UD
1068static void* malloc_check(size_t sz, const void *caller);
1069static void free_check(void* mem, const void *caller);
1070static void* realloc_check(void* oldmem, size_t bytes,
1071 const void *caller);
1072static void* memalign_check(size_t alignment, size_t bytes,
1073 const void *caller);
750c1f2a 1074#ifndef NO_THREADS
22a89187
UD
1075static void* malloc_atfork(size_t sz, const void *caller);
1076static void free_atfork(void* mem, const void *caller);
750c1f2a 1077#endif
f65fd747 1078
f65fd747 1079
fa8d436c 1080/* ------------- Optional versions of memcopy ---------------- */
f65fd747 1081
a1648746 1082
a9177ff5 1083/*
fa8d436c
UD
1084 Note: memcpy is ONLY invoked with non-overlapping regions,
1085 so the (usually slower) memmove is not needed.
1086*/
a1648746 1087
fa8d436c
UD
1088#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1089#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
f65fd747 1090
f65fd747 1091
fa8d436c 1092/* ------------------ MMAP support ------------------ */
f65fd747 1093
f65fd747 1094
fa8d436c 1095#include <fcntl.h>
fa8d436c 1096#include <sys/mman.h>
f65fd747 1097
fa8d436c
UD
1098#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1099# define MAP_ANONYMOUS MAP_ANON
1100#endif
f65fd747 1101
fa8d436c 1102#ifndef MAP_NORESERVE
3b49edc0 1103# define MAP_NORESERVE 0
f65fd747
UD
1104#endif
1105
fa8d436c 1106#define MMAP(addr, size, prot, flags) \
3b49edc0 1107 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
f65fd747 1108
f65fd747
UD
1109
1110/*
fa8d436c 1111 ----------------------- Chunk representations -----------------------
f65fd747
UD
1112*/
1113
1114
fa8d436c
UD
1115/*
1116 This struct declaration is misleading (but accurate and necessary).
1117 It declares a "view" into memory allowing access to necessary
1118 fields at known offsets from a given base. See explanation below.
1119*/
1120
1121struct malloc_chunk {
1122
1123 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1124 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1125
1126 struct malloc_chunk* fd; /* double links -- used only if free. */
f65fd747 1127 struct malloc_chunk* bk;
7ecfbd38
UD
1128
1129 /* Only used for large blocks: pointer to next larger size. */
1130 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1131 struct malloc_chunk* bk_nextsize;
f65fd747
UD
1132};
1133
f65fd747
UD
1134
1135/*
f65fd747
UD
1136 malloc_chunk details:
1137
1138 (The following includes lightly edited explanations by Colin Plumb.)
1139
1140 Chunks of memory are maintained using a `boundary tag' method as
1141 described in e.g., Knuth or Standish. (See the paper by Paul
1142 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1143 survey of such techniques.) Sizes of free chunks are stored both
1144 in the front of each chunk and at the end. This makes
1145 consolidating fragmented chunks into bigger chunks very fast. The
1146 size fields also hold bits representing whether chunks are free or
1147 in use.
1148
1149 An allocated chunk looks like this:
1150
1151
1152 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72f90263
UD
1153 | Size of previous chunk, if allocated | |
1154 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1155 | Size of chunk, in bytes |M|P|
f65fd747 1156 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72f90263
UD
1157 | User data starts here... .
1158 . .
1159 . (malloc_usable_size() bytes) .
1160 . |
f65fd747 1161nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72f90263
UD
1162 | Size of chunk |
1163 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
f65fd747
UD
1164
1165
1166 Where "chunk" is the front of the chunk for the purpose of most of
1167 the malloc code, but "mem" is the pointer that is returned to the
1168 user. "Nextchunk" is the beginning of the next contiguous chunk.
1169
6f65e668 1170 Chunks always begin on even word boundaries, so the mem portion
f65fd747 1171 (which is returned to the user) is also on an even word boundary, and
fa8d436c 1172 thus at least double-word aligned.
f65fd747
UD
1173
1174 Free chunks are stored in circular doubly-linked lists, and look like this:
1175
1176 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72f90263
UD
1177 | Size of previous chunk |
1178 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
f65fd747
UD
1179 `head:' | Size of chunk, in bytes |P|
1180 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72f90263
UD
1181 | Forward pointer to next chunk in list |
1182 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1183 | Back pointer to previous chunk in list |
1184 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1185 | Unused space (may be 0 bytes long) .
1186 . .
1187 . |
f65fd747
UD
1188nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1189 `foot:' | Size of chunk, in bytes |
72f90263 1190 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
f65fd747
UD
1191
1192 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1193 chunk size (which is always a multiple of two words), is an in-use
1194 bit for the *previous* chunk. If that bit is *clear*, then the
1195 word before the current chunk size contains the previous chunk
1196 size, and can be used to find the front of the previous chunk.
fa8d436c
UD
1197 The very first chunk allocated always has this bit set,
1198 preventing access to non-existent (or non-owned) memory. If
1199 prev_inuse is set for any given chunk, then you CANNOT determine
1200 the size of the previous chunk, and might even get a memory
1201 addressing fault when trying to do so.
f65fd747
UD
1202
1203 Note that the `foot' of the current chunk is actually represented
fa8d436c
UD
1204 as the prev_size of the NEXT chunk. This makes it easier to
1205 deal with alignments etc but can be very confusing when trying
1206 to extend or adapt this code.
f65fd747
UD
1207
1208 The two exceptions to all this are
1209
fa8d436c 1210 1. The special chunk `top' doesn't bother using the
72f90263
UD
1211 trailing size field since there is no next contiguous chunk
1212 that would have to index off it. After initialization, `top'
1213 is forced to always exist. If it would become less than
1214 MINSIZE bytes long, it is replenished.
f65fd747
UD
1215
1216 2. Chunks allocated via mmap, which have the second-lowest-order
72f90263
UD
1217 bit M (IS_MMAPPED) set in their size fields. Because they are
1218 allocated one-by-one, each must contain its own trailing size field.
f65fd747
UD
1219
1220*/
1221
1222/*
fa8d436c
UD
1223 ---------- Size and alignment checks and conversions ----------
1224*/
f65fd747 1225
fa8d436c 1226/* conversion from malloc headers to user pointers, and back */
f65fd747 1227
22a89187 1228#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
fa8d436c 1229#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
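/*
  Worked example of the conversions above, assuming 8-byte
  INTERNAL_SIZE_T (so SIZE_SZ == 8 and the header is 16 bytes):

    mchunkptr p  = some_chunk;         // hypothetical chunk header
    void *mem    = chunk2mem (p);      // == (char *) p + 16
    // mem2chunk (mem) == p, i.e. the two macros round-trip exactly
*/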
f65fd747 1230
fa8d436c 1231/* The smallest possible chunk */
7ecfbd38 1232#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
f65fd747 1233
fa8d436c 1234/* The smallest size we can malloc is an aligned minimal chunk */
f65fd747 1235
fa8d436c
UD
1236#define MINSIZE \
1237 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
f65fd747 1238
fa8d436c 1239/* Check if m has acceptable alignment */
f65fd747 1240
073f560e
UD
1241#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1242
1243#define misaligned_chunk(p) \
1244 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1245 & MALLOC_ALIGN_MASK)
f65fd747 1246
f65fd747 1247
a9177ff5 1248/*
fa8d436c
UD
1249 Check if a request is so large that it would wrap around zero when
1250 padded and aligned. To simplify some other code, the bound is made
1251 low enough so that adding MINSIZE will also not wrap around zero.
1252*/
f65fd747 1253
fa8d436c
UD
1254#define REQUEST_OUT_OF_RANGE(req) \
1255 ((unsigned long)(req) >= \
a9177ff5 1256 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
f65fd747 1257
fa8d436c 1258/* pad request bytes into a usable size -- internal version */
f65fd747 1259
fa8d436c
UD
1260#define request2size(req) \
1261 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1262 MINSIZE : \
1263 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
f65fd747 1264
fa8d436c 1265/* Same, except also perform argument check */
f65fd747 1266
fa8d436c
UD
1267#define checked_request2size(req, sz) \
1268 if (REQUEST_OUT_OF_RANGE(req)) { \
8e58439c 1269 __set_errno (ENOMEM); \
fa8d436c
UD
1270 return 0; \
1271 } \
a9177ff5 1272 (sz) = request2size(req);
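/*
  Worked examples of the padding arithmetic above, assuming 8-byte
  INTERNAL_SIZE_T and 16-byte MALLOC_ALIGNMENT (so SIZE_SZ == 8,
  MALLOC_ALIGN_MASK == 15, MINSIZE == 32):

    request2size (24)  ->  (24 + 8 + 15) & ~15  ==  32
    request2size (25)  ->  (25 + 8 + 15) & ~15  ==  48
    request2size (0)   ->  falls below MINSIZE  ==  32
*/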
f65fd747
UD
1273
1274/*
fa8d436c 1275 --------------- Physical chunk operations ---------------
f65fd747
UD
1276*/
1277
10dc2a90 1278
fa8d436c
UD
1279/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1280#define PREV_INUSE 0x1
f65fd747 1281
fa8d436c
UD
1282/* extract inuse bit of previous chunk */
1283#define prev_inuse(p) ((p)->size & PREV_INUSE)
f65fd747 1284
f65fd747 1285
fa8d436c
UD
1286/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1287#define IS_MMAPPED 0x2
f65fd747 1288
fa8d436c
UD
1289/* check for mmap()'ed chunk */
1290#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
f65fd747 1291
f65fd747 1292
fa8d436c
UD
1293/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1294 from a non-main arena. This is only set immediately before handing
1295 the chunk to the user, if necessary. */
1296#define NON_MAIN_ARENA 0x4
f65fd747 1297
fa8d436c
UD
1298/* check for chunk from non-main arena */
1299#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
f65fd747
UD
1300
1301
a9177ff5
RM
1302/*
1303 Bits to mask off when extracting size
f65fd747 1304
fa8d436c
UD
1305 Note: IS_MMAPPED is intentionally not masked off from size field in
1306 macros for which mmapped chunks should never be seen. This should
1307 cause helpful core dumps to occur if it is tried by accident by
1308 people extending or adapting this malloc.
f65fd747 1309*/
fa8d436c 1310#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
f65fd747 1311
fa8d436c
UD
1312/* Get size, ignoring use bits */
1313#define chunksize(p) ((p)->size & ~(SIZE_BITS))
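/*
  Small illustration of the flag handling above (a sketch): a size
  field of 0x91 encodes a 0x90-byte chunk whose previous neighbor is
  in use and which was not obtained via mmap.

    p->size = 0x90 | PREV_INUSE;
    // chunksize (p)        == 0x90
    // prev_inuse (p)       != 0
    // chunk_is_mmapped (p) == 0
*/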
f65fd747 1314
f65fd747 1315
fa8d436c
UD
1316/* Ptr to next physical malloc_chunk. */
1317#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
f65fd747 1318
fa8d436c
UD
1319/* Ptr to previous physical malloc_chunk */
1320#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
f65fd747 1321
fa8d436c
UD
1322/* Treat space at ptr + offset as a chunk */
1323#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1324
1325/* extract p's inuse bit */
1326#define inuse(p)\
1327((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
f65fd747 1328
fa8d436c
UD
1329/* set/clear chunk as being inuse without otherwise disturbing */
1330#define set_inuse(p)\
1331((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
f65fd747 1332
fa8d436c
UD
1333#define clear_inuse(p)\
1334((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
f65fd747
UD
1335
1336
fa8d436c
UD
1337/* check/set/clear inuse bits in known places */
1338#define inuse_bit_at_offset(p, s)\
1339 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
f65fd747 1340
fa8d436c
UD
1341#define set_inuse_bit_at_offset(p, s)\
1342 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
f65fd747 1343
fa8d436c
UD
1344#define clear_inuse_bit_at_offset(p, s)\
1345 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
f65fd747 1346
f65fd747 1347
fa8d436c
UD
1348/* Set size at head, without disturbing its use bit */
1349#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
f65fd747 1350
fa8d436c
UD
1351/* Set size/use field */
1352#define set_head(p, s) ((p)->size = (s))
f65fd747 1353
fa8d436c
UD
1354/* Set size at footer (only when chunk is not in use) */
1355#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
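/*
  Illustrative sketch (not part of the original source) of how these macros
  tie neighbouring chunks together.  Assume q is a free, non-mmapped chunk
  of size 144 owned by this allocator:

    set_head(q, 144 | PREV_INUSE);    // size word at the head of q
    set_foot(q, 144);                 // copy of the size at the foot
    mchunkptr nxt = next_chunk(q);    // == (mchunkptr)((char*)q + 144)
    assert (nxt->prev_size == 144);   // written by set_foot above
    assert (prev_chunk(nxt) == q);    // the foot lets us walk backwards
*/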
f65fd747
UD
1356
1357
fa8d436c
UD
1358/*
1359 -------------------- Internal data structures --------------------
1360
1361 All internal state is held in an instance of malloc_state defined
1362 below. There are no other static variables, except in two optional
a9177ff5
RM
1363 cases:
1364 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
22a89187
UD
1365 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1366 for mmap.
fa8d436c
UD
1367
1368 Beware of lots of tricks that minimize the total bookkeeping space
1369 requirements. The result is a little over 1K bytes (for 4-byte
1370 pointers and size_t.)
1371*/
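/*
  Rough arithmetic behind the "little over 1K" claim (illustrative,
  assuming 4-byte pointers and size_t): the bins array below holds
  NBINS*2-2 == 254 pointers (about 1016 bytes), the fastbin array roughly
  40 bytes, the binmap 16 bytes, and the remaining scalar fields plus the
  mutex add on the order of another hundred bytes, for a total slightly
  above 1 KiB per arena.
*/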
f65fd747
UD
1372
1373/*
fa8d436c
UD
1374 Bins
1375
1376 An array of bin headers for free chunks. Each bin is doubly
1377 linked. The bins are approximately proportionally (log) spaced.
1378 There are a lot of these bins (128). This may look excessive, but
1379 works very well in practice. Most bins hold sizes that are
1380 unusual as malloc request sizes, but are more usual for fragments
1381 and consolidated sets of chunks, which is what these bins hold, so
1382 they can be found quickly. All procedures maintain the invariant
1383 that no consolidated chunk physically borders another one, so each
1384 chunk in a list is known to be preceded and followed by either
1385 inuse chunks or the ends of memory.
1386
1387 Chunks in bins are kept in size order, with ties going to the
1388 approximately least recently used chunk. Ordering isn't needed
1389 for the small bins, which all contain the same-sized chunks, but
1390 facilitates best-fit allocation for larger chunks. These lists
1391 are just sequential. Keeping them in order almost never requires
1392 enough traversal to warrant using fancier ordered data
a9177ff5 1393 structures.
fa8d436c
UD
1394
1395 Chunks of the same size are linked with the most
1396 recently freed at the front, and allocations are taken from the
1397 back. This results in LRU (FIFO) allocation order, which tends
1398 to give each chunk an equal opportunity to be consolidated with
1399 adjacent freed chunks, resulting in larger free chunks and less
1400 fragmentation.
1401
1402 To simplify use in double-linked lists, each bin header acts
1403 as a malloc_chunk. This avoids special-casing for headers.
1404 But to conserve space and improve locality, we allocate
1405 only the fd/bk pointers of bins, and then use repositioning tricks
a9177ff5 1406 to treat these as the fields of a malloc_chunk*.
f65fd747
UD
1407*/
1408
fa8d436c 1409typedef struct malloc_chunk* mbinptr;
f65fd747 1410
fa8d436c 1411/* addressing -- note that bin_at(0) does not exist */
41999a1a
UD
1412#define bin_at(m, i) \
1413 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1414 - offsetof (struct malloc_chunk, fd))
f65fd747 1415
fa8d436c
UD
1416/* analog of ++bin */
1417#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
f65fd747 1418
fa8d436c
UD
1419/* Reminders about list directionality within bins */
1420#define first(b) ((b)->fd)
1421#define last(b) ((b)->bk)
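/*
  Example of the repositioning trick (illustrative only): bin_at(m, i)
  points offsetof(struct malloc_chunk, fd) bytes before m->bins[(i-1)*2],
  so that

    bin_at(m, i)->fd  is exactly  m->bins[(i-1)*2]      (first chunk in bin)
    bin_at(m, i)->bk  is exactly  m->bins[(i-1)*2 + 1]  (last chunk in bin)

  and an empty bin is one whose fd and bk both point back at the bin
  header itself (see malloc_init_state below).
*/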
f65fd747 1422
fa8d436c
UD
1423/* Take a chunk off a bin list */
1424#define unlink(P, BK, FD) { \
1425 FD = P->fd; \
1426 BK = P->bk; \
3e030bd5 1427 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
6bf4302e
UD
1428 malloc_printerr (check_action, "corrupted double-linked list", P); \
1429 else { \
1430 FD->bk = BK; \
1431 BK->fd = FD; \
7ecfbd38
UD
1432 if (!in_smallbin_range (P->size) \
1433 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
1434 assert (P->fd_nextsize->bk_nextsize == P); \
1435 assert (P->bk_nextsize->fd_nextsize == P); \
1436 if (FD->fd_nextsize == NULL) { \
1437 if (P->fd_nextsize == P) \
1438 FD->fd_nextsize = FD->bk_nextsize = FD; \
1439 else { \
1440 FD->fd_nextsize = P->fd_nextsize; \
1441 FD->bk_nextsize = P->bk_nextsize; \
1442 P->fd_nextsize->bk_nextsize = FD; \
1443 P->bk_nextsize->fd_nextsize = FD; \
1444 } \
1445 } else { \
1446 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
1447 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
1448 } \
1449 } \
6bf4302e 1450 } \
fa8d436c 1451}
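/*
  Usage sketch (illustrative): callers declare two scratch chunk pointers
  and let the macro fill them in, along the lines of

    mchunkptr bck, fwd;
    unlink (victim, bck, fwd);

  The fd/bk cross-check at the top is the "corrupted double-linked list"
  hardening: a chunk is only unlinked if its neighbours still point back
  at it.
*/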
f65fd747 1452
fa8d436c
UD
1453/*
1454 Indexing
1455
1456 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1457 8 bytes apart. Larger bins are approximately logarithmically spaced:
f65fd747 1458
fa8d436c
UD
1459 64 bins of size 8
1460 32 bins of size 64
1461 16 bins of size 512
1462 8 bins of size 4096
1463 4 bins of size 32768
1464 2 bins of size 262144
1465 1 bin of size what's left
f65fd747 1466
fa8d436c
UD
1467 There is actually a little bit of slop in the numbers in bin_index
1468 for the sake of speed. This makes no difference elsewhere.
f65fd747 1469
fa8d436c
UD
1470 The bins top out around 1MB because we expect to service large
1471 requests via mmap.
b5a2bbe6
L
1472
1473 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1474 a valid chunk size the small bins are bumped up one.
fa8d436c 1475*/
f65fd747 1476
fa8d436c
UD
1477#define NBINS 128
1478#define NSMALLBINS 64
1d47e92f 1479#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
b5a2bbe6
L
1480#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
1481#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
f65fd747 1482
fa8d436c
UD
1483#define in_smallbin_range(sz) \
1484 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
f65fd747 1485
1d47e92f 1486#define smallbin_index(sz) \
b5a2bbe6
L
1487 ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
1488 + SMALLBIN_CORRECTION)
f65fd747 1489
1d47e92f 1490#define largebin_index_32(sz) \
1a31b586 1491(((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \
fa8d436c
UD
1492 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1493 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1494 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1495 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
72f90263 1496 126)
f65fd747 1497
b5a2bbe6
L
1498#define largebin_index_32_big(sz) \
1499(((((unsigned long)(sz)) >> 6) <= 45)? 49 + (((unsigned long)(sz)) >> 6): \
1500 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1501 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1502 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1503 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
1504 126)
1505
1d47e92f
UD
1506// XXX It remains to be seen whether it is good to keep the widths of
1507// XXX the buckets the same or whether it should be scaled by a factor
1508// XXX of two as well.
1509#define largebin_index_64(sz) \
1510(((((unsigned long)(sz)) >> 6) <= 48)? 48 + (((unsigned long)(sz)) >> 6): \
1511 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
1512 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
1513 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
1514 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
72f90263 1515 126)
1d47e92f
UD
1516
1517#define largebin_index(sz) \
b5a2bbe6
L
1518 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1519 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1520 : largebin_index_32 (sz))
1d47e92f 1521
fa8d436c
UD
1522#define bin_index(sz) \
1523 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
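/*
  Worked examples (illustrative, for a 64-bit build with SIZE_SZ == 8 and
  MALLOC_ALIGNMENT == 16, so MIN_LARGE_SIZE == 1024):

    bin_index(160)  == smallbin_index(160)     == 160 >> 4        == 10
    bin_index(1024) == largebin_index_64(1024) == 48 + (1024 >> 6) == 64
    bin_index(2048) == largebin_index_64(2048) == 48 + (2048 >> 6) == 80

  Sizes here are chunk sizes (already padded by request2size), not user
  request sizes.
*/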
f65fd747 1524
f65fd747
UD
1525
1526/*
fa8d436c
UD
1527 Unsorted chunks
1528
1529 All remainders from chunk splits, as well as all returned chunks,
1530 are first placed in the "unsorted" bin. They are then placed
1531 in regular bins after malloc gives them ONE chance to be used before
1532 binning. So, basically, the unsorted_chunks list acts as a queue,
1533 with chunks being placed on it in free (and malloc_consolidate),
1534 and taken off (to be either used or placed in bins) in malloc.
1535
1536 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1537 does not have to be taken into account in size comparisons.
f65fd747
UD
1538*/
1539
fa8d436c
UD
1540/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1541#define unsorted_chunks(M) (bin_at(M, 1))
f65fd747 1542
fa8d436c
UD
1543/*
1544 Top
1545
1546 The top-most available chunk (i.e., the one bordering the end of
1547 available memory) is treated specially. It is never included in
1548 any bin, is used only if no other chunk is available, and is
1549 released back to the system if it is very large (see
1550 M_TRIM_THRESHOLD). Because top initially
1551 points to its own bin with initial zero size, thus forcing
1552 extension on the first malloc request, we avoid having any special
1553 code in malloc to check whether it even exists yet. But we still
1554 need to do so when getting memory from system, so we make
1555 initial_top treat the bin as a legal but unusable chunk during the
1556 interval between initialization and the first call to
3b49edc0 1557 sysmalloc. (This is somewhat delicate, since it relies on
fa8d436c
UD
1558 the 2 preceding words to be zero during this interval as well.)
1559*/
f65fd747 1560
fa8d436c
UD
1561/* Conveniently, the unsorted bin can be used as dummy top on first call */
1562#define initial_top(M) (unsorted_chunks(M))
f65fd747 1563
fa8d436c
UD
1564/*
1565 Binmap
f65fd747 1566
fa8d436c
UD
1567 To help compensate for the large number of bins, a one-level index
1568 structure is used for bin-by-bin searching. `binmap' is a
1569 bitvector recording whether bins are definitely empty so they can
1570 be skipped over during traversals. The bits are NOT always
1571 cleared as soon as bins are empty, but instead only
1572 when they are noticed to be empty during traversal in malloc.
1573*/
f65fd747 1574
fa8d436c
UD
1575/* Conservatively use 32 bits per map word, even if on 64bit system */
1576#define BINMAPSHIFT 5
1577#define BITSPERMAP (1U << BINMAPSHIFT)
1578#define BINMAPSIZE (NBINS / BITSPERMAP)
f65fd747 1579
fa8d436c
UD
1580#define idx2block(i) ((i) >> BINMAPSHIFT)
1581#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
f65fd747 1582
fa8d436c
UD
1583#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
1584#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
1585#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
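/*
  Example (illustrative): bin 68 lives in word idx2block(68) == 68 >> 5 == 2
  of the binmap, at bit idx2bit(68) == 1 << (68 & 31) == 1 << 4.  So
  mark_bin(m, 68) sets bit 4 of m->binmap[2], and get_binmap(m, 68) tests
  exactly that bit.
*/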
f65fd747 1586
fa8d436c
UD
1587/*
1588 Fastbins
1589
1590 An array of lists holding recently freed small chunks. Fastbins
1591 are not doubly linked. It is faster to single-link them, and
1592 since chunks are never removed from the middles of these lists,
1593 double linking is not necessary. Also, unlike regular bins, they
1594 are not even processed in FIFO order (they use faster LIFO) since
1595 ordering doesn't much matter in the transient contexts in which
1596 fastbins are normally used.
1597
1598 Chunks in fastbins keep their inuse bit set, so they cannot
1599 be consolidated with other free chunks. malloc_consolidate
1600 releases all chunks in fastbins and consolidates them with
a9177ff5 1601 other free chunks.
fa8d436c 1602*/
f65fd747 1603
fa8d436c 1604typedef struct malloc_chunk* mfastbinptr;
425ce2ed 1605#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
f65fd747 1606
fa8d436c 1607/* offset 2 to use otherwise unindexable first 2 bins */
425ce2ed
UD
1608#define fastbin_index(sz) \
1609 ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1610
f65fd747 1611
fa8d436c 1612/* The maximum fastbin request size we support */
425ce2ed 1613#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
f65fd747 1614
fa8d436c 1615#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
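/*
  Worked example (illustrative, 64-bit: SIZE_SZ == 8): MAX_FAST_SIZE is
  80 * 8 / 4 == 160, request2size(160) == 176, and fastbin_index(176) ==
  (176 >> 4) - 2 == 9, so NFASTBINS == 10.  The smallest possible chunk
  (32 bytes) maps to fastbin_index(32) == 0, which is why the index is
  offset by 2.
*/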
f65fd747
UD
1616
1617/*
fa8d436c
UD
1618 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1619 that triggers automatic consolidation of possibly-surrounding
1620 fastbin chunks. This is a heuristic, so the exact value should not
1621 matter too much. It is defined at half the default trim threshold as a
1622 compromise heuristic to only attempt consolidation if it is likely
1623 to lead to trimming. However, it is not dynamically tunable, since
a9177ff5 1624 consolidation reduces fragmentation surrounding large chunks even
fa8d436c 1625 if trimming is not used.
f65fd747
UD
1626*/
1627
fa8d436c 1628#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
f65fd747
UD
1629
1630/*
a9177ff5 1631 Since the lowest 2 bits in max_fast don't matter in size comparisons,
fa8d436c 1632 they are used as flags.
f65fd747
UD
1633*/
1634
fa8d436c
UD
1635/*
1636 FASTCHUNKS_BIT held in max_fast indicates that there are probably
1637 some fastbin chunks. It is set true on entering a chunk into any
1638 fastbin, and cleared only in malloc_consolidate.
f65fd747 1639
fa8d436c
UD
1640 The truth value is inverted so that have_fastchunks will be true
1641 upon startup (since statics are zero-filled), simplifying
1642 initialization checks.
1643*/
f65fd747 1644
fa8d436c 1645#define FASTCHUNKS_BIT (1U)
f65fd747 1646
9bf248c6 1647#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
425ce2ed
UD
1648#define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
1649#define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
f65fd747
UD
1650
1651/*
fa8d436c
UD
1652 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1653 regions. Otherwise, contiguity is exploited in merging together,
1654 when possible, results from consecutive MORECORE calls.
f65fd747 1655
fa8d436c
UD
1656 The initial value comes from MORECORE_CONTIGUOUS, but is
1657 changed dynamically if mmap is ever used as an sbrk substitute.
f65fd747
UD
1658*/
1659
fa8d436c 1660#define NONCONTIGUOUS_BIT (2U)
f65fd747 1661
9bf248c6
UD
1662#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1663#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1664#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1665#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
f65fd747 1666
a9177ff5
RM
1667/*
1668 Set value of max_fast.
fa8d436c
UD
1669 Use impossibly small value if 0.
1670 Precondition: there are no existing fastbin chunks.
1671 Setting the value clears fastchunk bit but preserves noncontiguous bit.
f65fd747
UD
1672*/
1673
9bf248c6 1674#define set_max_fast(s) \
991eda1e
UD
1675 global_max_fast = (((s) == 0) \
1676 ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
9bf248c6 1677#define get_max_fast() global_max_fast
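/*
  Example (illustrative, 64-bit): set_max_fast(64) stores (64 + 8) & ~15 ==
  64, while set_max_fast(0) stores SMALLBIN_WIDTH == 16, which is below
  MINSIZE, so no chunk can ever pass the fastbin size test; that is the
  "impossibly small" case mentioned above.
*/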
f65fd747 1678
f65fd747
UD
1679
1680/*
fa8d436c 1681 ----------- Internal state representation and initialization -----------
f65fd747
UD
1682*/
1683
fa8d436c
UD
1684struct malloc_state {
1685 /* Serialize access. */
1686 mutex_t mutex;
9bf248c6
UD
1687
1688 /* Flags (formerly in max_fast). */
1689 int flags;
f65fd747 1690
4f27c496 1691#if THREAD_STATS
fa8d436c
UD
1692 /* Statistics for locking. Only used if THREAD_STATS is defined. */
1693 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
4f27c496 1694#endif
f65fd747 1695
fa8d436c 1696 /* Fastbins */
425ce2ed 1697 mfastbinptr fastbinsY[NFASTBINS];
f65fd747 1698
fa8d436c
UD
1699 /* Base of the topmost chunk -- not otherwise kept in a bin */
1700 mchunkptr top;
f65fd747 1701
fa8d436c
UD
1702 /* The remainder from the most recent split of a small request */
1703 mchunkptr last_remainder;
f65fd747 1704
fa8d436c 1705 /* Normal bins packed as described above */
41999a1a 1706 mchunkptr bins[NBINS * 2 - 2];
f65fd747 1707
fa8d436c
UD
1708 /* Bitmap of bins */
1709 unsigned int binmap[BINMAPSIZE];
f65fd747 1710
fa8d436c
UD
1711 /* Linked list */
1712 struct malloc_state *next;
f65fd747 1713
425ce2ed
UD
1714#ifdef PER_THREAD
1715 /* Linked list for free arenas. */
1716 struct malloc_state *next_free;
1717#endif
1718
fa8d436c
UD
1719 /* Memory allocated from the system in this arena. */
1720 INTERNAL_SIZE_T system_mem;
1721 INTERNAL_SIZE_T max_system_mem;
1722};
f65fd747 1723
fa8d436c
UD
1724struct malloc_par {
1725 /* Tunable parameters */
1726 unsigned long trim_threshold;
1727 INTERNAL_SIZE_T top_pad;
1728 INTERNAL_SIZE_T mmap_threshold;
425ce2ed
UD
1729#ifdef PER_THREAD
1730 INTERNAL_SIZE_T arena_test;
1731 INTERNAL_SIZE_T arena_max;
1732#endif
fa8d436c
UD
1733
1734 /* Memory map support */
1735 int n_mmaps;
1736 int n_mmaps_max;
1737 int max_n_mmaps;
1d05c2fb
UD
1738 /* the mmap_threshold is dynamic, until the user sets
1739 it manually, at which point we need to disable any
1740 dynamic behavior. */
1741 int no_dyn_threshold;
fa8d436c 1742
fa8d436c
UD
1743 /* Statistics */
1744 INTERNAL_SIZE_T mmapped_mem;
1745 /*INTERNAL_SIZE_T sbrked_mem;*/
1746 /*INTERNAL_SIZE_T max_sbrked_mem;*/
1747 INTERNAL_SIZE_T max_mmapped_mem;
1748 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
1749
1750 /* First address handed out by MORECORE/sbrk. */
1751 char* sbrk_base;
1752};
f65fd747 1753
fa8d436c
UD
1754/* There are several instances of this struct ("arenas") in this
1755 malloc. If you are adapting this malloc in a way that does NOT use
1756 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1757 before using. This malloc relies on the property that malloc_state
1758 is initialized to all zeroes (as is true of C statics). */
f65fd747 1759
02d46fc4
UD
1760static struct malloc_state main_arena =
1761 {
1762 .mutex = MUTEX_INITIALIZER,
1763 .next = &main_arena
1764 };
f65fd747 1765
fa8d436c 1766/* There is only one instance of the malloc parameters. */
f65fd747 1767
02d46fc4
UD
1768static struct malloc_par mp_ =
1769 {
1770 .top_pad = DEFAULT_TOP_PAD,
1771 .n_mmaps_max = DEFAULT_MMAP_MAX,
1772 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1773 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1774#ifdef PER_THREAD
1775# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
1776 .arena_test = NARENAS_FROM_NCORES (1)
1777#endif
1778 };
f65fd747 1779
9bf248c6 1780
425ce2ed
UD
1781#ifdef PER_THREAD
1782/* Non public mallopt parameters. */
1783#define M_ARENA_TEST -7
1784#define M_ARENA_MAX -8
1785#endif
1786
1787
9bf248c6
UD
1788/* Maximum size of memory handled in fastbins. */
1789static INTERNAL_SIZE_T global_max_fast;
1790
fa8d436c
UD
1791/*
1792 Initialize a malloc_state struct.
f65fd747 1793
fa8d436c
UD
1794 This is called only from within malloc_consolidate, which needs
1795 to be called in the same contexts anyway. It is never called directly
1796 outside of malloc_consolidate because some optimizing compilers try
1797 to inline it at all call points, which turns out not to be an
1798 optimization at all. (Inlining it in malloc_consolidate is fine though.)
1799*/
f65fd747 1800
fa8d436c 1801static void malloc_init_state(mstate av)
fa8d436c
UD
1802{
1803 int i;
1804 mbinptr bin;
a9177ff5 1805
fa8d436c 1806 /* Establish circular links for normal bins */
a9177ff5 1807 for (i = 1; i < NBINS; ++i) {
fa8d436c
UD
1808 bin = bin_at(av,i);
1809 bin->fd = bin->bk = bin;
1810 }
f65fd747 1811
fa8d436c
UD
1812#if MORECORE_CONTIGUOUS
1813 if (av != &main_arena)
1814#endif
1815 set_noncontiguous(av);
9bf248c6
UD
1816 if (av == &main_arena)
1817 set_max_fast(DEFAULT_MXFAST);
1818 av->flags |= FASTCHUNKS_BIT;
f65fd747 1819
fa8d436c
UD
1820 av->top = initial_top(av);
1821}
e9b3e3c5 1822
a9177ff5 1823/*
fa8d436c
UD
1824 Other internal utilities operating on mstates
1825*/
f65fd747 1826
3b49edc0
UD
1827static void* sysmalloc(INTERNAL_SIZE_T, mstate);
1828static int systrim(size_t, mstate);
fa8d436c 1829static void malloc_consolidate(mstate);
7e3be507 1830
404d4cef
RM
1831
1832/* -------------- Early definitions for debugging hooks ---------------- */
1833
1834/* Define and initialize the hook variables. These weak definitions must
1835 appear before any use of the variables in a function (arena.c uses one). */
1836#ifndef weak_variable
404d4cef
RM
1837/* In GNU libc we want the hook variables to be weak definitions to
1838 avoid a problem with Emacs. */
22a89187 1839# define weak_variable weak_function
404d4cef
RM
1840#endif
1841
1842/* Forward declarations. */
375607b9 1843static void* malloc_hook_ini (size_t sz,
a222d91a 1844 const void *caller) __THROW;
375607b9 1845static void* realloc_hook_ini (void* ptr, size_t sz,
a222d91a 1846 const void *caller) __THROW;
375607b9 1847static void* memalign_hook_ini (size_t alignment, size_t sz,
a222d91a 1848 const void *caller) __THROW;
404d4cef 1849
06d6611a 1850void weak_variable (*__malloc_initialize_hook) (void) = NULL;
a222d91a
JM
1851void weak_variable (*__free_hook) (void *__ptr,
1852 const void *) = NULL;
1853void *weak_variable (*__malloc_hook)
1854 (size_t __size, const void *) = malloc_hook_ini;
1855void *weak_variable (*__realloc_hook)
1856 (void *__ptr, size_t __size, const void *)
404d4cef 1857 = realloc_hook_ini;
a222d91a
JM
1858void *weak_variable (*__memalign_hook)
1859 (size_t __alignment, size_t __size, const void *)
404d4cef 1860 = memalign_hook_ini;
06d6611a 1861void weak_variable (*__after_morecore_hook) (void) = NULL;
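/*
  Typical (historical) use of these hooks, shown only as a sketch and not
  part of the original source: a debugging library saves the old hook,
  installs its own, and temporarily restores the old one while calling back
  into malloc to avoid recursing into itself:

    static void *(*old_malloc_hook) (size_t, const void *);

    static void *
    my_malloc_hook (size_t size, const void *caller)
    {
      void *result;
      __malloc_hook = old_malloc_hook;   // avoid recursing into ourselves
      result = malloc (size);            // record/inspect the allocation here
      old_malloc_hook = __malloc_hook;   // malloc may have changed it
      __malloc_hook = my_malloc_hook;    // re-install our hook
      return result;
    }
*/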
404d4cef
RM
1862
1863
3e030bd5
UD
1864/* ---------------- Error behavior ------------------------------------ */
1865
1866#ifndef DEFAULT_CHECK_ACTION
1867#define DEFAULT_CHECK_ACTION 3
1868#endif
1869
1870static int check_action = DEFAULT_CHECK_ACTION;
1871
1872
854278df
UD
1873/* ------------------ Testing support ----------------------------------*/
1874
1875static int perturb_byte;
1876
1877#define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
1878#define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
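/*
  Illustrative note (not part of the original source): perturb_byte is set
  through mallopt (M_PERTURB).  For example, after mallopt (M_PERTURB, 0xaa)
  freed memory is filled with 0xaa and freshly allocated memory with 0x55
  (the complement), which helps catch use-after-free and
  use-of-uninitialized-memory bugs in test runs.
*/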
1879
1880
3ea5be54
AO
1881#include <stap-probe.h>
1882
fa8d436c
UD
1883/* ------------------- Support for multiple arenas -------------------- */
1884#include "arena.c"
f65fd747 1885
fa8d436c
UD
1886/*
1887 Debugging support
f65fd747 1888
fa8d436c
UD
1889 These routines make a number of assertions about the states
1890 of data structures that should be true at all times. If any
1891 are not true, it's very likely that a user program has somehow
1892 trashed memory. (It's also possible that there is a coding error
1893 in malloc. In which case, please report it!)
1894*/
ee74a442 1895
fa8d436c 1896#if ! MALLOC_DEBUG
d8f00d46 1897
fa8d436c
UD
1898#define check_chunk(A,P)
1899#define check_free_chunk(A,P)
1900#define check_inuse_chunk(A,P)
1901#define check_remalloced_chunk(A,P,N)
1902#define check_malloced_chunk(A,P,N)
1903#define check_malloc_state(A)
d8f00d46 1904
fa8d436c 1905#else
ca34d7a7 1906
fa8d436c
UD
1907#define check_chunk(A,P) do_check_chunk(A,P)
1908#define check_free_chunk(A,P) do_check_free_chunk(A,P)
1909#define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
1910#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
1911#define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
1912#define check_malloc_state(A) do_check_malloc_state(A)
ca34d7a7 1913
fa8d436c
UD
1914/*
1915 Properties of all chunks
1916*/
ca34d7a7 1917
fa8d436c 1918static void do_check_chunk(mstate av, mchunkptr p)
ca34d7a7 1919{
fa8d436c
UD
1920 unsigned long sz = chunksize(p);
1921 /* min and max possible addresses assuming contiguous allocation */
1922 char* max_address = (char*)(av->top) + chunksize(av->top);
1923 char* min_address = max_address - av->system_mem;
1924
1925 if (!chunk_is_mmapped(p)) {
a9177ff5 1926
fa8d436c
UD
1927 /* Has legal address ... */
1928 if (p != av->top) {
1929 if (contiguous(av)) {
72f90263
UD
1930 assert(((char*)p) >= min_address);
1931 assert(((char*)p + sz) <= ((char*)(av->top)));
fa8d436c
UD
1932 }
1933 }
1934 else {
1935 /* top size is always at least MINSIZE */
1936 assert((unsigned long)(sz) >= MINSIZE);
1937 /* top predecessor always marked inuse */
1938 assert(prev_inuse(p));
1939 }
a9177ff5 1940
ca34d7a7 1941 }
fa8d436c 1942 else {
fa8d436c
UD
1943 /* address is outside main heap */
1944 if (contiguous(av) && av->top != initial_top(av)) {
2acd01ac 1945 assert(((char*)p) < min_address || ((char*)p) >= max_address);
fa8d436c
UD
1946 }
1947 /* chunk is page-aligned */
02d46fc4 1948 assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0);
fa8d436c
UD
1949 /* mem is aligned */
1950 assert(aligned_OK(chunk2mem(p)));
eb406346 1951 }
eb406346
UD
1952}
1953
fa8d436c
UD
1954/*
1955 Properties of free chunks
1956*/
ee74a442 1957
fa8d436c 1958static void do_check_free_chunk(mstate av, mchunkptr p)
67c94753 1959{
fa8d436c
UD
1960 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
1961 mchunkptr next = chunk_at_offset(p, sz);
67c94753 1962
fa8d436c 1963 do_check_chunk(av, p);
67c94753 1964
fa8d436c
UD
1965 /* Chunk must claim to be free ... */
1966 assert(!inuse(p));
1967 assert (!chunk_is_mmapped(p));
67c94753 1968
fa8d436c
UD
1969 /* Unless a special marker, must have OK fields */
1970 if ((unsigned long)(sz) >= MINSIZE)
1971 {
1972 assert((sz & MALLOC_ALIGN_MASK) == 0);
1973 assert(aligned_OK(chunk2mem(p)));
1974 /* ... matching footer field */
1975 assert(next->prev_size == sz);
1976 /* ... and is fully consolidated */
1977 assert(prev_inuse(p));
1978 assert (next == av->top || inuse(next));
1979
1980 /* ... and has minimally sane links */
1981 assert(p->fd->bk == p);
1982 assert(p->bk->fd == p);
1983 }
1984 else /* markers are always of size SIZE_SZ */
1985 assert(sz == SIZE_SZ);
67c94753 1986}
67c94753 1987
fa8d436c
UD
1988/*
1989 Properties of inuse chunks
1990*/
1991
fa8d436c 1992static void do_check_inuse_chunk(mstate av, mchunkptr p)
f65fd747 1993{
fa8d436c 1994 mchunkptr next;
f65fd747 1995
fa8d436c 1996 do_check_chunk(av, p);
f65fd747 1997
fa8d436c
UD
1998 if (chunk_is_mmapped(p))
1999 return; /* mmapped chunks have no next/prev */
ca34d7a7 2000
fa8d436c
UD
2001 /* Check whether it claims to be in use ... */
2002 assert(inuse(p));
10dc2a90 2003
fa8d436c 2004 next = next_chunk(p);
10dc2a90 2005
fa8d436c
UD
2006 /* ... and is surrounded by OK chunks.
2007 Since more things can be checked with free chunks than inuse ones,
2008 if an inuse chunk borders them and debug is on, it's worth doing them.
2009 */
2010 if (!prev_inuse(p)) {
2011 /* Note that we cannot even look at prev unless it is not inuse */
2012 mchunkptr prv = prev_chunk(p);
2013 assert(next_chunk(prv) == p);
2014 do_check_free_chunk(av, prv);
2015 }
2016
2017 if (next == av->top) {
2018 assert(prev_inuse(next));
2019 assert(chunksize(next) >= MINSIZE);
2020 }
2021 else if (!inuse(next))
2022 do_check_free_chunk(av, next);
10dc2a90
UD
2023}
2024
fa8d436c
UD
2025/*
2026 Properties of chunks recycled from fastbins
2027*/
2028
fa8d436c 2029static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2030{
fa8d436c
UD
2031 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2032
2033 if (!chunk_is_mmapped(p)) {
2034 assert(av == arena_for_chunk(p));
2035 if (chunk_non_main_arena(p))
2036 assert(av != &main_arena);
2037 else
2038 assert(av == &main_arena);
2039 }
2040
2041 do_check_inuse_chunk(av, p);
2042
2043 /* Legal size ... */
2044 assert((sz & MALLOC_ALIGN_MASK) == 0);
2045 assert((unsigned long)(sz) >= MINSIZE);
2046 /* ... and alignment */
2047 assert(aligned_OK(chunk2mem(p)));
2048 /* chunk is less than MINSIZE more than request */
2049 assert((long)(sz) - (long)(s) >= 0);
2050 assert((long)(sz) - (long)(s + MINSIZE) < 0);
10dc2a90
UD
2051}
2052
fa8d436c
UD
2053/*
2054 Properties of nonrecycled chunks at the point they are malloced
2055*/
2056
fa8d436c 2057static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2058{
fa8d436c
UD
2059 /* same as recycled case ... */
2060 do_check_remalloced_chunk(av, p, s);
10dc2a90 2061
fa8d436c
UD
2062 /*
2063 ... plus, must obey implementation invariant that prev_inuse is
2064 always true of any allocated chunk; i.e., that each allocated
2065 chunk borders either a previously allocated and still in-use
2066 chunk, or the base of its memory arena. This is ensured
ded5b9b7 2067 by making all allocations from the `lowest' part of any found
fa8d436c
UD
2068 chunk. This does not necessarily hold however for chunks
2069 recycled via fastbins.
2070 */
10dc2a90 2071
fa8d436c
UD
2072 assert(prev_inuse(p));
2073}
10dc2a90 2074
f65fd747 2075
fa8d436c
UD
2076/*
2077 Properties of malloc_state.
f65fd747 2078
fa8d436c
UD
2079 This may be useful for debugging malloc, as well as detecting user
2080 programmer errors that somehow write into malloc_state.
f65fd747 2081
fa8d436c
UD
2082 If you are extending or experimenting with this malloc, you can
2083 probably figure out how to hack this routine to print out or
2084 display chunk addresses, sizes, bins, and other instrumentation.
2085*/
f65fd747 2086
fa8d436c
UD
2087static void do_check_malloc_state(mstate av)
2088{
2089 int i;
2090 mchunkptr p;
2091 mchunkptr q;
2092 mbinptr b;
fa8d436c
UD
2093 unsigned int idx;
2094 INTERNAL_SIZE_T size;
2095 unsigned long total = 0;
2096 int max_fast_bin;
f65fd747 2097
fa8d436c
UD
2098 /* internal size_t must be no wider than pointer type */
2099 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
f65fd747 2100
fa8d436c
UD
2101 /* alignment is a power of 2 */
2102 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
f65fd747 2103
fa8d436c
UD
2104 /* cannot run remaining checks until fully initialized */
2105 if (av->top == 0 || av->top == initial_top(av))
2106 return;
f65fd747 2107
fa8d436c 2108 /* pagesize is a power of 2 */
02d46fc4 2109 assert((GLRO(dl_pagesize) & (GLRO(dl_pagesize)-1)) == 0);
f65fd747 2110
fa8d436c
UD
2111 /* A contiguous main_arena is consistent with sbrk_base. */
2112 if (av == &main_arena && contiguous(av))
2113 assert((char*)mp_.sbrk_base + av->system_mem ==
2114 (char*)av->top + chunksize(av->top));
2115
2116 /* properties of fastbins */
2117
2118 /* max_fast is in allowed range */
9bf248c6 2119 assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
fa8d436c 2120
9bf248c6 2121 max_fast_bin = fastbin_index(get_max_fast ());
fa8d436c
UD
2122
2123 for (i = 0; i < NFASTBINS; ++i) {
97983464 2124 p = fastbin (av, i);
fa8d436c 2125
11bf311e
UD
2126 /* The following test can only be performed for the main arena.
2127 While mallopt calls malloc_consolidate to get rid of all fast
2128 bins (especially those larger than the new maximum) this does
2129 only happen for the main arena. Trying to do this for any
2130 other arena would mean those arenas have to be locked and
2131 malloc_consolidate be called for them. This is excessive. And
2132 even if this is acceptable to somebody it still cannot solve
2133 the problem completely since if the arena is locked a
2134 concurrent malloc call might create a new arena which then
2135 could use the newly invalid fast bins. */
2136
fa8d436c 2137 /* all bins past max_fast are empty */
11bf311e 2138 if (av == &main_arena && i > max_fast_bin)
fa8d436c
UD
2139 assert(p == 0);
2140
2141 while (p != 0) {
2142 /* each chunk claims to be inuse */
2143 do_check_inuse_chunk(av, p);
2144 total += chunksize(p);
2145 /* chunk belongs in this bin */
2146 assert(fastbin_index(chunksize(p)) == i);
2147 p = p->fd;
2148 }
2149 }
2150
2151 if (total != 0)
2152 assert(have_fastchunks(av));
2153 else if (!have_fastchunks(av))
2154 assert(total == 0);
2155
2156 /* check normal bins */
2157 for (i = 1; i < NBINS; ++i) {
2158 b = bin_at(av,i);
2159
2160 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2161 if (i >= 2) {
68631c8e
UD
2162 unsigned int binbit = get_binmap(av,i);
2163 int empty = last(b) == b;
fa8d436c 2164 if (!binbit)
72f90263 2165 assert(empty);
fa8d436c 2166 else if (!empty)
72f90263 2167 assert(binbit);
fa8d436c
UD
2168 }
2169
2170 for (p = last(b); p != b; p = p->bk) {
2171 /* each chunk claims to be free */
2172 do_check_free_chunk(av, p);
2173 size = chunksize(p);
2174 total += size;
2175 if (i >= 2) {
72f90263
UD
2176 /* chunk belongs in bin */
2177 idx = bin_index(size);
2178 assert(idx == i);
2179 /* lists are sorted */
2180 assert(p->bk == b ||
2181 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
7ecfbd38
UD
2182
2183 if (!in_smallbin_range(size))
2184 {
2185 if (p->fd_nextsize != NULL)
2186 {
2187 if (p->fd_nextsize == p)
2188 assert (p->bk_nextsize == p);
2189 else
2190 {
2191 if (p->fd_nextsize == first (b))
2192 assert (chunksize (p) < chunksize (p->fd_nextsize));
2193 else
2194 assert (chunksize (p) > chunksize (p->fd_nextsize));
2195
2196 if (p == first (b))
2197 assert (chunksize (p) > chunksize (p->bk_nextsize));
2198 else
2199 assert (chunksize (p) < chunksize (p->bk_nextsize));
2200 }
2201 }
2202 else
2203 assert (p->bk_nextsize == NULL);
2204 }
2205 } else if (!in_smallbin_range(size))
2206 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
fa8d436c
UD
2207 /* chunk is followed by a legal chain of inuse chunks */
2208 for (q = next_chunk(p);
72f90263
UD
2209 (q != av->top && inuse(q) &&
2210 (unsigned long)(chunksize(q)) >= MINSIZE);
2211 q = next_chunk(q))
2212 do_check_inuse_chunk(av, q);
fa8d436c
UD
2213 }
2214 }
f65fd747 2215
fa8d436c
UD
2216 /* top chunk is OK */
2217 check_chunk(av, av->top);
2218
fa8d436c
UD
2219}
2220#endif
2221
2222
2223/* ----------------- Support for debugging hooks -------------------- */
2224#include "hooks.c"
2225
2226
2227/* ----------- Routines dealing with system allocation -------------- */
2228
2229/*
2230 sysmalloc handles malloc cases requiring more memory from the system.
2231 On entry, it is assumed that av->top does not have enough
2232 space to service request for nb bytes, thus requiring that av->top
2233 be extended or replaced.
2234*/
2235
3b49edc0 2236static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
f65fd747 2237{
fa8d436c
UD
2238 mchunkptr old_top; /* incoming value of av->top */
2239 INTERNAL_SIZE_T old_size; /* its size */
2240 char* old_end; /* its end address */
f65fd747 2241
fa8d436c
UD
2242 long size; /* arg to first MORECORE or mmap call */
2243 char* brk; /* return value from MORECORE */
f65fd747 2244
fa8d436c
UD
2245 long correction; /* arg to 2nd MORECORE call */
2246 char* snd_brk; /* 2nd return val */
f65fd747 2247
fa8d436c
UD
2248 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2249 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2250 char* aligned_brk; /* aligned offset into brk */
f65fd747 2251
fa8d436c
UD
2252 mchunkptr p; /* the allocated/returned chunk */
2253 mchunkptr remainder; /* remainder from allocation */
2254 unsigned long remainder_size; /* its size */
2255
2256 unsigned long sum; /* for updating stats */
2257
02d46fc4 2258 size_t pagemask = GLRO(dl_pagesize) - 1;
7463d5cb 2259 bool tried_mmap = false;
fa8d436c
UD
2260
2261
fa8d436c
UD
2262 /*
2263 If have mmap, and the request size meets the mmap threshold, and
2264 the system supports mmap, and there are few enough currently
2265 allocated mmapped regions, try to directly map this request
2266 rather than expanding top.
2267 */
f65fd747 2268
fa8d436c
UD
2269 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2270 (mp_.n_mmaps < mp_.n_mmaps_max)) {
f65fd747 2271
fa8d436c
UD
2272 char* mm; /* return value from mmap call*/
2273
e404fb16 2274 try_mmap:
fa8d436c
UD
2275 /*
2276 Round up size to nearest page. For mmapped chunks, the overhead
2277 is one SIZE_SZ unit larger than for normal chunks, because there
2278 is no following chunk whose prev_size field could be used.
22a89187
UD
2279
2280 See the front_misalign handling below; for glibc there is no
b5a2bbe6
L
2281 need for further alignment unless we have high alignment.
2282 */
2283 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2284 size = (nb + SIZE_SZ + pagemask) & ~pagemask;
2285 else
2286 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
7463d5cb 2287 tried_mmap = true;
fa8d436c
UD
2288
2289 /* Don't try if size wraps around 0 */
2290 if ((unsigned long)(size) > (unsigned long)(nb)) {
2291
3b49edc0 2292 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, 0));
a9177ff5 2293
fa8d436c 2294 if (mm != MAP_FAILED) {
a9177ff5 2295
72f90263
UD
2296 /*
2297 The offset to the start of the mmapped region is stored
2298 in the prev_size field of the chunk. This allows us to adjust
2299 returned start address to meet alignment requirements here
2300 and in memalign(), and still be able to compute proper
2301 address argument for later munmap in free() and realloc().
b5a2bbe6 2302 */
a9177ff5 2303
b5a2bbe6
L
2304 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2305 {
2306 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2307 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2308 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2309 assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
2310 front_misalign = 0;
2311 }
2312 else
2313 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2314 if (front_misalign > 0) {
2315 correction = MALLOC_ALIGNMENT - front_misalign;
2316 p = (mchunkptr)(mm + correction);
2317 p->prev_size = correction;
2318 set_head(p, (size - correction) |IS_MMAPPED);
2319 }
2320 else
2321 {
2322 p = (mchunkptr)mm;
2323 set_head(p, size|IS_MMAPPED);
2324 }
a9177ff5 2325
72f90263 2326 /* update statistics */
a9177ff5 2327
72f90263
UD
2328 if (++mp_.n_mmaps > mp_.max_n_mmaps)
2329 mp_.max_n_mmaps = mp_.n_mmaps;
a9177ff5 2330
72f90263
UD
2331 sum = mp_.mmapped_mem += size;
2332 if (sum > (unsigned long)(mp_.max_mmapped_mem))
2333 mp_.max_mmapped_mem = sum;
fa8d436c 2334
72f90263 2335 check_chunk(av, p);
a9177ff5 2336
72f90263 2337 return chunk2mem(p);
fa8d436c
UD
2338 }
2339 }
2340 }
fa8d436c
UD
2341
2342 /* Record incoming configuration of top */
2343
2344 old_top = av->top;
2345 old_size = chunksize(old_top);
2346 old_end = (char*)(chunk_at_offset(old_top, old_size));
2347
a9177ff5 2348 brk = snd_brk = (char*)(MORECORE_FAILURE);
fa8d436c 2349
a9177ff5 2350 /*
fa8d436c
UD
2351 If not the first time through, we require old_size to be
2352 at least MINSIZE and to have prev_inuse set.
2353 */
2354
a9177ff5 2355 assert((old_top == initial_top(av) && old_size == 0) ||
72f90263
UD
2356 ((unsigned long) (old_size) >= MINSIZE &&
2357 prev_inuse(old_top) &&
fa8d436c
UD
2358 ((unsigned long)old_end & pagemask) == 0));
2359
2360 /* Precondition: not enough current space to satisfy nb request */
2361 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2362
fa8d436c
UD
2363
2364 if (av != &main_arena) {
2365
2366 heap_info *old_heap, *heap;
2367 size_t old_heap_size;
2368
2369 /* First try to extend the current heap. */
2370 old_heap = heap_for_ptr(old_top);
2371 old_heap_size = old_heap->size;
469615bd
UD
2372 if ((long) (MINSIZE + nb - old_size) > 0
2373 && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
fa8d436c
UD
2374 av->system_mem += old_heap->size - old_heap_size;
2375 arena_mem += old_heap->size - old_heap_size;
fa8d436c
UD
2376 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2377 | PREV_INUSE);
e6ac0e78
UD
2378 }
2379 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
2380 /* Use a newly allocated heap. */
2381 heap->ar_ptr = av;
2382 heap->prev = old_heap;
2383 av->system_mem += heap->size;
2384 arena_mem += heap->size;
fa8d436c
UD
2385 /* Set up the new top. */
2386 top(av) = chunk_at_offset(heap, sizeof(*heap));
2387 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2388
347c92e9
L
2389 /* Setup fencepost and free the old top chunk with a multiple of
2390 MALLOC_ALIGNMENT in size. */
fa8d436c
UD
2391 /* The fencepost takes at least MINSIZE bytes, because it might
2392 become the top chunk again later. Note that a footer is set
2393 up, too, although the chunk is marked in use. */
347c92e9 2394 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
fa8d436c
UD
2395 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
2396 if (old_size >= MINSIZE) {
2397 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
2398 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
2399 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
425ce2ed 2400 _int_free(av, old_top, 1);
fa8d436c
UD
2401 } else {
2402 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
2403 set_foot(old_top, (old_size + 2*SIZE_SZ));
2404 }
2405 }
7463d5cb 2406 else if (!tried_mmap)
e404fb16
UD
2407 /* We can at least try to use mmap memory. */
2408 goto try_mmap;
fa8d436c
UD
2409
2410 } else { /* av == main_arena */
2411
2412
2413 /* Request enough space for nb + pad + overhead */
2414
2415 size = nb + mp_.top_pad + MINSIZE;
2416
2417 /*
2418 If contiguous, we can subtract out existing space that we hope to
2419 combine with new space. We add it back later only if
2420 we don't actually get contiguous space.
2421 */
2422
2423 if (contiguous(av))
2424 size -= old_size;
2425
2426 /*
2427 Round to a multiple of page size.
2428 If MORECORE is not contiguous, this ensures that we only call it
2429 with whole-page arguments. And if MORECORE is contiguous and
2430 this is not first time through, this preserves page-alignment of
2431 previous calls. Otherwise, we correct to page-align below.
2432 */
2433
2434 size = (size + pagemask) & ~pagemask;
2435
2436 /*
2437 Don't try to call MORECORE if argument is so big as to appear
2438 negative. Note that since mmap takes size_t arg, it may succeed
2439 below even if we cannot call MORECORE.
2440 */
2441
322dea08 2442 if (size > 0) {
fa8d436c 2443 brk = (char*)(MORECORE(size));
322dea08
AO
2444 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2445 }
fa8d436c
UD
2446
2447 if (brk != (char*)(MORECORE_FAILURE)) {
2448 /* Call the `morecore' hook if necessary. */
df77455c
UD
2449 void (*hook) (void) = force_reg (__after_morecore_hook);
2450 if (__builtin_expect (hook != NULL, 0))
2451 (*hook) ();
fa8d436c
UD
2452 } else {
2453 /*
2454 If have mmap, try using it as a backup when MORECORE fails or
2455 cannot be used. This is worth doing on systems that have "holes" in
2456 address space, so sbrk cannot extend to give contiguous space, but
2457 space is available elsewhere. Note that we ignore mmap max count
2458 and threshold limits, since the space will not be used as a
2459 segregated mmap region.
2460 */
2461
fa8d436c
UD
2462 /* Cannot merge with old top, so add its size back in */
2463 if (contiguous(av))
2464 size = (size + old_size + pagemask) & ~pagemask;
2465
2466 /* If we are relying on mmap as backup, then use larger units */
2467 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
2468 size = MMAP_AS_MORECORE_SIZE;
2469
2470 /* Don't try if size wraps around 0 */
2471 if ((unsigned long)(size) > (unsigned long)(nb)) {
2472
3b49edc0 2473 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, 0));
a9177ff5 2474
75bfdfc7 2475 if (mbrk != MAP_FAILED) {
a9177ff5 2476
72f90263
UD
2477 /* We do not need, and cannot use, another sbrk call to find end */
2478 brk = mbrk;
2479 snd_brk = brk + size;
2480
2481 /*
2482 Record that we no longer have a contiguous sbrk region.
2483 After the first time mmap is used as backup, we do not
2484 ever rely on contiguous space since this could incorrectly
2485 bridge regions.
2486 */
2487 set_noncontiguous(av);
fa8d436c
UD
2488 }
2489 }
fa8d436c
UD
2490 }
2491
2492 if (brk != (char*)(MORECORE_FAILURE)) {
2493 if (mp_.sbrk_base == 0)
2494 mp_.sbrk_base = brk;
2495 av->system_mem += size;
2496
2497 /*
2498 If MORECORE extends previous space, we can likewise extend top size.
2499 */
a9177ff5 2500
fa8d436c
UD
2501 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
2502 set_head(old_top, (size + old_size) | PREV_INUSE);
2503
886d5973 2504 else if (contiguous(av) && old_size && brk < old_end) {
fa8d436c 2505 /* Oops! Someone else killed our space.. Can't touch anything. */
1fdd89a7 2506 malloc_printerr (3, "break adjusted to free malloc space", brk);
fa8d436c
UD
2507 }
2508
2509 /*
2510 Otherwise, make adjustments:
a9177ff5 2511
fa8d436c 2512 * If the first time through or noncontiguous, we need to call sbrk
72f90263 2513 just to find out where the end of memory lies.
fa8d436c
UD
2514
2515 * We need to ensure that all returned chunks from malloc will meet
72f90263 2516 MALLOC_ALIGNMENT
fa8d436c
UD
2517
2518 * If there was an intervening foreign sbrk, we need to adjust sbrk
72f90263
UD
2519 request size to account for fact that we will not be able to
2520 combine new space with existing space in old_top.
fa8d436c
UD
2521
2522 * Almost all systems internally allocate whole pages at a time, in
72f90263
UD
2523 which case we might as well use the whole last page of request.
2524 So we allocate enough more memory to hit a page boundary now,
2525 which in turn causes future contiguous calls to page-align.
fa8d436c 2526 */
a9177ff5 2527
fa8d436c 2528 else {
fa8d436c
UD
2529 front_misalign = 0;
2530 end_misalign = 0;
2531 correction = 0;
2532 aligned_brk = brk;
a9177ff5 2533
fa8d436c 2534 /* handle contiguous cases */
a9177ff5
RM
2535 if (contiguous(av)) {
2536
0cb71e02
UD
2537 /* Count foreign sbrk as system_mem. */
2538 if (old_size)
2539 av->system_mem += brk - old_end;
2540
72f90263 2541 /* Guarantee alignment of first new chunk made from this space */
fa8d436c 2542
72f90263
UD
2543 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2544 if (front_misalign > 0) {
fa8d436c 2545
72f90263
UD
2546 /*
2547 Skip over some bytes to arrive at an aligned position.
2548 We don't need to specially mark these wasted front bytes.
2549 They will never be accessed anyway because
2550 prev_inuse of av->top (and any chunk created from its start)
2551 is always true after initialization.
2552 */
fa8d436c 2553
72f90263
UD
2554 correction = MALLOC_ALIGNMENT - front_misalign;
2555 aligned_brk += correction;
2556 }
a9177ff5 2557
72f90263
UD
2558 /*
2559 If this isn't adjacent to existing space, then we will not
2560 be able to merge with old_top space, so must add to 2nd request.
2561 */
a9177ff5 2562
72f90263 2563 correction += old_size;
a9177ff5 2564
72f90263
UD
2565 /* Extend the end address to hit a page boundary */
2566 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
2567 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
a9177ff5 2568
72f90263
UD
2569 assert(correction >= 0);
2570 snd_brk = (char*)(MORECORE(correction));
a9177ff5 2571
72f90263
UD
2572 /*
2573 If can't allocate correction, try to at least find out current
2574 brk. It might be enough to proceed without failing.
a9177ff5 2575
72f90263
UD
2576 Note that if second sbrk did NOT fail, we assume that space
2577 is contiguous with first sbrk. This is a safe assumption unless
2578 program is multithreaded but doesn't use locks and a foreign sbrk
2579 occurred between our first and second calls.
2580 */
a9177ff5 2581
72f90263
UD
2582 if (snd_brk == (char*)(MORECORE_FAILURE)) {
2583 correction = 0;
2584 snd_brk = (char*)(MORECORE(0));
2585 } else {
fa8d436c 2586 /* Call the `morecore' hook if necessary. */
df77455c
UD
2587 void (*hook) (void) = force_reg (__after_morecore_hook);
2588 if (__builtin_expect (hook != NULL, 0))
2589 (*hook) ();
2590 }
fa8d436c 2591 }
a9177ff5 2592
fa8d436c 2593 /* handle non-contiguous cases */
a9177ff5 2594 else {
b5a2bbe6
L
2595 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2596 /* MORECORE/mmap must correctly align */
2597 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
2598 else {
2599 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
2600 if (front_misalign > 0) {
2601
2602 /*
2603 Skip over some bytes to arrive at an aligned position.
2604 We don't need to specially mark these wasted front bytes.
2605 They will never be accessed anyway because
2606 prev_inuse of av->top (and any chunk created from its start)
2607 is always true after initialization.
2608 */
2609
2610 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2611 }
2612 }
a9177ff5 2613
72f90263
UD
2614 /* Find out current end of memory */
2615 if (snd_brk == (char*)(MORECORE_FAILURE)) {
2616 snd_brk = (char*)(MORECORE(0));
2617 }
fa8d436c 2618 }
a9177ff5 2619
fa8d436c
UD
2620 /* Adjust top based on results of second sbrk */
2621 if (snd_brk != (char*)(MORECORE_FAILURE)) {
72f90263
UD
2622 av->top = (mchunkptr)aligned_brk;
2623 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2624 av->system_mem += correction;
2625
2626 /*
2627 If not the first time through, we either have a
2628 gap due to foreign sbrk or a non-contiguous region. Insert a
2629 double fencepost at old_top to prevent consolidation with space
2630 we don't own. These fenceposts are artificial chunks that are
2631 marked as inuse and are in any case too small to use. We need
2632 two to make sizes and alignments work out.
2633 */
2634
2635 if (old_size != 0) {
2636 /*
2637 Shrink old_top to insert fenceposts, keeping size a
2638 multiple of MALLOC_ALIGNMENT. We know there is at least
2639 enough space in old_top to do this.
2640 */
2641 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2642 set_head(old_top, old_size | PREV_INUSE);
2643
2644 /*
2645 Note that the following assignments completely overwrite
2646 old_top when old_size was previously MINSIZE. This is
2647 intentional. We need the fencepost, even if old_top otherwise gets
2648 lost.
2649 */
2650 chunk_at_offset(old_top, old_size )->size =
2651 (2*SIZE_SZ)|PREV_INUSE;
2652
2653 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
2654 (2*SIZE_SZ)|PREV_INUSE;
2655
2656 /* If possible, release the rest. */
2657 if (old_size >= MINSIZE) {
72f90263 2658 _int_free(av, old_top, 1);
72f90263 2659 }
fa8d436c 2660
72f90263 2661 }
fa8d436c
UD
2662 }
2663 }
fa8d436c
UD
2664 }
2665
2666 } /* if (av != &main_arena) */
2667
2668 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
2669 av->max_system_mem = av->system_mem;
2670 check_malloc_state(av);
a9177ff5 2671
fa8d436c
UD
2672 /* finally, do the allocation */
2673 p = av->top;
2674 size = chunksize(p);
2675
2676 /* check that one of the above allocation paths succeeded */
2677 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
2678 remainder_size = size - nb;
2679 remainder = chunk_at_offset(p, nb);
2680 av->top = remainder;
2681 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2682 set_head(remainder, remainder_size | PREV_INUSE);
2683 check_malloced_chunk(av, p, nb);
2684 return chunk2mem(p);
2685 }
2686
2687 /* catch all failure paths */
8e58439c 2688 __set_errno (ENOMEM);
fa8d436c
UD
2689 return 0;
2690}
2691
2692
2693/*
3b49edc0 2694 systrim is an inverse of sorts to sysmalloc. It gives memory back
fa8d436c
UD
2695 to the system (via negative arguments to sbrk) if there is unused
2696 memory at the `high' end of the malloc pool. It is called
2697 automatically by free() when top space exceeds the trim
2698 threshold. It is also called by the public malloc_trim routine. It
2699 returns 1 if it actually released any memory, else 0.
2700*/
2701
3b49edc0 2702static int systrim(size_t pad, mstate av)
fa8d436c
UD
2703{
2704 long top_size; /* Amount of top-most memory */
2705 long extra; /* Amount to release */
2706 long released; /* Amount actually released */
2707 char* current_brk; /* address returned by pre-check sbrk call */
2708 char* new_brk; /* address returned by post-check sbrk call */
2709 size_t pagesz;
2710
02d46fc4 2711 pagesz = GLRO(dl_pagesize);
fa8d436c 2712 top_size = chunksize(av->top);
a9177ff5 2713
fa8d436c 2714 /* Release in pagesize units, keeping at least one page */
b9b42ee0 2715 extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
a9177ff5 2716
fa8d436c 2717 if (extra > 0) {
a9177ff5 2718
fa8d436c
UD
2719 /*
2720 Only proceed if end of memory is where we last set it.
2721 This avoids problems if there were foreign sbrk calls.
2722 */
2723 current_brk = (char*)(MORECORE(0));
2724 if (current_brk == (char*)(av->top) + top_size) {
a9177ff5 2725
fa8d436c 2726 /*
72f90263
UD
2727 Attempt to release memory. We ignore MORECORE return value,
2728 and instead call again to find out where new end of memory is.
2729 This avoids problems if first call releases less than we asked,
2730 or if failure somehow altered the brk value. (We could still
2731 encounter problems if it altered brk in some very bad way,
2732 but the only thing we can do is adjust anyway, which will cause
2733 some downstream failure.)
fa8d436c 2734 */
a9177ff5 2735
fa8d436c
UD
2736 MORECORE(-extra);
2737 /* Call the `morecore' hook if necessary. */
df77455c
UD
2738 void (*hook) (void) = force_reg (__after_morecore_hook);
2739 if (__builtin_expect (hook != NULL, 0))
2740 (*hook) ();
fa8d436c 2741 new_brk = (char*)(MORECORE(0));
a9177ff5 2742
322dea08
AO
2743 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2744
fa8d436c 2745 if (new_brk != (char*)MORECORE_FAILURE) {
72f90263
UD
2746 released = (long)(current_brk - new_brk);
2747
2748 if (released != 0) {
2749 /* Success. Adjust top. */
2750 av->system_mem -= released;
2751 set_head(av->top, (top_size - released) | PREV_INUSE);
2752 check_malloc_state(av);
2753 return 1;
2754 }
fa8d436c
UD
2755 }
2756 }
2757 }
2758 return 0;
f65fd747
UD
2759}
2760
431c33c0
UD
2761static void
2762internal_function
431c33c0 2763munmap_chunk(mchunkptr p)
f65fd747
UD
2764{
2765 INTERNAL_SIZE_T size = chunksize(p);
f65fd747
UD
2766
2767 assert (chunk_is_mmapped(p));
8e635611
UD
2768
2769 uintptr_t block = (uintptr_t) p - p->prev_size;
2770 size_t total_size = p->prev_size + size;
2771 /* Unfortunately we have to do the compiler's job by hand here. Normally
2772 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2773 page size. But gcc does not recognize the optimization possibility
2774 (in the moment at least) so we combine the two values into one before
2775 the bit test. */
02d46fc4 2776 if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0))
8e635611
UD
2777 {
2778 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
2779 chunk2mem (p));
2780 return;
2781 }
f65fd747 2782
fa8d436c 2783 mp_.n_mmaps--;
8e635611 2784 mp_.mmapped_mem -= total_size;
f65fd747 2785
6ef76f3b
UD
2786 /* If munmap failed the process virtual memory address space is in a
2787 bad shape. Just leave the block hanging around, the process will
2788 terminate shortly anyway since not much can be done. */
3b49edc0 2789 __munmap((char *)block, total_size);
f65fd747
UD
2790}
2791
2792#if HAVE_MREMAP
2793
431c33c0
UD
2794static mchunkptr
2795internal_function
431c33c0 2796mremap_chunk(mchunkptr p, size_t new_size)
f65fd747 2797{
02d46fc4 2798 size_t page_mask = GLRO(dl_pagesize) - 1;
f65fd747
UD
2799 INTERNAL_SIZE_T offset = p->prev_size;
2800 INTERNAL_SIZE_T size = chunksize(p);
2801 char *cp;
2802
2803 assert (chunk_is_mmapped(p));
02d46fc4 2804 assert(((size + offset) & (GLRO(dl_pagesize)-1)) == 0);
f65fd747
UD
2805
2806 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2807 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
2808
68f3802d
UD
2809 /* No need to remap if the number of pages does not change. */
2810 if (size + offset == new_size)
2811 return p;
2812
3b49edc0
UD
2813 cp = (char *)__mremap((char *)p - offset, size + offset, new_size,
2814 MREMAP_MAYMOVE);
f65fd747 2815
431c33c0 2816 if (cp == MAP_FAILED) return 0;
f65fd747
UD
2817
2818 p = (mchunkptr)(cp + offset);
2819
2820 assert(aligned_OK(chunk2mem(p)));
2821
2822 assert((p->prev_size == offset));
2823 set_head(p, (new_size - offset)|IS_MMAPPED);
2824
fa8d436c
UD
2825 mp_.mmapped_mem -= size + offset;
2826 mp_.mmapped_mem += new_size;
2827 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
2828 mp_.max_mmapped_mem = mp_.mmapped_mem;
f65fd747
UD
2829 return p;
2830}
2831
2832#endif /* HAVE_MREMAP */
2833
fa8d436c 2834/*------------------------ Public wrappers. --------------------------------*/
f65fd747 2835
22a89187 2836void*
3b49edc0 2837__libc_malloc(size_t bytes)
fa8d436c
UD
2838{
2839 mstate ar_ptr;
22a89187 2840 void *victim;
f65fd747 2841
a222d91a 2842 void *(*hook) (size_t, const void *)
df77455c 2843 = force_reg (__malloc_hook);
bfacf1af 2844 if (__builtin_expect (hook != NULL, 0))
fa8d436c 2845 return (*hook)(bytes, RETURN_ADDRESS (0));
f65fd747 2846
425ce2ed 2847 arena_lookup(ar_ptr);
425ce2ed
UD
2848
2849 arena_lock(ar_ptr, bytes);
fa8d436c 2850 if(!ar_ptr)
f65fd747 2851 return 0;
fa8d436c
UD
2852 victim = _int_malloc(ar_ptr, bytes);
2853 if(!victim) {
35fed6f1 2854 LIBC_PROBE (memory_malloc_retry, 1, bytes);
c78ab094
SP
2855 ar_ptr = arena_get_retry(ar_ptr, bytes);
2856 if (__builtin_expect(ar_ptr != NULL, 1)) {
b3443701
UD
2857 victim = _int_malloc(ar_ptr, bytes);
2858 (void)mutex_unlock(&ar_ptr->mutex);
60f0e64b 2859 }
fa8d436c
UD
2860 } else
2861 (void)mutex_unlock(&ar_ptr->mutex);
2862 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
2863 ar_ptr == arena_for_chunk(mem2chunk(victim)));
2864 return victim;
f65fd747 2865}
3b49edc0 2866libc_hidden_def(__libc_malloc)
f65fd747 2867
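/* Usage sketch (application code, not part of malloc.c): __libc_malloc
   consults __malloc_hook before doing any work, which is what lets a
   program of this era interpose a tracing allocator along the lines of the
   example in the glibc manual; the hook variables were deprecated in later
   releases. */

#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t size, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;     /* uninstall to avoid recursion */
  result = malloc (size);
  old_malloc_hook = __malloc_hook;     /* the call may have changed it */
  fprintf (stderr, "malloc (%zu) from %p returns %p\n",
           size, (void *) caller, result);
  __malloc_hook = my_malloc_hook;      /* reinstall ourselves */
  return result;
}

static void
install_malloc_tracing (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}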
fa8d436c 2868void
3b49edc0 2869__libc_free(void* mem)
f65fd747 2870{
fa8d436c
UD
2871 mstate ar_ptr;
2872 mchunkptr p; /* chunk corresponding to mem */
2873
a222d91a 2874 void (*hook) (void *, const void *)
df77455c 2875 = force_reg (__free_hook);
bfacf1af 2876 if (__builtin_expect (hook != NULL, 0)) {
fa8d436c
UD
2877 (*hook)(mem, RETURN_ADDRESS (0));
2878 return;
f65fd747 2879 }
f65fd747 2880
fa8d436c
UD
2881 if (mem == 0) /* free(0) has no effect */
2882 return;
f65fd747 2883
fa8d436c 2884 p = mem2chunk(mem);
f65fd747 2885
fa8d436c
UD
2886 if (chunk_is_mmapped(p)) /* release mmapped memory. */
2887 {
1d05c2fb
UD
2888 /* see if the dynamic brk/mmap threshold needs adjusting */
2889 if (!mp_.no_dyn_threshold
2890 && p->size > mp_.mmap_threshold
72f90263 2891 && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
1d05c2fb
UD
2892 {
2893 mp_.mmap_threshold = chunksize (p);
2894 mp_.trim_threshold = 2 * mp_.mmap_threshold;
0653427f
AO
2895 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
2896 mp_.mmap_threshold, mp_.trim_threshold);
1d05c2fb 2897 }
fa8d436c
UD
2898 munmap_chunk(p);
2899 return;
8a4b65b4 2900 }
f65fd747 2901
fa8d436c 2902 ar_ptr = arena_for_chunk(p);
425ce2ed 2903 _int_free(ar_ptr, p, 0);
f65fd747 2904}
3b49edc0 2905libc_hidden_def (__libc_free)
f65fd747 2906
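/* Usage sketch (application code, not part of malloc.c): the dynamic
   threshold adjustment above is skipped once mp_.no_dyn_threshold is set,
   and, as far as one can tell from mallopt's handling of M_MMAP_THRESHOLD,
   fixing the threshold explicitly has exactly that effect.  A program that
   prefers a stable threshold can therefore pin it: */

#include <malloc.h>

static int
pin_mmap_threshold (void)
{
  /* Keep the mmap threshold at 128 KiB; large frees then no longer bump
     mmap_threshold and trim_threshold dynamically.  Returns nonzero on
     success. */
  return mallopt (M_MMAP_THRESHOLD, 128 * 1024);
}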
22a89187 2907void*
3b49edc0 2908__libc_realloc(void* oldmem, size_t bytes)
f65fd747 2909{
fa8d436c
UD
2910 mstate ar_ptr;
2911 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 2912
22a89187 2913 void* newp; /* chunk to return */
f65fd747 2914
a222d91a 2915 void *(*hook) (void *, size_t, const void *) =
df77455c 2916 force_reg (__realloc_hook);
bfacf1af 2917 if (__builtin_expect (hook != NULL, 0))
fa8d436c 2918 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
f65fd747 2919
fa8d436c 2920#if REALLOC_ZERO_BYTES_FREES
3b49edc0 2921 if (bytes == 0 && oldmem != NULL) { __libc_free(oldmem); return 0; }
f65fd747 2922#endif
f65fd747 2923
fa8d436c 2924 /* realloc of null is supposed to be same as malloc */
3b49edc0 2925 if (oldmem == 0) return __libc_malloc(bytes);
f65fd747 2926
78ac92ad
UD
2927 /* chunk corresponding to oldmem */
2928 const mchunkptr oldp = mem2chunk(oldmem);
2929 /* its size */
2930 const INTERNAL_SIZE_T oldsize = chunksize(oldp);
f65fd747 2931
dc165f7b
UD
2932 /* Little security check which won't hurt performance: the
2933 allocator never wraps around at the end of the address space.
2934 Therefore we can exclude some size values which might appear
2935 here by accident or by "design" from some intruder. */
2936 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
073f560e 2937 || __builtin_expect (misaligned_chunk (oldp), 0))
dc165f7b
UD
2938 {
2939 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
2940 return NULL;
2941 }
2942
fa8d436c 2943 checked_request2size(bytes, nb);
f65fd747 2944
fa8d436c
UD
2945 if (chunk_is_mmapped(oldp))
2946 {
22a89187 2947 void* newmem;
f65fd747 2948
fa8d436c
UD
2949#if HAVE_MREMAP
2950 newp = mremap_chunk(oldp, nb);
2951 if(newp) return chunk2mem(newp);
f65fd747 2952#endif
fa8d436c
UD
2953 /* Note the extra SIZE_SZ overhead. */
2954 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
2955 /* Must alloc, copy, free. */
3b49edc0 2956 newmem = __libc_malloc(bytes);
fa8d436c
UD
2957 if (newmem == 0) return 0; /* propagate failure */
2958 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
2959 munmap_chunk(oldp);
2960 return newmem;
2961 }
fa8d436c
UD
2962
2963 ar_ptr = arena_for_chunk(oldp);
2964#if THREAD_STATS
2965 if(!mutex_trylock(&ar_ptr->mutex))
2966 ++(ar_ptr->stat_lock_direct);
2967 else {
2968 (void)mutex_lock(&ar_ptr->mutex);
2969 ++(ar_ptr->stat_lock_wait);
2970 }
f65fd747 2971#else
fa8d436c 2972 (void)mutex_lock(&ar_ptr->mutex);
f65fd747 2973#endif
f65fd747 2974
22a89187 2975#if !defined PER_THREAD
6999d38c 2976 LIBC_PROBE (memory_arena_reuse_realloc, 1, ar_ptr);
fa8d436c 2977 /* As in malloc(), remember this arena for the next allocation. */
22a89187 2978 tsd_setspecific(arena_key, (void *)ar_ptr);
f65fd747
UD
2979#endif
2980
4c8b8cc3 2981 newp = _int_realloc(ar_ptr, oldp, oldsize, nb);
f65fd747 2982
fa8d436c
UD
2983 (void)mutex_unlock(&ar_ptr->mutex);
2984 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
2985 ar_ptr == arena_for_chunk(mem2chunk(newp)));
07014fca
UD
2986
2987 if (newp == NULL)
2988 {
2989 /* Try harder to allocate memory in other arenas. */
35fed6f1 2990 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3b49edc0 2991 newp = __libc_malloc(bytes);
07014fca
UD
2992 if (newp != NULL)
2993 {
486bdb88 2994 MALLOC_COPY (newp, oldmem, oldsize - SIZE_SZ);
425ce2ed 2995 _int_free(ar_ptr, oldp, 0);
07014fca
UD
2996 }
2997 }
2998
fa8d436c
UD
2999 return newp;
3000}
3b49edc0 3001libc_hidden_def (__libc_realloc)
f65fd747 3002
22a89187 3003void*
3b49edc0 3004__libc_memalign(size_t alignment, size_t bytes)
fa8d436c
UD
3005{
3006 mstate ar_ptr;
22a89187 3007 void *p;
f65fd747 3008
a222d91a 3009 void *(*hook) (size_t, size_t, const void *) =
df77455c 3010 force_reg (__memalign_hook);
bfacf1af 3011 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3012 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
f65fd747 3013
fa8d436c 3014 /* If need less alignment than we give anyway, just relay to malloc */
3b49edc0 3015 if (alignment <= MALLOC_ALIGNMENT) return __libc_malloc(bytes);
1228ed5c 3016
fa8d436c
UD
3017 /* Otherwise, ensure that it is at least a minimum chunk size */
3018 if (alignment < MINSIZE) alignment = MINSIZE;
f65fd747 3019
b73ed247
WN
3020 /* Check for overflow. */
3021 if (bytes > SIZE_MAX - alignment - MINSIZE)
3022 {
3023 __set_errno (ENOMEM);
3024 return 0;
3025 }
3026
fa8d436c
UD
3027 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3028 if(!ar_ptr)
3029 return 0;
3030 p = _int_memalign(ar_ptr, alignment, bytes);
fa8d436c 3031 if(!p) {
35fed6f1 3032 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
c78ab094
SP
3033 ar_ptr = arena_get_retry (ar_ptr, bytes);
3034 if (__builtin_expect(ar_ptr != NULL, 1)) {
b3443701
UD
3035 p = _int_memalign(ar_ptr, alignment, bytes);
3036 (void)mutex_unlock(&ar_ptr->mutex);
f65fd747 3037 }
ab355d9a
UD
3038 } else
3039 (void)mutex_unlock(&ar_ptr->mutex);
fa8d436c
UD
3040 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3041 ar_ptr == arena_for_chunk(mem2chunk(p)));
3042 return p;
f65fd747 3043}
380d7e87 3044/* For ISO C11. */
3b49edc0
UD
3045weak_alias (__libc_memalign, aligned_alloc)
3046libc_hidden_def (__libc_memalign)
f65fd747 3047
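/* Usage sketch (application code, not part of malloc.c): through the weak
   alias above, a C11 aligned_alloc call lands in __libc_memalign, and the
   buffer is released with free() as usual.  Hypothetical example: */

#include <stdlib.h>

static void *
cacheline_aligned_buffer (void)
{
  /* 64-byte alignment, 4096-byte size; ISO C11 asks the caller to pass a
     size that is a multiple of the alignment. */
  return aligned_alloc (64, 4096);
}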
22a89187 3048void*
3b49edc0 3049__libc_valloc(size_t bytes)
fa8d436c
UD
3050{
3051 mstate ar_ptr;
22a89187 3052 void *p;
f65fd747 3053
fa8d436c
UD
3054 if(__malloc_initialized < 0)
3055 ptmalloc_init ();
8088488d 3056
02d46fc4 3057 size_t pagesz = GLRO(dl_pagesize);
dba38551 3058
55e17aad
WN
3059 /* Check for overflow. */
3060 if (bytes > SIZE_MAX - pagesz - MINSIZE)
3061 {
3062 __set_errno (ENOMEM);
3063 return 0;
3064 }
3065
a222d91a 3066 void *(*hook) (size_t, size_t, const void *) =
df77455c 3067 force_reg (__memalign_hook);
bfacf1af 3068 if (__builtin_expect (hook != NULL, 0))
dba38551 3069 return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
8088488d 3070
dba38551 3071 arena_get(ar_ptr, bytes + pagesz + MINSIZE);
fa8d436c
UD
3072 if(!ar_ptr)
3073 return 0;
3074 p = _int_valloc(ar_ptr, bytes);
dba38551 3075 if(!p) {
35fed6f1 3076 LIBC_PROBE (memory_valloc_retry, 1, bytes);
c78ab094
SP
3077 ar_ptr = arena_get_retry (ar_ptr, bytes);
3078 if (__builtin_expect(ar_ptr != NULL, 1)) {
9d256984 3079 p = _int_memalign(ar_ptr, pagesz, bytes);
8c0b5f14 3080 (void)mutex_unlock(&ar_ptr->mutex);
dba38551 3081 }
bf51f568
JL
3082 } else
3083 (void)mutex_unlock (&ar_ptr->mutex);
dba38551
UD
3084 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3085 ar_ptr == arena_for_chunk(mem2chunk(p)));
3086
fa8d436c
UD
3087 return p;
3088}
f65fd747 3089
22a89187 3090void*
3b49edc0 3091__libc_pvalloc(size_t bytes)
fa8d436c
UD
3092{
3093 mstate ar_ptr;
22a89187 3094 void *p;
f65fd747 3095
fa8d436c
UD
3096 if(__malloc_initialized < 0)
3097 ptmalloc_init ();
8088488d 3098
02d46fc4
UD
3099 size_t pagesz = GLRO(dl_pagesize);
3100 size_t page_mask = GLRO(dl_pagesize) - 1;
dba38551
UD
3101 size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
3102
1159a193
WN
3103 /* Check for overflow. */
3104 if (bytes > SIZE_MAX - 2*pagesz - MINSIZE)
3105 {
3106 __set_errno (ENOMEM);
3107 return 0;
3108 }
3109
a222d91a 3110 void *(*hook) (size_t, size_t, const void *) =
df77455c 3111 force_reg (__memalign_hook);
bfacf1af 3112 if (__builtin_expect (hook != NULL, 0))
dba38551 3113 return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
8088488d 3114
dba38551 3115 arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
fa8d436c 3116 p = _int_pvalloc(ar_ptr, bytes);
dba38551 3117 if(!p) {
35fed6f1 3118 LIBC_PROBE (memory_pvalloc_retry, 1, bytes);
c78ab094
SP
3119 ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE);
3120 if (__builtin_expect(ar_ptr != NULL, 1)) {
9d256984 3121 p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
8c0b5f14 3122 (void)mutex_unlock(&ar_ptr->mutex);
dba38551 3123 }
bf51f568
JL
3124 } else
3125 (void)mutex_unlock(&ar_ptr->mutex);
dba38551
UD
3126 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3127 ar_ptr == arena_for_chunk(mem2chunk(p)));
3128
fa8d436c
UD
3129 return p;
3130}
f65fd747 3131
22a89187 3132void*
3b49edc0 3133__libc_calloc(size_t n, size_t elem_size)
f65fd747 3134{
fa8d436c
UD
3135 mstate av;
3136 mchunkptr oldtop, p;
0950889b 3137 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
22a89187 3138 void* mem;
fa8d436c
UD
3139 unsigned long clearsize;
3140 unsigned long nclears;
3141 INTERNAL_SIZE_T* d;
0950889b
UD
3142
3143 /* size_t is unsigned so the behavior on overflow is defined. */
3144 bytes = n * elem_size;
d9af917d
UD
3145#define HALF_INTERNAL_SIZE_T \
3146 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3147 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
0be405c2 3148 if (elem_size != 0 && bytes / elem_size != n) {
8e58439c 3149 __set_errno (ENOMEM);
d9af917d
UD
3150 return 0;
3151 }
0950889b
UD
3152 }
3153
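  /* Illustrative note (not part of the original source): with an 8-byte
     INTERNAL_SIZE_T, HALF_INTERNAL_SIZE_T is 2^32.  When both n and
     elem_size are below 2^32 their product is below 2^64 and cannot wrap,
     so the comparatively expensive division test above is only performed
     when at least one operand has a bit set in its upper half. */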
a222d91a 3154 void *(*hook) (size_t, const void *) =
df77455c 3155 force_reg (__malloc_hook);
bfacf1af 3156 if (__builtin_expect (hook != NULL, 0)) {
0950889b 3157 sz = bytes;
fa8d436c
UD
3158 mem = (*hook)(sz, RETURN_ADDRESS (0));
3159 if(mem == 0)
3160 return 0;
fa8d436c 3161 return memset(mem, 0, sz);
10dc2a90 3162 }
10dc2a90 3163
0950889b 3164 sz = bytes;
fa8d436c
UD
3165
3166 arena_get(av, sz);
3167 if(!av)
f65fd747 3168 return 0;
fa8d436c
UD
3169
3170 /* Check if we hand out the top chunk, in which case there may be no
3171 need to clear. */
3172#if MORECORE_CLEARS
3173 oldtop = top(av);
3174 oldtopsize = chunksize(top(av));
3175#if MORECORE_CLEARS < 2
3176 /* Only newly allocated memory is guaranteed to be cleared. */
3177 if (av == &main_arena &&
3178 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3179 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3180#endif
4cd4c5d6
UD
3181 if (av != &main_arena)
3182 {
3183 heap_info *heap = heap_for_ptr (oldtop);
3184 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3185 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3186 }
fa8d436c
UD
3187#endif
3188 mem = _int_malloc(av, sz);
3189
fa8d436c
UD
3190
3191 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3192 av == arena_for_chunk(mem2chunk(mem)));
3193
3194 if (mem == 0) {
35fed6f1 3195 LIBC_PROBE (memory_calloc_retry, 1, sz);
c78ab094
SP
3196 av = arena_get_retry (av, sz);
3197 if (__builtin_expect(av != NULL, 1)) {
3198 mem = _int_malloc(av, sz);
bf51f568 3199 (void)mutex_unlock(&av->mutex);
7799b7b3 3200 }
fa8d436c 3201 if (mem == 0) return 0;
bf51f568
JL
3202 } else
3203 (void)mutex_unlock(&av->mutex);
fa8d436c 3204 p = mem2chunk(mem);
f65fd747 3205
fa8d436c 3206 /* Two optional cases in which clearing not necessary */
9ea9af19
UD
3207 if (chunk_is_mmapped (p))
3208 {
3209 if (__builtin_expect (perturb_byte, 0))
3210 MALLOC_ZERO (mem, sz);
3211 return mem;
3212 }
f65fd747 3213
fa8d436c 3214 csz = chunksize(p);
f65fd747 3215
fa8d436c 3216#if MORECORE_CLEARS
56137dbc 3217 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
fa8d436c
UD
3218 /* clear only the bytes from non-freshly-sbrked memory */
3219 csz = oldtopsize;
f65fd747 3220 }
fa8d436c 3221#endif
f65fd747 3222
fa8d436c
UD
3223 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3224 contents have an odd number of INTERNAL_SIZE_T-sized words;
3225 minimally 3. */
3226 d = (INTERNAL_SIZE_T*)mem;
3227 clearsize = csz - SIZE_SZ;
3228 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3229 assert(nclears >= 3);
f65fd747 3230
fa8d436c
UD
3231 if (nclears > 9)
3232 MALLOC_ZERO(d, clearsize);
f65fd747 3233
fa8d436c
UD
3234 else {
3235 *(d+0) = 0;
3236 *(d+1) = 0;
3237 *(d+2) = 0;
3238 if (nclears > 4) {
3239 *(d+3) = 0;
3240 *(d+4) = 0;
3241 if (nclears > 6) {
3242 *(d+5) = 0;
3243 *(d+6) = 0;
3244 if (nclears > 8) {
3245 *(d+7) = 0;
3246 *(d+8) = 0;
3247 }
f65fd747
UD
3248 }
3249 }
f65fd747
UD
3250 }
3251
fa8d436c
UD
3252 return mem;
3253}
f65fd747 3254
f65fd747 3255/*
fa8d436c 3256 ------------------------------ malloc ------------------------------
f65fd747
UD
3257*/
3258
22a89187 3259static void*
fa8d436c 3260_int_malloc(mstate av, size_t bytes)
f65fd747 3261{
fa8d436c
UD
3262 INTERNAL_SIZE_T nb; /* normalized request size */
3263 unsigned int idx; /* associated bin index */
3264 mbinptr bin; /* associated bin */
f65fd747 3265
fa8d436c
UD
3266 mchunkptr victim; /* inspected/selected chunk */
3267 INTERNAL_SIZE_T size; /* its size */
3268 int victim_index; /* its bin index */
f65fd747 3269
fa8d436c
UD
3270 mchunkptr remainder; /* remainder from a split */
3271 unsigned long remainder_size; /* its size */
8a4b65b4 3272
fa8d436c
UD
3273 unsigned int block; /* bit map traverser */
3274 unsigned int bit; /* bit map traverser */
3275 unsigned int map; /* current word of binmap */
8a4b65b4 3276
fa8d436c
UD
3277 mchunkptr fwd; /* misc temp for linking */
3278 mchunkptr bck; /* misc temp for linking */
8a4b65b4 3279
f6887a0d
UD
3280 const char *errstr = NULL;
3281
fa8d436c
UD
3282 /*
3283 Convert request size to internal form by adding SIZE_SZ bytes
3284 overhead plus possibly more to obtain necessary alignment and/or
3285 to obtain a size of at least MINSIZE, the smallest allocatable
3286 size. Also, checked_request2size traps (returning 0) request sizes
3287 that are so large that they wrap around zero when padded and
3288 aligned.
3289 */
f65fd747 3290
fa8d436c 3291 checked_request2size(bytes, nb);
f65fd747 3292
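  /* Illustrative note (not part of the original source): assuming a 64-bit
     build with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16, the conversion
     above maps a 24-byte request to a 32-byte chunk (24 + 8 rounded up to
     a multiple of 16) and a 0-byte request to MINSIZE (32 bytes). */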
fa8d436c
UD
3293 /*
3294 If the size qualifies as a fastbin, first check corresponding bin.
3295 This code is safe to execute even if av is not yet initialized, so we
3296 can try it without checking, which saves some time on this fast path.
3297 */
f65fd747 3298
9bf248c6 3299 if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
425ce2ed
UD
3300 idx = fastbin_index(nb);
3301 mfastbinptr* fb = &fastbin (av, idx);
425ce2ed
UD
3302 mchunkptr pp = *fb;
3303 do
3304 {
3305 victim = pp;
3306 if (victim == NULL)
3307 break;
3308 }
3309 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
3310 != victim);
425ce2ed 3311 if (victim != 0) {
6cce6540 3312 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
f6887a0d
UD
3313 {
3314 errstr = "malloc(): memory corruption (fast)";
3315 errout:
3316 malloc_printerr (check_action, errstr, chunk2mem (victim));
ad3d3e8f 3317 return NULL;
f6887a0d 3318 }
fa8d436c 3319 check_remalloced_chunk(av, victim, nb);
854278df
UD
3320 void *p = chunk2mem(victim);
3321 if (__builtin_expect (perturb_byte, 0))
3322 alloc_perturb (p, bytes);
3323 return p;
fa8d436c 3324 }
f65fd747
UD
3325 }
3326
fa8d436c
UD
3327 /*
3328 If a small request, check regular bin. Since these "smallbins"
3329 hold one size each, no searching within bins is necessary.
3330 (For a large request, we need to wait until unsorted chunks are
3331 processed to find best fit. But for small ones, fits are exact
3332 anyway, so we can check now, which is faster.)
3333 */
f65fd747 3334
fa8d436c
UD
3335 if (in_smallbin_range(nb)) {
3336 idx = smallbin_index(nb);
3337 bin = bin_at(av,idx);
7799b7b3 3338
fa8d436c
UD
3339 if ( (victim = last(bin)) != bin) {
3340 if (victim == 0) /* initialization check */
72f90263 3341 malloc_consolidate(av);
fa8d436c 3342 else {
72f90263 3343 bck = victim->bk;
f6887a0d
UD
3344 if (__builtin_expect (bck->fd != victim, 0))
3345 {
3346 errstr = "malloc(): smallbin double linked list corrupted";
3347 goto errout;
3348 }
72f90263
UD
3349 set_inuse_bit_at_offset(victim, nb);
3350 bin->bk = bck;
3351 bck->fd = bin;
fa8d436c 3352
72f90263 3353 if (av != &main_arena)
fa8d436c 3354 victim->size |= NON_MAIN_ARENA;
72f90263 3355 check_malloced_chunk(av, victim, nb);
854278df
UD
3356 void *p = chunk2mem(victim);
3357 if (__builtin_expect (perturb_byte, 0))
3358 alloc_perturb (p, bytes);
3359 return p;
fa8d436c
UD
3360 }
3361 }
f65fd747
UD
3362 }
3363
a9177ff5 3364 /*
fa8d436c
UD
3365 If this is a large request, consolidate fastbins before continuing.
3366 While it might look excessive to kill all fastbins before
3367 even seeing if there is space available, this avoids
3368 fragmentation problems normally associated with fastbins.
3369 Also, in practice, programs tend to have runs of either small or
a9177ff5 3370 large requests, but less often mixtures, so consolidation is not
fa8d436c
UD
3371 invoked all that often in most programs. And the programs in which
3372 it is called frequently otherwise tend to fragment.
3373 */
7799b7b3 3374
fa8d436c
UD
3375 else {
3376 idx = largebin_index(nb);
a9177ff5 3377 if (have_fastchunks(av))
fa8d436c 3378 malloc_consolidate(av);
7799b7b3 3379 }
f65fd747 3380
fa8d436c
UD
3381 /*
3382 Process recently freed or remaindered chunks, taking one only if
3383 it is an exact fit, or, if this is a small request, the chunk is the remainder
3384 from the most recent non-exact fit. Place other traversed chunks in
3385 bins. Note that this step is the only place in any routine where
3386 chunks are placed in bins.
3387
3388 The outer loop here is needed because we might not realize until
3389 near the end of malloc that we should have consolidated, so must
3390 do so and retry. This happens at most once, and only when we would
3391 otherwise need to expand memory to service a "small" request.
3392 */
a9177ff5
RM
3393
3394 for(;;) {
3395
72320021 3396 int iters = 0;
fa8d436c
UD
3397 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3398 bck = victim->bk;
6cce6540
UD
3399 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
3400 || __builtin_expect (victim->size > av->system_mem, 0))
3401 malloc_printerr (check_action, "malloc(): memory corruption",
3402 chunk2mem (victim));
fa8d436c
UD
3403 size = chunksize(victim);
3404
a9177ff5 3405 /*
72f90263
UD
3406 If a small request, try to use last remainder if it is the
3407 only chunk in unsorted bin. This helps promote locality for
3408 runs of consecutive small requests. This is the only
3409 exception to best-fit, and applies only when there is
3410 no exact fit for a small chunk.
fa8d436c
UD
3411 */
3412
a9177ff5 3413 if (in_smallbin_range(nb) &&
72f90263
UD
3414 bck == unsorted_chunks(av) &&
3415 victim == av->last_remainder &&
3416 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
3417
3418 /* split and reattach remainder */
3419 remainder_size = size - nb;
3420 remainder = chunk_at_offset(victim, nb);
3421 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3422 av->last_remainder = remainder;
3423 remainder->bk = remainder->fd = unsorted_chunks(av);
7ecfbd38
UD
3424 if (!in_smallbin_range(remainder_size))
3425 {
3426 remainder->fd_nextsize = NULL;
3427 remainder->bk_nextsize = NULL;
3428 }
a9177ff5 3429
72f90263 3430 set_head(victim, nb | PREV_INUSE |
fa8d436c 3431 (av != &main_arena ? NON_MAIN_ARENA : 0));
72f90263
UD
3432 set_head(remainder, remainder_size | PREV_INUSE);
3433 set_foot(remainder, remainder_size);
a9177ff5 3434
72f90263 3435 check_malloced_chunk(av, victim, nb);
854278df
UD
3436 void *p = chunk2mem(victim);
3437 if (__builtin_expect (perturb_byte, 0))
3438 alloc_perturb (p, bytes);
3439 return p;
fa8d436c 3440 }
f65fd747 3441
fa8d436c
UD
3442 /* remove from unsorted list */
3443 unsorted_chunks(av)->bk = bck;
3444 bck->fd = unsorted_chunks(av);
a9177ff5 3445
fa8d436c 3446 /* Take now instead of binning if exact fit */
a9177ff5 3447
fa8d436c 3448 if (size == nb) {
72f90263 3449 set_inuse_bit_at_offset(victim, size);
fa8d436c
UD
3450 if (av != &main_arena)
3451 victim->size |= NON_MAIN_ARENA;
72f90263 3452 check_malloced_chunk(av, victim, nb);
854278df
UD
3453 void *p = chunk2mem(victim);
3454 if (__builtin_expect (perturb_byte, 0))
3455 alloc_perturb (p, bytes);
3456 return p;
fa8d436c 3457 }
a9177ff5 3458
fa8d436c 3459 /* place chunk in bin */
a9177ff5 3460
fa8d436c 3461 if (in_smallbin_range(size)) {
72f90263
UD
3462 victim_index = smallbin_index(size);
3463 bck = bin_at(av, victim_index);
3464 fwd = bck->fd;
fa8d436c
UD
3465 }
3466 else {
72f90263
UD
3467 victim_index = largebin_index(size);
3468 bck = bin_at(av, victim_index);
3469 fwd = bck->fd;
fa8d436c 3470
72f90263
UD
3471 /* maintain large bins in sorted order */
3472 if (fwd != bck) {
fa8d436c 3473 /* Or with inuse bit to speed comparisons */
72f90263
UD
3474 size |= PREV_INUSE;
3475 /* if smaller than smallest, bypass loop below */
fa8d436c 3476 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
7ecfbd38 3477 if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
72f90263
UD
3478 fwd = bck;
3479 bck = bck->bk;
7ecfbd38
UD
3480
3481 victim->fd_nextsize = fwd->fd;
3482 victim->bk_nextsize = fwd->fd->bk_nextsize;
3483 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
72f90263
UD
3484 }
3485 else {
fa8d436c 3486 assert((fwd->size & NON_MAIN_ARENA) == 0);
7ecfbd38
UD
3487 while ((unsigned long) size < fwd->size)
3488 {
3489 fwd = fwd->fd_nextsize;
3490 assert((fwd->size & NON_MAIN_ARENA) == 0);
3491 }
3492
3493 if ((unsigned long) size == (unsigned long) fwd->size)
3494 /* Always insert in the second position. */
3495 fwd = fwd->fd;
3496 else
3497 {
3498 victim->fd_nextsize = fwd;
3499 victim->bk_nextsize = fwd->bk_nextsize;
3500 fwd->bk_nextsize = victim;
3501 victim->bk_nextsize->fd_nextsize = victim;
3502 }
3503 bck = fwd->bk;
72f90263 3504 }
7ecfbd38
UD
3505 } else
3506 victim->fd_nextsize = victim->bk_nextsize = victim;
fa8d436c 3507 }
a9177ff5 3508
fa8d436c
UD
3509 mark_bin(av, victim_index);
3510 victim->bk = bck;
3511 victim->fd = fwd;
3512 fwd->bk = victim;
3513 bck->fd = victim;
3997b7c4 3514
3997b7c4
UD
3515#define MAX_ITERS 10000
3516 if (++iters >= MAX_ITERS)
3517 break;
fa8d436c 3518 }
a9177ff5 3519
fa8d436c
UD
3520 /*
3521 If a large request, scan through the chunks of current bin in
7ecfbd38 3522 sorted order to find smallest that fits. Use the skip list for this.
fa8d436c 3523 */
a9177ff5 3524
fa8d436c
UD
3525 if (!in_smallbin_range(nb)) {
3526 bin = bin_at(av, idx);
f65fd747 3527
fa8d436c 3528 /* skip scan if empty or largest chunk is too small */
7ecfbd38 3529 if ((victim = first(bin)) != bin &&
72f90263 3530 (unsigned long)(victim->size) >= (unsigned long)(nb)) {
f65fd747 3531
7ecfbd38 3532 victim = victim->bk_nextsize;
72f90263
UD
3533 while (((unsigned long)(size = chunksize(victim)) <
3534 (unsigned long)(nb)))
3535 victim = victim->bk_nextsize;
7ecfbd38
UD
3536
3537 /* Avoid removing the first entry for a size so that the skip
3538 list does not have to be rerouted. */
3539 if (victim != last(bin) && victim->size == victim->fd->size)
3540 victim = victim->fd;
f65fd747 3541
72f90263
UD
3542 remainder_size = size - nb;
3543 unlink(victim, bck, fwd);
a9177ff5 3544
72f90263
UD
3545 /* Exhaust */
3546 if (remainder_size < MINSIZE) {
3547 set_inuse_bit_at_offset(victim, size);
fa8d436c
UD
3548 if (av != &main_arena)
3549 victim->size |= NON_MAIN_ARENA;
72f90263
UD
3550 }
3551 /* Split */
3552 else {
3553 remainder = chunk_at_offset(victim, nb);
3554 /* We cannot assume the unsorted list is empty and therefore
3555 have to perform a complete insert here. */
b80770b2
UD
3556 bck = unsorted_chunks(av);
3557 fwd = bck->fd;
f6887a0d
UD
3558 if (__builtin_expect (fwd->bk != bck, 0))
3559 {
3560 errstr = "malloc(): corrupted unsorted chunks";
3561 goto errout;
3562 }
b80770b2
UD
3563 remainder->bk = bck;
3564 remainder->fd = fwd;
3565 bck->fd = remainder;
3566 fwd->bk = remainder;
7ecfbd38
UD
3567 if (!in_smallbin_range(remainder_size))
3568 {
3569 remainder->fd_nextsize = NULL;
3570 remainder->bk_nextsize = NULL;
3571 }
72f90263 3572 set_head(victim, nb | PREV_INUSE |
fa8d436c 3573 (av != &main_arena ? NON_MAIN_ARENA : 0));
72f90263
UD
3574 set_head(remainder, remainder_size | PREV_INUSE);
3575 set_foot(remainder, remainder_size);
3576 }
854278df
UD
3577 check_malloced_chunk(av, victim, nb);
3578 void *p = chunk2mem(victim);
3579 if (__builtin_expect (perturb_byte, 0))
3580 alloc_perturb (p, bytes);
3581 return p;
fa8d436c 3582 }
a9177ff5 3583 }
f65fd747 3584
fa8d436c
UD
3585 /*
3586 Search for a chunk by scanning bins, starting with next largest
3587 bin. This search is strictly by best-fit; i.e., the smallest
3588 (with ties going to approximately the least recently used) chunk
3589 that fits is selected.
a9177ff5 3590
fa8d436c
UD
3591 The bitmap avoids needing to check that most blocks are nonempty.
3592 The particular case of skipping all bins during warm-up phases
3593 when no chunks have been returned yet is faster than it might look.
3594 */
a9177ff5 3595
fa8d436c
UD
3596 ++idx;
3597 bin = bin_at(av,idx);
3598 block = idx2block(idx);
3599 map = av->binmap[block];
3600 bit = idx2bit(idx);
a9177ff5 3601
fa8d436c
UD
3602 for (;;) {
3603
3604 /* Skip rest of block if there are no more set bits in this block. */
3605 if (bit > map || bit == 0) {
72f90263
UD
3606 do {
3607 if (++block >= BINMAPSIZE) /* out of bins */
3608 goto use_top;
3609 } while ( (map = av->binmap[block]) == 0);
fa8d436c 3610
72f90263
UD
3611 bin = bin_at(av, (block << BINMAPSHIFT));
3612 bit = 1;
fa8d436c 3613 }
a9177ff5 3614
fa8d436c
UD
3615 /* Advance to bin with set bit. There must be one. */
3616 while ((bit & map) == 0) {
72f90263
UD
3617 bin = next_bin(bin);
3618 bit <<= 1;
3619 assert(bit != 0);
fa8d436c 3620 }
a9177ff5 3621
fa8d436c
UD
3622 /* Inspect the bin. It is likely to be non-empty */
3623 victim = last(bin);
a9177ff5 3624
fa8d436c
UD
3625 /* If a false alarm (empty bin), clear the bit. */
3626 if (victim == bin) {
72f90263
UD
3627 av->binmap[block] = map &= ~bit; /* Write through */
3628 bin = next_bin(bin);
3629 bit <<= 1;
fa8d436c 3630 }
a9177ff5 3631
fa8d436c 3632 else {
72f90263 3633 size = chunksize(victim);
fa8d436c 3634
72f90263
UD
3635 /* We know the first chunk in this bin is big enough to use. */
3636 assert((unsigned long)(size) >= (unsigned long)(nb));
fa8d436c 3637
72f90263 3638 remainder_size = size - nb;
a9177ff5 3639
72f90263
UD
3640 /* unlink */
3641 unlink(victim, bck, fwd);
a9177ff5 3642
72f90263
UD
3643 /* Exhaust */
3644 if (remainder_size < MINSIZE) {
3645 set_inuse_bit_at_offset(victim, size);
fa8d436c
UD
3646 if (av != &main_arena)
3647 victim->size |= NON_MAIN_ARENA;
72f90263 3648 }
a9177ff5 3649
72f90263
UD
3650 /* Split */
3651 else {
3652 remainder = chunk_at_offset(victim, nb);
a9177ff5 3653
41999a1a
UD
3654 /* We cannot assume the unsorted list is empty and therefore
3655 have to perform a complete insert here. */
3656 bck = unsorted_chunks(av);
3657 fwd = bck->fd;
f6887a0d
UD
3658 if (__builtin_expect (fwd->bk != bck, 0))
3659 {
3660 errstr = "malloc(): corrupted unsorted chunks 2";
3661 goto errout;
3662 }
41999a1a
UD
3663 remainder->bk = bck;
3664 remainder->fd = fwd;
3665 bck->fd = remainder;
3666 fwd->bk = remainder;
3667
72f90263
UD
3668 /* advertise as last remainder */
3669 if (in_smallbin_range(nb))
3670 av->last_remainder = remainder;
7ecfbd38
UD
3671 if (!in_smallbin_range(remainder_size))
3672 {
3673 remainder->fd_nextsize = NULL;
3674 remainder->bk_nextsize = NULL;
3675 }
72f90263 3676 set_head(victim, nb | PREV_INUSE |
fa8d436c 3677 (av != &main_arena ? NON_MAIN_ARENA : 0));
72f90263
UD
3678 set_head(remainder, remainder_size | PREV_INUSE);
3679 set_foot(remainder, remainder_size);
3680 }
854278df
UD
3681 check_malloced_chunk(av, victim, nb);
3682 void *p = chunk2mem(victim);
3683 if (__builtin_expect (perturb_byte, 0))
3684 alloc_perturb (p, bytes);
3685 return p;
fa8d436c
UD
3686 }
3687 }
f65fd747 3688
a9177ff5 3689 use_top:
fa8d436c
UD
3690 /*
3691 If large enough, split off the chunk bordering the end of memory
3692 (held in av->top). Note that this is in accord with the best-fit
3693 search rule. In effect, av->top is treated as larger (and thus
3694 less well fitting) than any other available chunk since it can
3695 be extended to be as large as necessary (up to system
3696 limitations).
3697
3698 We require that av->top always exists (i.e., has size >=
3699 MINSIZE) after initialization, so if it would otherwise be
17edb30c 3700 exhausted by current request, it is replenished. (The main
fa8d436c
UD
3701 reason for ensuring it exists is that we may need MINSIZE space
3702 to put in fenceposts in sysmalloc.)
3703 */
f65fd747 3704
fa8d436c
UD
3705 victim = av->top;
3706 size = chunksize(victim);
a9177ff5 3707
fa8d436c
UD
3708 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3709 remainder_size = size - nb;
3710 remainder = chunk_at_offset(victim, nb);
3711 av->top = remainder;
3712 set_head(victim, nb | PREV_INUSE |
3713 (av != &main_arena ? NON_MAIN_ARENA : 0));
3714 set_head(remainder, remainder_size | PREV_INUSE);
f65fd747 3715
fa8d436c 3716 check_malloced_chunk(av, victim, nb);
854278df
UD
3717 void *p = chunk2mem(victim);
3718 if (__builtin_expect (perturb_byte, 0))
3719 alloc_perturb (p, bytes);
3720 return p;
fa8d436c 3721 }
f65fd747 3722
425ce2ed
UD
3723 /* When we are using atomic ops to free fast chunks we can get
3724 here for all block sizes. */
3725 else if (have_fastchunks(av)) {
3726 malloc_consolidate(av);
3727 /* restore original bin index */
3728 if (in_smallbin_range(nb))
3729 idx = smallbin_index(nb);
3730 else
3731 idx = largebin_index(nb);
3732 }
f65fd747 3733
a9177ff5
RM
3734 /*
3735 Otherwise, relay to handle system-dependent cases
fa8d436c 3736 */
854278df 3737 else {
3b49edc0 3738 void *p = sysmalloc(nb, av);
17edb30c 3739 if (p != NULL && __builtin_expect (perturb_byte, 0))
854278df
UD
3740 alloc_perturb (p, bytes);
3741 return p;
3742 }
fa8d436c
UD
3743 }
3744}
f65fd747 3745
fa8d436c
UD
3746/*
3747 ------------------------------ free ------------------------------
f65fd747
UD
3748*/
3749
78ac92ad 3750static void
425ce2ed 3751_int_free(mstate av, mchunkptr p, int have_lock)
f65fd747 3752{
fa8d436c
UD
3753 INTERNAL_SIZE_T size; /* its size */
3754 mfastbinptr* fb; /* associated fastbin */
3755 mchunkptr nextchunk; /* next contiguous chunk */
3756 INTERNAL_SIZE_T nextsize; /* its size */
3757 int nextinuse; /* true if nextchunk is used */
3758 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
3759 mchunkptr bck; /* misc temp for linking */
3760 mchunkptr fwd; /* misc temp for linking */
3761
37fa1953 3762 const char *errstr = NULL;
425ce2ed 3763 int locked = 0;
f65fd747 3764
37fa1953 3765 size = chunksize(p);
f65fd747 3766
37fa1953
UD
3767 /* Little security check which won't hurt performance: the
3768 allocator never wraps around at the end of the address space.
3769 Therefore we can exclude some size values which might appear
3770 here by accident or by "design" from some intruder. */
dc165f7b 3771 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
073f560e 3772 || __builtin_expect (misaligned_chunk (p), 0))
37fa1953
UD
3773 {
3774 errstr = "free(): invalid pointer";
3775 errout:
425ce2ed
UD
3776 if (! have_lock && locked)
3777 (void)mutex_unlock(&av->mutex);
78ac92ad 3778 malloc_printerr (check_action, errstr, chunk2mem(p));
37fa1953 3779 return;
fa8d436c 3780 }
347c92e9
L
3781 /* We know that each chunk is at least MINSIZE bytes in size or a
3782 multiple of MALLOC_ALIGNMENT. */
3783 if (__builtin_expect (size < MINSIZE || !aligned_OK (size), 0))
bf589066
UD
3784 {
3785 errstr = "free(): invalid size";
3786 goto errout;
3787 }
f65fd747 3788
37fa1953 3789 check_inuse_chunk(av, p);
f65fd747 3790
37fa1953
UD
3791 /*
3792 If eligible, place chunk on a fastbin so it can be found
3793 and used quickly in malloc.
3794 */
6bf4302e 3795
9bf248c6 3796 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
6bf4302e 3797
37fa1953
UD
3798#if TRIM_FASTBINS
3799 /*
3800 If TRIM_FASTBINS set, don't place chunks
3801 bordering top into fastbins
3802 */
3803 && (chunk_at_offset(p, size) != av->top)
3804#endif
3805 ) {
fa8d436c 3806
893e6098
UD
3807 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
3808 || __builtin_expect (chunksize (chunk_at_offset (p, size))
3809 >= av->system_mem, 0))
3810 {
bec466d9
UD
3811 /* We might not have a lock at this point and concurrent modifications
3812 of system_mem might have led to a false positive. Redo the test
3813 after getting the lock. */
3814 if (have_lock
3815 || ({ assert (locked == 0);
3816 mutex_lock(&av->mutex);
3817 locked = 1;
3818 chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
3819 || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
3820 }))
bec466d9
UD
3821 {
3822 errstr = "free(): invalid next size (fast)";
3823 goto errout;
3824 }
bec466d9
UD
3825 if (! have_lock)
3826 {
3827 (void)mutex_unlock(&av->mutex);
3828 locked = 0;
3829 }
893e6098
UD
3830 }
3831
425ce2ed 3832 if (__builtin_expect (perturb_byte, 0))
fdc0f374 3833 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
425ce2ed 3834
37fa1953 3835 set_fastchunks(av);
90a3055e
UD
3836 unsigned int idx = fastbin_index(size);
3837 fb = &fastbin (av, idx);
425ce2ed 3838
425ce2ed
UD
3839 mchunkptr fd;
3840 mchunkptr old = *fb;
5f24d53a 3841 unsigned int old_idx = ~0u;
425ce2ed
UD
3842 do
3843 {
3844 /* Another simple check: make sure the top of the bin is not the
3845 record we are going to add (i.e., double free). */
3846 if (__builtin_expect (old == p, 0))
3847 {
3848 errstr = "double free or corruption (fasttop)";
3849 goto errout;
3850 }
5f24d53a
UD
3851 if (old != NULL)
3852 old_idx = fastbin_index(chunksize(old));
425ce2ed
UD
3853 p->fd = fd = old;
3854 }
bea0ac1d 3855 while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
5f24d53a
UD
3856
3857 if (fd != NULL && __builtin_expect (old_idx != idx, 0))
3858 {
3859 errstr = "invalid fastbin entry (free)";
3860 goto errout;
3861 }
37fa1953 3862 }
f65fd747 3863
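  /* Illustrative note (not part of the original source): the "fasttop"
     check above catches the most direct double free of a fastbin-sized
     chunk, e.g.

         void *q = malloc (32);
         free (q);
         free (q);    /* q is already the top of its fastbin: reported */

     but it cannot detect a double free separated by a free of another
     chunk of the same size class. */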
37fa1953
UD
3864 /*
3865 Consolidate other non-mmapped chunks as they arrive.
3866 */
fa8d436c 3867
37fa1953 3868 else if (!chunk_is_mmapped(p)) {
425ce2ed 3869 if (! have_lock) {
22a89187 3870#if THREAD_STATS
425ce2ed
UD
3871 if(!mutex_trylock(&av->mutex))
3872 ++(av->stat_lock_direct);
3873 else {
3874 (void)mutex_lock(&av->mutex);
3875 ++(av->stat_lock_wait);
3876 }
22a89187 3877#else
425ce2ed 3878 (void)mutex_lock(&av->mutex);
22a89187 3879#endif
425ce2ed
UD
3880 locked = 1;
3881 }
425ce2ed 3882
37fa1953 3883 nextchunk = chunk_at_offset(p, size);
fa8d436c 3884
37fa1953
UD
3885 /* Lightweight tests: check whether the block is already the
3886 top block. */
3887 if (__builtin_expect (p == av->top, 0))
3888 {
3889 errstr = "double free or corruption (top)";
3890 goto errout;
3891 }
3892 /* Or whether the next chunk is beyond the boundaries of the arena. */
3893 if (__builtin_expect (contiguous (av)
3894 && (char *) nextchunk
3895 >= ((char *) av->top + chunksize(av->top)), 0))
3896 {
3897 errstr = "double free or corruption (out)";
3898 goto errout;
3899 }
3900 /* Or whether the block is actually not marked used. */
3901 if (__builtin_expect (!prev_inuse(nextchunk), 0))
3902 {
3903 errstr = "double free or corruption (!prev)";
3904 goto errout;
3905 }
fa8d436c 3906
37fa1953 3907 nextsize = chunksize(nextchunk);
893e6098
UD
3908 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
3909 || __builtin_expect (nextsize >= av->system_mem, 0))
3910 {
76761b63 3911 errstr = "free(): invalid next size (normal)";
893e6098
UD
3912 goto errout;
3913 }
fa8d436c 3914
854278df 3915 if (__builtin_expect (perturb_byte, 0))
fdc0f374 3916 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
854278df 3917
37fa1953
UD
3918 /* consolidate backward */
3919 if (!prev_inuse(p)) {
3920 prevsize = p->prev_size;
3921 size += prevsize;
3922 p = chunk_at_offset(p, -((long) prevsize));
3923 unlink(p, bck, fwd);
3924 }
a9177ff5 3925
37fa1953
UD
3926 if (nextchunk != av->top) {
3927 /* get and clear inuse bit */
3928 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
3929
3930 /* consolidate forward */
3931 if (!nextinuse) {
3932 unlink(nextchunk, bck, fwd);
3933 size += nextsize;
3934 } else
3935 clear_inuse_bit_at_offset(nextchunk, 0);
10dc2a90 3936
fa8d436c 3937 /*
37fa1953
UD
3938 Place the chunk in unsorted chunk list. Chunks are
3939 not placed into regular bins until after they have
3940 been given one chance to be used in malloc.
fa8d436c 3941 */
f65fd747 3942
37fa1953
UD
3943 bck = unsorted_chunks(av);
3944 fwd = bck->fd;
f6887a0d
UD
3945 if (__builtin_expect (fwd->bk != bck, 0))
3946 {
3947 errstr = "free(): corrupted unsorted chunks";
3948 goto errout;
3949 }
37fa1953 3950 p->fd = fwd;
7ecfbd38
UD
3951 p->bk = bck;
3952 if (!in_smallbin_range(size))
3953 {
3954 p->fd_nextsize = NULL;
3955 p->bk_nextsize = NULL;
3956 }
37fa1953
UD
3957 bck->fd = p;
3958 fwd->bk = p;
8a4b65b4 3959
37fa1953
UD
3960 set_head(p, size | PREV_INUSE);
3961 set_foot(p, size);
3962
3963 check_free_chunk(av, p);
3964 }
3965
3966 /*
3967 If the chunk borders the current high end of memory,
3968 consolidate into top
3969 */
3970
3971 else {
3972 size += nextsize;
3973 set_head(p, size | PREV_INUSE);
3974 av->top = p;
3975 check_chunk(av, p);
3976 }
3977
3978 /*
3979 If freeing a large space, consolidate possibly-surrounding
3980 chunks. Then, if the total unused topmost memory exceeds trim
3981 threshold, ask malloc_trim to reduce top.
3982
3983 Unless max_fast is 0, we don't know if there are fastbins
3984 bordering top, so we cannot tell for sure whether threshold
3985 has been reached unless fastbins are consolidated. But we
3986 don't want to consolidate on each free. As a compromise,
3987 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
3988 is reached.
3989 */
fa8d436c 3990
37fa1953
UD
3991 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
3992 if (have_fastchunks(av))
3993 malloc_consolidate(av);
fa8d436c 3994
37fa1953 3995 if (av == &main_arena) {
a9177ff5 3996#ifndef MORECORE_CANNOT_TRIM
37fa1953
UD
3997 if ((unsigned long)(chunksize(av->top)) >=
3998 (unsigned long)(mp_.trim_threshold))
3b49edc0 3999 systrim(mp_.top_pad, av);
fa8d436c 4000#endif
37fa1953
UD
4001 } else {
4002 /* Always try heap_trim(), even if the top chunk is not
4003 large, because the corresponding heap might go away. */
4004 heap_info *heap = heap_for_ptr(top(av));
fa8d436c 4005
37fa1953
UD
4006 assert(heap->ar_ptr == av);
4007 heap_trim(heap, mp_.top_pad);
fa8d436c 4008 }
fa8d436c 4009 }
10dc2a90 4010
425ce2ed
UD
4011 if (! have_lock) {
4012 assert (locked);
4013 (void)mutex_unlock(&av->mutex);
4014 }
37fa1953
UD
4015 }
4016 /*
22a89187 4017 If the chunk was allocated via mmap, release via munmap().
37fa1953
UD
4018 */
4019
4020 else {
c120d94d 4021 munmap_chunk (p);
fa8d436c 4022 }
10dc2a90
UD
4023}
4024
fa8d436c
UD
4025/*
4026 ------------------------- malloc_consolidate -------------------------
4027
4028 malloc_consolidate is a specialized version of free() that tears
4029 down chunks held in fastbins. Free itself cannot be used for this
4030 purpose since, among other things, it might place chunks back onto
4031 fastbins. So, instead, we need to use a minor variant of the same
4032 code.
a9177ff5 4033
fa8d436c
UD
4034 Also, because this routine needs to be called the first time through
4035 malloc anyway, it turns out to be the perfect place to trigger
4036 initialization code.
4037*/
4038
fa8d436c 4039static void malloc_consolidate(mstate av)
10dc2a90 4040{
fa8d436c
UD
4041 mfastbinptr* fb; /* current fastbin being consolidated */
4042 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4043 mchunkptr p; /* current chunk being consolidated */
4044 mchunkptr nextp; /* next chunk to consolidate */
4045 mchunkptr unsorted_bin; /* bin header */
4046 mchunkptr first_unsorted; /* chunk to link to */
4047
4048 /* These have same use as in free() */
4049 mchunkptr nextchunk;
4050 INTERNAL_SIZE_T size;
4051 INTERNAL_SIZE_T nextsize;
4052 INTERNAL_SIZE_T prevsize;
4053 int nextinuse;
4054 mchunkptr bck;
4055 mchunkptr fwd;
10dc2a90 4056
fa8d436c
UD
4057 /*
4058 If max_fast is 0, we know that av hasn't
4059 yet been initialized, in which case do so below
4060 */
10dc2a90 4061
9bf248c6 4062 if (get_max_fast () != 0) {
fa8d436c 4063 clear_fastchunks(av);
10dc2a90 4064
fa8d436c 4065 unsorted_bin = unsorted_chunks(av);
10dc2a90 4066
fa8d436c
UD
4067 /*
4068 Remove each chunk from its fast bin and consolidate it, then place
4069 it in the unsorted bin. Among other reasons for doing this,
4070 placing it in the unsorted bin avoids needing to calculate actual bins
4071 until malloc is sure that chunks aren't immediately going to be
4072 reused anyway.
4073 */
a9177ff5 4074
425ce2ed 4075 maxfb = &fastbin (av, NFASTBINS - 1);
425ce2ed 4076 fb = &fastbin (av, 0);
fa8d436c 4077 do {
425ce2ed 4078 p = atomic_exchange_acq (fb, 0);
425ce2ed 4079 if (p != 0) {
72f90263
UD
4080 do {
4081 check_inuse_chunk(av, p);
4082 nextp = p->fd;
4083
4084 /* Slightly streamlined version of consolidation code in free() */
4085 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4086 nextchunk = chunk_at_offset(p, size);
4087 nextsize = chunksize(nextchunk);
4088
4089 if (!prev_inuse(p)) {
4090 prevsize = p->prev_size;
4091 size += prevsize;
4092 p = chunk_at_offset(p, -((long) prevsize));
4093 unlink(p, bck, fwd);
4094 }
4095
4096 if (nextchunk != av->top) {
4097 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4098
4099 if (!nextinuse) {
4100 size += nextsize;
4101 unlink(nextchunk, bck, fwd);
4102 } else
fa8d436c 4103 clear_inuse_bit_at_offset(nextchunk, 0);
a9177ff5 4104
72f90263
UD
4105 first_unsorted = unsorted_bin->fd;
4106 unsorted_bin->fd = p;
4107 first_unsorted->bk = p;
a9177ff5 4108
72f90263 4109 if (!in_smallbin_range (size)) {
7ecfbd38
UD
4110 p->fd_nextsize = NULL;
4111 p->bk_nextsize = NULL;
4112 }
4113
72f90263
UD
4114 set_head(p, size | PREV_INUSE);
4115 p->bk = unsorted_bin;
4116 p->fd = first_unsorted;
4117 set_foot(p, size);
4118 }
a9177ff5 4119
72f90263
UD
4120 else {
4121 size += nextsize;
4122 set_head(p, size | PREV_INUSE);
4123 av->top = p;
4124 }
a9177ff5 4125
72f90263 4126 } while ( (p = nextp) != 0);
a9177ff5 4127
fa8d436c
UD
4128 }
4129 } while (fb++ != maxfb);
4130 }
4131 else {
4132 malloc_init_state(av);
4133 check_malloc_state(av);
4134 }
4135}
10dc2a90 4136
fa8d436c
UD
4137/*
4138 ------------------------------ realloc ------------------------------
4139*/
f65fd747 4140
22a89187 4141void*
4c8b8cc3
UD
4142_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4143 INTERNAL_SIZE_T nb)
fa8d436c 4144{
fa8d436c
UD
4145 mchunkptr newp; /* chunk to return */
4146 INTERNAL_SIZE_T newsize; /* its size */
22a89187 4147 void* newmem; /* corresponding user mem */
f65fd747 4148
fa8d436c 4149 mchunkptr next; /* next contiguous chunk after oldp */
f65fd747 4150
fa8d436c
UD
4151 mchunkptr remainder; /* extra space at end of newp */
4152 unsigned long remainder_size; /* its size */
f65fd747 4153
fa8d436c
UD
4154 mchunkptr bck; /* misc temp for linking */
4155 mchunkptr fwd; /* misc temp for linking */
2ed5fd9a 4156
fa8d436c
UD
4157 unsigned long copysize; /* bytes to copy */
4158 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
a9177ff5 4159 INTERNAL_SIZE_T* s; /* copy source */
fa8d436c 4160 INTERNAL_SIZE_T* d; /* copy destination */
f65fd747 4161
76761b63 4162 const char *errstr = NULL;
f65fd747 4163
6dd6a580 4164 /* oldmem size */
76761b63
UD
4165 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4166 || __builtin_expect (oldsize >= av->system_mem, 0))
4167 {
4b04154d 4168 errstr = "realloc(): invalid old size";
4c8b8cc3
UD
4169 errout:
4170 malloc_printerr (check_action, errstr, chunk2mem(oldp));
4171 return NULL;
76761b63
UD
4172 }
4173
fa8d436c 4174 check_inuse_chunk(av, oldp);
f65fd747 4175
4c8b8cc3 4176 /* All callers already filter out mmap'ed chunks. */
4c8b8cc3 4177 assert (!chunk_is_mmapped(oldp));
f65fd747 4178
22a89187
UD
4179 next = chunk_at_offset(oldp, oldsize);
4180 INTERNAL_SIZE_T nextsize = chunksize(next);
4181 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4182 || __builtin_expect (nextsize >= av->system_mem, 0))
4183 {
4184 errstr = "realloc(): invalid next size";
4185 goto errout;
4186 }
4187
4188 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4189 /* already big enough; split below */
4190 newp = oldp;
4191 newsize = oldsize;
4192 }
4193
4194 else {
4195 /* Try to expand forward into top */
4196 if (next == av->top &&
4197 (unsigned long)(newsize = oldsize + nextsize) >=
4198 (unsigned long)(nb + MINSIZE)) {
4199 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4200 av->top = chunk_at_offset(oldp, nb);
4201 set_head(av->top, (newsize - nb) | PREV_INUSE);
4202 check_inuse_chunk(av, oldp);
4203 return chunk2mem(oldp);
4204 }
76761b63 4205
22a89187
UD
4206 /* Try to expand forward into next chunk; split off remainder below */
4207 else if (next != av->top &&
4208 !inuse(next) &&
4209 (unsigned long)(newsize = oldsize + nextsize) >=
4210 (unsigned long)(nb)) {
fa8d436c 4211 newp = oldp;
22a89187 4212 unlink(next, bck, fwd);
7799b7b3 4213 }
f65fd747 4214
22a89187 4215 /* allocate, copy, free */
fa8d436c 4216 else {
22a89187
UD
4217 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4218 if (newmem == 0)
4219 return 0; /* propagate failure */
4220
4221 newp = mem2chunk(newmem);
4222 newsize = chunksize(newp);
a9177ff5 4223
22a89187
UD
4224 /*
4225 Avoid copy if newp is next chunk after oldp.
4226 */
4227 if (newp == next) {
4228 newsize += oldsize;
72f90263 4229 newp = oldp;
fa8d436c 4230 }
fa8d436c 4231 else {
72f90263 4232 /*
22a89187
UD
4233 Unroll copy of <= 36 bytes (72 if 8-byte sizes).
4234 We know that contents have an odd number of
4235 INTERNAL_SIZE_T-sized words; minimally 3.
72f90263 4236 */
72f90263 4237
22a89187
UD
4238 copysize = oldsize - SIZE_SZ;
4239 s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
4240 d = (INTERNAL_SIZE_T*)(newmem);
4241 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4242 assert(ncopies >= 3);
72f90263 4243
22a89187
UD
4244 if (ncopies > 9)
4245 MALLOC_COPY(d, s, copysize);
72f90263 4246
22a89187
UD
4247 else {
4248 *(d+0) = *(s+0);
4249 *(d+1) = *(s+1);
4250 *(d+2) = *(s+2);
4251 if (ncopies > 4) {
4252 *(d+3) = *(s+3);
4253 *(d+4) = *(s+4);
4254 if (ncopies > 6) {
4255 *(d+5) = *(s+5);
4256 *(d+6) = *(s+6);
4257 if (ncopies > 8) {
4258 *(d+7) = *(s+7);
4259 *(d+8) = *(s+8);
72f90263
UD
4260 }
4261 }
4262 }
72f90263 4263 }
f65fd747 4264
22a89187
UD
4265 _int_free(av, oldp, 1);
4266 check_inuse_chunk(av, newp);
4267 return chunk2mem(newp);
4268 }
fa8d436c 4269 }
fa8d436c 4270 }
f65fd747 4271
22a89187 4272 /* If possible, free extra space in old or extended chunk */
f65fd747 4273
22a89187 4274 assert((unsigned long)(newsize) >= (unsigned long)(nb));
f65fd747 4275
22a89187 4276 remainder_size = newsize - nb;
10dc2a90 4277
22a89187
UD
4278 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4279 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4280 set_inuse_bit_at_offset(newp, newsize);
10dc2a90 4281 }
22a89187
UD
4282 else { /* split remainder */
4283 remainder = chunk_at_offset(newp, nb);
4284 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4285 set_head(remainder, remainder_size | PREV_INUSE |
4286 (av != &main_arena ? NON_MAIN_ARENA : 0));
4287 /* Mark remainder as inuse so free() won't complain */
4288 set_inuse_bit_at_offset(remainder, remainder_size);
4289 _int_free(av, remainder, 1);
4290 }
4291
4292 check_inuse_chunk(av, newp);
4293 return chunk2mem(newp);
fa8d436c
UD
4294}
4295
4296/*
4297 ------------------------------ memalign ------------------------------
4298*/
4299
22a89187 4300static void*
fa8d436c
UD
4301_int_memalign(mstate av, size_t alignment, size_t bytes)
4302{
4303 INTERNAL_SIZE_T nb; /* padded request size */
4304 char* m; /* memory returned by malloc call */
4305 mchunkptr p; /* corresponding chunk */
4306 char* brk; /* alignment point within p */
4307 mchunkptr newp; /* chunk to return */
4308 INTERNAL_SIZE_T newsize; /* its size */
4309 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4310 mchunkptr remainder; /* spare room at end to split off */
4311 unsigned long remainder_size; /* its size */
4312 INTERNAL_SIZE_T size;
f65fd747
UD
4313
4314 /* If need less alignment than we give anyway, just relay to malloc */
4315
fa8d436c 4316 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
f65fd747
UD
4317
4318 /* Otherwise, ensure that it is at least a minimum chunk size */
4319
4320 if (alignment < MINSIZE) alignment = MINSIZE;
4321
fa8d436c
UD
4322 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4323 if ((alignment & (alignment - 1)) != 0) {
4324 size_t a = MALLOC_ALIGNMENT * 2;
4325 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4326 alignment = a;
7799b7b3 4327 }
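  /* Illustrative note (not part of the original source): the loop above
     rounds a non-power-of-two alignment up to the next power of two no
     smaller than MALLOC_ALIGNMENT * 2; e.g. a requested alignment of 48
     becomes 64, and 96 becomes 128. */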
f65fd747 4328
fa8d436c
UD
4329 checked_request2size(bytes, nb);
4330
4331 /*
4332 Strategy: find a spot within that chunk that meets the alignment
4333 request, and then possibly free the leading and trailing space.
4334 */
4335
4336
4337 /* Call malloc with worst case padding to hit alignment. */
4338
4339 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4340
4341 if (m == 0) return 0; /* propagate failure */
4342
4343 p = mem2chunk(m);
4344
4345 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4346
f65fd747 4347 /*
fa8d436c
UD
4348 Find an aligned spot inside chunk. Since we need to give back
4349 leading space in a chunk of at least MINSIZE, if the first
4350 calculation places us at a spot with less than MINSIZE leader,
4351 we can move to the next aligned spot -- we've allocated enough
4352 total room so that this is always possible.
f65fd747
UD
4353 */
4354
fa8d436c 4355 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
72f90263 4356 -((signed long) alignment));
fa8d436c
UD
4357 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4358 brk += alignment;
f65fd747 4359
fa8d436c 4360 newp = (mchunkptr)brk;
f65fd747
UD
4361 leadsize = brk - (char*)(p);
4362 newsize = chunksize(p) - leadsize;
4363
fa8d436c
UD
4364 /* For mmapped chunks, just adjust offset */
4365 if (chunk_is_mmapped(p)) {
f65fd747
UD
4366 newp->prev_size = p->prev_size + leadsize;
4367 set_head(newp, newsize|IS_MMAPPED);
fa8d436c 4368 return chunk2mem(newp);
f65fd747 4369 }
f65fd747 4370
fa8d436c
UD
4371 /* Otherwise, give back leader, use the rest */
4372 set_head(newp, newsize | PREV_INUSE |
4373 (av != &main_arena ? NON_MAIN_ARENA : 0));
f65fd747 4374 set_inuse_bit_at_offset(newp, newsize);
fa8d436c 4375 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
425ce2ed 4376 _int_free(av, p, 1);
f65fd747
UD
4377 p = newp;
4378
fa8d436c 4379 assert (newsize >= nb &&
72f90263 4380 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
f65fd747
UD
4381 }
4382
4383 /* Also give back spare room at the end */
fa8d436c
UD
4384 if (!chunk_is_mmapped(p)) {
4385 size = chunksize(p);
4386 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4387 remainder_size = size - nb;
4388 remainder = chunk_at_offset(p, nb);
4389 set_head(remainder, remainder_size | PREV_INUSE |
4390 (av != &main_arena ? NON_MAIN_ARENA : 0));
4391 set_head_size(p, nb);
425ce2ed 4392 _int_free(av, remainder, 1);
fa8d436c 4393 }
f65fd747
UD
4394 }
4395
fa8d436c
UD
4396 check_inuse_chunk(av, p);
4397 return chunk2mem(p);
f65fd747
UD
4398}
4399
f65fd747 4400
fa8d436c
UD
4401/*
4402 ------------------------------ valloc ------------------------------
f65fd747
UD
4403*/
4404
22a89187 4405static void*
fa8d436c 4406_int_valloc(mstate av, size_t bytes)
f65fd747 4407{
fa8d436c
UD
4408 /* Ensure initialization/consolidation */
4409 if (have_fastchunks(av)) malloc_consolidate(av);
02d46fc4 4410 return _int_memalign(av, GLRO(dl_pagesize), bytes);
f65fd747 4411}
f65fd747
UD
4412
4413/*
fa8d436c 4414 ------------------------------ pvalloc ------------------------------
f65fd747
UD
4415*/
4416
fa8d436c 4417
22a89187 4418static void*
fa8d436c 4419_int_pvalloc(mstate av, size_t bytes)
f65fd747 4420{
fa8d436c 4421 size_t pagesz;
f65fd747 4422
fa8d436c
UD
4423 /* Ensure initialization/consolidation */
4424 if (have_fastchunks(av)) malloc_consolidate(av);
02d46fc4 4425 pagesz = GLRO(dl_pagesize);
fa8d436c 4426 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
f65fd747 4427}
a9177ff5 4428
f65fd747 4429
fa8d436c
UD
4430/*
4431 ------------------------------ malloc_trim ------------------------------
4432*/
8a4b65b4 4433
3b49edc0 4434static int mtrim(mstate av, size_t pad)
f65fd747 4435{
fa8d436c 4436 /* Ensure initialization/consolidation */
68631c8e
UD
4437 malloc_consolidate (av);
4438
02d46fc4 4439 const size_t ps = GLRO(dl_pagesize);
68631c8e
UD
4440 int psindex = bin_index (ps);
4441 const size_t psm1 = ps - 1;
4442
4443 int result = 0;
4444 for (int i = 1; i < NBINS; ++i)
4445 if (i == 1 || i >= psindex)
4446 {
72f90263 4447 mbinptr bin = bin_at (av, i);
68631c8e 4448
72f90263 4449 for (mchunkptr p = last (bin); p != bin; p = p->bk)
68631c8e
UD
4450 {
4451 INTERNAL_SIZE_T size = chunksize (p);
4452
4453 if (size > psm1 + sizeof (struct malloc_chunk))
4454 {
4455 /* See whether the chunk contains at least one unused page. */
4456 char *paligned_mem = (char *) (((uintptr_t) p
4457 + sizeof (struct malloc_chunk)
4458 + psm1) & ~psm1);
4459
4460 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
4461 assert ((char *) p + size > paligned_mem);
4462
4463 /* This is the size we could potentially free. */
4464 size -= paligned_mem - (char *) p;
4465
4466 if (size > psm1)
4467 {
4468#ifdef MALLOC_DEBUG
4469 /* When debugging we simulate destroying the memory
4470 content. */
4471 memset (paligned_mem, 0x89, size & ~psm1);
4472#endif
9043e228 4473 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
68631c8e
UD
4474
4475 result = 1;
4476 }
4477 }
4478 }
4479 }
8a4b65b4 4480
a9177ff5 4481#ifndef MORECORE_CANNOT_TRIM
3b49edc0 4482 return result | (av == &main_arena ? systrim (pad, av) : 0);
8a4b65b4 4483#else
68631c8e 4484 return result;
f65fd747 4485#endif
f65fd747
UD
4486}
4487
f65fd747 4488
3b49edc0
UD
4489int
4490__malloc_trim(size_t s)
4491{
4492 int result = 0;
4493
4494 if(__malloc_initialized < 0)
4495 ptmalloc_init ();
4496
4497 mstate ar_ptr = &main_arena;
4498 do
4499 {
4500 (void) mutex_lock (&ar_ptr->mutex);
4501 result |= mtrim (ar_ptr, s);
4502 (void) mutex_unlock (&ar_ptr->mutex);
4503
4504 ar_ptr = ar_ptr->next;
4505 }
4506 while (ar_ptr != &main_arena);
4507
4508 return result;
4509}
4510
4511
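/* Usage sketch (application code, not part of malloc.c): __malloc_trim is
   the implementation behind the public malloc_trim, so an application can
   ask the allocator to hand unused pages back to the kernel after freeing
   a large working set: */

#include <malloc.h>

static int
release_free_pages (void)
{
  /* Returns 1 if any memory could be released (via MADV_DONTNEED on large
     free chunks and/or trimming the top of the main heap), 0 otherwise. */
  return malloc_trim (0);
}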
f65fd747 4512/*
fa8d436c 4513 ------------------------- malloc_usable_size -------------------------
f65fd747
UD
4514*/
4515
3b49edc0
UD
4516static size_t
4517musable(void* mem)
f65fd747
UD
4518{
4519 mchunkptr p;
fa8d436c 4520 if (mem != 0) {
f65fd747 4521 p = mem2chunk(mem);
6ef9cc37
SP
4522
4523 if (__builtin_expect(using_malloc_checking == 1, 0))
4524 return malloc_check_get_size(p);
fa8d436c
UD
4525 if (chunk_is_mmapped(p))
4526 return chunksize(p) - 2*SIZE_SZ;
4527 else if (inuse(p))
f65fd747 4528 return chunksize(p) - SIZE_SZ;
f65fd747 4529 }
fa8d436c 4530 return 0;
f65fd747
UD
4531}
4532
3b49edc0
UD
4533
4534size_t
4535__malloc_usable_size(void* m)
4536{
4537 size_t result;
4538
4539 result = musable(m);
4540 return result;
4541}
4542
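/* Usage sketch (application code, not part of malloc.c): the public entry
   point is malloc_usable_size.  Because of chunk rounding it may report
   more bytes than were requested: */

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

static void
show_usable_size (void)
{
  void *p = malloc (100);
  if (p != NULL)
    printf ("requested 100, usable %zu\n", malloc_usable_size (p));
  free (p);
}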
fa8d436c
UD
4543/*
4544 ------------------------------ mallinfo ------------------------------
bedee953 4545 Accumulate malloc statistics for arena AV into M.
fa8d436c 4546*/
f65fd747 4547
bedee953
PP
4548static void
4549int_mallinfo(mstate av, struct mallinfo *m)
f65fd747 4550{
6dd67bd5 4551 size_t i;
f65fd747
UD
4552 mbinptr b;
4553 mchunkptr p;
f65fd747 4554 INTERNAL_SIZE_T avail;
fa8d436c
UD
4555 INTERNAL_SIZE_T fastavail;
4556 int nblocks;
4557 int nfastblocks;
f65fd747 4558
fa8d436c
UD
4559 /* Ensure initialization */
4560 if (av->top == 0) malloc_consolidate(av);
8a4b65b4 4561
fa8d436c 4562 check_malloc_state(av);
8a4b65b4 4563
fa8d436c
UD
4564 /* Account for top */
4565 avail = chunksize(av->top);
4566 nblocks = 1; /* top always exists */
f65fd747 4567
fa8d436c
UD
4568 /* traverse fastbins */
4569 nfastblocks = 0;
4570 fastavail = 0;
4571
4572 for (i = 0; i < NFASTBINS; ++i) {
425ce2ed 4573 for (p = fastbin (av, i); p != 0; p = p->fd) {
fa8d436c
UD
4574 ++nfastblocks;
4575 fastavail += chunksize(p);
4576 }
4577 }
4578
4579 avail += fastavail;
f65fd747 4580
fa8d436c
UD
4581 /* traverse regular bins */
4582 for (i = 1; i < NBINS; ++i) {
4583 b = bin_at(av, i);
4584 for (p = last(b); p != b; p = p->bk) {
4585 ++nblocks;
4586 avail += chunksize(p);
4587 }
4588 }
f65fd747 4589
bedee953
PP
4590 m->smblks += nfastblocks;
4591 m->ordblks += nblocks;
4592 m->fordblks += avail;
4593 m->uordblks += av->system_mem - avail;
4594 m->arena += av->system_mem;
4595 m->fsmblks += fastavail;
4596 if (av == &main_arena)
4597 {
4598 m->hblks = mp_.n_mmaps;
4599 m->hblkhd = mp_.mmapped_mem;
4600 m->usmblks = mp_.max_total_mem;
4601 m->keepcost = chunksize(av->top);
4602 }
fa8d436c 4603}
f65fd747 4604
3b49edc0
UD
4605
4606struct mallinfo __libc_mallinfo()
4607{
4608 struct mallinfo m;
bedee953 4609 mstate ar_ptr;
3b49edc0
UD
4610
4611 if(__malloc_initialized < 0)
4612 ptmalloc_init ();
bedee953
PP
4613
4614 memset(&m, 0, sizeof (m));
4615 ar_ptr = &main_arena;
4616 do {
4617 (void)mutex_lock(&ar_ptr->mutex);
4618 int_mallinfo(ar_ptr, &m);
4619 (void)mutex_unlock(&ar_ptr->mutex);
4620
4621 ar_ptr = ar_ptr->next;
4622 } while (ar_ptr != &main_arena);
4623
3b49edc0
UD
4624 return m;
4625}
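/*
  Illustrative sketch only (not part of this implementation): the totals
  accumulated by int_mallinfo across all arenas are visible through the
  public mallinfo interface.  print_heap_totals is a hypothetical helper.

  #include <stdio.h>
  #include <malloc.h>

  static void print_heap_totals (void)
  {
    struct mallinfo mi = mallinfo ();
    // arena:    bytes of heap obtained from the system (all arenas)
    // uordblks: bytes in chunks currently in use
    // fordblks: bytes in free chunks (including the top chunk)
    // hblkhd:   bytes in mmapped regions (accounted in the main arena)
    printf ("arena=%d in-use=%d free=%d mmapped=%d\n",
            mi.arena, mi.uordblks, mi.fordblks, mi.hblkhd);
  }
*/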
4626
fa8d436c
UD
4627/*
4628 ------------------------------ malloc_stats ------------------------------
f65fd747
UD
4629*/
4630
3b49edc0 4631void
60d2f8f3 4632__malloc_stats (void)
f65fd747 4633{
8a4b65b4 4634 int i;
fa8d436c 4635 mstate ar_ptr;
fa8d436c 4636 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
8a4b65b4
UD
4637#if THREAD_STATS
4638 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
4639#endif
4640
a234e27d
UD
4641 if(__malloc_initialized < 0)
4642 ptmalloc_init ();
8dab36a1
UD
4643 _IO_flockfile (stderr);
4644 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
4645 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
fa8d436c 4646 for (i=0, ar_ptr = &main_arena;; i++) {
bedee953
PP
4647 struct mallinfo mi;
4648
4649 memset(&mi, 0, sizeof(mi));
fa8d436c 4650 (void)mutex_lock(&ar_ptr->mutex);
bedee953 4651 int_mallinfo(ar_ptr, &mi);
8a4b65b4
UD
4652 fprintf(stderr, "Arena %d:\n", i);
4653 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
4654 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
fa8d436c
UD
4655#if MALLOC_DEBUG > 1
4656 if (i > 0)
4657 dump_heap(heap_for_ptr(top(ar_ptr)));
4658#endif
8a4b65b4
UD
4659 system_b += mi.arena;
4660 in_use_b += mi.uordblks;
4661#if THREAD_STATS
4662 stat_lock_direct += ar_ptr->stat_lock_direct;
4663 stat_lock_loop += ar_ptr->stat_lock_loop;
4664 stat_lock_wait += ar_ptr->stat_lock_wait;
4665#endif
fa8d436c 4666 (void)mutex_unlock(&ar_ptr->mutex);
7e3be507
UD
4667 ar_ptr = ar_ptr->next;
4668 if(ar_ptr == &main_arena) break;
8a4b65b4
UD
4669 }
4670 fprintf(stderr, "Total (incl. mmap):\n");
4671 fprintf(stderr, "system bytes = %10u\n", system_b);
4672 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
fa8d436c
UD
4673 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
4674 fprintf(stderr, "max mmap bytes = %10lu\n",
4675 (unsigned long)mp_.max_mmapped_mem);
f65fd747 4676#if THREAD_STATS
8a4b65b4 4677 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
f65fd747
UD
4678 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
4679 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
8a4b65b4
UD
4680 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
4681 fprintf(stderr, "locked total = %10ld\n",
72f90263 4682 stat_lock_direct + stat_lock_loop + stat_lock_wait);
f65fd747 4683#endif
8dab36a1
UD
4684 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
4685 _IO_funlockfile (stderr);
f65fd747
UD
4686}
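/*
  Illustrative sketch only (not part of this implementation): malloc_stats
  is the human-readable counterpart of mallinfo, writing the same
  per-arena accounting to stderr.

  #include <malloc.h>

  void dump_allocator_state (void)
  {
    // Prints "Arena N:" with system/in-use bytes per arena, followed by
    // totals and the mmap statistics, as formatted above.
    malloc_stats ();
  }
*/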
4687
f65fd747
UD
4688
4689/*
fa8d436c 4690 ------------------------------ mallopt ------------------------------
f65fd747
UD
4691*/
4692
3b49edc0 4693int __libc_mallopt(int param_number, int value)
f65fd747 4694{
fa8d436c
UD
4695 mstate av = &main_arena;
4696 int res = 1;
f65fd747 4697
0cb71e02
UD
4698 if(__malloc_initialized < 0)
4699 ptmalloc_init ();
fa8d436c
UD
4700 (void)mutex_lock(&av->mutex);
4701 /* Ensure initialization/consolidation */
4702 malloc_consolidate(av);
2f6d1f1b 4703
3ea5be54
AO
4704 LIBC_PROBE (memory_mallopt, 2, param_number, value);
4705
fa8d436c
UD
4706 switch(param_number) {
4707 case M_MXFAST:
0653427f
AO
4708 if (value >= 0 && value <= MAX_FAST_SIZE)
4709 {
4710 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
4711 set_max_fast(value);
4712 }
fa8d436c
UD
4713 else
4714 res = 0;
4715 break;
2f6d1f1b 4716
fa8d436c 4717 case M_TRIM_THRESHOLD:
0653427f
AO
4718 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value,
4719 mp_.trim_threshold, mp_.no_dyn_threshold);
fa8d436c 4720 mp_.trim_threshold = value;
1d05c2fb 4721 mp_.no_dyn_threshold = 1;
fa8d436c 4722 break;
2f6d1f1b 4723
fa8d436c 4724 case M_TOP_PAD:
0653427f
AO
4725 LIBC_PROBE (memory_mallopt_top_pad, 3, value,
4726 mp_.top_pad, mp_.no_dyn_threshold);
fa8d436c 4727 mp_.top_pad = value;
1d05c2fb 4728 mp_.no_dyn_threshold = 1;
fa8d436c 4729 break;
2f6d1f1b 4730
fa8d436c 4731 case M_MMAP_THRESHOLD:
fa8d436c
UD
4732 /* Forbid setting the threshold too high. */
4733 if((unsigned long)value > HEAP_MAX_SIZE/2)
4734 res = 0;
4735 else
5c447383 4736 {
0653427f
AO
4737 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value,
4738 mp_.mmap_threshold, mp_.no_dyn_threshold);
5c447383
AO
4739 mp_.mmap_threshold = value;
4740 mp_.no_dyn_threshold = 1;
4741 }
fa8d436c 4742 break;
2f6d1f1b 4743
fa8d436c 4744 case M_MMAP_MAX:
0653427f
AO
4745 LIBC_PROBE (memory_mallopt_mmap_max, 3, value,
4746 mp_.n_mmaps_max, mp_.no_dyn_threshold);
61d58174
AO
4747 mp_.n_mmaps_max = value;
4748 mp_.no_dyn_threshold = 1;
fa8d436c 4749 break;
10dc2a90 4750
fa8d436c 4751 case M_CHECK_ACTION:
0653427f 4752 LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
fa8d436c
UD
4753 check_action = value;
4754 break;
854278df
UD
4755
4756 case M_PERTURB:
0653427f 4757 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
854278df
UD
4758 perturb_byte = value;
4759 break;
425ce2ed
UD
4760
4761#ifdef PER_THREAD
4762 case M_ARENA_TEST:
4763 if (value > 0)
0653427f
AO
4764 {
4765 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
4766 mp_.arena_test = value;
4767 }
425ce2ed
UD
4768 break;
4769
4770 case M_ARENA_MAX:
4771 if (value > 0)
0653427f
AO
4772 {
4773 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
4774 mp_.arena_max = value;
4775 }
425ce2ed
UD
4776 break;
4777#endif
b22fc5f5 4778 }
fa8d436c
UD
4779 (void)mutex_unlock(&av->mutex);
4780 return res;
b22fc5f5 4781}
3b49edc0 4782libc_hidden_def (__libc_mallopt)
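/*
  Illustrative sketch only (not part of this implementation): a typical
  way to tune the allocator through mallopt before heavy allocation
  starts.  The values are examples, not recommendations; note that
  setting any of these thresholds sets mp_.no_dyn_threshold and thereby
  disables the dynamic mmap/trim threshold adjustment.

  #include <malloc.h>

  static void tune_malloc (void)
  {
    // Serve requests of 256 KiB and larger directly via mmap.
    mallopt (M_MMAP_THRESHOLD, 256 * 1024);
    // Shrink the heap once 1 MiB of free space sits at its top.
    mallopt (M_TRIM_THRESHOLD, 1024 * 1024);
    // Treat requests up to 64 bytes as fastbin allocations.
    mallopt (M_MXFAST, 64);
  }
*/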
b22fc5f5 4783
10dc2a90 4784
a9177ff5 4785/*
fa8d436c
UD
4786 -------------------- Alternative MORECORE functions --------------------
4787*/
10dc2a90 4788
b22fc5f5 4789
fa8d436c
UD
4790/*
4791 General Requirements for MORECORE.
b22fc5f5 4792
fa8d436c 4793 The MORECORE function must have the following properties:
b22fc5f5 4794
fa8d436c 4795 If MORECORE_CONTIGUOUS is false:
10dc2a90 4796
fa8d436c
UD
4797 * MORECORE must allocate in multiples of pagesize. It will
4798 only be called with arguments that are multiples of pagesize.
10dc2a90 4799
a9177ff5 4800 * MORECORE(0) must return an address that is at least
fa8d436c 4801 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
10dc2a90 4802
fa8d436c 4803 else (i.e. If MORECORE_CONTIGUOUS is true):
10dc2a90 4804
fa8d436c
UD
4805 * Consecutive calls to MORECORE with positive arguments
4806 return increasing addresses, indicating that space has been
4807 contiguously extended.
10dc2a90 4808
fa8d436c
UD
4809 * MORECORE need not allocate in multiples of pagesize.
4810 Calls to MORECORE need not have args of multiples of pagesize.
10dc2a90 4811
fa8d436c 4812 * MORECORE need not page-align.
10dc2a90 4813
fa8d436c 4814 In either case:
10dc2a90 4815
fa8d436c
UD
4816 * MORECORE may allocate more memory than requested. (Or even less,
4817 but this will generally result in a malloc failure.)
10dc2a90 4818
fa8d436c
UD
4819 * MORECORE must not allocate memory when given argument zero, but
4820 instead return one past the end address of memory from previous
4821 nonzero call. This malloc does NOT call MORECORE(0)
4822 until at least one call with positive arguments is made, so
4823 the initial value returned is not important.
10dc2a90 4824
fa8d436c
UD
4825 * Even though consecutive calls to MORECORE need not return contiguous
4826 addresses, it must be OK for malloc'ed chunks to span multiple
4827 regions in those cases where they do happen to be contiguous.
10dc2a90 4828
fa8d436c
UD
4829 * MORECORE need not handle negative arguments -- it may instead
4830 just return MORECORE_FAILURE when given negative arguments.
4831 Negative arguments are always multiples of pagesize. MORECORE
4832 must not misinterpret negative args as large positive unsigned
4833 args. You can suppress all such calls from even occurring by defining
 4834 MORECORE_CANNOT_TRIM.
10dc2a90 4835
fa8d436c
UD
4836 There is some variation across systems about the type of the
4837 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4838 actually be size_t, because sbrk supports negative args, so it is
4839 normally the signed type of the same width as size_t (sometimes
4840 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
4841 matter though. Internally, we use "long" as arguments, which should
4842 work across all reasonable possibilities.
ee74a442 4843
fa8d436c 4844 Additionally, if MORECORE ever returns failure for a positive
22a89187
UD
4845 request, then mmap is used as a noncontiguous system allocator. This
4846 is a useful backup strategy for systems with holes in address spaces
4847 -- in this case sbrk cannot contiguously expand the heap, but mmap
4848 may be able to map noncontiguous space.
7e3be507 4849
fa8d436c
UD
 4850 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
 4851 a function that always returns MORECORE_FAILURE; a minimal sketch of
 such a stub follows this comment block.
2e65ca2b 4852
fa8d436c
UD
4853 If you are using this malloc with something other than sbrk (or its
4854 emulation) to supply memory regions, you probably want to set
4855 MORECORE_CONTIGUOUS as false. As an example, here is a custom
 4856 allocator kindly contributed for pre-OS X Mac OS. It uses virtually
4857 but not necessarily physically contiguous non-paged memory (locked
4858 in, present and won't get swapped out). You can use it by
4859 uncommenting this section, adding some #includes, and setting up the
4860 appropriate defines above:
7e3be507 4861
fa8d436c
UD
4862 #define MORECORE osMoreCore
4863 #define MORECORE_CONTIGUOUS 0
7e3be507 4864
fa8d436c
UD
4865 There is also a shutdown routine that should somehow be called for
4866 cleanup upon program exit.
7e3be507 4867
fa8d436c
UD
4868 #define MAX_POOL_ENTRIES 100
4869 #define MINIMUM_MORECORE_SIZE (64 * 1024)
4870 static int next_os_pool;
4871 void *our_os_pools[MAX_POOL_ENTRIES];
7e3be507 4872
fa8d436c
UD
4873 void *osMoreCore(int size)
4874 {
4875 void *ptr = 0;
4876 static void *sbrk_top = 0;
ca34d7a7 4877
fa8d436c
UD
4878 if (size > 0)
4879 {
4880 if (size < MINIMUM_MORECORE_SIZE)
72f90263 4881 size = MINIMUM_MORECORE_SIZE;
fa8d436c 4882 if (CurrentExecutionLevel() == kTaskLevel)
72f90263 4883 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
fa8d436c
UD
4884 if (ptr == 0)
4885 {
72f90263 4886 return (void *) MORECORE_FAILURE;
fa8d436c
UD
4887 }
4888 // save ptrs so they can be freed during cleanup
4889 our_os_pools[next_os_pool] = ptr;
4890 next_os_pool++;
4891 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4892 sbrk_top = (char *) ptr + size;
4893 return ptr;
4894 }
4895 else if (size < 0)
4896 {
4897 // we don't currently support shrink behavior
4898 return (void *) MORECORE_FAILURE;
4899 }
4900 else
4901 {
4902 return sbrk_top;
431c33c0 4903 }
ca34d7a7 4904 }
ca34d7a7 4905
fa8d436c
UD
4906 // cleanup any allocated memory pools
4907 // called as last thing before shutting down driver
ca34d7a7 4908
fa8d436c 4909 void osCleanupMem(void)
ca34d7a7 4910 {
fa8d436c 4911 void **ptr;
ca34d7a7 4912
fa8d436c
UD
4913 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4914 if (*ptr)
4915 {
72f90263
UD
4916 PoolDeallocate(*ptr);
4917 *ptr = 0;
fa8d436c
UD
4918 }
4919 }
ee74a442 4920
fa8d436c 4921*/
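/*
  Illustrative sketch only (not part of this implementation): as noted in
  the comment above, defining MORECORE as a function that always fails
  makes this malloc obtain all memory via mmap.  fail_morecore is a
  hypothetical name; the argument type follows the discussion of
  sbrk-style signatures above.

  // #define MORECORE fail_morecore
  // #define MORECORE_CONTIGUOUS 0

  static void *fail_morecore (ptrdiff_t increment)
  {
    // Never extend (or trim) the heap; every request falls back to mmap.
    return (void *) MORECORE_FAILURE;
  }
*/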
f65fd747 4922
7e3be507 4923
3e030bd5
UD
4924/* Helper code. */
4925
ae7f5313
UD
4926extern char **__libc_argv attribute_hidden;
4927
3e030bd5 4928static void
6bf4302e 4929malloc_printerr(int action, const char *str, void *ptr)
3e030bd5 4930{
553cc5f9
UD
4931 if ((action & 5) == 5)
4932 __libc_message (action & 2, "%s\n", str);
4933 else if (action & 1)
3e030bd5 4934 {
a9055cab 4935 char buf[2 * sizeof (uintptr_t) + 1];
3e030bd5 4936
a9055cab
UD
4937 buf[sizeof (buf) - 1] = '\0';
4938 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
4939 while (cp > buf)
4940 *--cp = '0';
4941
bb9510dc 4942 __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n",
ae7f5313 4943 __libc_argv[0] ?: "<unknown>", str, cp);
3e030bd5 4944 }
a9055cab 4945 else if (action & 2)
3e030bd5
UD
4946 abort ();
4947}
4948
22a89187 4949#include <sys/param.h>
fa8d436c 4950
a204dbb2
UD
4951/* We need a wrapper function for one of the additions of POSIX. */
4952int
4953__posix_memalign (void **memptr, size_t alignment, size_t size)
4954{
4955 void *mem;
4956
4957 /* Test whether the SIZE argument is valid. It must be a power of
4958 two multiple of sizeof (void *). */
de02bd05
UD
4959 if (alignment % sizeof (void *) != 0
 4960 || !powerof2 (alignment / sizeof (void *))
4961 || alignment == 0)
a204dbb2
UD
4962 return EINVAL;
4963
e796f92f
UD
 4964 /* Call the hook here, so that the caller recorded via RETURN_ADDRESS
 4965 is posix_memalign's caller and not posix_memalign itself. */
a222d91a 4966 void *(*hook) (size_t, size_t, const void *) =
df77455c 4967 force_reg (__memalign_hook);
bfacf1af 4968 if (__builtin_expect (hook != NULL, 0))
e796f92f
UD
4969 mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
4970 else
3b49edc0 4971 mem = __libc_memalign (alignment, size);
a204dbb2 4972
fa8d436c
UD
4973 if (mem != NULL) {
4974 *memptr = mem;
4975 return 0;
4976 }
a204dbb2
UD
4977
4978 return ENOMEM;
4979}
4980weak_alias (__posix_memalign, posix_memalign)
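/*
  Illustrative sketch only (not part of this implementation):
  posix_memalign requires the alignment to be a power of two that is also
  a multiple of sizeof (void *), which is exactly what the validity test
  above enforces.  Unlike malloc, it reports failure through its return
  value and leaves errno alone.

  #include <stdio.h>
  #include <stdlib.h>

  int main (void)
  {
    void *buf;
    int err = posix_memalign (&buf, 64, 1024);  // 64 is a valid alignment
    if (err != 0)
      {
        // err is EINVAL for a bad alignment, ENOMEM if memory ran out.
        fprintf (stderr, "posix_memalign failed: %d\n", err);
        return 1;
      }
    free (buf);
    return 0;
  }
*/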
4981
bb066545
UD
4982
4983int
4984malloc_info (int options, FILE *fp)
4985{
4986 /* For now, at least. */
4987 if (options != 0)
4988 return EINVAL;
4989
4990 int n = 0;
4991 size_t total_nblocks = 0;
4992 size_t total_nfastblocks = 0;
4993 size_t total_avail = 0;
4994 size_t total_fastavail = 0;
da2d2fb6
UD
4995 size_t total_system = 0;
4996 size_t total_max_system = 0;
0588a9cb
UD
4997 size_t total_aspace = 0;
4998 size_t total_aspace_mprotect = 0;
bb066545
UD
4999
5000 void mi_arena (mstate ar_ptr)
5001 {
5002 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5003
5004 size_t nblocks = 0;
5005 size_t nfastblocks = 0;
5006 size_t avail = 0;
5007 size_t fastavail = 0;
5008 struct
5009 {
5010 size_t from;
5011 size_t to;
5012 size_t total;
5013 size_t count;
5014 } sizes[NFASTBINS + NBINS - 1];
5015#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5016
5017 mutex_lock (&ar_ptr->mutex);
5018
5019 for (size_t i = 0; i < NFASTBINS; ++i)
5020 {
5021 mchunkptr p = fastbin (ar_ptr, i);
5022 if (p != NULL)
5023 {
5024 size_t nthissize = 0;
5025 size_t thissize = chunksize (p);
5026
5027 while (p != NULL)
5028 {
5029 ++nthissize;
5030 p = p->fd;
5031 }
5032
5033 fastavail += nthissize * thissize;
5034 nfastblocks += nthissize;
5035 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5036 sizes[i].to = thissize;
5037 sizes[i].count = nthissize;
5038 }
5039 else
5040 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5041
5042 sizes[i].total = sizes[i].count * sizes[i].to;
5043 }
5044
5045 mbinptr bin = bin_at (ar_ptr, 1);
5046 struct malloc_chunk *r = bin->fd;
346bc35c 5047 if (r != NULL)
bb066545 5048 {
346bc35c
UD
5049 while (r != bin)
5050 {
5051 ++sizes[NFASTBINS].count;
5052 sizes[NFASTBINS].total += r->size;
5053 sizes[NFASTBINS].from = MIN (sizes[NFASTBINS].from, r->size);
5054 sizes[NFASTBINS].to = MAX (sizes[NFASTBINS].to, r->size);
5055 r = r->fd;
5056 }
5057 nblocks += sizes[NFASTBINS].count;
5058 avail += sizes[NFASTBINS].total;
bb066545 5059 }
bb066545
UD
5060
5061 for (size_t i = 2; i < NBINS; ++i)
5062 {
5063 bin = bin_at (ar_ptr, i);
5064 r = bin->fd;
5065 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5066 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5067 = sizes[NFASTBINS - 1 + i].count = 0;
5068
346bc35c
UD
5069 if (r != NULL)
5070 while (r != bin)
5071 {
5072 ++sizes[NFASTBINS - 1 + i].count;
5073 sizes[NFASTBINS - 1 + i].total += r->size;
5074 sizes[NFASTBINS - 1 + i].from
5075 = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
5076 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
bb066545 5077 r->size);
bb066545 5078
346bc35c
UD
5079 r = r->fd;
5080 }
bb066545
UD
5081
5082 if (sizes[NFASTBINS - 1 + i].count == 0)
5083 sizes[NFASTBINS - 1 + i].from = 0;
5084 nblocks += sizes[NFASTBINS - 1 + i].count;
5085 avail += sizes[NFASTBINS - 1 + i].total;
5086 }
5087
5088 mutex_unlock (&ar_ptr->mutex);
5089
5090 total_nfastblocks += nfastblocks;
5091 total_fastavail += fastavail;
5092
5093 total_nblocks += nblocks;
5094 total_avail += avail;
5095
5096 for (size_t i = 0; i < nsizes; ++i)
5097 if (sizes[i].count != 0 && i != NFASTBINS)
5098 fprintf (fp, "\
5099<size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5100 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5101
5102 if (sizes[NFASTBINS].count != 0)
5103 fprintf (fp, "\
5104<unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5105 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5106 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5107
da2d2fb6
UD
5108 total_system += ar_ptr->system_mem;
5109 total_max_system += ar_ptr->max_system_mem;
5110
bb066545
UD
5111 fprintf (fp,
5112 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5113 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
da2d2fb6 5114 "<system type=\"current\" size=\"%zu\"/>\n"
0588a9cb 5115 "<system type=\"max\" size=\"%zu\"/>\n",
da2d2fb6
UD
5116 nfastblocks, fastavail, nblocks, avail,
5117 ar_ptr->system_mem, ar_ptr->max_system_mem);
0588a9cb
UD
5118
5119 if (ar_ptr != &main_arena)
5120 {
5121 heap_info *heap = heap_for_ptr(top(ar_ptr));
5122 fprintf (fp,
5123 "<aspace type=\"total\" size=\"%zu\"/>\n"
5124 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5125 heap->size, heap->mprotect_size);
5126 total_aspace += heap->size;
5127 total_aspace_mprotect += heap->mprotect_size;
5128 }
5129 else
5130 {
5131 fprintf (fp,
5132 "<aspace type=\"total\" size=\"%zu\"/>\n"
5133 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5134 ar_ptr->system_mem, ar_ptr->system_mem);
5135 total_aspace += ar_ptr->system_mem;
5136 total_aspace_mprotect += ar_ptr->system_mem;
5137 }
5138
5139 fputs ("</heap>\n", fp);
bb066545
UD
5140 }
5141
346bc35c
UD
5142 if(__malloc_initialized < 0)
5143 ptmalloc_init ();
5144
bb066545
UD
5145 fputs ("<malloc version=\"1\">\n", fp);
5146
5147 /* Iterate over all arenas currently in use. */
5148 mstate ar_ptr = &main_arena;
5149 do
5150 {
5151 mi_arena (ar_ptr);
5152 ar_ptr = ar_ptr->next;
5153 }
5154 while (ar_ptr != &main_arena);
5155
5156 fprintf (fp,
5157 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5158 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
ee6f3538
JS
5159 "<system type=\"current\" size=\"%zu\"/>\n"
5160 "<system type=\"max\" size=\"%zu\"/>\n"
0588a9cb
UD
5161 "<aspace type=\"total\" size=\"%zu\"/>\n"
5162 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
bb066545 5163 "</malloc>\n",
da2d2fb6 5164 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
0588a9cb
UD
5165 total_system, total_max_system,
5166 total_aspace, total_aspace_mprotect);
bb066545
UD
5167
5168 return 0;
5169}
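/*
  Illustrative sketch only (not part of this implementation): malloc_info
  writes the XML document built above (per-heap <sizes>, <total>,
  <system> and <aspace> elements) to the given stdio stream.
  dump_malloc_xml is a hypothetical helper.

  #include <stdio.h>
  #include <malloc.h>

  int dump_malloc_xml (const char *path)
  {
    FILE *fp = fopen (path, "w");
    if (fp == NULL)
      return -1;
    int rc = malloc_info (0, fp);  // the options argument must be 0
    fclose (fp);
    return rc;
  }
*/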
5170
5171
eba19d2b
UD
5172strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5173strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5174strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5175strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5176strong_alias (__libc_memalign, __memalign)
5177weak_alias (__libc_memalign, memalign)
5178strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5179strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5180strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5181strong_alias (__libc_mallinfo, __mallinfo)
5182weak_alias (__libc_mallinfo, mallinfo)
5183strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
7e3be507
UD
5184
5185weak_alias (__malloc_stats, malloc_stats)
5186weak_alias (__malloc_usable_size, malloc_usable_size)
5187weak_alias (__malloc_trim, malloc_trim)
2f6d1f1b
UD
5188weak_alias (__malloc_get_state, malloc_get_state)
5189weak_alias (__malloc_set_state, malloc_set_state)
7e3be507 5190
f65fd747 5191
fa8d436c 5192/* ------------------------------------------------------------
f65fd747
UD
5193History:
5194
fa8d436c 5195[see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
f65fd747
UD
5196
5197*/
fa8d436c
UD
5198/*
5199 * Local variables:
5200 * c-basic-offset: 2
5201 * End:
5202 */