/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 1996-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>
   and Doug Lea <dl@cs.oswego.edu>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
/*
  This is a version (aka ptmalloc2) of malloc/free/realloc written by
  Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.

  There have been substantial changes made after the integration into
  glibc in all parts of the code.  Do not look for much commonality
  with the ptmalloc2 version.

* Version ptmalloc2-20011215
  based on:
  VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)

* Quickstart

  In order to compile this implementation, a Makefile is provided with
  the ptmalloc2 distribution, which has pre-defined targets for some
  popular systems (e.g. "make posix" for Posix threads).  All that is
  typically required with regard to compiler flags is the selection of
  the thread package via defining one out of USE_PTHREADS, USE_THR or
  USE_SPROC.  Check the thread-m.h file for what effects this has.
  Many/most systems will additionally require USE_TSD_DATA_HACK to be
  defined, so this is the default for "make posix".

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written.  However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator for malloc-intensive programs.

  The main properties of the algorithms are:
  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
    with ties normally decided via FIFO (i.e. least recently used).
  * For small (<= 64 bytes by default) requests, it is a caching
    allocator, that maintains pools of quickly recycled chunks.
  * In between, and for combinations of large and small requests, it does
    the best it can trying to meet both goals at once.
  * For very large requests (>= 128KB by default), it relies on system
    memory mapping facilities, if supported.

  For a longer but slightly out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

  You may already by default be using a C library containing a malloc
  that is based on some version of this malloc (for example in
  linux).  You might still want to use the one in this file in order to
  customize settings or to avoid overheads associated with library
  versions.

* Contents, described in more detail in "description of public routines" below.

  Standard (ANSI/SVID/...) functions:
    malloc(size_t n);
    calloc(size_t n_elements, size_t element_size);
    free(void* p);
    realloc(void* p, size_t n);
    memalign(size_t alignment, size_t n);
    valloc(size_t n);
    mallinfo()
    mallopt(int parameter_number, int parameter_value)

  Additional functions:
    independent_calloc(size_t n_elements, size_t size, void* chunks[]);
    independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
    pvalloc(size_t n);
    malloc_trim(size_t pad);
    malloc_usable_size(void* p);
    malloc_stats();

* Vital statistics:

  Supported pointer representation:       4 or 8 bytes
  Supported size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.
       You can adjust this by defining INTERNAL_SIZE_T

  Alignment:                              2 * sizeof(size_t) (default)
       (i.e., 8 byte alignment with 4byte size_t).  This suffices for
       nearly all current machines and C compilers.  However, you can
       define MALLOC_ALIGNMENT to be wider than this if necessary.

  Minimum overhead per allocated chunk:   4 or 8 bytes
       Each malloced chunk has a hidden word of overhead holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field and 8 (16) bytes for
       free list pointers.  Thus, the minimum allocatable size is
       16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

       The maximum overhead wastage (i.e., number of extra bytes
       allocated than were requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is 2 *
       sizeof(size_t) bytes plus the remainder from a system page (the
       minimal mmap unit); typically 4096 or 8192 bytes.

  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
                           8-byte size_t: 2^64 minus about two pages

       It is assumed that (possibly signed) size_t values suffice to
       represent chunk sizes.  `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type.  The ISO C standard says that it must be
       unsigned, but a few systems are known not to adhere to this.
       Additionally, even when size_t is unsigned, sbrk (which is by
       default used to obtain memory from system) accepts signed
       arguments, and may not be able to handle size_t-wide arguments
       with negative sign bit.  Generally, values that would
       appear as negative after accounting for overhead and alignment
       are supported only via mmap(), which does not have this
       limitation.

       Requests for sizes outside the allowed range will perform an optional
       failure action and then return null.  (Requests may also
       fail because a system is out of memory.)

  Thread-safety: thread-safe

  Compliance: I believe it is compliant with the 1997 Single Unix Specification
       Also SVID/XPG, ANSI C, and probably others as well.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below.  It has been tested most extensively on Solaris and Linux.
    People also report using it in stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  It is not
    at all modular.  (Sorry!)  It uses a lot of macros.  To be at all
    usable, this code should be compiled using an optimizing compiler
    (for example gcc -O3) that can simplify expressions and control
    paths.  (FAQ: some macros import variables as arguments rather than
    declare locals because people reported that some debuggers
    otherwise get confused.)

    OPTION                     DEFAULT VALUE

    Compilation Environment options:

    HAVE_MREMAP                0

    Changing default word sizes:

    INTERNAL_SIZE_T            size_t

    Configuration and functionality options:

    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
    USE_MALLOC_LOCK            NOT defined
    MALLOC_DEBUG               NOT defined
    REALLOC_ZERO_BYTES_FREES   1
    TRIM_FASTBINS              0

    Options for customizing MORECORE:

    MORECORE                   sbrk
    MORECORE_FAILURE           -1
    MORECORE_CONTIGUOUS        1
    MORECORE_CANNOT_TRIM       NOT defined
    MORECORE_CLEARS            1
    MMAP_AS_MORECORE_SIZE      (1024 * 1024)

    Tuning options that are also dynamically changeable via mallopt:

    DEFAULT_MXFAST             64 (for 32bit), 128 (for 64bit)
    DEFAULT_TRIM_THRESHOLD     128 * 1024
    DEFAULT_TOP_PAD            0
    DEFAULT_MMAP_THRESHOLD     128 * 1024
    DEFAULT_MMAP_MAX           65536

    There are several other #defined constants and macros that you
    probably don't want to touch unless you are extending or adapting malloc. */

/*
  void* is the pointer type that malloc should say it returns
*/

#ifndef void
#define void      void
#endif /*void*/

#include <stddef.h>   /* for size_t */
#include <stdlib.h>   /* for getenv(), abort() */
#include <unistd.h>   /* for __libc_enable_secure */

#include <atomic.h>
#include <_itoa.h>
#include <bits/wordsize.h>
#include <sys/sysinfo.h>

#include <ldsodefs.h>

#include <unistd.h>
#include <stdio.h>    /* needed for malloc_stats */
#include <errno.h>
#include <assert.h>

#include <shlib-compat.h>

/* For uintptr_t.  */
#include <stdint.h>

/* For va_arg, va_start, va_end.  */
#include <stdarg.h>

/* For MIN, MAX, powerof2.  */
#include <sys/param.h>

/* For ALIGN_UP et. al.  */
#include <libc-pointer-arith.h>

/* For DIAG_PUSH/POP_NEEDS_COMMENT et al.  */
#include <libc-diag.h>

#include <malloc/malloc-internal.h>

/* For SINGLE_THREAD_P.  */
#include <sysdep-cancel.h>
/*
  Debugging:

  Because freed chunks may be overwritten with bookkeeping fields, this
  malloc will often die when freed memory is overwritten by user
  programs.  This can be very effective (albeit in an annoying way)
  in helping track down dangling pointers.

  If you compile with -DMALLOC_DEBUG, a number of assertion checks are
  enabled that will catch more memory errors.  You probably won't be
  able to make much sense of the actual assertion errors, but they
  should help you locate incorrectly overwritten memory.  The checking
  is fairly extensive, and will slow down execution
  noticeably.  Calling malloc_stats or mallinfo with MALLOC_DEBUG set
  will attempt to check every non-mmapped allocated and free chunk in
  the course of computing the summaries.  (By nature, mmapped regions
  cannot be checked very much automatically.)

  Setting MALLOC_DEBUG may also be helpful if you are trying to modify
  this code.  The assertions in the check routines spell out in more
  detail the assumptions and invariants underlying the algorithms.

  Setting MALLOC_DEBUG does NOT provide an automated mechanism for
  checking that all accesses to malloced memory stay within their
  bounds.  However, there are several add-ons and adaptations of this
  or other mallocs available that do this.
*/

#ifndef MALLOC_DEBUG
#define MALLOC_DEBUG 0
#endif

#ifndef NDEBUG
# define __assert_fail(assertion, file, line, function)   \
    __malloc_assert(assertion, file, line, function)

extern const char *__progname;

static void
__malloc_assert (const char *assertion, const char *file, unsigned int line,
                 const char *function)
{
  (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
                     __progname, __progname[0] ? ": " : "",
                     file, line,
                     function ? function : "", function ? ": " : "",
                     assertion);
  fflush (stderr);
  abort ();
}
#endif
#if USE_TCACHE
/* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
# define TCACHE_MAX_BINS  64
# define MAX_TCACHE_SIZE  tidx2usize (TCACHE_MAX_BINS-1)

/* Only used to pre-fill the tunables.  */
# define tidx2usize(idx)  (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)

/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))

/* With rounding and alignment, the bins are...
   idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
   idx 1   bytes 25..40 or 13..20
   idx 2   bytes 41..56 or 21..28
   etc.  */

/* This is another arbitrary limit, which tunables can change.  Each
   tcache bin will hold at most this number of chunks.  */
# define TCACHE_FILL_COUNT 7
#endif
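
/* Worked example (a sketch, assuming the usual 64-bit defaults
   SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MINSIZE == 32; these
   values are only assumed here for illustration):

     request2size (24) == 32    24 + 8 rounds up to 32
     csize2tidx (32)   == (32 - 32 + 15) / 16 == 0    first bin
     request2size (25) == 48    25 + 8 rounds up to 48
     csize2tidx (48)   == (48 - 32 + 15) / 16 == 1    second bin

   which matches the table above: idx 0 serves requests of 0..24 bytes
   and idx 1 serves 25..40 bytes.  */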

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  This is required by the C standard.  Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/

#ifndef REALLOC_ZERO_BYTES_FREES
#define REALLOC_ZERO_BYTES_FREES 1
#endif

/*
  TRIM_FASTBINS controls whether free() of a very small chunk can
  immediately lead to trimming.  Setting to true (1) can reduce memory
  footprint, but will almost always slow down programs that use a lot
  of small chunks.

  Define this only if you are willing to give up some speed to more
  aggressively reduce system-level memory footprint when releasing
  memory in programs that use many small chunks.  You can get
  essentially the same effect by setting MXFAST to 0, but this can
  lead to even greater slowdowns in programs using many small chunks.
  TRIM_FASTBINS is an in-between compile-time option that disables
  only those chunks bordering topmost memory from being placed in
  fastbins.
*/

#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS  0
#endif


/* Definition for getting more memory from the OS.  */
#define MORECORE         (*__morecore)
#define MORECORE_FAILURE 0
void * __default_morecore (ptrdiff_t);
void *(*__morecore)(ptrdiff_t) = __default_morecore;


#include <string.h>

/*
  MORECORE-related declarations.  By default, rely on sbrk
*/


/*
  MORECORE is the name of the routine to call to obtain more memory
  from the system.  See below for general guidance on writing
  alternative MORECORE functions, as well as a version for WIN32 and a
  sample version for pre-OSX macos.
*/

#ifndef MORECORE
#define MORECORE sbrk
#endif
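
/* Illustrative sketch of an alternative MORECORE, not code used by
   glibc: a replacement must mimic sbrk, i.e. return the old break on
   success and MORECORE_FAILURE otherwise.  A hypothetical version
   that carves from a fixed static pool and cannot trim might look
   like this (pool_morecore and pool are invented names):

     static char pool[16 * 1024 * 1024];
     static size_t pool_used;

     void *
     pool_morecore (ptrdiff_t increment)
     {
       if (increment < 0 || pool_used + increment > sizeof pool)
         return (void *) MORECORE_FAILURE;  / * cannot trim or grow * /
       void *old_break = pool + pool_used;
       pool_used += increment;
       return old_break;
     }

   A setup like this would also define MORECORE_CANNOT_TRIM, since
   negative increments are rejected.  */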

/*
  MORECORE_FAILURE is the value returned upon failure of MORECORE
  as well as mmap.  Since it cannot be an otherwise valid memory address,
  and must reflect values of standard sys calls, you probably ought not
  try to redefine it.
*/

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif

/*
  If MORECORE_CONTIGUOUS is true, take advantage of fact that
  consecutive calls to MORECORE with positive arguments always return
  contiguous increasing addresses.  This is true of unix sbrk.  Even
  if not defined, when regions happen to be contiguous, malloc will
  permit allocations spanning regions obtained from different
  calls.  But defining this when applicable enables some stronger
  consistency checks and space efficiencies.
*/

#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  Define MORECORE_CANNOT_TRIM if your version of MORECORE
  cannot release space back to the system when given negative
  arguments.  This is generally necessary only if you are using
  a hand-crafted MORECORE function that cannot handle negative arguments.
*/

/* #define MORECORE_CANNOT_TRIM */

/*  MORECORE_CLEARS           (default 1)
     The degree to which the routine mapped to MORECORE zeroes out
     memory: never (0), only for newly allocated space (1) or always
     (2).  The distinction between (1) and (2) is necessary because on
     some systems, if the application first decrements and then
     increments the break value, the contents of the reallocated space
     are unspecified.
 */

#ifndef MORECORE_CLEARS
# define MORECORE_CLEARS 1
#endif


/*
   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
   sbrk fails, and mmap is used as a backup.  The value must be a
   multiple of page size.  This backup strategy generally applies only
   when systems have "holes" in address space, so sbrk cannot perform
   contiguous expansion, but there is still space available on system.
   On systems for which this is known to be useful (i.e. most linux
   kernels), this occurs only when programs allocate huge amounts of
   memory.  Between this, and the fact that mmap regions tend to be
   limited, the size should be large, to avoid too many mmap calls and
   thus avoid running out of kernel resources.  */

#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.
*/

#ifndef HAVE_MREMAP
#define HAVE_MREMAP 0
#endif

/* We may need to support __malloc_initialize_hook for backwards
   compatibility.  */

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
# define HAVE_MALLOC_INIT_HOOK 1
#else
# define HAVE_MALLOC_INIT_HOOK 0
#endif


/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics.  It should work on any SVID/XPG compliant system that has
  a /usr/include/malloc.h defining struct mallinfo.  (If you'd like to
  install such a thing yourself, cut out the preliminary declarations
  as described above and below and save them in a malloc.h file.  But
  there's no compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields that are not even meaningful in this version of
  malloc.  These fields are instead filled by mallinfo() with
  other numbers that might be of interest.
*/

/* ---------- description of public routines ------------ */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or null
  if no space is available.  Additionally, on failure, errno is
  set to ENOMEM on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk.  (The minimum
  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  systems.)  On most systems, size_t is an unsigned type, so calls
  with negative arguments are interpreted as requests for huge amounts
  of space, which will often fail.  The maximum supported value of n
  differs across systems, but is in all cases less than the maximum
  representable value of a size_t.
*/
void*  __libc_malloc(size_t);
libc_hidden_proto (__libc_malloc)

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null.  It can have arbitrary (i.e., bad!)
  effects if p has already been freed.

  Unless disabled (using mallopt), freeing very large spaces will,
  when possible, automatically trigger operations that give
  back unused memory to the system, thus reducing program footprint.
*/
void     __libc_free(void*);
libc_hidden_proto (__libc_free)

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void*  __libc_calloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p.  The algorithm
  prefers extending p when possible, otherwise it employs the
  equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  Unless the #define
  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  zero (re)allocates a minimum-sized chunk.

  Large chunks that were internally obtained via mmap will always be
  grown using malloc-copy-free sequences unless the system supports
  MREMAP (currently only linux).

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void*  __libc_realloc(void*, size_t);
libc_hidden_proto (__libc_realloc)

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two.  If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void*  __libc_memalign(size_t, size_t);
libc_hidden_proto (__libc_memalign)

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system.  If the pagesize is unknown, 4096 is used.
*/
void*  __libc_valloc(size_t);



/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  in this malloc.  The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  so setting them has no effect.  But this malloc also supports four
  other options in mallopt.  See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #   default    allowed param values
  M_MXFAST          1         64         0-80  (0 disables fastbins)
  M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
  M_TOP_PAD        -2         0          any
  M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
*/
int      __libc_mallopt(int, int);
libc_hidden_proto (__libc_mallopt)
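
/* Illustrative use (a sketch, not part of glibc): tightening the
   allocator at program start via the public mallopt interface:

     #include <malloc.h>

     int
     main (void)
     {
       mallopt (M_MXFAST, 0);     / * disable fastbins entirely       * /
       mallopt (M_MMAP_MAX, 0);   / * never service requests via mmap * /
       return 0;
     }

   Each call returns 1 if the parameter was set and 0 otherwise.  */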

/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    the number of fastbin blocks (i.e., small chunks that
               have been freed but not reused or consolidated)
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   always 0
  fsmblks:   total bytes held in fastbin blocks
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim.  ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
struct mallinfo __libc_mallinfo(void);


/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
 */
void*  __libc_pvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool.  You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program.  However, it cannot guarantee to reduce memory.  Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed.  If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less).  Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int      __malloc_trim(size_t);
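
/* Illustrative use (sketch): after a program frees a large working
   set, it can ask for unused memory at the top of the heap to be
   returned, keeping some slack for future requests.  big_buffer is a
   hypothetical name:

     free (big_buffer);
     if (malloc_trim (64 * 1024))   / * leave 64K of trailing slack * /
       ;  / * at least some memory was actually released * /

   As documented above, this is a hint, not a guarantee: free blocks
   locked between used chunks cannot be released regardless of pad.  */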

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects.  This is not a particularly great
  programming practice.  malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);

*/
size_t   __malloc_usable_size(void*);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed.  Note that this is the number of bytes allocated, not the
  number requested.  It will be larger than the number requested
  because of alignment and bookkeeping overhead.  Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.

*/
void     __malloc_stats(void);

/*
  posix_memalign(void **memptr, size_t alignment, size_t size);

  POSIX wrapper like memalign(), checking for validity of size.
*/
int      __posix_memalign(void **, size_t, size_t);
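
/* Illustrative use (sketch): unlike memalign, posix_memalign reports
   failure through its return value (EINVAL for a bad alignment,
   ENOMEM for lack of memory) and stores the result through memptr:

     void *p;
     int err = posix_memalign (&p, 64, 1024);  / * 64-byte aligned 1 KiB * /
     if (err == 0)
       free (p);   / * on success p is valid and is freed normally * /

   The alignment must be a power of two and a multiple of
   sizeof (void *) for the call to succeed.  */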

/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces.  This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default.  The maximum supported value of MXFAST
  is 80.  You wouldn't want it any higher than this anyway.  Fastbins
  are designed especially for use with many small structs, objects or
  strings -- the default handles structs/objects/arrays with sizes up
  to 8 4byte fields, or small strings representing words, tokens,
  etc.  Using fastbins for larger objects normally worsens
  fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units.  It is internally used in
  chunksize units, which adds padding and alignment.  You can reduce
  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/


/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
#endif


/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

  Automatic trimming is mainly useful in long-lived programs.
  Because trimming via sbrk can be slow on some systems, and can
  sometimes be wasteful (in cases where programs immediately
  afterward allocate more large chunks) the value should be high
  enough so that your overall system performance would improve by
  releasing this much memory.

  The trim threshold and the mmap control parameters (see below)
  can be traded off with one another.  Trimming and mmapping are
  two different ways of releasing unused memory back to the
  system.  Between these two, it is often possible to keep
  system-level demands of a long-lived program down to a bare
  minimum.  For example, in one test suite of sessions measuring
  the XF86 X server on Linux, using a trim threshold of 128K and a
  mmap threshold of 192K led to near-minimal long term resource
  consumption.

  If you are using this malloc in a long-lived program, it should
  pay to experiment with these values.  As a rough guide, you
  might set to a value close to the average size of a process
  (program) running on your system.  Releasing this much memory
  would allow such a process to run in memory.  Generally, it's
  worth it to tune for trimming rather than memory mapping when a
  program undergoes phases where several large chunks are
  allocated and released in ways that can reuse each other's
  storage, perhaps mixed with phases where there are no such
  chunks at all.  And in well-behaved long-lived programs,
  controlling release of large blocks via trimming versus mapping
  is usually faster.

  However, in most programs, these parameters serve mainly as
  protection against the system-level effects of carrying around
  massive amounts of unneeded memory.  Since frequent calls to
  sbrk, mmap, and munmap otherwise degrade performance, the default
  parameters are set to relatively high values that serve only as
  safeguards.

  The trim value must be greater than page size to have any useful
  effect.  To disable trimming completely, you can set to
  (unsigned long)(-1)

  Trim settings interact with fastbin (MXFAST) settings: Unless
  TRIM_FASTBINS is defined, automatic trimming never takes place upon
  freeing a chunk with size less than or equal to MXFAST.  Trimming is
  instead delayed until subsequent freeing of larger chunks.  However,
  you can still force an attempted trim by calling malloc_trim.

  Also, trimming is not generally possible in cases where
  the main arena is obtained via mmap.

  Note that the trick some people use of mallocing a huge space and
  then freeing it at program startup, in an attempt to reserve system
  memory, doesn't have the intended effect under automatic trimming,
  since that memory will immediately be returned to the system.
*/

#define M_TRIM_THRESHOLD       -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
  M_TOP_PAD is the amount of extra `padding' space to allocate or
  retain whenever sbrk is called.  It is used in two ways internally:

  * When sbrk is called to extend the top of the arena to satisfy
    a new malloc request, this much padding is added to the sbrk
    request.

  * When malloc_trim is called automatically from free(),
    it is used as the `pad' argument.

  In both cases, the actual amount of padding is rounded
  so that the end of the arena is always a system page boundary.

  The main reason for using padding is to avoid calling sbrk so
  often.  Having even a small pad greatly reduces the likelihood
  that nearly every malloc request during program start-up (or
  after trimming) will invoke sbrk, which needlessly wastes
  time.

  Automatic rounding-up to page-size units is normally sufficient
  to avoid measurable overhead, so the default is 0.  However, in
  systems where sbrk is relatively slow, it can pay to increase
  this value, at the expense of carrying around more memory than
  the program needs.
*/

#define M_TOP_PAD              -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
  MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
  adjusted MMAP_THRESHOLD.
*/

#ifndef DEFAULT_MMAP_THRESHOLD_MIN
#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
#endif

#ifndef DEFAULT_MMAP_THRESHOLD_MAX
  /* For 32-bit platforms we cannot increase the maximum mmap
     threshold much because it is also the minimum value for the
     maximum heap size and its alignment.  Going above 512k (i.e., 1M
     for new heaps) wastes too much address space.  */
# if __WORDSIZE == 32
#  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
# else
#  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
# endif
#endif

/*
  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  to service a request.  Requests of at least this size that cannot
  be allocated using already-existing space will be serviced via mmap.
  (If enough normal freed space already exists it is used instead.)

  Using mmap segregates relatively large chunks of memory so that
  they can be individually obtained and released from the host
  system.  A request serviced through mmap is never reused by any
  other request (at least not directly; the system may just so
  happen to remap successive requests to the same locations).

  Segregating space in this way has the benefits that:

  1. Mmapped space can ALWAYS be individually released back
     to the system, which helps keep the system level memory
     demands of a long-lived program low.
  2. Mapped memory can never become `locked' between
     other chunks, as can happen with normally allocated chunks, which
     means that even trimming via malloc_trim would not release them.
  3. On some systems with "holes" in address spaces, mmap can obtain
     memory that sbrk cannot.

  However, it has the disadvantages that:

  1. The space cannot be reclaimed, consolidated, and then
     used to service later requests, as happens with normal chunks.
  2. It can lead to more wastage because of mmap page alignment
     requirements
  3. It causes malloc performance to be more dependent on host
     system memory management support routines which may vary in
     implementation quality and may impose arbitrary
     limitations.  Generally, servicing a request via normal
     malloc steps is faster than going through a system's mmap.

  The advantages of mmap nearly always outweigh disadvantages for
  "large" chunks, but the value of "large" varies across systems.  The
  default is an empirically derived value that works well in most
  systems.


  Update in 2006:
  The above was written in 2001.  Since then the world has changed a lot.
  Memory got bigger.  Applications got bigger.  The virtual address space
  layout in 32 bit linux changed.

  In the new situation, brk() and mmap space is shared and there are no
  artificial limits on brk size imposed by the kernel.  What is more,
  applications have started using transient allocations larger than the
  128Kb as was imagined in 2001.

  The price for mmap is also high now; each time glibc mmaps from the
  kernel, the kernel is forced to zero out the memory it gives to the
  application.  Zeroing memory is expensive and eats a lot of cache and
  memory bandwidth.  This has nothing to do with the efficiency of the
  virtual memory system, by doing mmap the kernel just has no choice but
  to zero.

  In 2001, the kernel had a maximum size for brk() which was about 800
  megabytes on 32 bit x86, at that point brk() would hit the first
  mmaped shared libraries and couldn't expand anymore.  With current 2.6
  kernels, the VA space layout is different and brk() and mmap
  both can span the entire heap at will.

  Rather than using a static threshold for the brk/mmap tradeoff,
  we are now using a simple dynamic one.  The goal is still to avoid
  fragmentation.  The old goals we kept are
  1) try to get the long lived large allocations to use mmap()
  2) really large allocations should always use mmap()
  and we're adding now:
  3) transient allocations should use brk() to avoid forcing the kernel
     having to zero memory over and over again

  The implementation works with a sliding threshold, which is by default
  limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
  out at 128Kb as per the 2001 default.

  This allows us to satisfy requirement 1) under the assumption that long
  lived allocations are made early in the process' lifespan, before it has
  started doing dynamic allocations of the same size (which will
  increase the threshold).

  The upperbound on the threshold satisfies requirement 2)

  The threshold goes up in value when the application frees memory that was
  allocated with the mmap allocator.  The idea is that once the application
  starts freeing memory of a certain size, it's highly probable that this is
  a size the application uses for transient allocations.  This estimator
  is there to satisfy the new third requirement.

*/

#define M_MMAP_THRESHOLD      -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
#endif

/*
  M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap.  This parameter exists because
  some systems have a limited number of internal tables for
  use by mmap, and using more than a few of them may degrade
  performance.

  The default is set to a value that serves only as a safeguard.
  Setting to 0 disables use of mmap for servicing large requests.
*/

#define M_MMAP_MAX             -4

#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX       (65536)
#endif

#include <malloc.h>

#ifndef RETURN_ADDRESS
#define RETURN_ADDRESS(X_) (NULL)
#endif

/* Forward declarations.  */
struct malloc_chunk;
typedef struct malloc_chunk* mchunkptr;

/* Internal routines.  */

static void*  _int_malloc(mstate, size_t);
static void     _int_free(mstate, mchunkptr, int);
static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
                           INTERNAL_SIZE_T);
static void*  _int_memalign(mstate, size_t, size_t);
static void*  _mid_memalign(size_t, size_t, void *);

static void malloc_printerr(const char *str) __attribute__ ((noreturn));

static void* mem2mem_check(void *p, size_t sz);
static void top_check(void);
static void munmap_chunk(mchunkptr p);
#if HAVE_MREMAP
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
#endif

static void*   malloc_check(size_t sz, const void *caller);
static void      free_check(void* mem, const void *caller);
static void*   realloc_check(void* oldmem, size_t bytes,
                             const void *caller);
static void*   memalign_check(size_t alignment, size_t bytes,
                              const void *caller);

/* ------------------ MMAP support ------------------ */


#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_NORESERVE
# define MAP_NORESERVE 0
#endif

#define MMAP(addr, size, prot, flags) \
 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)

/*
  -----------------------  Chunk representations -----------------------
*/


/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base.  See explanation below.
*/

struct malloc_chunk {

  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};


/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                     |A|M|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_size() bytes)                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             (size of chunk, but used for application data)    |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|1|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                     |A|0|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|0|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.  If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

    The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
    main arena, described by the main_arena variable.  When additional
    threads are spawned, each thread receives its own arena (up to a
    configurable limit, after which arenas are reused for multiple
    threads), and the chunks in these arenas have the A bit set.  To
    find the arena for a chunk on such a non-main arena, heap_for_ptr
    performs a bit mask operation and indirection through the ar_ptr
    member of the per-heap header heap_info (see arena.c).

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk.  This makes it easier to
    deal with alignments etc but can be very confusing when trying
    to extend or adapt this code.

    The three exceptions to all this are:

     1. The special chunk `top' doesn't bother using the
        trailing size field since there is no next contiguous chunk
        that would have to index off it.  After initialization, `top'
        is forced to always exist.  If it would become less than
        MINSIZE bytes long, it is replenished.

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit M (IS_MMAPPED) set in their size fields.  Because they are
        allocated one-by-one, each must contain its own trailing size
        field.  If the M bit is set, the other bits are ignored
        (because mmapped chunks are neither in an arena, nor adjacent
        to a freed chunk).  The M bit is also used for chunks which
        originally came from a dumped heap via malloc_set_state in
        hooks.c.

     3. Chunks in fastbins are treated as allocated chunks from the
        point of view of the chunk allocator.  They are consolidated
        with their neighbors only in bulk, in malloc_consolidate.
*/

/*
  ---------- Size and alignment checks and conversions ----------
*/

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((void*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
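
/* Worked example (assuming SIZE_SZ == 8, i.e. a 64-bit build): the
   user pointer sits just past the two header words, 2 * SIZE_SZ == 16
   bytes from the chunk base, so the two macros are exact inverses:

     mchunkptr p = (mchunkptr) 0x1000;   / * hypothetical address * /
     chunk2mem (p)             == (void *) 0x1010
     mem2chunk (chunk2mem (p)) == p

   free () and friends apply mem2chunk to the pointer the user hands
   back in order to recover the chunk header.  */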

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))

/* The smallest size we can malloc is an aligned minimal chunk */

#define MINSIZE  \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */

#define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)

#define misaligned_chunk(p) \
  ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
   & MALLOC_ALIGN_MASK)


/*
   Check if a request is so large that it would wrap around zero when
   padded and aligned.  To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
 */

#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long) (req) >=                                       \
   (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */

#define request2size(req)                                         \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/* Same, except also perform an argument and result check.  First, we check
   that the padding done by request2size didn't result in an integer
   overflow.  Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
   size isn't so large that a later alignment would lead to another integer
   overflow.  */
#define checked_request2size(req, sz) \
({                                  \
  (sz) = request2size (req);        \
  if (((sz) < (req))                \
      || REQUEST_OUT_OF_RANGE (sz)) \
    {                               \
      __set_errno (ENOMEM);         \
      return 0;                     \
    }                               \
})
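
/* Worked example (assuming the usual 64-bit values SIZE_SZ == 8,
   MALLOC_ALIGN_MASK == 15, MINSIZE == 32):

     request2size (0)  == 32    0 + 8 + 15 < 32, so MINSIZE is used
     request2size (24) == 32    (24 + 8 + 15) & ~15
     request2size (25) == 48    (25 + 8 + 15) & ~15

   checked_request2size additionally fails (sets ENOMEM and returns 0
   from the *calling* function; note the embedded return) if the
   padding wrapped around zero or REQUEST_OUT_OF_RANGE holds.  */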

/*
   --------------- Physical chunk operations ---------------
 */


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* extract inuse bit of previous chunk */
#define prev_inuse(p)       ((p)->mchunk_size & PREV_INUSE)


/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)


/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
   from a non-main arena.  This is only set immediately before handing
   the chunk to the user, if necessary.  */
#define NON_MAIN_ARENA 0x4

/* Check for chunk from main arena.  */
#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)

/* Mark a chunk as not being on the main arena.  */
#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)


/*
   Bits to mask off when extracting size

   Note: IS_MMAPPED is intentionally not masked off from size field in
   macros for which mmapped chunks should never be seen.  This should
   cause helpful core dumps to occur if it is tried by accident by
   people extending or adapting this malloc.
 */
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

/* Get size, ignoring use bits */
#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))

/* Like chunksize, but do not mask SIZE_BITS.  */
#define chunksize_nomask(p)         ((p)->mchunk_size)
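
/* Worked example: a raw size field of 0x95 encodes a 0x90-byte chunk
   with PREV_INUSE and NON_MAIN_ARENA set:

     chunksize_nomask (p) == 0x95
     chunksize (p)        == 0x90   SIZE_BITS masked off
     prev_inuse (p)       != 0      previous chunk is in use
     chunk_is_mmapped (p) == 0
     chunk_main_arena (p) == 0      chunk lives in a non-main arena

   Chunk sizes are always multiples of at least 8, so the low three
   bits of the size field are free to carry these flags.  */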
f65fd747 1270
fa8d436c 1271/* Ptr to next physical malloc_chunk. */
e9c4fe93
FW
1272#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1273
229855e5 1274/* Size of the chunk below P. Only valid if !prev_inuse (P). */
e9c4fe93
FW
1275#define prev_size(p) ((p)->mchunk_prev_size)
1276
229855e5 1277/* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
e9c4fe93 1278#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
f65fd747 1279
229855e5 1280/* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
e9c4fe93 1281#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
f65fd747 1282
fa8d436c 1283/* Treat space at ptr + offset as a chunk */
6c8dbf00 1284#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
fa8d436c
UD
1285
1286/* extract p's inuse bit */
6c8dbf00 1287#define inuse(p) \
e9c4fe93 1288 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
f65fd747 1289
fa8d436c 1290/* set/clear chunk as being inuse without otherwise disturbing */
6c8dbf00 1291#define set_inuse(p) \
e9c4fe93 1292 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
f65fd747 1293
6c8dbf00 1294#define clear_inuse(p) \
e9c4fe93 1295 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
f65fd747
UD
1296
1297
fa8d436c 1298/* check/set/clear inuse bits in known places */
6c8dbf00 1299#define inuse_bit_at_offset(p, s) \
e9c4fe93 1300 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
f65fd747 1301
6c8dbf00 1302#define set_inuse_bit_at_offset(p, s) \
e9c4fe93 1303 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
f65fd747 1304
6c8dbf00 1305#define clear_inuse_bit_at_offset(p, s) \
e9c4fe93 1306 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
f65fd747 1307
f65fd747 1308
fa8d436c 1309/* Set size at head, without disturbing its use bit */
e9c4fe93 1310#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
f65fd747 1311
fa8d436c 1312/* Set size/use field */
e9c4fe93 1313#define set_head(p, s) ((p)->mchunk_size = (s))
f65fd747 1314
fa8d436c 1315/* Set size at footer (only when chunk is not in use) */
e9c4fe93 1316#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
f65fd747
UD
1317
1318
e9c4fe93
FW
1319#pragma GCC poison mchunk_size
1320#pragma GCC poison mchunk_prev_size

/*
   -------------------- Internal data structures --------------------

   All internal state is held in an instance of malloc_state defined
   below.  There are no other static variables, except in two optional
   cases:
 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
   for mmap.

   Beware of lots of tricks that minimize the total bookkeeping space
   requirements.  The result is a little over 1K bytes (for 4-byte
   pointers and size_t.)
 */

/*
   Bins

   An array of bin headers for free chunks.  Each bin is doubly
   linked.  The bins are approximately proportionally (log) spaced.
   There are a lot of these bins (128).  This may look excessive, but
   works very well in practice.  Most bins hold sizes that are
   unusual as malloc request sizes, but are more usual for fragments
   and consolidated sets of chunks, which is what these bins hold, so
   they can be found quickly.  All procedures maintain the invariant
   that no consolidated chunk physically borders another one, so each
   chunk in a list is known to be preceded and followed by either
   inuse chunks or the ends of memory.

   Chunks in bins are kept in size order, with ties going to the
   approximately least recently used chunk.  Ordering isn't needed
   for the small bins, which all contain the same-sized chunks, but
   facilitates best-fit allocation for larger chunks.  These lists
   are just sequential.  Keeping them in order almost never requires
   enough traversal to warrant using fancier ordered data
   structures.

   Chunks of the same size are linked with the most
   recently freed at the front, and allocations are taken from the
   back.  This results in LRU (FIFO) allocation order, which tends
   to give each chunk an equal opportunity to be consolidated with
   adjacent freed chunks, resulting in larger free chunks and less
   fragmentation.

   To simplify use in double-linked lists, each bin header acts
   as a malloc_chunk.  This avoids special-casing for headers.
   But to conserve space and improve locality, we allocate
   only the fd/bk pointers of bins, and then use repositioning tricks
   to treat these as the fields of a malloc_chunk *.
 */

typedef struct malloc_chunk *mbinptr;

/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) \
  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
             - offsetof (struct malloc_chunk, fd))

/* analog of ++bin */
#define next_bin(b)  ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))

/* Reminders about list directionality within bins */
#define first(b)     ((b)->fd)
#define last(b)      ((b)->bk)
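
/* Illustrative sketch (not part of the allocator): visiting every
   free chunk in every normal bin of some arena AV.  bin_at ()
   subtracts offsetof (struct malloc_chunk, fd) so that the fd/bk pair
   packed into av->bins[] can be addressed as if it were a complete
   malloc_chunk header:

     for (int i = 1; i < NBINS; ++i)
       {
         mbinptr b = bin_at (av, i);
         for (mchunkptr p = last (b); p != b; p = p->bk)
           ;  // p walks each free chunk queued in bin i
       }
 */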

/*
   Indexing

   Bins for sizes < 512 bytes contain chunks of all the same size, spaced
   8 bytes apart.  Larger bins are approximately logarithmically spaced:

   64 bins of size       8
   32 bins of size      64
   16 bins of size     512
    8 bins of size    4096
    4 bins of size   32768
    2 bins of size  262144
    1 bin  of size what's left

   (These figures assume SIZE_SZ == 4; the spacing scales with
   SMALLBIN_WIDTH on targets with larger size fields.)

   There is actually a little bit of slop in the numbers in bin_index
   for the sake of speed.  This makes no difference elsewhere.

   The bins top out around 1MB because we expect to service large
   requests via mmap.

   Bin 0 does not exist.  Bin 1 is the unordered list; if that would be
   a valid chunk size the small bins are bumped up one.
 */

#define NBINS             128
#define NSMALLBINS         64
#define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
#define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)

#define in_smallbin_range(sz)  \
  ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)

#define smallbin_index(sz) \
  ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3)) \
   + SMALLBIN_CORRECTION)
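
/* Worked example (illustrative only, assuming SIZE_SZ == 8 and
   MALLOC_ALIGNMENT == 16 as on typical 64-bit targets):
   SMALLBIN_WIDTH is 16, SMALLBIN_CORRECTION is 0 and MIN_LARGE_SIZE
   is 64 * 16 == 1024.  A 112-byte chunk is in smallbin range, and
   smallbin_index (112) == 112 >> 4 == 7, a bin holding nothing but
   112-byte chunks.  */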

#define largebin_index_32(sz) \
  (((((unsigned long) (sz)) >> 6) <= 38) ?  56 + (((unsigned long) (sz)) >> 6) :\
   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
   126)

#define largebin_index_32_big(sz) \
  (((((unsigned long) (sz)) >> 6) <= 45) ?  49 + (((unsigned long) (sz)) >> 6) :\
   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
   126)

// XXX It remains to be seen whether it is good to keep the widths of
// XXX the buckets the same or whether it should be scaled by a factor
// XXX of two as well.
#define largebin_index_64(sz) \
  (((((unsigned long) (sz)) >> 6) <= 48) ?  48 + (((unsigned long) (sz)) >> 6) :\
   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
   126)

#define largebin_index(sz) \
  (SIZE_SZ == 8 ? largebin_index_64 (sz) \
   : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
   : largebin_index_32 (sz))

#define bin_index(sz) \
  ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
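
/* Worked example (illustrative only, assuming SIZE_SZ == 8): a
   2000-byte chunk is too big for the small bins (2000 >= 1024), so
   bin_index (2000) falls through to largebin_index_64.  There
   2000 >> 6 == 31, and 31 <= 48, so the chunk lands in bin
   48 + 31 == 79 together with all other large chunks from the same
   64-byte-wide band.  */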

/* Take a chunk off a bin list.  */
static void
unlink_chunk (mstate av, mchunkptr p)
{
  if (chunksize (p) != prev_size (next_chunk (p)))
    malloc_printerr ("corrupted size vs. prev_size");

  mchunkptr fd = p->fd;
  mchunkptr bk = p->bk;

  if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
    malloc_printerr ("corrupted double-linked list");

  fd->bk = bk;
  bk->fd = fd;
  if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
    {
      if (p->fd_nextsize->bk_nextsize != p
          || p->bk_nextsize->fd_nextsize != p)
        malloc_printerr ("corrupted double-linked list (not small)");

      if (fd->fd_nextsize == NULL)
        {
          if (p->fd_nextsize == p)
            fd->fd_nextsize = fd->bk_nextsize = fd;
          else
            {
              fd->fd_nextsize = p->fd_nextsize;
              fd->bk_nextsize = p->bk_nextsize;
              p->fd_nextsize->bk_nextsize = fd;
              p->bk_nextsize->fd_nextsize = fd;
            }
        }
      else
        {
          p->fd_nextsize->bk_nextsize = p->bk_nextsize;
          p->bk_nextsize->fd_nextsize = p->fd_nextsize;
        }
    }
}

/*
   Unsorted chunks

   All remainders from chunk splits, as well as all returned chunks,
   are first placed in the "unsorted" bin.  They are then placed
   in regular bins after malloc gives them ONE chance to be used before
   binning.  So, basically, the unsorted_chunks list acts as a queue,
   with chunks being placed on it in free (and malloc_consolidate),
   and taken off (to be either used or placed in bins) in malloc.

   The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
   does not have to be taken into account in size comparisons.
 */

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
#define unsorted_chunks(M)          (bin_at (M, 1))

/*
   Top

   The top-most available chunk (i.e., the one bordering the end of
   available memory) is treated specially.  It is never included in
   any bin, is used only if no other chunk is available, and is
   released back to the system if it is very large (see
   M_TRIM_THRESHOLD).  Because top initially
   points to its own bin with initial zero size, thus forcing
   extension on the first malloc request, we avoid having any special
   code in malloc to check whether it even exists yet.  But we still
   need to do so when getting memory from system, so we make
   initial_top treat the bin as a legal but unusable chunk during the
   interval between initialization and the first call to
   sysmalloc.  (This is somewhat delicate, since it relies on
   the 2 preceding words to be zero during this interval as well.)
 */

/* Conveniently, the unsorted bin can be used as dummy top on first call */
#define initial_top(M)              (unsorted_chunks (M))

/*
   Binmap

   To help compensate for the large number of bins, a one-level index
   structure is used for bin-by-bin searching.  `binmap' is a
   bitvector recording whether bins are definitely empty so they can
   be skipped over during traversals.  The bits are NOT always
   cleared as soon as bins are empty, but instead only
   when they are noticed to be empty during traversal in malloc.
 */

/* Conservatively use 32 bits per map word, even if on 64bit system */
#define BINMAPSHIFT      5
#define BITSPERMAP       (1U << BINMAPSHIFT)
#define BINMAPSIZE       (NBINS / BITSPERMAP)

#define idx2block(i)     ((i) >> BINMAPSHIFT)
#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))

#define mark_bin(m, i)    ((m)->binmap[idx2block (i)] |= idx2bit (i))
#define unmark_bin(m, i)  ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
#define get_binmap(m, i)  ((m)->binmap[idx2block (i)] & idx2bit (i))
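
/* Worked example (illustrative only): marking bin 79 sets bit
   79 & 31 == 15 in map word idx2block (79) == 79 >> 5 == 2, i.e.
   binmap[2] |= 1U << 15.  A traversal can then skip a block of 32
   consecutive bins at once whenever its map word is zero.  */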

/*
   Fastbins

   An array of lists holding recently freed small chunks.  Fastbins
   are not doubly linked.  It is faster to single-link them, and
   since chunks are never removed from the middles of these lists,
   double linking is not necessary.  Also, unlike regular bins, they
   are not even processed in FIFO order (they use faster LIFO) since
   ordering doesn't much matter in the transient contexts in which
   fastbins are normally used.

   Chunks in fastbins keep their inuse bit set, so they cannot
   be consolidated with other free chunks.  malloc_consolidate
   releases all chunks in fastbins and consolidates them with
   other free chunks.
 */

typedef struct malloc_chunk *mfastbinptr;
#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])

/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz) \
  ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
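
/* Worked example (illustrative only, SIZE_SZ == 8): the smallest
   chunk is 32 bytes, so fastbin_index (32) == (32 >> 4) - 2 == 0
   selects the first fastbin; a 48-byte chunk maps to index 1, and so
   on in 16-byte steps up to the get_max_fast () limit.  */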

/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)

#define NFASTBINS  (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)

/*
   FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
   that triggers automatic consolidation of possibly-surrounding
   fastbin chunks.  This is a heuristic, so the exact value should not
   matter too much.  It is defined at half the default trim threshold as a
   compromise heuristic to only attempt consolidation if it is likely
   to lead to trimming.  However, it is not dynamically tunable, since
   consolidation reduces fragmentation surrounding large chunks even
   if trimming is not used.
 */

#define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)

/*
   NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
   regions.  Otherwise, contiguity is exploited in merging together,
   when possible, results from consecutive MORECORE calls.

   The initial value comes from MORECORE_CONTIGUOUS, but is
   changed dynamically if mmap is ever used as an sbrk substitute.
 */

#define NONCONTIGUOUS_BIT     (2U)

#define contiguous(M)          (((M)->flags & NONCONTIGUOUS_BIT) == 0)
#define noncontiguous(M)       (((M)->flags & NONCONTIGUOUS_BIT) != 0)
#define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
#define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)

/* Maximum size of memory handled in fastbins.  */
static INTERNAL_SIZE_T global_max_fast;

/*
   Set value of max_fast.
   Use impossibly small value if 0.
   Precondition: there are no existing fastbin chunks in the main arena.
   Since do_check_malloc_state () checks this, we call malloc_consolidate ()
   before changing max_fast.  Note other arenas will leak their fast bin
   entries if max_fast is reduced.
 */

#define set_max_fast(s) \
  global_max_fast = (((s) == 0) \
                     ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
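
/* Worked example (illustrative only, SIZE_SZ == 8): DEFAULT_MXFAST is
   64 * SIZE_SZ / 4 == 128, and set_max_fast (128) stores
   (128 + 8) & ~15 == 128 in global_max_fast, so chunks of up to 128
   bytes (user requests of up to 120 bytes) are eligible for the
   fastbins by default.  */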

static inline INTERNAL_SIZE_T
get_max_fast (void)
{
  /* Tell the GCC optimizers that global_max_fast is never larger
     than MAX_FAST_SIZE.  This avoids out-of-bounds array accesses in
     _int_malloc after constant propagation of the size parameter.
     (The code never executes because malloc preserves the
     global_max_fast invariant, but the optimizers may not recognize
     this.)  */
  if (global_max_fast > MAX_FAST_SIZE)
    __builtin_unreachable ();
  return global_max_fast;
}

/*
   ----------- Internal state representation and initialization -----------
 */

/*
   have_fastchunks indicates that there are probably some fastbin chunks.
   It is set true on entering a chunk into any fastbin, and cleared early in
   malloc_consolidate.  The value is approximate since it may be set when there
   are no fastbin chunks, or it may be clear even if there are fastbin chunks
   available.  Given its sole purpose is to reduce the number of redundant
   calls to malloc_consolidate, it does not affect correctness.  As a result
   we can safely use relaxed atomic accesses.
 */


struct malloc_state
{
  /* Serialize access.  */
  __libc_lock_define (, mutex);

  /* Flags (formerly in max_fast).  */
  int flags;

  /* Set if the fastbin chunks contain recently inserted free blocks.  */
  /* Note this is a bool but not all targets support atomics on booleans.  */
  int have_fastchunks;

  /* Fastbins */
  mfastbinptr fastbinsY[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;

  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;

  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};
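
/* Back-of-the-envelope size (illustrative only): on LP64 the bins
   array alone is (128 * 2 - 2) * 8 == 2032 bytes, so a malloc_state
   is a little over 2 kB per arena; the "1K bytes" figure quoted
   earlier is for 4-byte pointers.  */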

struct malloc_par
{
  /* Tunable parameters */
  unsigned long trim_threshold;
  INTERNAL_SIZE_T top_pad;
  INTERNAL_SIZE_T mmap_threshold;
  INTERNAL_SIZE_T arena_test;
  INTERNAL_SIZE_T arena_max;

  /* Memory map support */
  int n_mmaps;
  int n_mmaps_max;
  int max_n_mmaps;
  /* the mmap_threshold is dynamic, until the user sets
     it manually, at which point we need to disable any
     dynamic behavior. */
  int no_dyn_threshold;

  /* Statistics */
  INTERNAL_SIZE_T mmapped_mem;
  INTERNAL_SIZE_T max_mmapped_mem;

  /* First address handed out by MORECORE/sbrk.  */
  char *sbrk_base;

#if USE_TCACHE
  /* Maximum number of buckets to use.  */
  size_t tcache_bins;
  size_t tcache_max_bytes;
  /* Maximum number of chunks in each bucket.  */
  size_t tcache_count;
  /* Maximum number of chunks to remove from the unsorted list, which
     aren't used to prefill the cache.  */
  size_t tcache_unsorted_limit;
#endif
};

/* There are several instances of this struct ("arenas") in this
   malloc.  If you are adapting this malloc in a way that does NOT use
   a static or mmapped malloc_state, you MUST explicitly zero-fill it
   before using.  This malloc relies on the property that malloc_state
   is initialized to all zeroes (as is true of C statics).  */

static struct malloc_state main_arena =
{
  .mutex = _LIBC_LOCK_INITIALIZER,
  .next = &main_arena,
  .attached_threads = 1
};

/* These variables are used for undumping support.  Chunks are marked
   as using mmap, but we leave them alone if they fall into this
   range.  NB: The chunk size for these chunks only includes the
   initial size field (of SIZE_SZ bytes), there is no trailing size
   field (unlike with regular mmapped chunks).  */
static mchunkptr dumped_main_arena_start; /* Inclusive.  */
static mchunkptr dumped_main_arena_end;   /* Exclusive.  */

/* True if the pointer falls into the dumped arena.  Use this after
   chunk_is_mmapped indicates a chunk is mmapped.  */
#define DUMPED_MAIN_ARENA_CHUNK(p) \
  ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)

/* There is only one instance of the malloc parameters.  */

static struct malloc_par mp_ =
{
  .top_pad = DEFAULT_TOP_PAD,
  .n_mmaps_max = DEFAULT_MMAP_MAX,
  .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
  .trim_threshold = DEFAULT_TRIM_THRESHOLD,
#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
  .arena_test = NARENAS_FROM_NCORES (1)
#if USE_TCACHE
  ,
  .tcache_count = TCACHE_FILL_COUNT,
  .tcache_bins = TCACHE_MAX_BINS,
  .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS - 1),
  .tcache_unsorted_limit = 0 /* No limit.  */
#endif
};

/*
   Initialize a malloc_state struct.

   This is called from ptmalloc_init () or from _int_new_arena ()
   when creating a new arena.
 */

static void
malloc_init_state (mstate av)
{
  int i;
  mbinptr bin;

  /* Establish circular links for normal bins */
  for (i = 1; i < NBINS; ++i)
    {
      bin = bin_at (av, i);
      bin->fd = bin->bk = bin;
    }

#if MORECORE_CONTIGUOUS
  if (av != &main_arena)
#endif
  set_noncontiguous (av);
  if (av == &main_arena)
    set_max_fast (DEFAULT_MXFAST);
  atomic_store_relaxed (&av->have_fastchunks, false);

  av->top = initial_top (av);
}

/*
   Other internal utilities operating on mstates
 */

static void *sysmalloc (INTERNAL_SIZE_T, mstate);
static int systrim (size_t, mstate);
static void malloc_consolidate (mstate);


/* -------------- Early definitions for debugging hooks ---------------- */

/* Define and initialize the hook variables.  These weak definitions must
   appear before any use of the variables in a function (arena.c uses one).  */
#ifndef weak_variable
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs.  */
# define weak_variable weak_function
#endif

/* Forward declarations.  */
static void *malloc_hook_ini (size_t sz,
                              const void *caller) __THROW;
static void *realloc_hook_ini (void *ptr, size_t sz,
                               const void *caller) __THROW;
static void *memalign_hook_ini (size_t alignment, size_t sz,
                                const void *caller) __THROW;

#if HAVE_MALLOC_INIT_HOOK
void weak_variable (*__malloc_initialize_hook) (void) = NULL;
compat_symbol (libc, __malloc_initialize_hook,
               __malloc_initialize_hook, GLIBC_2_0);
#endif

void weak_variable (*__free_hook) (void *__ptr,
                                   const void *) = NULL;
void *weak_variable (*__malloc_hook)
  (size_t __size, const void *) = malloc_hook_ini;
void *weak_variable (*__realloc_hook)
  (void *__ptr, size_t __size, const void *)
  = realloc_hook_ini;
void *weak_variable (*__memalign_hook)
  (size_t __alignment, size_t __size, const void *)
  = memalign_hook_ini;
void weak_variable (*__after_morecore_hook) (void) = NULL;

/* This function is called from the arena shutdown hook, to free the
   thread cache (if it exists).  */
static void tcache_thread_shutdown (void);

/* ------------------ Testing support ----------------------------------*/

static int perturb_byte;

static void
alloc_perturb (char *p, size_t n)
{
  if (__glibc_unlikely (perturb_byte))
    memset (p, perturb_byte ^ 0xff, n);
}

static void
free_perturb (char *p, size_t n)
{
  if (__glibc_unlikely (perturb_byte))
    memset (p, perturb_byte, n);
}
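
/* Usage note: perturb_byte is set with mallopt (M_PERTURB, value) or
   the MALLOC_PERTURB_ environment variable.  With MALLOC_PERTURB_=170
   (0xaa), for example, new allocations are filled with 0x55 and freed
   memory with 0xaa, making reads of uninitialized or freed memory
   easy to spot in a debugger.  */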


#include <stap-probe.h>

/* ------------------- Support for multiple arenas -------------------- */
#include "arena.c"

/*
   Debugging support

   These routines make a number of assertions about the states
   of data structures that should be true at all times.  If any
   are not true, it's very likely that a user program has somehow
   trashed memory.  (It's also possible that there is a coding error
   in malloc.  In which case, please report it!)
 */

#if !MALLOC_DEBUG

# define check_chunk(A, P)
# define check_free_chunk(A, P)
# define check_inuse_chunk(A, P)
# define check_remalloced_chunk(A, P, N)
# define check_malloced_chunk(A, P, N)
# define check_malloc_state(A)

#else

# define check_chunk(A, P)               do_check_chunk (A, P)
# define check_free_chunk(A, P)          do_check_free_chunk (A, P)
# define check_inuse_chunk(A, P)         do_check_inuse_chunk (A, P)
# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
# define check_malloced_chunk(A, P, N)   do_check_malloced_chunk (A, P, N)
# define check_malloc_state(A)           do_check_malloc_state (A)

/*
   Properties of all chunks
 */

static void
do_check_chunk (mstate av, mchunkptr p)
{
  unsigned long sz = chunksize (p);
  /* min and max possible addresses assuming contiguous allocation */
  char *max_address = (char *) (av->top) + chunksize (av->top);
  char *min_address = max_address - av->system_mem;

  if (!chunk_is_mmapped (p))
    {
      /* Has legal address ... */
      if (p != av->top)
        {
          if (contiguous (av))
            {
              assert (((char *) p) >= min_address);
              assert (((char *) p + sz) <= ((char *) (av->top)));
            }
        }
      else
        {
          /* top size is always at least MINSIZE */
          assert ((unsigned long) (sz) >= MINSIZE);
          /* top predecessor always marked inuse */
          assert (prev_inuse (p));
        }
    }
  else if (!DUMPED_MAIN_ARENA_CHUNK (p))
    {
      /* address is outside main heap */
      if (contiguous (av) && av->top != initial_top (av))
        {
          assert (((char *) p) < min_address || ((char *) p) >= max_address);
        }
      /* chunk is page-aligned */
      assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
      /* mem is aligned */
      assert (aligned_OK (chunk2mem (p)));
    }
}

/*
   Properties of free chunks
 */

static void
do_check_free_chunk (mstate av, mchunkptr p)
{
  INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
  mchunkptr next = chunk_at_offset (p, sz);

  do_check_chunk (av, p);

  /* Chunk must claim to be free ... */
  assert (!inuse (p));
  assert (!chunk_is_mmapped (p));

  /* Unless a special marker, must have OK fields */
  if ((unsigned long) (sz) >= MINSIZE)
    {
      assert ((sz & MALLOC_ALIGN_MASK) == 0);
      assert (aligned_OK (chunk2mem (p)));
      /* ... matching footer field */
      assert (prev_size (next_chunk (p)) == sz);
      /* ... and is fully consolidated */
      assert (prev_inuse (p));
      assert (next == av->top || inuse (next));

      /* ... and has minimally sane links */
      assert (p->fd->bk == p);
      assert (p->bk->fd == p);
    }
  else /* markers are always of size SIZE_SZ */
    assert (sz == SIZE_SZ);
}

/*
   Properties of inuse chunks
 */

static void
do_check_inuse_chunk (mstate av, mchunkptr p)
{
  mchunkptr next;

  do_check_chunk (av, p);

  if (chunk_is_mmapped (p))
    return; /* mmapped chunks have no next/prev */

  /* Check whether it claims to be in use ... */
  assert (inuse (p));

  next = next_chunk (p);

  /* ... and is surrounded by OK chunks.
     Since more things can be checked with free chunks than inuse ones,
     if an inuse chunk borders them and debug is on, it's worth doing them.
   */
  if (!prev_inuse (p))
    {
      /* Note that we cannot even look at prev unless it is not inuse */
      mchunkptr prv = prev_chunk (p);
      assert (next_chunk (prv) == p);
      do_check_free_chunk (av, prv);
    }

  if (next == av->top)
    {
      assert (prev_inuse (next));
      assert (chunksize (next) >= MINSIZE);
    }
  else if (!inuse (next))
    do_check_free_chunk (av, next);
}

/*
   Properties of chunks recycled from fastbins
 */

static void
do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
{
  INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);

  if (!chunk_is_mmapped (p))
    {
      assert (av == arena_for_chunk (p));
      if (chunk_main_arena (p))
        assert (av == &main_arena);
      else
        assert (av != &main_arena);
    }

  do_check_inuse_chunk (av, p);

  /* Legal size ... */
  assert ((sz & MALLOC_ALIGN_MASK) == 0);
  assert ((unsigned long) (sz) >= MINSIZE);
  /* ... and alignment */
  assert (aligned_OK (chunk2mem (p)));
  /* chunk is less than MINSIZE more than request */
  assert ((long) (sz) - (long) (s) >= 0);
  assert ((long) (sz) - (long) (s + MINSIZE) < 0);
}

/*
   Properties of nonrecycled chunks at the point they are malloced
 */

static void
do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
{
  /* same as recycled case ... */
  do_check_remalloced_chunk (av, p, s);

  /*
     ... plus, must obey implementation invariant that prev_inuse is
     always true of any allocated chunk; i.e., that each allocated
     chunk borders either a previously allocated and still in-use
     chunk, or the base of its memory arena.  This is ensured
     by making all allocations from the `lowest' part of any found
     chunk.  This does not necessarily hold however for chunks
     recycled via fastbins.
   */

  assert (prev_inuse (p));
}


/*
   Properties of malloc_state.

   This may be useful for debugging malloc, as well as detecting user
   programmer errors that somehow write into malloc_state.

   If you are extending or experimenting with this malloc, you can
   probably figure out how to hack this routine to print out or
   display chunk addresses, sizes, bins, and other instrumentation.
 */

static void
do_check_malloc_state (mstate av)
{
  int i;
  mchunkptr p;
  mchunkptr q;
  mbinptr b;
  unsigned int idx;
  INTERNAL_SIZE_T size;
  unsigned long total = 0;
  int max_fast_bin;

  /* internal size_t must be no wider than pointer type */
  assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));

  /* alignment is a power of 2 */
  assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);

  /* Check the arena is initialized. */
  assert (av->top != 0);

  /* No memory has been allocated yet, so doing more tests is not possible.  */
  if (av->top == initial_top (av))
    return;

  /* pagesize is a power of 2 */
  assert (powerof2 (GLRO (dl_pagesize)));

  /* A contiguous main_arena is consistent with sbrk_base.  */
  if (av == &main_arena && contiguous (av))
    assert ((char *) mp_.sbrk_base + av->system_mem ==
            (char *) av->top + chunksize (av->top));

  /* properties of fastbins */

  /* max_fast is in allowed range */
  assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));

  max_fast_bin = fastbin_index (get_max_fast ());

  for (i = 0; i < NFASTBINS; ++i)
    {
      p = fastbin (av, i);

      /* The following test can only be performed for the main arena.
         While mallopt calls malloc_consolidate to get rid of all fast
         bins (especially those larger than the new maximum) this does
         only happen for the main arena.  Trying to do this for any
         other arena would mean those arenas have to be locked and
         malloc_consolidate be called for them.  This is excessive.  And
         even if this is acceptable to somebody it still cannot solve
         the problem completely since if the arena is locked a
         concurrent malloc call might create a new arena which then
         could use the newly invalid fast bins.  */

      /* all bins past max_fast are empty */
      if (av == &main_arena && i > max_fast_bin)
        assert (p == 0);

      while (p != 0)
        {
          /* each chunk claims to be inuse */
          do_check_inuse_chunk (av, p);
          total += chunksize (p);
          /* chunk belongs in this bin */
          assert (fastbin_index (chunksize (p)) == i);
          p = p->fd;
        }
    }

  /* check normal bins */
  for (i = 1; i < NBINS; ++i)
    {
      b = bin_at (av, i);

      /* binmap is accurate (except for bin 1 == unsorted_chunks) */
      if (i >= 2)
        {
          unsigned int binbit = get_binmap (av, i);
          int empty = last (b) == b;
          if (!binbit)
            assert (empty);
          else if (!empty)
            assert (binbit);
        }

      for (p = last (b); p != b; p = p->bk)
        {
          /* each chunk claims to be free */
          do_check_free_chunk (av, p);
          size = chunksize (p);
          total += size;
          if (i >= 2)
            {
              /* chunk belongs in bin */
              idx = bin_index (size);
              assert (idx == i);
              /* lists are sorted */
              assert (p->bk == b ||
                      (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));

              if (!in_smallbin_range (size))
                {
                  if (p->fd_nextsize != NULL)
                    {
                      if (p->fd_nextsize == p)
                        assert (p->bk_nextsize == p);
                      else
                        {
                          if (p->fd_nextsize == first (b))
                            assert (chunksize (p) < chunksize (p->fd_nextsize));
                          else
                            assert (chunksize (p) > chunksize (p->fd_nextsize));

                          if (p == first (b))
                            assert (chunksize (p) > chunksize (p->bk_nextsize));
                          else
                            assert (chunksize (p) < chunksize (p->bk_nextsize));
                        }
                    }
                  else
                    assert (p->bk_nextsize == NULL);
                }
            }
          else if (!in_smallbin_range (size))
            assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
          /* chunk is followed by a legal chain of inuse chunks */
          for (q = next_chunk (p);
               (q != av->top && inuse (q) &&
                (unsigned long) (chunksize (q)) >= MINSIZE);
               q = next_chunk (q))
            do_check_inuse_chunk (av, q);
        }
    }

  /* top chunk is OK */
  check_chunk (av, av->top);
}
#endif


/* ----------------- Support for debugging hooks -------------------- */
#include "hooks.c"


/* ----------- Routines dealing with system allocation -------------- */

/*
   sysmalloc handles malloc cases requiring more memory from the system.
   On entry, it is assumed that av->top does not have enough
   space to service request for nb bytes, thus requiring that av->top
   be extended or replaced.
 */

static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
  mchunkptr old_top;              /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char *old_end;                  /* its end address */

  long size;                      /* arg to first MORECORE or mmap call */
  char *brk;                      /* return value from MORECORE */

  long correction;                /* arg to 2nd MORECORE call */
  char *snd_brk;                  /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char *aligned_brk;              /* aligned offset into brk */

  mchunkptr p;                    /* the allocated/returned chunk */
  mchunkptr remainder;            /* remainder from allocation */
  unsigned long remainder_size;   /* its size */


  size_t pagesize = GLRO (dl_pagesize);
  bool tried_mmap = false;


  /*
     If have mmap, and the request size meets the mmap threshold, and
     the system supports mmap, and there are few enough currently
     allocated mmapped regions, try to directly map this request
     rather than expanding top.
   */

  if (av == NULL
      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
          && (mp_.n_mmaps < mp_.n_mmaps_max)))
    {
      char *mm;           /* return value from mmap call*/

    try_mmap:
      /*
         Round up size to nearest page.  For mmapped chunks, the overhead
         is one SIZE_SZ unit larger than for normal chunks, because there
         is no following chunk whose prev_size field could be used.

         See the front_misalign handling below, for glibc there is no
         need for further alignments unless we have high alignment
         requirements.
       */
      if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
      else
        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
      tried_mmap = true;

      /* Don't try if size wraps around 0 */
      if ((unsigned long) (size) > (unsigned long) (nb))
        {
          mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));

          if (mm != MAP_FAILED)
            {
              /*
                 The offset to the start of the mmapped region is stored
                 in the prev_size field of the chunk.  This allows us to adjust
                 returned start address to meet alignment requirements here
                 and in memalign(), and still be able to compute proper
                 address argument for later munmap in free() and realloc().
               */

              if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
                {
                  /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
                     MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
                     aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
                  assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
                  front_misalign = 0;
                }
              else
                front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
              if (front_misalign > 0)
                {
                  correction = MALLOC_ALIGNMENT - front_misalign;
                  p = (mchunkptr) (mm + correction);
                  set_prev_size (p, correction);
                  set_head (p, (size - correction) | IS_MMAPPED);
                }
              else
                {
                  p = (mchunkptr) mm;
                  set_prev_size (p, 0);
                  set_head (p, size | IS_MMAPPED);
                }

              /* update statistics */

              int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
              atomic_max (&mp_.max_n_mmaps, new);

              unsigned long sum;
              sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
              atomic_max (&mp_.max_mmapped_mem, sum);

              check_chunk (av, p);

              return chunk2mem (p);
            }
        }
    }

  /* There are no usable arenas and mmap also failed.  */
  if (av == NULL)
    return 0;

  /* Record incoming configuration of top */

  old_top = av->top;
  old_size = chunksize (old_top);
  old_end = (char *) (chunk_at_offset (old_top, old_size));

  brk = snd_brk = (char *) (MORECORE_FAILURE);

  /*
     If not the first time through, we require old_size to be
     at least MINSIZE and to have prev_inuse set.
   */

  assert ((old_top == initial_top (av) && old_size == 0) ||
          ((unsigned long) (old_size) >= MINSIZE &&
           prev_inuse (old_top) &&
           ((unsigned long) old_end & (pagesize - 1)) == 0));

  /* Precondition: not enough current space to satisfy nb request */
  assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));


  if (av != &main_arena)
    {
      heap_info *old_heap, *heap;
      size_t old_heap_size;

      /* First try to extend the current heap. */
      old_heap = heap_for_ptr (old_top);
      old_heap_size = old_heap->size;
      if ((long) (MINSIZE + nb - old_size) > 0
          && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
        {
          av->system_mem += old_heap->size - old_heap_size;
          set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
                    | PREV_INUSE);
        }
      else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
        {
          /* Use a newly allocated heap.  */
          heap->ar_ptr = av;
          heap->prev = old_heap;
          av->system_mem += heap->size;
          /* Set up the new top.  */
          top (av) = chunk_at_offset (heap, sizeof (*heap));
          set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);

          /* Setup fencepost and free the old top chunk with a multiple of
             MALLOC_ALIGNMENT in size. */
          /* The fencepost takes at least MINSIZE bytes, because it might
             become the top chunk again later.  Note that a footer is set
             up, too, although the chunk is marked in use. */
          old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
          set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
          if (old_size >= MINSIZE)
            {
              set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
              set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
              set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
              _int_free (av, old_top, 1);
            }
          else
            {
              set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
              set_foot (old_top, (old_size + 2 * SIZE_SZ));
            }
        }
      else if (!tried_mmap)
        /* We can at least try to use mmap memory.  */
        goto try_mmap;
    }
  else     /* av == main_arena */


    { /* Request enough space for nb + pad + overhead */
      size = nb + mp_.top_pad + MINSIZE;

      /*
         If contiguous, we can subtract out existing space that we hope to
         combine with new space.  We add it back later only if
         we don't actually get contiguous space.
       */

      if (contiguous (av))
        size -= old_size;

      /*
         Round to a multiple of page size.
         If MORECORE is not contiguous, this ensures that we only call it
         with whole-page arguments.  And if MORECORE is contiguous and
         this is not first time through, this preserves page-alignment of
         previous calls.  Otherwise, we correct to page-align below.
       */

      size = ALIGN_UP (size, pagesize);

      /*
         Don't try to call MORECORE if argument is so big as to appear
         negative.  Note that since mmap takes size_t arg, it may succeed
         below even if we cannot call MORECORE.
       */

      if (size > 0)
        {
          brk = (char *) (MORECORE (size));
          LIBC_PROBE (memory_sbrk_more, 2, brk, size);
        }

      if (brk != (char *) (MORECORE_FAILURE))
        {
          /* Call the `morecore' hook if necessary.  */
          void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
          if (__builtin_expect (hook != NULL, 0))
            (*hook)();
        }
      else
        {
          /*
             If have mmap, try using it as a backup when MORECORE fails or
             cannot be used.  This is worth doing on systems that have "holes" in
             address space, so sbrk cannot extend to give contiguous space, but
             space is available elsewhere.  Note that we ignore mmap max count
             and threshold limits, since the space will not be used as a
             segregated mmap region.
           */

          /* Cannot merge with old top, so add its size back in */
          if (contiguous (av))
            size = ALIGN_UP (size + old_size, pagesize);

          /* If we are relying on mmap as backup, then use larger units */
          if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
            size = MMAP_AS_MORECORE_SIZE;

          /* Don't try if size wraps around 0 */
          if ((unsigned long) (size) > (unsigned long) (nb))
            {
              char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));

              if (mbrk != MAP_FAILED)
                {
                  /* We do not need, and cannot use, another sbrk call to find end */
                  brk = mbrk;
                  snd_brk = brk + size;

                  /*
                     Record that we no longer have a contiguous sbrk region.
                     After the first time mmap is used as backup, we do not
                     ever rely on contiguous space since this could incorrectly
                     bridge regions.
                   */
                  set_noncontiguous (av);
                }
            }
        }

      if (brk != (char *) (MORECORE_FAILURE))
        {
          if (mp_.sbrk_base == 0)
            mp_.sbrk_base = brk;
          av->system_mem += size;

          /*
             If MORECORE extends previous space, we can likewise extend top size.
           */

          if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
            set_head (old_top, (size + old_size) | PREV_INUSE);

          else if (contiguous (av) && old_size && brk < old_end)
            /* Oops!  Someone else killed our space..  Can't touch anything.  */
            malloc_printerr ("break adjusted to free malloc space");

          /*
             Otherwise, make adjustments:

           * If the first time through or noncontiguous, we need to call sbrk
              just to find out where the end of memory lies.

           * We need to ensure that all returned chunks from malloc will meet
              MALLOC_ALIGNMENT

           * If there was an intervening foreign sbrk, we need to adjust sbrk
              request size to account for fact that we will not be able to
              combine new space with existing space in old_top.

           * Almost all systems internally allocate whole pages at a time, in
              which case we might as well use the whole last page of request.
              So we allocate enough more memory to hit a page boundary now,
              which in turn causes future contiguous calls to page-align.
           */

          else
            {
              front_misalign = 0;
              end_misalign = 0;
              correction = 0;
              aligned_brk = brk;

              /* handle contiguous cases */
              if (contiguous (av))
                {
                  /* Count foreign sbrk as system_mem.  */
                  if (old_size)
                    av->system_mem += brk - old_end;

                  /* Guarantee alignment of first new chunk made from this space */

                  front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
                  if (front_misalign > 0)
                    {
                      /*
                         Skip over some bytes to arrive at an aligned position.
                         We don't need to specially mark these wasted front bytes.
                         They will never be accessed anyway because
                         prev_inuse of av->top (and any chunk created from its start)
                         is always true after initialization.
                       */

                      correction = MALLOC_ALIGNMENT - front_misalign;
                      aligned_brk += correction;
                    }

                  /*
                     If this isn't adjacent to existing space, then we will not
                     be able to merge with old_top space, so must add to 2nd request.
                   */

                  correction += old_size;

                  /* Extend the end address to hit a page boundary */
                  end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
                  correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;

                  assert (correction >= 0);
                  snd_brk = (char *) (MORECORE (correction));

                  /*
                     If can't allocate correction, try to at least find out current
                     brk.  It might be enough to proceed without failing.

                     Note that if second sbrk did NOT fail, we assume that space
                     is contiguous with first sbrk.  This is a safe assumption unless
                     program is multithreaded but doesn't use locks and a foreign sbrk
                     occurred between our first and second calls.
                   */

                  if (snd_brk == (char *) (MORECORE_FAILURE))
                    {
                      correction = 0;
                      snd_brk = (char *) (MORECORE (0));
                    }
                  else
                    {
                      /* Call the `morecore' hook if necessary.  */
                      void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
                      if (__builtin_expect (hook != NULL, 0))
                        (*hook)();
                    }
                }

              /* handle non-contiguous cases */
              else
                {
                  if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
                    /* MORECORE/mmap must correctly align */
                    assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
                  else
                    {
                      front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
                      if (front_misalign > 0)
                        {
                          /*
                             Skip over some bytes to arrive at an aligned position.
                             We don't need to specially mark these wasted front bytes.
                             They will never be accessed anyway because
                             prev_inuse of av->top (and any chunk created from its start)
                             is always true after initialization.
                           */

                          aligned_brk += MALLOC_ALIGNMENT - front_misalign;
                        }
                    }

                  /* Find out current end of memory */
                  if (snd_brk == (char *) (MORECORE_FAILURE))
                    {
                      snd_brk = (char *) (MORECORE (0));
                    }
                }

              /* Adjust top based on results of second sbrk */
              if (snd_brk != (char *) (MORECORE_FAILURE))
                {
                  av->top = (mchunkptr) aligned_brk;
                  set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
                  av->system_mem += correction;

                  /*
                     If not the first time through, we either have a
                     gap due to foreign sbrk or a non-contiguous region.  Insert a
                     double fencepost at old_top to prevent consolidation with space
                     we don't own.  These fenceposts are artificial chunks that are
                     marked as inuse and are in any case too small to use.  We need
                     two to make sizes and alignments work out.
                   */

                  if (old_size != 0)
                    {
                      /*
                         Shrink old_top to insert fenceposts, keeping size a
                         multiple of MALLOC_ALIGNMENT.  We know there is at least
                         enough space in old_top to do this.
                       */
                      old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
                      set_head (old_top, old_size | PREV_INUSE);

                      /*
                         Note that the following assignments completely overwrite
                         old_top when old_size was previously MINSIZE.  This is
                         intentional.  We need the fencepost, even if old_top otherwise gets
                         lost.
                       */
                      set_head (chunk_at_offset (old_top, old_size),
                                (2 * SIZE_SZ) | PREV_INUSE);
                      set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
                                (2 * SIZE_SZ) | PREV_INUSE);

                      /* If possible, release the rest. */
                      if (old_size >= MINSIZE)
                        {
                          _int_free (av, old_top, 1);
                        }
                    }
                }
            }
        }
    } /* if (av != &main_arena) */

  if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
    av->max_system_mem = av->system_mem;
  check_malloc_state (av);

  /* finally, do the allocation */
  p = av->top;
  size = chunksize (p);

  /* check that one of the above allocation paths succeeded */
  if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
    {
      remainder_size = size - nb;
      remainder = chunk_at_offset (p, nb);
      av->top = remainder;
      set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head (remainder, remainder_size | PREV_INUSE);
      check_malloced_chunk (av, p, nb);
      return chunk2mem (p);
    }

  /* catch all failure paths */
  __set_errno (ENOMEM);
  return 0;
}


/*
   systrim is an inverse of sorts to sysmalloc.  It gives memory back
   to the system (via negative arguments to sbrk) if there is unused
   memory at the `high' end of the malloc pool.  It is called
   automatically by free() when top space exceeds the trim
   threshold.  It is also called by the public malloc_trim routine.  It
   returns 1 if it actually released any memory, else 0.
 */

static int
systrim (size_t pad, mstate av)
{
  long top_size;         /* Amount of top-most memory */
  long extra;            /* Amount to release */
  long released;         /* Amount actually released */
  char *current_brk;     /* address returned by pre-check sbrk call */
  char *new_brk;         /* address returned by post-check sbrk call */
  size_t pagesize;
  long top_area;

  pagesize = GLRO (dl_pagesize);
  top_size = chunksize (av->top);

  top_area = top_size - MINSIZE - 1;
  if (top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN (top_area - pad, pagesize);

  if (extra == 0)
    return 0;

  /*
     Only proceed if end of memory is where we last set it.
     This avoids problems if there were foreign sbrk calls.
   */
  current_brk = (char *) (MORECORE (0));
  if (current_brk == (char *) (av->top) + top_size)
    {
      /*
         Attempt to release memory.  We ignore MORECORE return value,
         and instead call again to find out where new end of memory is.
         This avoids problems if first call releases less than we asked,
         or if failure somehow altered brk value.  (We could still
         encounter problems if it altered brk in some very bad way,
         but the only thing we can do is adjust anyway, which will cause
         some downstream failure.)
       */

      MORECORE (-extra);
      /* Call the `morecore' hook if necessary.  */
      void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
      if (__builtin_expect (hook != NULL, 0))
        (*hook)();
      new_brk = (char *) (MORECORE (0));

      LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);

      if (new_brk != (char *) MORECORE_FAILURE)
        {
          released = (long) (current_brk - new_brk);

          if (released != 0)
            {
              /* Success.  Adjust top.  */
              av->system_mem -= released;
              set_head (av->top, (top_size - released) | PREV_INUSE);
              check_malloc_state (av);
              return 1;
            }
        }
    }
  return 0;
}
2816
431c33c0 2817static void
6c8dbf00 2818munmap_chunk (mchunkptr p)
f65fd747 2819{
c0e82f11 2820 size_t pagesize = GLRO (dl_pagesize);
6c8dbf00 2821 INTERNAL_SIZE_T size = chunksize (p);
f65fd747 2822
6c8dbf00 2823 assert (chunk_is_mmapped (p));
8e635611 2824
4cf6c72f
FW
2825 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
2826 main arena. We never free this memory. */
2827 if (DUMPED_MAIN_ARENA_CHUNK (p))
2828 return;
2829
c0e82f11 2830 uintptr_t mem = (uintptr_t) chunk2mem (p);
e9c4fe93
FW
2831 uintptr_t block = (uintptr_t) p - prev_size (p);
2832 size_t total_size = prev_size (p) + size;
8e635611
UD
2833 /* Unfortunately we have to do the compilers job by hand here. Normally
2834 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2835 page size. But gcc does not recognize the optimization possibility
2836 (in the moment at least) so we combine the two values into one before
2837 the bit test. */
c0e82f11
IK
2838 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
2839 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
ac3ed168 2840 malloc_printerr ("munmap_chunk(): invalid pointer");
f65fd747 2841
c6e4925d
OB
2842 atomic_decrement (&mp_.n_mmaps);
2843 atomic_add (&mp_.mmapped_mem, -total_size);
f65fd747 2844
6ef76f3b
UD
2845 /* If munmap failed the process virtual memory address space is in a
2846 bad shape. Just leave the block hanging around, the process will
2847 terminate shortly anyway since not much can be done. */
6c8dbf00 2848 __munmap ((char *) block, total_size);
f65fd747
UD
2849}
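
/* Illustrative sketch, not part of glibc: the combined page-alignment
   test used in munmap_chunk above.  Rather than checking BLOCK and
   TOTAL_SIZE against the page size separately, OR-ing them first lets a
   single AND catch a stray low bit in either value.  Assumes a
   power-of-two page size; all names here are invented for the example. */

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
example_pair_page_aligned (uintptr_t block, size_t total_size,
                           size_t pagesize)
{
  /* Equivalent to (block % pagesize == 0 && total_size % pagesize == 0)
     for a power-of-two pagesize, but with one test instead of two.  */
  return ((block | total_size) & (pagesize - 1)) == 0;
}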
2850
2851#if HAVE_MREMAP
2852
431c33c0 2853static mchunkptr
6c8dbf00 2854mremap_chunk (mchunkptr p, size_t new_size)
f65fd747 2855{
8a35c3fe 2856 size_t pagesize = GLRO (dl_pagesize);
e9c4fe93 2857 INTERNAL_SIZE_T offset = prev_size (p);
6c8dbf00 2858 INTERNAL_SIZE_T size = chunksize (p);
f65fd747
UD
2859 char *cp;
2860
6c8dbf00 2861 assert (chunk_is_mmapped (p));
ebe544bf
IK
2862
2863 uintptr_t block = (uintptr_t) p - offset;
2864 uintptr_t mem = (uintptr_t) chunk2mem(p);
2865 size_t total_size = offset + size;
2866 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
2867 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
2868 malloc_printerr("mremap_chunk(): invalid pointer");
f65fd747
UD
2869
2870 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
8a35c3fe 2871 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
f65fd747 2872
68f3802d 2873 /* No need to remap if the number of pages does not change. */
ebe544bf 2874 if (total_size == new_size)
68f3802d
UD
2875 return p;
2876
ebe544bf 2877 cp = (char *) __mremap ((char *) block, total_size, new_size,
6c8dbf00 2878 MREMAP_MAYMOVE);
f65fd747 2879
6c8dbf00
OB
2880 if (cp == MAP_FAILED)
2881 return 0;
f65fd747 2882
6c8dbf00 2883 p = (mchunkptr) (cp + offset);
f65fd747 2884
6c8dbf00 2885 assert (aligned_OK (chunk2mem (p)));
f65fd747 2886
e9c4fe93 2887 assert (prev_size (p) == offset);
6c8dbf00 2888 set_head (p, (new_size - offset) | IS_MMAPPED);
f65fd747 2889
c6e4925d
OB
2890 INTERNAL_SIZE_T new;
2891 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
6c8dbf00 2892 + new_size - size - offset;
c6e4925d 2893 atomic_max (&mp_.max_mmapped_mem, new);
f65fd747
UD
2894 return p;
2895}
f65fd747
UD
2896#endif /* HAVE_MREMAP */
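
/* Illustrative sketch, not part of glibc: how mremap_chunk rounds the
   requested size up to whole pages.  EXAMPLE_ALIGN_UP is a local
   stand-in for glibc's ALIGN_UP macro and assumes a power-of-two
   alignment. */

#include <stddef.h>

#define EXAMPLE_ALIGN_UP(x, align) (((x) + (align) - 1) & ~((align) - 1))

static size_t
example_round_to_pages (size_t request, size_t pagesize)
{
  /* E.g. 5000 -> 8192 with 4096-byte pages.  */
  return EXAMPLE_ALIGN_UP (request, pagesize);
}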
2897
fa8d436c 2898/*------------------------ Public wrappers. --------------------------------*/
f65fd747 2899
d5c3fafc
DD
2900#if USE_TCACHE
2901
2902/* We overlay this structure on the user-data portion of a chunk when
2903 the chunk is stored in the per-thread cache. */
2904typedef struct tcache_entry
2905{
2906 struct tcache_entry *next;
bcdaad21
DD
2907 /* This field exists to detect double frees. */
2908 struct tcache_perthread_struct *key;
d5c3fafc
DD
2909} tcache_entry;
2910
 2911/* There is one of these for each thread, which contains the
 2912 per-thread cache (hence "tcache_perthread_struct"). Keeping
 2913 overall size low is mildly important. Note that COUNTS and ENTRIES
 2914 are redundant (we could have just counted the linked list each
 2915 time); both are kept for performance reasons. */
2916typedef struct tcache_perthread_struct
2917{
2918 char counts[TCACHE_MAX_BINS];
2919 tcache_entry *entries[TCACHE_MAX_BINS];
2920} tcache_perthread_struct;
2921
1e26d351 2922static __thread bool tcache_shutting_down = false;
d5c3fafc
DD
2923static __thread tcache_perthread_struct *tcache = NULL;
2924
 2925/* Caller must ensure that tc_idx is valid and that there is room
 2926 for more chunks. */
e4dd4ace 2927static __always_inline void
d5c3fafc
DD
2928tcache_put (mchunkptr chunk, size_t tc_idx)
2929{
2930 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
2931 assert (tc_idx < TCACHE_MAX_BINS);
bcdaad21
DD
2932
2933 /* Mark this chunk as "in the tcache" so the test in _int_free will
2934 detect a double free. */
2935 e->key = tcache;
2936
d5c3fafc
DD
2937 e->next = tcache->entries[tc_idx];
2938 tcache->entries[tc_idx] = e;
2939 ++(tcache->counts[tc_idx]);
2940}
2941
 2942/* Caller must ensure that tc_idx is valid and that there are
 2943 chunks available to remove. */
e4dd4ace 2944static __always_inline void *
d5c3fafc
DD
2945tcache_get (size_t tc_idx)
2946{
2947 tcache_entry *e = tcache->entries[tc_idx];
2948 assert (tc_idx < TCACHE_MAX_BINS);
77dc0d86 2949 assert (tcache->counts[tc_idx] > 0);
d5c3fafc
DD
2950 tcache->entries[tc_idx] = e->next;
2951 --(tcache->counts[tc_idx]);
bcdaad21 2952 e->key = NULL;
d5c3fafc
DD
2953 return (void *) e;
2954}
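
/* Illustrative sketch, not part of glibc: a tcache bin is a bounded LIFO
   singly linked list threaded through the chunks' user data, exactly the
   shape tcache_put and tcache_get maintain above.  A toy model with
   invented "toy_" names; real callers check counts/entries first, as
   __libc_malloc does. */

#include <stddef.h>

struct toy_entry { struct toy_entry *next; };

static struct toy_entry *toy_bin;   /* plays entries[tc_idx] */
static int toy_count;               /* plays counts[tc_idx]  */

static void
toy_put (struct toy_entry *e)       /* mirrors tcache_put */
{
  e->next = toy_bin;
  toy_bin = e;
  ++toy_count;
}

static struct toy_entry *
toy_get (void)                      /* mirrors tcache_get */
{
  struct toy_entry *e = toy_bin;
  if (e == NULL)
    return NULL;                    /* empty bin; caller checked counts */
  toy_bin = e->next;
  --toy_count;
  return e;                         /* most recently freed entry first */
}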
2955
0a947e06
FW
2956static void
2957tcache_thread_shutdown (void)
d5c3fafc
DD
2958{
2959 int i;
2960 tcache_perthread_struct *tcache_tmp = tcache;
2961
2962 if (!tcache)
2963 return;
2964
1e26d351 2965 /* Disable the tcache and prevent it from being reinitialized. */
d5c3fafc 2966 tcache = NULL;
1e26d351 2967 tcache_shutting_down = true;
d5c3fafc 2968
1e26d351
CD
2969 /* Free all of the entries and the tcache itself back to the arena
2970 heap for coalescing. */
d5c3fafc
DD
2971 for (i = 0; i < TCACHE_MAX_BINS; ++i)
2972 {
2973 while (tcache_tmp->entries[i])
2974 {
2975 tcache_entry *e = tcache_tmp->entries[i];
2976 tcache_tmp->entries[i] = e->next;
2977 __libc_free (e);
2978 }
2979 }
2980
2981 __libc_free (tcache_tmp);
d5c3fafc 2982}
d5c3fafc
DD
2983
2984static void
2985tcache_init(void)
2986{
2987 mstate ar_ptr;
2988 void *victim = 0;
2989 const size_t bytes = sizeof (tcache_perthread_struct);
2990
2991 if (tcache_shutting_down)
2992 return;
2993
2994 arena_get (ar_ptr, bytes);
2995 victim = _int_malloc (ar_ptr, bytes);
2996 if (!victim && ar_ptr != NULL)
2997 {
2998 ar_ptr = arena_get_retry (ar_ptr, bytes);
2999 victim = _int_malloc (ar_ptr, bytes);
3000 }
3001
3002
3003 if (ar_ptr != NULL)
3004 __libc_lock_unlock (ar_ptr->mutex);
3005
3006 /* In a low memory situation, we may not be able to allocate memory
3007 - in which case, we just keep trying later. However, we
3008 typically do this very early, so either there is sufficient
3009 memory, or there isn't enough memory to do non-trivial
3010 allocations anyway. */
3011 if (victim)
3012 {
3013 tcache = (tcache_perthread_struct *) victim;
3014 memset (tcache, 0, sizeof (tcache_perthread_struct));
3015 }
3016
3017}
3018
0a947e06 3019# define MAYBE_INIT_TCACHE() \
d5c3fafc
DD
3020 if (__glibc_unlikely (tcache == NULL)) \
3021 tcache_init();
3022
0a947e06
FW
3023#else /* !USE_TCACHE */
3024# define MAYBE_INIT_TCACHE()
3025
3026static void
3027tcache_thread_shutdown (void)
3028{
3029 /* Nothing to do if there is no thread cache. */
3030}
3031
3032#endif /* !USE_TCACHE */
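
/* Illustrative sketch, not part of glibc: MAYBE_INIT_TCACHE above is
   ordinary lazy initialization of a thread-local pointer; the unlikely
   hint keeps the common already-initialized path cheap.  Names are
   invented for the example. */

#include <stdlib.h>

static __thread void *example_tls_cache;

static void
example_cache_init (void)
{
  /* May fail under memory pressure; simply retried on the next call.  */
  example_tls_cache = calloc (1, 64);
}

static inline void
example_maybe_init (void)
{
  if (__builtin_expect (example_tls_cache == NULL, 0))
    example_cache_init ();
}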
d5c3fafc 3033
6c8dbf00
OB
3034void *
3035__libc_malloc (size_t bytes)
fa8d436c
UD
3036{
3037 mstate ar_ptr;
22a89187 3038 void *victim;
f65fd747 3039
a222d91a 3040 void *(*hook) (size_t, const void *)
f3eeb3fc 3041 = atomic_forced_read (__malloc_hook);
bfacf1af 3042 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3043 return (*hook)(bytes, RETURN_ADDRESS (0));
d5c3fafc
DD
3044#if USE_TCACHE
 3045 /* _int_free also calls request2size, so be careful not to pad twice. */
34697694
AS
3046 size_t tbytes;
3047 checked_request2size (bytes, tbytes);
d5c3fafc
DD
3048 size_t tc_idx = csize2tidx (tbytes);
3049
3050 MAYBE_INIT_TCACHE ();
3051
3052 DIAG_PUSH_NEEDS_COMMENT;
3053 if (tc_idx < mp_.tcache_bins
3054 /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
3055 && tcache
3056 && tcache->entries[tc_idx] != NULL)
3057 {
3058 return tcache_get (tc_idx);
3059 }
3060 DIAG_POP_NEEDS_COMMENT;
3061#endif
f65fd747 3062
3f6bb8a3
WD
3063 if (SINGLE_THREAD_P)
3064 {
3065 victim = _int_malloc (&main_arena, bytes);
3066 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3067 &main_arena == arena_for_chunk (mem2chunk (victim)));
3068 return victim;
3069 }
3070
94c5a52a 3071 arena_get (ar_ptr, bytes);
425ce2ed 3072
6c8dbf00 3073 victim = _int_malloc (ar_ptr, bytes);
fff94fa2
SP
3074 /* Retry with another arena only if we were able to find a usable arena
3075 before. */
3076 if (!victim && ar_ptr != NULL)
6c8dbf00
OB
3077 {
3078 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3079 ar_ptr = arena_get_retry (ar_ptr, bytes);
fff94fa2 3080 victim = _int_malloc (ar_ptr, bytes);
60f0e64b 3081 }
fff94fa2
SP
3082
3083 if (ar_ptr != NULL)
4bf5f222 3084 __libc_lock_unlock (ar_ptr->mutex);
fff94fa2 3085
6c8dbf00
OB
3086 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3087 ar_ptr == arena_for_chunk (mem2chunk (victim)));
fa8d436c 3088 return victim;
f65fd747 3089}
6c8dbf00 3090libc_hidden_def (__libc_malloc)
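
/* Illustrative sketch, not part of glibc: a typical caller of the
   wrapper above.  On failure glibc's malloc returns NULL and sets errno
   to ENOMEM; malloc (0) may return either NULL or a unique pointer. */

#include <stdlib.h>
#include <string.h>

static char *
example_dup_bytes (const char *src, size_t n)
{
  char *p = malloc (n);
  if (p == NULL)
    return NULL;                /* allocation failed (or n == 0) */
  memcpy (p, src, n);
  return p;
}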
f65fd747 3091
fa8d436c 3092void
6c8dbf00 3093__libc_free (void *mem)
f65fd747 3094{
fa8d436c
UD
3095 mstate ar_ptr;
3096 mchunkptr p; /* chunk corresponding to mem */
3097
a222d91a 3098 void (*hook) (void *, const void *)
f3eeb3fc 3099 = atomic_forced_read (__free_hook);
6c8dbf00
OB
3100 if (__builtin_expect (hook != NULL, 0))
3101 {
3102 (*hook)(mem, RETURN_ADDRESS (0));
3103 return;
3104 }
f65fd747 3105
fa8d436c
UD
3106 if (mem == 0) /* free(0) has no effect */
3107 return;
f65fd747 3108
6c8dbf00 3109 p = mem2chunk (mem);
f65fd747 3110
6c8dbf00
OB
3111 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3112 {
4cf6c72f
FW
3113 /* See if the dynamic brk/mmap threshold needs adjusting.
3114 Dumped fake mmapped chunks do not affect the threshold. */
6c8dbf00 3115 if (!mp_.no_dyn_threshold
e9c4fe93
FW
3116 && chunksize_nomask (p) > mp_.mmap_threshold
3117 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
4cf6c72f 3118 && !DUMPED_MAIN_ARENA_CHUNK (p))
6c8dbf00
OB
3119 {
3120 mp_.mmap_threshold = chunksize (p);
3121 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3122 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3123 mp_.mmap_threshold, mp_.trim_threshold);
3124 }
3125 munmap_chunk (p);
3126 return;
3127 }
f65fd747 3128
d5c3fafc
DD
3129 MAYBE_INIT_TCACHE ();
3130
6c8dbf00
OB
3131 ar_ptr = arena_for_chunk (p);
3132 _int_free (ar_ptr, p, 0);
f65fd747 3133}
3b49edc0 3134libc_hidden_def (__libc_free)
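
/* Illustrative sketch, not part of glibc: since free (NULL) is a no-op
   (the "free(0) has no effect" check above), callers can free and clear
   a pointer unconditionally, which also turns an accidental second free
   into a harmless no-op. */

#include <stdlib.h>

#define EXAMPLE_FREE_AND_NULL(p) \
  do { free (p); (p) = NULL; } while (0)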
f65fd747 3135
6c8dbf00
OB
3136void *
3137__libc_realloc (void *oldmem, size_t bytes)
f65fd747 3138{
fa8d436c 3139 mstate ar_ptr;
6c8dbf00 3140 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 3141
6c8dbf00 3142 void *newp; /* chunk to return */
f65fd747 3143
a222d91a 3144 void *(*hook) (void *, size_t, const void *) =
f3eeb3fc 3145 atomic_forced_read (__realloc_hook);
bfacf1af 3146 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3147 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
f65fd747 3148
fa8d436c 3149#if REALLOC_ZERO_BYTES_FREES
6c8dbf00
OB
3150 if (bytes == 0 && oldmem != NULL)
3151 {
3152 __libc_free (oldmem); return 0;
3153 }
f65fd747 3154#endif
f65fd747 3155
fa8d436c 3156 /* realloc of null is supposed to be same as malloc */
6c8dbf00
OB
3157 if (oldmem == 0)
3158 return __libc_malloc (bytes);
f65fd747 3159
78ac92ad 3160 /* chunk corresponding to oldmem */
6c8dbf00 3161 const mchunkptr oldp = mem2chunk (oldmem);
78ac92ad 3162 /* its size */
6c8dbf00 3163 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
f65fd747 3164
fff94fa2
SP
3165 if (chunk_is_mmapped (oldp))
3166 ar_ptr = NULL;
3167 else
d5c3fafc
DD
3168 {
3169 MAYBE_INIT_TCACHE ();
3170 ar_ptr = arena_for_chunk (oldp);
3171 }
fff94fa2 3172
4cf6c72f
FW
3173 /* Little security check which won't hurt performance: the allocator
 3174 never wraps around at the end of the address space. Therefore
3175 we can exclude some size values which might appear here by
3176 accident or by "design" from some intruder. We need to bypass
3177 this check for dumped fake mmap chunks from the old main arena
3178 because the new malloc may provide additional alignment. */
3179 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3180 || __builtin_expect (misaligned_chunk (oldp), 0))
3181 && !DUMPED_MAIN_ARENA_CHUNK (oldp))
ac3ed168 3182 malloc_printerr ("realloc(): invalid pointer");
dc165f7b 3183
6c8dbf00 3184 checked_request2size (bytes, nb);
f65fd747 3185
6c8dbf00
OB
3186 if (chunk_is_mmapped (oldp))
3187 {
4cf6c72f
FW
3188 /* If this is a faked mmapped chunk from the dumped main arena,
3189 always make a copy (and do not free the old chunk). */
3190 if (DUMPED_MAIN_ARENA_CHUNK (oldp))
3191 {
3192 /* Must alloc, copy, free. */
3193 void *newmem = __libc_malloc (bytes);
3194 if (newmem == 0)
3195 return NULL;
3196 /* Copy as many bytes as are available from the old chunk
1e8a8875
FW
3197 and fit into the new size. NB: The overhead for faked
3198 mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
3199 regular mmapped chunks. */
3200 if (bytes > oldsize - SIZE_SZ)
3201 bytes = oldsize - SIZE_SZ;
4cf6c72f
FW
3202 memcpy (newmem, oldmem, bytes);
3203 return newmem;
3204 }
3205
6c8dbf00 3206 void *newmem;
f65fd747 3207
fa8d436c 3208#if HAVE_MREMAP
6c8dbf00
OB
3209 newp = mremap_chunk (oldp, nb);
3210 if (newp)
3211 return chunk2mem (newp);
f65fd747 3212#endif
6c8dbf00
OB
3213 /* Note the extra SIZE_SZ overhead. */
3214 if (oldsize - SIZE_SZ >= nb)
3215 return oldmem; /* do nothing */
3216
3217 /* Must alloc, copy, free. */
3218 newmem = __libc_malloc (bytes);
3219 if (newmem == 0)
3220 return 0; /* propagate failure */
fa8d436c 3221
6c8dbf00
OB
3222 memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
3223 munmap_chunk (oldp);
3224 return newmem;
3225 }
3226
3f6bb8a3
WD
3227 if (SINGLE_THREAD_P)
3228 {
3229 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3230 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3231 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3232
3233 return newp;
3234 }
3235
4bf5f222 3236 __libc_lock_lock (ar_ptr->mutex);
f65fd747 3237
6c8dbf00 3238 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
f65fd747 3239
4bf5f222 3240 __libc_lock_unlock (ar_ptr->mutex);
6c8dbf00
OB
3241 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3242 ar_ptr == arena_for_chunk (mem2chunk (newp)));
07014fca
UD
3243
3244 if (newp == NULL)
3245 {
3246 /* Try harder to allocate memory in other arenas. */
35fed6f1 3247 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
6c8dbf00 3248 newp = __libc_malloc (bytes);
07014fca 3249 if (newp != NULL)
6c8dbf00
OB
3250 {
3251 memcpy (newp, oldmem, oldsize - SIZE_SZ);
3252 _int_free (ar_ptr, oldp, 0);
3253 }
07014fca
UD
3254 }
3255
fa8d436c
UD
3256 return newp;
3257}
3b49edc0 3258libc_hidden_def (__libc_realloc)
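
/* Illustrative sketch, not part of glibc: realloc returns NULL on
   failure while leaving the old block valid (the retry path above
   preserves OLDMEM), so callers should assign through a temporary to
   avoid leaking the original buffer. */

#include <stdlib.h>

static int
example_grow (void **bufp, size_t new_size)
{
  void *tmp = realloc (*bufp, new_size);
  if (tmp == NULL)
    return -1;                  /* *bufp still points at the old block */
  *bufp = tmp;
  return 0;
}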
f65fd747 3259
6c8dbf00
OB
3260void *
3261__libc_memalign (size_t alignment, size_t bytes)
10ad46bc
OB
3262{
3263 void *address = RETURN_ADDRESS (0);
3264 return _mid_memalign (alignment, bytes, address);
3265}
3266
3267static void *
3268_mid_memalign (size_t alignment, size_t bytes, void *address)
fa8d436c
UD
3269{
3270 mstate ar_ptr;
22a89187 3271 void *p;
f65fd747 3272
a222d91a 3273 void *(*hook) (size_t, size_t, const void *) =
f3eeb3fc 3274 atomic_forced_read (__memalign_hook);
bfacf1af 3275 if (__builtin_expect (hook != NULL, 0))
10ad46bc 3276 return (*hook)(alignment, bytes, address);
f65fd747 3277
10ad46bc 3278 /* If we need less alignment than we give anyway, just relay to malloc. */
6c8dbf00
OB
3279 if (alignment <= MALLOC_ALIGNMENT)
3280 return __libc_malloc (bytes);
1228ed5c 3281
fa8d436c 3282 /* Otherwise, ensure that it is at least a minimum chunk size */
6c8dbf00
OB
3283 if (alignment < MINSIZE)
3284 alignment = MINSIZE;
f65fd747 3285
a56ee40b
WN
3286 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3287 power of 2 and will cause overflow in the check below. */
3288 if (alignment > SIZE_MAX / 2 + 1)
3289 {
3290 __set_errno (EINVAL);
3291 return 0;
3292 }
3293
b73ed247
WN
3294 /* Check for overflow. */
3295 if (bytes > SIZE_MAX - alignment - MINSIZE)
3296 {
3297 __set_errno (ENOMEM);
3298 return 0;
3299 }
3300
10ad46bc
OB
3301
3302 /* Make sure alignment is power of 2. */
6c8dbf00
OB
3303 if (!powerof2 (alignment))
3304 {
3305 size_t a = MALLOC_ALIGNMENT * 2;
3306 while (a < alignment)
3307 a <<= 1;
3308 alignment = a;
3309 }
10ad46bc 3310
3f6bb8a3
WD
3311 if (SINGLE_THREAD_P)
3312 {
3313 p = _int_memalign (&main_arena, alignment, bytes);
3314 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3315 &main_arena == arena_for_chunk (mem2chunk (p)));
3316
3317 return p;
3318 }
3319
6c8dbf00 3320 arena_get (ar_ptr, bytes + alignment + MINSIZE);
6c8dbf00
OB
3321
3322 p = _int_memalign (ar_ptr, alignment, bytes);
fff94fa2 3323 if (!p && ar_ptr != NULL)
6c8dbf00
OB
3324 {
3325 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3326 ar_ptr = arena_get_retry (ar_ptr, bytes);
fff94fa2 3327 p = _int_memalign (ar_ptr, alignment, bytes);
f65fd747 3328 }
fff94fa2
SP
3329
3330 if (ar_ptr != NULL)
4bf5f222 3331 __libc_lock_unlock (ar_ptr->mutex);
fff94fa2 3332
6c8dbf00
OB
3333 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3334 ar_ptr == arena_for_chunk (mem2chunk (p)));
fa8d436c 3335 return p;
f65fd747 3336}
380d7e87 3337/* For ISO C11. */
3b49edc0
UD
3338weak_alias (__libc_memalign, aligned_alloc)
3339libc_hidden_def (__libc_memalign)
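
/* Illustrative sketch, not part of glibc: the loop above promotes a
   non-power-of-two alignment request to the next power of two at or
   above it.  A stand-alone version; 16 stands in for
   MALLOC_ALIGNMENT * 2, and the caller is assumed to have rejected
   alignments above SIZE_MAX / 2 + 1, as _mid_memalign does. */

#include <stddef.h>

static size_t
example_round_alignment (size_t alignment)
{
  size_t a = 16;
  while (a < alignment)
    a <<= 1;
  return a;                     /* e.g. 24 -> 32, 100 -> 128 */
}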
f65fd747 3340
6c8dbf00
OB
3341void *
3342__libc_valloc (size_t bytes)
fa8d436c 3343{
6c8dbf00 3344 if (__malloc_initialized < 0)
fa8d436c 3345 ptmalloc_init ();
8088488d 3346
10ad46bc 3347 void *address = RETURN_ADDRESS (0);
8a35c3fe
CD
3348 size_t pagesize = GLRO (dl_pagesize);
3349 return _mid_memalign (pagesize, bytes, address);
fa8d436c 3350}
f65fd747 3351
6c8dbf00
OB
3352void *
3353__libc_pvalloc (size_t bytes)
fa8d436c 3354{
6c8dbf00 3355 if (__malloc_initialized < 0)
fa8d436c 3356 ptmalloc_init ();
8088488d 3357
10ad46bc 3358 void *address = RETURN_ADDRESS (0);
8a35c3fe
CD
3359 size_t pagesize = GLRO (dl_pagesize);
3360 size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
dba38551 3361
1159a193 3362 /* Check for overflow. */
8a35c3fe 3363 if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
1159a193
WN
3364 {
3365 __set_errno (ENOMEM);
3366 return 0;
3367 }
3368
8a35c3fe 3369 return _mid_memalign (pagesize, rounded_bytes, address);
fa8d436c 3370}
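
/* Illustrative sketch, not part of glibc: the difference between the two
   wrappers above.  Both return page-aligned memory; pvalloc additionally
   rounds the usable size up to a whole page. */

#include <malloc.h>
#include <stdlib.h>

static void
example_page_allocs (void)
{
  void *v = valloc (100);    /* page-aligned, usable size >= 100 */
  void *p = pvalloc (100);   /* page-aligned, usable size >= one page */
  free (v);
  free (p);
}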
f65fd747 3371
6c8dbf00
OB
3372void *
3373__libc_calloc (size_t n, size_t elem_size)
f65fd747 3374{
d6285c9f
CD
3375 mstate av;
3376 mchunkptr oldtop, p;
3377 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
6c8dbf00 3378 void *mem;
d6285c9f
CD
3379 unsigned long clearsize;
3380 unsigned long nclears;
3381 INTERNAL_SIZE_T *d;
0950889b
UD
3382
3383 /* size_t is unsigned so the behavior on overflow is defined. */
3384 bytes = n * elem_size;
d9af917d
UD
3385#define HALF_INTERNAL_SIZE_T \
3386 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
6c8dbf00
OB
3387 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
3388 {
3389 if (elem_size != 0 && bytes / elem_size != n)
3390 {
3391 __set_errno (ENOMEM);
3392 return 0;
3393 }
d9af917d 3394 }
0950889b 3395
a222d91a 3396 void *(*hook) (size_t, const void *) =
f3eeb3fc 3397 atomic_forced_read (__malloc_hook);
6c8dbf00
OB
3398 if (__builtin_expect (hook != NULL, 0))
3399 {
d6285c9f
CD
3400 sz = bytes;
3401 mem = (*hook)(sz, RETURN_ADDRESS (0));
3402 if (mem == 0)
3403 return 0;
3404
3405 return memset (mem, 0, sz);
7799b7b3 3406 }
f65fd747 3407
d6285c9f
CD
3408 sz = bytes;
3409
d5c3fafc
DD
3410 MAYBE_INIT_TCACHE ();
3411
3f6bb8a3
WD
3412 if (SINGLE_THREAD_P)
3413 av = &main_arena;
3414 else
3415 arena_get (av, sz);
3416
fff94fa2
SP
3417 if (av)
3418 {
3419 /* Check if we hand out the top chunk, in which case there may be no
3420 need to clear. */
d6285c9f 3421#if MORECORE_CLEARS
fff94fa2
SP
3422 oldtop = top (av);
3423 oldtopsize = chunksize (top (av));
d6285c9f 3424# if MORECORE_CLEARS < 2
fff94fa2
SP
3425 /* Only newly allocated memory is guaranteed to be cleared. */
3426 if (av == &main_arena &&
3427 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3428 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
d6285c9f 3429# endif
fff94fa2
SP
3430 if (av != &main_arena)
3431 {
3432 heap_info *heap = heap_for_ptr (oldtop);
3433 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3434 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3435 }
3436#endif
3437 }
3438 else
d6285c9f 3439 {
fff94fa2
SP
3440 /* No usable arenas. */
3441 oldtop = 0;
3442 oldtopsize = 0;
d6285c9f 3443 }
d6285c9f
CD
3444 mem = _int_malloc (av, sz);
3445
d6285c9f
CD
3446 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3447 av == arena_for_chunk (mem2chunk (mem)));
3448
3f6bb8a3 3449 if (!SINGLE_THREAD_P)
d6285c9f 3450 {
3f6bb8a3
WD
3451 if (mem == 0 && av != NULL)
3452 {
3453 LIBC_PROBE (memory_calloc_retry, 1, sz);
3454 av = arena_get_retry (av, sz);
3455 mem = _int_malloc (av, sz);
3456 }
fff94fa2 3457
3f6bb8a3
WD
3458 if (av != NULL)
3459 __libc_lock_unlock (av->mutex);
3460 }
fff94fa2
SP
3461
3462 /* Allocation failed even after a retry. */
3463 if (mem == 0)
3464 return 0;
3465
d6285c9f
CD
3466 p = mem2chunk (mem);
3467
 3468 /* Two optional cases in which clearing is not necessary */
3469 if (chunk_is_mmapped (p))
3470 {
3471 if (__builtin_expect (perturb_byte, 0))
3472 return memset (mem, 0, sz);
3473
3474 return mem;
3475 }
3476
3477 csz = chunksize (p);
3478
3479#if MORECORE_CLEARS
3480 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3481 {
3482 /* clear only the bytes from non-freshly-sbrked memory */
3483 csz = oldtopsize;
3484 }
3485#endif
3486
 3487 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3488 contents have an odd number of INTERNAL_SIZE_T-sized words;
3489 minimally 3. */
3490 d = (INTERNAL_SIZE_T *) mem;
3491 clearsize = csz - SIZE_SZ;
3492 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3493 assert (nclears >= 3);
3494
3495 if (nclears > 9)
3496 return memset (d, 0, clearsize);
3497
3498 else
3499 {
3500 *(d + 0) = 0;
3501 *(d + 1) = 0;
3502 *(d + 2) = 0;
3503 if (nclears > 4)
3504 {
3505 *(d + 3) = 0;
3506 *(d + 4) = 0;
3507 if (nclears > 6)
3508 {
3509 *(d + 5) = 0;
3510 *(d + 6) = 0;
3511 if (nclears > 8)
3512 {
3513 *(d + 7) = 0;
3514 *(d + 8) = 0;
3515 }
3516 }
3517 }
3518 }
3519
3520 return mem;
fa8d436c 3521}
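
/* Illustrative sketch, not part of glibc: the HALF_INTERNAL_SIZE_T test
   in __libc_calloc is a cheap filter - N * ELEM_SIZE can only overflow
   when one operand has a bit in the upper half of size_t, and only then
   is the division-based check paid for.  A portable equivalent: */

#include <stdbool.h>
#include <stddef.h>

static bool
example_mul_overflows (size_t n, size_t elem_size)
{
  const size_t half = (size_t) 1 << (8 * sizeof (size_t) / 2);
  if ((n | elem_size) < half)
    return false;               /* both small: product cannot overflow */
  return elem_size != 0 && n > (size_t) -1 / elem_size;
}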
f65fd747 3522
f65fd747 3523/*
6c8dbf00
OB
3524 ------------------------------ malloc ------------------------------
3525 */
f65fd747 3526
6c8dbf00
OB
3527static void *
3528_int_malloc (mstate av, size_t bytes)
f65fd747 3529{
fa8d436c 3530 INTERNAL_SIZE_T nb; /* normalized request size */
6c8dbf00
OB
3531 unsigned int idx; /* associated bin index */
3532 mbinptr bin; /* associated bin */
f65fd747 3533
6c8dbf00 3534 mchunkptr victim; /* inspected/selected chunk */
fa8d436c 3535 INTERNAL_SIZE_T size; /* its size */
6c8dbf00 3536 int victim_index; /* its bin index */
f65fd747 3537
6c8dbf00
OB
3538 mchunkptr remainder; /* remainder from a split */
3539 unsigned long remainder_size; /* its size */
8a4b65b4 3540
6c8dbf00
OB
3541 unsigned int block; /* bit map traverser */
3542 unsigned int bit; /* bit map traverser */
3543 unsigned int map; /* current word of binmap */
8a4b65b4 3544
6c8dbf00
OB
3545 mchunkptr fwd; /* misc temp for linking */
3546 mchunkptr bck; /* misc temp for linking */
8a4b65b4 3547
d5c3fafc
DD
3548#if USE_TCACHE
3549 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3550#endif
3551
fa8d436c 3552 /*
6c8dbf00
OB
3553 Convert request size to internal form by adding SIZE_SZ bytes
3554 overhead plus possibly more to obtain necessary alignment and/or
3555 to obtain a size of at least MINSIZE, the smallest allocatable
3556 size. Also, checked_request2size traps (returning 0) request sizes
3557 that are so large that they wrap around zero when padded and
3558 aligned.
3559 */
f65fd747 3560
6c8dbf00 3561 checked_request2size (bytes, nb);
f65fd747 3562
fff94fa2
SP
3563 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3564 mmap. */
3565 if (__glibc_unlikely (av == NULL))
3566 {
3567 void *p = sysmalloc (nb, av);
3568 if (p != NULL)
3569 alloc_perturb (p, bytes);
3570 return p;
3571 }
3572
fa8d436c 3573 /*
6c8dbf00
OB
3574 If the size qualifies as a fastbin, first check corresponding bin.
3575 This code is safe to execute even if av is not yet initialized, so we
3576 can try it without checking, which saves some time on this fast path.
3577 */
f65fd747 3578
71effcea
FW
3579#define REMOVE_FB(fb, victim, pp) \
3580 do \
3581 { \
3582 victim = pp; \
3583 if (victim == NULL) \
3584 break; \
3585 } \
3586 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
3587 != victim); \
3588
6c8dbf00
OB
3589 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3590 {
3591 idx = fastbin_index (nb);
3592 mfastbinptr *fb = &fastbin (av, idx);
71effcea
FW
3593 mchunkptr pp;
3594 victim = *fb;
3595
905a7725
WD
3596 if (victim != NULL)
3597 {
71effcea
FW
3598 if (SINGLE_THREAD_P)
3599 *fb = victim->fd;
3600 else
3601 REMOVE_FB (fb, pp, victim);
3602 if (__glibc_likely (victim != NULL))
6923f6db 3603 {
71effcea
FW
3604 size_t victim_idx = fastbin_index (chunksize (victim));
3605 if (__builtin_expect (victim_idx != idx, 0))
3606 malloc_printerr ("malloc(): memory corruption (fast)");
3607 check_remalloced_chunk (av, victim, nb);
3608#if USE_TCACHE
3609 /* While we're here, if we see other chunks of the same size,
3610 stash them in the tcache. */
3611 size_t tc_idx = csize2tidx (nb);
3612 if (tcache && tc_idx < mp_.tcache_bins)
d5c3fafc 3613 {
71effcea
FW
3614 mchunkptr tc_victim;
3615
3616 /* While bin not empty and tcache not full, copy chunks. */
3617 while (tcache->counts[tc_idx] < mp_.tcache_count
3618 && (tc_victim = *fb) != NULL)
3619 {
3620 if (SINGLE_THREAD_P)
3621 *fb = tc_victim->fd;
3622 else
3623 {
3624 REMOVE_FB (fb, pp, tc_victim);
3625 if (__glibc_unlikely (tc_victim == NULL))
3626 break;
3627 }
3628 tcache_put (tc_victim, tc_idx);
3629 }
d5c3fafc 3630 }
6923f6db 3631#endif
71effcea
FW
3632 void *p = chunk2mem (victim);
3633 alloc_perturb (p, bytes);
3634 return p;
3635 }
905a7725 3636 }
fa8d436c 3637 }
f65fd747 3638
fa8d436c 3639 /*
6c8dbf00
OB
3640 If a small request, check regular bin. Since these "smallbins"
3641 hold one size each, no searching within bins is necessary.
3642 (For a large request, we need to wait until unsorted chunks are
3643 processed to find best fit. But for small ones, fits are exact
3644 anyway, so we can check now, which is faster.)
3645 */
3646
3647 if (in_smallbin_range (nb))
3648 {
3649 idx = smallbin_index (nb);
3650 bin = bin_at (av, idx);
3651
3652 if ((victim = last (bin)) != bin)
3653 {
3381be5c
WD
3654 bck = victim->bk;
3655 if (__glibc_unlikely (bck->fd != victim))
3656 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3657 set_inuse_bit_at_offset (victim, nb);
3658 bin->bk = bck;
3659 bck->fd = bin;
3660
3661 if (av != &main_arena)
3662 set_non_main_arena (victim);
3663 check_malloced_chunk (av, victim, nb);
d5c3fafc
DD
3664#if USE_TCACHE
3665 /* While we're here, if we see other chunks of the same size,
3666 stash them in the tcache. */
3667 size_t tc_idx = csize2tidx (nb);
3668 if (tcache && tc_idx < mp_.tcache_bins)
3669 {
3670 mchunkptr tc_victim;
3671
3672 /* While bin not empty and tcache not full, copy chunks over. */
3673 while (tcache->counts[tc_idx] < mp_.tcache_count
3674 && (tc_victim = last (bin)) != bin)
3675 {
3676 if (tc_victim != 0)
3677 {
3678 bck = tc_victim->bk;
3679 set_inuse_bit_at_offset (tc_victim, nb);
3680 if (av != &main_arena)
3681 set_non_main_arena (tc_victim);
3682 bin->bk = bck;
3683 bck->fd = bin;
3684
3685 tcache_put (tc_victim, tc_idx);
3686 }
3687 }
3688 }
3689#endif
3381be5c
WD
3690 void *p = chunk2mem (victim);
3691 alloc_perturb (p, bytes);
3692 return p;
6c8dbf00 3693 }
fa8d436c 3694 }
f65fd747 3695
a9177ff5 3696 /*
fa8d436c
UD
3697 If this is a large request, consolidate fastbins before continuing.
3698 While it might look excessive to kill all fastbins before
3699 even seeing if there is space available, this avoids
3700 fragmentation problems normally associated with fastbins.
3701 Also, in practice, programs tend to have runs of either small or
a9177ff5 3702 large requests, but less often mixtures, so consolidation is not
fa8d436c
UD
 3703 invoked all that often in most programs. And the programs in which
 3704 it is called frequently otherwise tend to fragment.
6c8dbf00 3705 */
7799b7b3 3706
6c8dbf00
OB
3707 else
3708 {
3709 idx = largebin_index (nb);
e956075a 3710 if (atomic_load_relaxed (&av->have_fastchunks))
6c8dbf00
OB
3711 malloc_consolidate (av);
3712 }
f65fd747 3713
fa8d436c 3714 /*
6c8dbf00
OB
 3715 Process recently freed or remaindered chunks, taking one only if
 3716 it is an exact fit, or, if this is a small request, if the chunk is the
 3717 remainder from the most recent non-exact fit. Place other traversed chunks in
3718 bins. Note that this step is the only place in any routine where
3719 chunks are placed in bins.
3720
3721 The outer loop here is needed because we might not realize until
3722 near the end of malloc that we should have consolidated, so must
3723 do so and retry. This happens at most once, and only when we would
3724 otherwise need to expand memory to service a "small" request.
3725 */
3726
d5c3fafc
DD
3727#if USE_TCACHE
3728 INTERNAL_SIZE_T tcache_nb = 0;
3729 size_t tc_idx = csize2tidx (nb);
3730 if (tcache && tc_idx < mp_.tcache_bins)
3731 tcache_nb = nb;
3732 int return_cached = 0;
3733
3734 tcache_unsorted_count = 0;
3735#endif
3736
6c8dbf00
OB
3737 for (;; )
3738 {
3739 int iters = 0;
3740 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3741 {
3742 bck = victim->bk;
6c8dbf00 3743 size = chunksize (victim);
b90ddd08
IK
3744 mchunkptr next = chunk_at_offset (victim, size);
3745
3746 if (__glibc_unlikely (size <= 2 * SIZE_SZ)
3747 || __glibc_unlikely (size > av->system_mem))
3748 malloc_printerr ("malloc(): invalid size (unsorted)");
3749 if (__glibc_unlikely (chunksize_nomask (next) < 2 * SIZE_SZ)
3750 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
3751 malloc_printerr ("malloc(): invalid next size (unsorted)");
3752 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
3753 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
3754 if (__glibc_unlikely (bck->fd != victim)
3755 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
3756 malloc_printerr ("malloc(): unsorted double linked list corrupted");
35cfefd9 3757 if (__glibc_unlikely (prev_inuse (next)))
b90ddd08 3758 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
6c8dbf00
OB
3759
3760 /*
3761 If a small request, try to use last remainder if it is the
3762 only chunk in unsorted bin. This helps promote locality for
3763 runs of consecutive small requests. This is the only
3764 exception to best-fit, and applies only when there is
3765 no exact fit for a small chunk.
3766 */
3767
3768 if (in_smallbin_range (nb) &&
3769 bck == unsorted_chunks (av) &&
3770 victim == av->last_remainder &&
3771 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3772 {
3773 /* split and reattach remainder */
3774 remainder_size = size - nb;
3775 remainder = chunk_at_offset (victim, nb);
3776 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3777 av->last_remainder = remainder;
3778 remainder->bk = remainder->fd = unsorted_chunks (av);
3779 if (!in_smallbin_range (remainder_size))
3780 {
3781 remainder->fd_nextsize = NULL;
3782 remainder->bk_nextsize = NULL;
3783 }
3784
3785 set_head (victim, nb | PREV_INUSE |
3786 (av != &main_arena ? NON_MAIN_ARENA : 0));
3787 set_head (remainder, remainder_size | PREV_INUSE);
3788 set_foot (remainder, remainder_size);
3789
3790 check_malloced_chunk (av, victim, nb);
3791 void *p = chunk2mem (victim);
3792 alloc_perturb (p, bytes);
3793 return p;
3794 }
3795
3796 /* remove from unsorted list */
bdc3009b
FG
3797 if (__glibc_unlikely (bck->fd != victim))
3798 malloc_printerr ("malloc(): corrupted unsorted chunks 3");
6c8dbf00
OB
3799 unsorted_chunks (av)->bk = bck;
3800 bck->fd = unsorted_chunks (av);
3801
3802 /* Take now instead of binning if exact fit */
3803
3804 if (size == nb)
3805 {
3806 set_inuse_bit_at_offset (victim, size);
3807 if (av != &main_arena)
e9c4fe93 3808 set_non_main_arena (victim);
d5c3fafc
DD
3809#if USE_TCACHE
3810 /* Fill cache first, return to user only if cache fills.
3811 We may return one of these chunks later. */
3812 if (tcache_nb
3813 && tcache->counts[tc_idx] < mp_.tcache_count)
3814 {
3815 tcache_put (victim, tc_idx);
3816 return_cached = 1;
3817 continue;
3818 }
3819 else
3820 {
3821#endif
6c8dbf00
OB
3822 check_malloced_chunk (av, victim, nb);
3823 void *p = chunk2mem (victim);
3824 alloc_perturb (p, bytes);
3825 return p;
d5c3fafc
DD
3826#if USE_TCACHE
3827 }
3828#endif
6c8dbf00
OB
3829 }
3830
3831 /* place chunk in bin */
3832
3833 if (in_smallbin_range (size))
3834 {
3835 victim_index = smallbin_index (size);
3836 bck = bin_at (av, victim_index);
3837 fwd = bck->fd;
3838 }
3839 else
3840 {
3841 victim_index = largebin_index (size);
3842 bck = bin_at (av, victim_index);
3843 fwd = bck->fd;
3844
3845 /* maintain large bins in sorted order */
3846 if (fwd != bck)
3847 {
3848 /* Or with inuse bit to speed comparisons */
3849 size |= PREV_INUSE;
3850 /* if smaller than smallest, bypass loop below */
e9c4fe93
FW
3851 assert (chunk_main_arena (bck->bk));
3852 if ((unsigned long) (size)
3853 < (unsigned long) chunksize_nomask (bck->bk))
6c8dbf00
OB
3854 {
3855 fwd = bck;
3856 bck = bck->bk;
3857
3858 victim->fd_nextsize = fwd->fd;
3859 victim->bk_nextsize = fwd->fd->bk_nextsize;
3860 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3861 }
3862 else
3863 {
e9c4fe93
FW
3864 assert (chunk_main_arena (fwd));
3865 while ((unsigned long) size < chunksize_nomask (fwd))
6c8dbf00
OB
3866 {
3867 fwd = fwd->fd_nextsize;
e9c4fe93 3868 assert (chunk_main_arena (fwd));
6c8dbf00
OB
3869 }
3870
e9c4fe93
FW
3871 if ((unsigned long) size
3872 == (unsigned long) chunksize_nomask (fwd))
6c8dbf00
OB
3873 /* Always insert in the second position. */
3874 fwd = fwd->fd;
3875 else
3876 {
3877 victim->fd_nextsize = fwd;
3878 victim->bk_nextsize = fwd->bk_nextsize;
3879 fwd->bk_nextsize = victim;
3880 victim->bk_nextsize->fd_nextsize = victim;
3881 }
3882 bck = fwd->bk;
3883 }
3884 }
3885 else
3886 victim->fd_nextsize = victim->bk_nextsize = victim;
3887 }
3888
3889 mark_bin (av, victim_index);
3890 victim->bk = bck;
3891 victim->fd = fwd;
3892 fwd->bk = victim;
3893 bck->fd = victim;
3894
d5c3fafc
DD
3895#if USE_TCACHE
3896 /* If we've processed as many chunks as we're allowed while
3897 filling the cache, return one of the cached ones. */
3898 ++tcache_unsorted_count;
3899 if (return_cached
3900 && mp_.tcache_unsorted_limit > 0
3901 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
3902 {
3903 return tcache_get (tc_idx);
3904 }
3905#endif
3906
6c8dbf00
OB
3907#define MAX_ITERS 10000
3908 if (++iters >= MAX_ITERS)
3909 break;
3910 }
fa8d436c 3911
d5c3fafc
DD
3912#if USE_TCACHE
3913 /* If all the small chunks we found ended up cached, return one now. */
3914 if (return_cached)
3915 {
3916 return tcache_get (tc_idx);
3917 }
3918#endif
3919
a9177ff5 3920 /*
6c8dbf00
OB
3921 If a large request, scan through the chunks of current bin in
3922 sorted order to find smallest that fits. Use the skip list for this.
3923 */
3924
3925 if (!in_smallbin_range (nb))
3926 {
3927 bin = bin_at (av, idx);
3928
3929 /* skip scan if empty or largest chunk is too small */
e9c4fe93
FW
3930 if ((victim = first (bin)) != bin
3931 && (unsigned long) chunksize_nomask (victim)
3932 >= (unsigned long) (nb))
6c8dbf00
OB
3933 {
3934 victim = victim->bk_nextsize;
3935 while (((unsigned long) (size = chunksize (victim)) <
3936 (unsigned long) (nb)))
3937 victim = victim->bk_nextsize;
3938
3939 /* Avoid removing the first entry for a size so that the skip
3940 list does not have to be rerouted. */
e9c4fe93
FW
3941 if (victim != last (bin)
3942 && chunksize_nomask (victim)
3943 == chunksize_nomask (victim->fd))
6c8dbf00
OB
3944 victim = victim->fd;
3945
3946 remainder_size = size - nb;
1ecba1fa 3947 unlink_chunk (av, victim);
6c8dbf00
OB
3948
3949 /* Exhaust */
3950 if (remainder_size < MINSIZE)
3951 {
3952 set_inuse_bit_at_offset (victim, size);
3953 if (av != &main_arena)
e9c4fe93 3954 set_non_main_arena (victim);
6c8dbf00
OB
3955 }
3956 /* Split */
3957 else
3958 {
3959 remainder = chunk_at_offset (victim, nb);
3960 /* We cannot assume the unsorted list is empty and therefore
3961 have to perform a complete insert here. */
3962 bck = unsorted_chunks (av);
3963 fwd = bck->fd;
ac3ed168
FW
3964 if (__glibc_unlikely (fwd->bk != bck))
3965 malloc_printerr ("malloc(): corrupted unsorted chunks");
6c8dbf00
OB
3966 remainder->bk = bck;
3967 remainder->fd = fwd;
3968 bck->fd = remainder;
3969 fwd->bk = remainder;
3970 if (!in_smallbin_range (remainder_size))
3971 {
3972 remainder->fd_nextsize = NULL;
3973 remainder->bk_nextsize = NULL;
3974 }
3975 set_head (victim, nb | PREV_INUSE |
3976 (av != &main_arena ? NON_MAIN_ARENA : 0));
3977 set_head (remainder, remainder_size | PREV_INUSE);
3978 set_foot (remainder, remainder_size);
3979 }
3980 check_malloced_chunk (av, victim, nb);
3981 void *p = chunk2mem (victim);
3982 alloc_perturb (p, bytes);
3983 return p;
3984 }
3985 }
f65fd747 3986
6c8dbf00
OB
3987 /*
3988 Search for a chunk by scanning bins, starting with next largest
3989 bin. This search is strictly by best-fit; i.e., the smallest
3990 (with ties going to approximately the least recently used) chunk
3991 that fits is selected.
3992
3993 The bitmap avoids needing to check that most blocks are nonempty.
3994 The particular case of skipping all bins during warm-up phases
3995 when no chunks have been returned yet is faster than it might look.
3996 */
3997
3998 ++idx;
3999 bin = bin_at (av, idx);
4000 block = idx2block (idx);
4001 map = av->binmap[block];
4002 bit = idx2bit (idx);
4003
4004 for (;; )
4005 {
4006 /* Skip rest of block if there are no more set bits in this block. */
4007 if (bit > map || bit == 0)
4008 {
4009 do
4010 {
4011 if (++block >= BINMAPSIZE) /* out of bins */
4012 goto use_top;
4013 }
4014 while ((map = av->binmap[block]) == 0);
4015
4016 bin = bin_at (av, (block << BINMAPSHIFT));
4017 bit = 1;
4018 }
4019
4020 /* Advance to bin with set bit. There must be one. */
4021 while ((bit & map) == 0)
4022 {
4023 bin = next_bin (bin);
4024 bit <<= 1;
4025 assert (bit != 0);
4026 }
4027
4028 /* Inspect the bin. It is likely to be non-empty */
4029 victim = last (bin);
4030
4031 /* If a false alarm (empty bin), clear the bit. */
4032 if (victim == bin)
4033 {
4034 av->binmap[block] = map &= ~bit; /* Write through */
4035 bin = next_bin (bin);
4036 bit <<= 1;
4037 }
4038
4039 else
4040 {
4041 size = chunksize (victim);
4042
4043 /* We know the first chunk in this bin is big enough to use. */
4044 assert ((unsigned long) (size) >= (unsigned long) (nb));
4045
4046 remainder_size = size - nb;
4047
4048 /* unlink */
1ecba1fa 4049 unlink_chunk (av, victim);
6c8dbf00
OB
4050
4051 /* Exhaust */
4052 if (remainder_size < MINSIZE)
4053 {
4054 set_inuse_bit_at_offset (victim, size);
4055 if (av != &main_arena)
e9c4fe93 4056 set_non_main_arena (victim);
6c8dbf00
OB
4057 }
4058
4059 /* Split */
4060 else
4061 {
4062 remainder = chunk_at_offset (victim, nb);
4063
4064 /* We cannot assume the unsorted list is empty and therefore
4065 have to perform a complete insert here. */
4066 bck = unsorted_chunks (av);
4067 fwd = bck->fd;
ac3ed168
FW
4068 if (__glibc_unlikely (fwd->bk != bck))
4069 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
6c8dbf00
OB
4070 remainder->bk = bck;
4071 remainder->fd = fwd;
4072 bck->fd = remainder;
4073 fwd->bk = remainder;
4074
4075 /* advertise as last remainder */
4076 if (in_smallbin_range (nb))
4077 av->last_remainder = remainder;
4078 if (!in_smallbin_range (remainder_size))
4079 {
4080 remainder->fd_nextsize = NULL;
4081 remainder->bk_nextsize = NULL;
4082 }
4083 set_head (victim, nb | PREV_INUSE |
4084 (av != &main_arena ? NON_MAIN_ARENA : 0));
4085 set_head (remainder, remainder_size | PREV_INUSE);
4086 set_foot (remainder, remainder_size);
4087 }
4088 check_malloced_chunk (av, victim, nb);
4089 void *p = chunk2mem (victim);
4090 alloc_perturb (p, bytes);
4091 return p;
4092 }
4093 }
4094
4095 use_top:
4096 /*
4097 If large enough, split off the chunk bordering the end of memory
4098 (held in av->top). Note that this is in accord with the best-fit
4099 search rule. In effect, av->top is treated as larger (and thus
4100 less well fitting) than any other available chunk since it can
4101 be extended to be as large as necessary (up to system
4102 limitations).
4103
4104 We require that av->top always exists (i.e., has size >=
4105 MINSIZE) after initialization, so if it would otherwise be
4106 exhausted by current request, it is replenished. (The main
4107 reason for ensuring it exists is that we may need MINSIZE space
4108 to put in fenceposts in sysmalloc.)
4109 */
4110
4111 victim = av->top;
4112 size = chunksize (victim);
4113
30a17d8c
PC
4114 if (__glibc_unlikely (size > av->system_mem))
4115 malloc_printerr ("malloc(): corrupted top size");
4116
6c8dbf00
OB
4117 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4118 {
4119 remainder_size = size - nb;
4120 remainder = chunk_at_offset (victim, nb);
4121 av->top = remainder;
4122 set_head (victim, nb | PREV_INUSE |
4123 (av != &main_arena ? NON_MAIN_ARENA : 0));
4124 set_head (remainder, remainder_size | PREV_INUSE);
4125
4126 check_malloced_chunk (av, victim, nb);
4127 void *p = chunk2mem (victim);
4128 alloc_perturb (p, bytes);
4129 return p;
4130 }
4131
4132 /* When we are using atomic ops to free fast chunks we can get
4133 here for all block sizes. */
e956075a 4134 else if (atomic_load_relaxed (&av->have_fastchunks))
6c8dbf00
OB
4135 {
4136 malloc_consolidate (av);
4137 /* restore original bin index */
4138 if (in_smallbin_range (nb))
4139 idx = smallbin_index (nb);
4140 else
4141 idx = largebin_index (nb);
4142 }
f65fd747 4143
6c8dbf00
OB
4144 /*
4145 Otherwise, relay to handle system-dependent cases
4146 */
425ce2ed 4147 else
6c8dbf00
OB
4148 {
4149 void *p = sysmalloc (nb, av);
4150 if (p != NULL)
4151 alloc_perturb (p, bytes);
4152 return p;
4153 }
425ce2ed 4154 }
fa8d436c 4155}
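
/* Illustrative sketch, not part of glibc: the request-to-chunk-size
   conversion described at the top of _int_malloc.  The constants assume
   a common 64-bit configuration (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16,
   MINSIZE == 32); the real code uses checked_request2size, which also
   traps absurdly large requests. */

#include <stddef.h>

static size_t
example_request2size (size_t req)
{
  const size_t size_sz = 8, align_mask = 15, minsize = 32;
  size_t nb = (req + size_sz + align_mask) & ~align_mask;
  return nb < minsize ? minsize : nb;   /* e.g. 24 -> 32, 100 -> 112 */
}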
f65fd747 4156
fa8d436c 4157/*
6c8dbf00
OB
4158 ------------------------------ free ------------------------------
4159 */
f65fd747 4160
78ac92ad 4161static void
6c8dbf00 4162_int_free (mstate av, mchunkptr p, int have_lock)
f65fd747 4163{
fa8d436c 4164 INTERNAL_SIZE_T size; /* its size */
6c8dbf00
OB
4165 mfastbinptr *fb; /* associated fastbin */
4166 mchunkptr nextchunk; /* next contiguous chunk */
fa8d436c 4167 INTERNAL_SIZE_T nextsize; /* its size */
6c8dbf00 4168 int nextinuse; /* true if nextchunk is used */
fa8d436c 4169 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
6c8dbf00
OB
4170 mchunkptr bck; /* misc temp for linking */
4171 mchunkptr fwd; /* misc temp for linking */
fa8d436c 4172
6c8dbf00 4173 size = chunksize (p);
f65fd747 4174
37fa1953
UD
4175 /* Little security check which won't hurt performance: the
 4176 allocator never wraps around at the end of the address space.
4177 Therefore we can exclude some size values which might appear
4178 here by accident or by "design" from some intruder. */
dc165f7b 4179 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
073f560e 4180 || __builtin_expect (misaligned_chunk (p), 0))
ac3ed168 4181 malloc_printerr ("free(): invalid pointer");
347c92e9
L
 4182 /* We know that each chunk is at least MINSIZE bytes in size and a
 4183 multiple of MALLOC_ALIGNMENT. */
a1ffb40e 4184 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
ac3ed168 4185 malloc_printerr ("free(): invalid size");
f65fd747 4186
37fa1953 4187 check_inuse_chunk(av, p);
f65fd747 4188
d5c3fafc
DD
4189#if USE_TCACHE
4190 {
4191 size_t tc_idx = csize2tidx (size);
affec03b 4192 if (tcache != NULL && tc_idx < mp_.tcache_bins)
d5c3fafc 4193 {
affec03b
FW
4194 /* Check to see if it's already in the tcache. */
4195 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4196
4197 /* This test succeeds on double free. However, we don't 100%
4198 trust it (it also matches random payload data at a 1 in
4199 2^<size_t> chance), so verify it's not an unlikely
4200 coincidence before aborting. */
4201 if (__glibc_unlikely (e->key == tcache))
4202 {
4203 tcache_entry *tmp;
4204 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4205 for (tmp = tcache->entries[tc_idx];
4206 tmp;
4207 tmp = tmp->next)
4208 if (tmp == e)
4209 malloc_printerr ("free(): double free detected in tcache 2");
4210 /* If we get here, it was a coincidence. We've wasted a
4211 few cycles, but don't abort. */
4212 }
4213
4214 if (tcache->counts[tc_idx] < mp_.tcache_count)
4215 {
4216 tcache_put (p, tc_idx);
4217 return;
4218 }
d5c3fafc
DD
4219 }
4220 }
4221#endif
4222
37fa1953
UD
4223 /*
4224 If eligible, place chunk on a fastbin so it can be found
4225 and used quickly in malloc.
4226 */
6bf4302e 4227
9bf248c6 4228 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
6bf4302e 4229
37fa1953
UD
4230#if TRIM_FASTBINS
4231 /*
4232 If TRIM_FASTBINS set, don't place chunks
4233 bordering top into fastbins
4234 */
4235 && (chunk_at_offset(p, size) != av->top)
4236#endif
4237 ) {
fa8d436c 4238
e9c4fe93
FW
4239 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4240 <= 2 * SIZE_SZ, 0)
893e6098
UD
4241 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4242 >= av->system_mem, 0))
4243 {
d74e6f6c 4244 bool fail = true;
bec466d9 4245 /* We might not have a lock at this point and concurrent modifications
d74e6f6c
WD
4246 of system_mem might result in a false positive. Redo the test after
4247 getting the lock. */
4248 if (!have_lock)
4249 {
4250 __libc_lock_lock (av->mutex);
4251 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
4252 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4253 __libc_lock_unlock (av->mutex);
4254 }
4255
4256 if (fail)
ac3ed168 4257 malloc_printerr ("free(): invalid next size (fast)");
893e6098
UD
4258 }
4259
e8349efd 4260 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
425ce2ed 4261
e956075a 4262 atomic_store_relaxed (&av->have_fastchunks, true);
90a3055e
UD
4263 unsigned int idx = fastbin_index(size);
4264 fb = &fastbin (av, idx);
425ce2ed 4265
362b47fe 4266 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
71effcea
FW
4267 mchunkptr old = *fb, old2;
4268
4269 if (SINGLE_THREAD_P)
4270 {
4271 /* Check that the top of the bin is not the record we are going to
4272 add (i.e., double free). */
4273 if (__builtin_expect (old == p, 0))
4274 malloc_printerr ("double free or corruption (fasttop)");
4275 p->fd = old;
4276 *fb = p;
4277 }
4278 else
4279 do
4280 {
4281 /* Check that the top of the bin is not the record we are going to
4282 add (i.e., double free). */
4283 if (__builtin_expect (old == p, 0))
4284 malloc_printerr ("double free or corruption (fasttop)");
4285 p->fd = old2 = old;
4286 }
4287 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4288 != old2);
a15d53e2
WD
4289
4290 /* Check that size of fastbin chunk at the top is the same as
4291 size of the chunk that we are adding. We can dereference OLD
4292 only if we have the lock, otherwise it might have already been
4293 allocated again. */
4294 if (have_lock && old != NULL
4295 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
ac3ed168 4296 malloc_printerr ("invalid fastbin entry (free)");
37fa1953 4297 }
f65fd747 4298
37fa1953
UD
4299 /*
4300 Consolidate other non-mmapped chunks as they arrive.
4301 */
fa8d436c 4302
37fa1953 4303 else if (!chunk_is_mmapped(p)) {
a15d53e2
WD
4304
4305 /* If we're single-threaded, don't lock the arena. */
4306 if (SINGLE_THREAD_P)
4307 have_lock = true;
4308
24cffce7 4309 if (!have_lock)
4bf5f222 4310 __libc_lock_lock (av->mutex);
425ce2ed 4311
37fa1953 4312 nextchunk = chunk_at_offset(p, size);
fa8d436c 4313
37fa1953
UD
4314 /* Lightweight tests: check whether the block is already the
4315 top block. */
a1ffb40e 4316 if (__glibc_unlikely (p == av->top))
ac3ed168 4317 malloc_printerr ("double free or corruption (top)");
37fa1953
UD
4318 /* Or whether the next chunk is beyond the boundaries of the arena. */
4319 if (__builtin_expect (contiguous (av)
4320 && (char *) nextchunk
4321 >= ((char *) av->top + chunksize(av->top)), 0))
ac3ed168 4322 malloc_printerr ("double free or corruption (out)");
37fa1953 4323 /* Or whether the block is actually not marked used. */
a1ffb40e 4324 if (__glibc_unlikely (!prev_inuse(nextchunk)))
ac3ed168 4325 malloc_printerr ("double free or corruption (!prev)");
fa8d436c 4326
37fa1953 4327 nextsize = chunksize(nextchunk);
e9c4fe93 4328 if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
893e6098 4329 || __builtin_expect (nextsize >= av->system_mem, 0))
ac3ed168 4330 malloc_printerr ("free(): invalid next size (normal)");
fa8d436c 4331
e8349efd 4332 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
854278df 4333
37fa1953
UD
4334 /* consolidate backward */
4335 if (!prev_inuse(p)) {
e9c4fe93 4336 prevsize = prev_size (p);
37fa1953
UD
4337 size += prevsize;
4338 p = chunk_at_offset(p, -((long) prevsize));
d6db68e6
ME
4339 if (__glibc_unlikely (chunksize(p) != prevsize))
4340 malloc_printerr ("corrupted size vs. prev_size while consolidating");
1ecba1fa 4341 unlink_chunk (av, p);
37fa1953 4342 }
a9177ff5 4343
37fa1953
UD
4344 if (nextchunk != av->top) {
4345 /* get and clear inuse bit */
4346 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4347
4348 /* consolidate forward */
4349 if (!nextinuse) {
1ecba1fa 4350 unlink_chunk (av, nextchunk);
37fa1953
UD
4351 size += nextsize;
4352 } else
4353 clear_inuse_bit_at_offset(nextchunk, 0);
10dc2a90 4354
fa8d436c 4355 /*
37fa1953
UD
4356 Place the chunk in unsorted chunk list. Chunks are
4357 not placed into regular bins until after they have
4358 been given one chance to be used in malloc.
fa8d436c 4359 */
f65fd747 4360
37fa1953
UD
4361 bck = unsorted_chunks(av);
4362 fwd = bck->fd;
a1ffb40e 4363 if (__glibc_unlikely (fwd->bk != bck))
ac3ed168 4364 malloc_printerr ("free(): corrupted unsorted chunks");
37fa1953 4365 p->fd = fwd;
7ecfbd38
UD
4366 p->bk = bck;
4367 if (!in_smallbin_range(size))
4368 {
4369 p->fd_nextsize = NULL;
4370 p->bk_nextsize = NULL;
4371 }
37fa1953
UD
4372 bck->fd = p;
4373 fwd->bk = p;
8a4b65b4 4374
37fa1953
UD
4375 set_head(p, size | PREV_INUSE);
4376 set_foot(p, size);
4377
4378 check_free_chunk(av, p);
4379 }
4380
4381 /*
4382 If the chunk borders the current high end of memory,
4383 consolidate into top
4384 */
4385
4386 else {
4387 size += nextsize;
4388 set_head(p, size | PREV_INUSE);
4389 av->top = p;
4390 check_chunk(av, p);
4391 }
4392
4393 /*
4394 If freeing a large space, consolidate possibly-surrounding
4395 chunks. Then, if the total unused topmost memory exceeds trim
4396 threshold, ask malloc_trim to reduce top.
4397
4398 Unless max_fast is 0, we don't know if there are fastbins
4399 bordering top, so we cannot tell for sure whether threshold
4400 has been reached unless fastbins are consolidated. But we
4401 don't want to consolidate on each free. As a compromise,
4402 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4403 is reached.
4404 */
fa8d436c 4405
37fa1953 4406 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
e956075a 4407 if (atomic_load_relaxed (&av->have_fastchunks))
37fa1953 4408 malloc_consolidate(av);
fa8d436c 4409
37fa1953 4410 if (av == &main_arena) {
a9177ff5 4411#ifndef MORECORE_CANNOT_TRIM
37fa1953
UD
4412 if ((unsigned long)(chunksize(av->top)) >=
4413 (unsigned long)(mp_.trim_threshold))
3b49edc0 4414 systrim(mp_.top_pad, av);
fa8d436c 4415#endif
37fa1953
UD
4416 } else {
4417 /* Always try heap_trim(), even if the top chunk is not
4418 large, because the corresponding heap might go away. */
4419 heap_info *heap = heap_for_ptr(top(av));
fa8d436c 4420
37fa1953
UD
4421 assert(heap->ar_ptr == av);
4422 heap_trim(heap, mp_.top_pad);
fa8d436c 4423 }
fa8d436c 4424 }
10dc2a90 4425
24cffce7 4426 if (!have_lock)
4bf5f222 4427 __libc_lock_unlock (av->mutex);
37fa1953
UD
4428 }
4429 /*
22a89187 4430 If the chunk was allocated via mmap, release via munmap().
37fa1953
UD
4431 */
4432
4433 else {
c120d94d 4434 munmap_chunk (p);
fa8d436c 4435 }
10dc2a90
UD
4436}
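
/* Illustrative sketch, not part of glibc: the e->key double-free test in
   _int_free above.  Entries are stamped with a per-thread marker when
   cached; if a block being freed already carries the stamp, the bin is
   walked to confirm before aborting (the stamp alone could be a payload
   coincidence).  Toy model with invented names: */

#include <stdlib.h>

struct toy_tc_entry { struct toy_tc_entry *next; void *key; };

static struct toy_tc_entry *toy_tc_bin;
static void *toy_tc_marker = &toy_tc_bin;   /* any stable address works */

static void
toy_tc_free (struct toy_tc_entry *e)
{
  if (e->key == toy_tc_marker)              /* probably already cached */
    for (struct toy_tc_entry *t = toy_tc_bin; t != NULL; t = t->next)
      if (t == e)
        abort ();                           /* certain double free */
  e->key = toy_tc_marker;                   /* stamp as "in the cache" */
  e->next = toy_tc_bin;
  toy_tc_bin = e;
}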
4437
fa8d436c
UD
4438/*
4439 ------------------------- malloc_consolidate -------------------------
4440
4441 malloc_consolidate is a specialized version of free() that tears
4442 down chunks held in fastbins. Free itself cannot be used for this
4443 purpose since, among other things, it might place chunks back onto
4444 fastbins. So, instead, we need to use a minor variant of the same
4445 code.
fa8d436c
UD
4446*/
4447
fa8d436c 4448static void malloc_consolidate(mstate av)
10dc2a90 4449{
fa8d436c
UD
4450 mfastbinptr* fb; /* current fastbin being consolidated */
4451 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4452 mchunkptr p; /* current chunk being consolidated */
4453 mchunkptr nextp; /* next chunk to consolidate */
4454 mchunkptr unsorted_bin; /* bin header */
4455 mchunkptr first_unsorted; /* chunk to link to */
4456
4457 /* These have same use as in free() */
4458 mchunkptr nextchunk;
4459 INTERNAL_SIZE_T size;
4460 INTERNAL_SIZE_T nextsize;
4461 INTERNAL_SIZE_T prevsize;
4462 int nextinuse;
10dc2a90 4463
3381be5c 4464 atomic_store_relaxed (&av->have_fastchunks, false);
10dc2a90 4465
3381be5c 4466 unsorted_bin = unsorted_chunks(av);
a9177ff5 4467
3381be5c
WD
4468 /*
4469 Remove each chunk from fast bin and consolidate it, placing it
4470 then in unsorted bin. Among other reasons for doing this,
4471 placing in unsorted bin avoids needing to calculate actual bins
4472 until malloc is sure that chunks aren't immediately going to be
4473 reused anyway.
4474 */
72f90263 4475
3381be5c
WD
4476 maxfb = &fastbin (av, NFASTBINS - 1);
4477 fb = &fastbin (av, 0);
4478 do {
71effcea 4479 p = atomic_exchange_acq (fb, NULL);
3381be5c
WD
4480 if (p != 0) {
4481 do {
249a5895
IK
4482 {
4483 unsigned int idx = fastbin_index (chunksize (p));
4484 if ((&fastbin (av, idx)) != fb)
4485 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4486 }
4487
3381be5c
WD
4488 check_inuse_chunk(av, p);
4489 nextp = p->fd;
4490
4491 /* Slightly streamlined version of consolidation code in free() */
4492 size = chunksize (p);
4493 nextchunk = chunk_at_offset(p, size);
4494 nextsize = chunksize(nextchunk);
4495
4496 if (!prev_inuse(p)) {
4497 prevsize = prev_size (p);
4498 size += prevsize;
4499 p = chunk_at_offset(p, -((long) prevsize));
d6db68e6
ME
4500 if (__glibc_unlikely (chunksize(p) != prevsize))
4501 malloc_printerr ("corrupted size vs. prev_size in fastbins");
1ecba1fa 4502 unlink_chunk (av, p);
3381be5c 4503 }
72f90263 4504
3381be5c
WD
4505 if (nextchunk != av->top) {
4506 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
a9177ff5 4507
3381be5c
WD
4508 if (!nextinuse) {
4509 size += nextsize;
1ecba1fa 4510 unlink_chunk (av, nextchunk);
3381be5c
WD
4511 } else
4512 clear_inuse_bit_at_offset(nextchunk, 0);
a9177ff5 4513
3381be5c
WD
4514 first_unsorted = unsorted_bin->fd;
4515 unsorted_bin->fd = p;
4516 first_unsorted->bk = p;
7ecfbd38 4517
3381be5c
WD
4518 if (!in_smallbin_range (size)) {
4519 p->fd_nextsize = NULL;
4520 p->bk_nextsize = NULL;
72f90263 4521 }
a9177ff5 4522
3381be5c
WD
4523 set_head(p, size | PREV_INUSE);
4524 p->bk = unsorted_bin;
4525 p->fd = first_unsorted;
4526 set_foot(p, size);
4527 }
a9177ff5 4528
3381be5c
WD
4529 else {
4530 size += nextsize;
4531 set_head(p, size | PREV_INUSE);
4532 av->top = p;
4533 }
a9177ff5 4534
3381be5c
WD
4535 } while ( (p = nextp) != 0);
4536
4537 }
4538 } while (fb++ != maxfb);
fa8d436c 4539}
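
/* Illustrative sketch, not part of glibc: malloc_consolidate empties each
   fastbin by atomically swapping its head with NULL (the
   atomic_exchange_acq above), then walks the detached list privately
   while concurrent frees push onto the now-empty bin.  A C11 analogue
   with invented names: */

#include <stdatomic.h>
#include <stddef.h>

struct toy_fast { struct toy_fast *fd; };
static _Atomic (struct toy_fast *) toy_fastbin;

static void
toy_drain (void (*consolidate_one) (struct toy_fast *))
{
  struct toy_fast *p = atomic_exchange (&toy_fastbin, NULL);
  while (p != NULL)
    {
      struct toy_fast *next = p->fd;   /* read before P is recycled */
      consolidate_one (p);
      p = next;
    }
}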

/*
   ------------------------------ realloc ------------------------------
 */

void*
_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
	     INTERNAL_SIZE_T nb)
{
  mchunkptr        newp;            /* chunk to return */
  INTERNAL_SIZE_T  newsize;         /* its size */
  void*            newmem;          /* corresponding user mem */

  mchunkptr        next;            /* next contiguous chunk after oldp */

  mchunkptr        remainder;       /* extra space at end of newp */
  unsigned long    remainder_size;  /* its size */

  /* oldmem size */
  if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
      || __builtin_expect (oldsize >= av->system_mem, 0))
    malloc_printerr ("realloc(): invalid old size");

  check_inuse_chunk (av, oldp);

  /* All callers already filter out mmap'ed chunks.  */
  assert (!chunk_is_mmapped (oldp));

  next = chunk_at_offset (oldp, oldsize);
  INTERNAL_SIZE_T nextsize = chunksize (next);
  if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
      || __builtin_expect (nextsize >= av->system_mem, 0))
    malloc_printerr ("realloc(): invalid next size");

  if ((unsigned long) (oldsize) >= (unsigned long) (nb))
    {
      /* already big enough; split below */
      newp = oldp;
      newsize = oldsize;
    }

  else
    {
      /* Try to expand forward into top */
      if (next == av->top &&
          (unsigned long) (newsize = oldsize + nextsize) >=
          (unsigned long) (nb + MINSIZE))
        {
          set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
          av->top = chunk_at_offset (oldp, nb);
          set_head (av->top, (newsize - nb) | PREV_INUSE);
          check_inuse_chunk (av, oldp);
          return chunk2mem (oldp);
        }

      /* Try to expand forward into next chunk;  split off remainder below */
      else if (next != av->top &&
               !inuse (next) &&
               (unsigned long) (newsize = oldsize + nextsize) >=
               (unsigned long) (nb))
        {
          newp = oldp;
          unlink_chunk (av, next);
        }

      /* allocate, copy, free */
      else
        {
          newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
          if (newmem == 0)
            return 0; /* propagate failure */

          newp = mem2chunk (newmem);
          newsize = chunksize (newp);

          /*
             Avoid copy if newp is next chunk after oldp.
           */
          if (newp == next)
            {
              newsize += oldsize;
              newp = oldp;
            }
          else
            {
              memcpy (newmem, chunk2mem (oldp), oldsize - SIZE_SZ);
              _int_free (av, oldp, 1);
              check_inuse_chunk (av, newp);
              return chunk2mem (newp);
            }
        }
    }

  /* If possible, free extra space in old or extended chunk */

  assert ((unsigned long) (newsize) >= (unsigned long) (nb));

  remainder_size = newsize - nb;

  if (remainder_size < MINSIZE)   /* not enough extra to split off */
    {
      set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_inuse_bit_at_offset (newp, newsize);
    }
  else   /* split remainder */
    {
      remainder = chunk_at_offset (newp, nb);
      set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head (remainder, remainder_size | PREV_INUSE |
                (av != &main_arena ? NON_MAIN_ARENA : 0));
      /* Mark remainder as inuse so free() won't complain */
      set_inuse_bit_at_offset (remainder, remainder_size);
      _int_free (av, remainder, 1);
    }

  check_inuse_chunk (av, newp);
  return chunk2mem (newp);
}
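
/*
   Example (not part of the library): how the strategies above surface
   through the public realloc.  A minimal sketch assuming only ISO C;
   the names "buf" and "grow_buffer" are illustrative.

     #include <stdlib.h>
     #include <string.h>

     int grow_buffer (void)
     {
       char *buf = malloc (64);
       if (buf == NULL)
         return -1;
       memset (buf, 'x', 64);

       // May extend in place (into top or a free next chunk) or
       // allocate-copy-free, as in _int_realloc above.  Keep the old
       // pointer until success is known so a failure does not leak it.
       char *bigger = realloc (buf, 4096);
       if (bigger == NULL)
         {
           free (buf);
           return -1;
         }
       free (bigger);
       return 0;
     }
 */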

/*
   ------------------------------ memalign ------------------------------
 */

static void *
_int_memalign (mstate av, size_t alignment, size_t bytes)
{
  INTERNAL_SIZE_T nb;             /* padded  request size */
  char *m;                        /* memory returned by malloc call */
  mchunkptr p;                    /* corresponding chunk */
  char *brk;                      /* alignment point within p */
  mchunkptr newp;                 /* chunk to return */
  INTERNAL_SIZE_T newsize;        /* its size */
  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
  mchunkptr remainder;            /* spare room at end to split off */
  unsigned long remainder_size;   /* its size */
  INTERNAL_SIZE_T size;



  checked_request2size (bytes, nb);

  /*
     Strategy: find a spot within that chunk that meets the alignment
     request, and then possibly free the leading and trailing space.
   */

  /* Check for overflow.  */
  if (nb > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Call malloc with worst case padding to hit alignment. */

  m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));

  if (m == 0)
    return 0;           /* propagate failure */

  p = mem2chunk (m);

  if ((((unsigned long) (m)) % alignment) != 0)   /* misaligned */
    {
      /*
         Find an aligned spot inside chunk.  Since we need to give back
         leading space in a chunk of at least MINSIZE, if the first
         calculation places us at a spot with less than MINSIZE leader,
         we can move to the next aligned spot -- we've allocated enough
         total room so that this is always possible.
       */
      brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
                                - ((signed long) alignment));
      if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
        brk += alignment;

      newp = (mchunkptr) brk;
      leadsize = brk - (char *) (p);
      newsize = chunksize (p) - leadsize;

      /* For mmapped chunks, just adjust offset */
      if (chunk_is_mmapped (p))
        {
          set_prev_size (newp, prev_size (p) + leadsize);
          set_head (newp, newsize | IS_MMAPPED);
          return chunk2mem (newp);
        }

      /* Otherwise, give back leader, use the rest */
      set_head (newp, newsize | PREV_INUSE |
                (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_inuse_bit_at_offset (newp, newsize);
      set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
      _int_free (av, p, 1);
      p = newp;

      assert (newsize >= nb &&
              (((unsigned long) (chunk2mem (p))) % alignment) == 0);
    }

  /* Also give back spare room at the end */
  if (!chunk_is_mmapped (p))
    {
      size = chunksize (p);
      if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
        {
          remainder_size = size - nb;
          remainder = chunk_at_offset (p, nb);
          set_head (remainder, remainder_size | PREV_INUSE |
                    (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head_size (p, nb);
          _int_free (av, remainder, 1);
        }
    }

  check_inuse_chunk (av, p);
  return chunk2mem (p);
}
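
/*
   Example (not part of the library): the alignment step used above,
   rounding an address up to a power-of-two boundary with a mask.  A
   minimal sketch; "align_up" is an illustrative name.

     #include <stdint.h>

     static inline uintptr_t
     align_up (uintptr_t addr, uintptr_t alignment)
     {
       // Requires alignment to be a power of two; -alignment is the
       // mask with the low bits cleared, as in _int_memalign above.
       return (addr + alignment - 1) & -alignment;
     }

   For instance, align_up (0x1003, 0x10) yields 0x1010.
 */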


/*
   ------------------------------ malloc_trim ------------------------------
 */

static int
mtrim (mstate av, size_t pad)
{
  /* Ensure all blocks are consolidated.  */
  malloc_consolidate (av);

  const size_t ps = GLRO (dl_pagesize);
  int psindex = bin_index (ps);
  const size_t psm1 = ps - 1;

  int result = 0;
  for (int i = 1; i < NBINS; ++i)
    if (i == 1 || i >= psindex)
      {
        mbinptr bin = bin_at (av, i);

        for (mchunkptr p = last (bin); p != bin; p = p->bk)
          {
            INTERNAL_SIZE_T size = chunksize (p);

            if (size > psm1 + sizeof (struct malloc_chunk))
              {
                /* See whether the chunk contains at least one unused page.  */
                char *paligned_mem = (char *) (((uintptr_t) p
                                                + sizeof (struct malloc_chunk)
                                                + psm1) & ~psm1);

                assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
                assert ((char *) p + size > paligned_mem);

                /* This is the size we could potentially free.  */
                size -= paligned_mem - (char *) p;

                if (size > psm1)
                  {
#if MALLOC_DEBUG
                    /* When debugging we simulate destroying the memory
                       content.  */
                    memset (paligned_mem, 0x89, size & ~psm1);
#endif
                    __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);

                    result = 1;
                  }
              }
          }
      }

#ifndef MORECORE_CANNOT_TRIM
  return result | (av == &main_arena ? systrim (pad, av) : 0);

#else
  return result;
#endif
}


int
__malloc_trim (size_t s)
{
  int result = 0;

  if (__malloc_initialized < 0)
    ptmalloc_init ();

  mstate ar_ptr = &main_arena;
  do
    {
      __libc_lock_lock (ar_ptr->mutex);
      result |= mtrim (ar_ptr, s);
      __libc_lock_unlock (ar_ptr->mutex);

      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  return result;
}
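
/*
   Example (not part of the library): releasing freed memory back to
   the system from application code.  A minimal sketch; a pad of 0
   asks for the most aggressive trim.

     #include <malloc.h>
     #include <stdlib.h>

     void release_phase_memory (void *big, size_t keep_pad)
     {
       free (big);
       // Walks every arena (as __malloc_trim above) and returns
       // nonzero if any memory was actually released.
       int released = malloc_trim (keep_pad);
       (void) released;
     }
 */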


/*
   ------------------------- malloc_usable_size -------------------------
 */

static size_t
musable (void *mem)
{
  mchunkptr p;
  if (mem != 0)
    {
      p = mem2chunk (mem);

      if (__builtin_expect (using_malloc_checking == 1, 0))
        return malloc_check_get_size (p);

      if (chunk_is_mmapped (p))
        {
          if (DUMPED_MAIN_ARENA_CHUNK (p))
            return chunksize (p) - SIZE_SZ;
          else
            return chunksize (p) - 2 * SIZE_SZ;
        }
      else if (inuse (p))
        return chunksize (p) - SIZE_SZ;
    }
  return 0;
}


size_t
__malloc_usable_size (void *m)
{
  size_t result;

  result = musable (m);
  return result;
}
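
/*
   Example (not part of the library): malloc_usable_size reports the
   usable bytes of the chunk actually handed out, which may exceed the
   request.  A minimal sketch.

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     void show_slack (void)
     {
       void *p = malloc (5);
       if (p == NULL)
         return;
       // Typically prints a value >= 5, since the chunk is padded up
       // to the minimum size and alignment.
       printf ("usable: %zu\n", malloc_usable_size (p));
       free (p);
     }
 */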

/*
   ------------------------------ mallinfo ------------------------------
   Accumulate malloc statistics for arena AV into M.
 */

static void
int_mallinfo (mstate av, struct mallinfo *m)
{
  size_t i;
  mbinptr b;
  mchunkptr p;
  INTERNAL_SIZE_T avail;
  INTERNAL_SIZE_T fastavail;
  int nblocks;
  int nfastblocks;

  check_malloc_state (av);

  /* Account for top */
  avail = chunksize (av->top);
  nblocks = 1;  /* top always exists */

  /* traverse fastbins */
  nfastblocks = 0;
  fastavail = 0;

  for (i = 0; i < NFASTBINS; ++i)
    {
      for (p = fastbin (av, i); p != 0; p = p->fd)
        {
          ++nfastblocks;
          fastavail += chunksize (p);
        }
    }

  avail += fastavail;

  /* traverse regular bins */
  for (i = 1; i < NBINS; ++i)
    {
      b = bin_at (av, i);
      for (p = last (b); p != b; p = p->bk)
        {
          ++nblocks;
          avail += chunksize (p);
        }
    }

  m->smblks += nfastblocks;
  m->ordblks += nblocks;
  m->fordblks += avail;
  m->uordblks += av->system_mem - avail;
  m->arena += av->system_mem;
  m->fsmblks += fastavail;
  if (av == &main_arena)
    {
      m->hblks = mp_.n_mmaps;
      m->hblkhd = mp_.mmapped_mem;
      m->usmblks = 0;
      m->keepcost = chunksize (av->top);
    }
}


struct mallinfo
__libc_mallinfo (void)
{
  struct mallinfo m;
  mstate ar_ptr;

  if (__malloc_initialized < 0)
    ptmalloc_init ();

  memset (&m, 0, sizeof (m));
  ar_ptr = &main_arena;
  do
    {
      __libc_lock_lock (ar_ptr->mutex);
      int_mallinfo (ar_ptr, &m);
      __libc_lock_unlock (ar_ptr->mutex);

      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  return m;
}
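
/*
   Example (not part of the library): reading the accumulated
   statistics.  A minimal sketch; mallinfo returns the struct by value
   and its fields are plain ints, so very large heaps can overflow
   them.

     #include <malloc.h>
     #include <stdio.h>

     void report (void)
     {
       struct mallinfo mi = mallinfo ();
       printf ("arena (sbrk'd bytes): %d\n", mi.arena);
       printf ("in use (uordblks):    %d\n", mi.uordblks);
       printf ("free (fordblks):      %d\n", mi.fordblks);
       printf ("mmap regions (hblks): %d\n", mi.hblks);
     }
 */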

/*
   ------------------------------ malloc_stats ------------------------------
 */

void
__malloc_stats (void)
{
  int i;
  mstate ar_ptr;
  unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;

  if (__malloc_initialized < 0)
    ptmalloc_init ();
  _IO_flockfile (stderr);
  int old_flags2 = stderr->_flags2;
  stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
  for (i = 0, ar_ptr = &main_arena;; i++)
    {
      struct mallinfo mi;

      memset (&mi, 0, sizeof (mi));
      __libc_lock_lock (ar_ptr->mutex);
      int_mallinfo (ar_ptr, &mi);
      fprintf (stderr, "Arena %d:\n", i);
      fprintf (stderr, "system bytes     = %10u\n", (unsigned int) mi.arena);
      fprintf (stderr, "in use bytes     = %10u\n", (unsigned int) mi.uordblks);
#if MALLOC_DEBUG > 1
      if (i > 0)
        dump_heap (heap_for_ptr (top (ar_ptr)));
#endif
      system_b += mi.arena;
      in_use_b += mi.uordblks;
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  fprintf (stderr, "Total (incl. mmap):\n");
  fprintf (stderr, "system bytes     = %10u\n", system_b);
  fprintf (stderr, "in use bytes     = %10u\n", in_use_b);
  fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
  fprintf (stderr, "max mmap bytes   = %10lu\n",
           (unsigned long) mp_.max_mmapped_mem);
  stderr->_flags2 = old_flags2;
  _IO_funlockfile (stderr);
}
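
/*
   Example (not part of the library): malloc_stats is the quick,
   human-readable counterpart to mallinfo; it takes no arguments and
   writes per-arena and total figures to stderr, as implemented above.

     #include <malloc.h>

     void dump_allocator_state (void)
     {
       malloc_stats ();   // output goes to stderr, not a caller stream
     }
 */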


/*
   ------------------------------ mallopt ------------------------------
 */
static __always_inline int
do_set_trim_threshold (size_t value)
{
  LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
              mp_.no_dyn_threshold);
  mp_.trim_threshold = value;
  mp_.no_dyn_threshold = 1;
  return 1;
}

static __always_inline int
do_set_top_pad (size_t value)
{
  LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
              mp_.no_dyn_threshold);
  mp_.top_pad = value;
  mp_.no_dyn_threshold = 1;
  return 1;
}

static __always_inline int
do_set_mmap_threshold (size_t value)
{
  /* Forbid setting the threshold too high.  */
  if (value <= HEAP_MAX_SIZE / 2)
    {
      LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
                  mp_.no_dyn_threshold);
      mp_.mmap_threshold = value;
      mp_.no_dyn_threshold = 1;
      return 1;
    }
  return 0;
}

static __always_inline int
do_set_mmaps_max (int32_t value)
{
  LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
              mp_.no_dyn_threshold);
  mp_.n_mmaps_max = value;
  mp_.no_dyn_threshold = 1;
  return 1;
}

static __always_inline int
do_set_mallopt_check (int32_t value)
{
  return 1;
}

static __always_inline int
do_set_perturb_byte (int32_t value)
{
  LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
  perturb_byte = value;
  return 1;
}

static __always_inline int
do_set_arena_test (size_t value)
{
  LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
  mp_.arena_test = value;
  return 1;
}

static __always_inline int
do_set_arena_max (size_t value)
{
  LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
  mp_.arena_max = value;
  return 1;
}

#if USE_TCACHE
static __always_inline int
do_set_tcache_max (size_t value)
{
  if (value >= 0 && value <= MAX_TCACHE_SIZE)
    {
      LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
      mp_.tcache_max_bytes = value;
      mp_.tcache_bins = csize2tidx (request2size (value)) + 1;
    }
  return 1;
}

static __always_inline int
do_set_tcache_count (size_t value)
{
  LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
  mp_.tcache_count = value;
  return 1;
}

static __always_inline int
do_set_tcache_unsorted_limit (size_t value)
{
  LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
  mp_.tcache_unsorted_limit = value;
  return 1;
}
#endif
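
/*
   Example (not part of the library): the do_set_tcache_* helpers above
   are reached through the tunables mechanism rather than mallopt.  A
   typical invocation from the shell, assuming a glibc built with
   USE_TCACHE and tunables support:

     GLIBC_TUNABLES=glibc.malloc.tcache_count=0 ./myprog

   which disables the per-thread cache by allowing zero cached chunks
   per bin; glibc.malloc.tcache_max and
   glibc.malloc.tcache_unsorted_limit are set the same way.
 */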

int
__libc_mallopt (int param_number, int value)
{
  mstate av = &main_arena;
  int res = 1;

  if (__malloc_initialized < 0)
    ptmalloc_init ();
  __libc_lock_lock (av->mutex);

  LIBC_PROBE (memory_mallopt, 2, param_number, value);

  /* We must consolidate main arena before changing max_fast
     (see definition of set_max_fast).  */
  malloc_consolidate (av);

  switch (param_number)
    {
    case M_MXFAST:
      if (value >= 0 && value <= MAX_FAST_SIZE)
        {
          LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
          set_max_fast (value);
        }
      else
        res = 0;
      break;

    case M_TRIM_THRESHOLD:
      do_set_trim_threshold (value);
      break;

    case M_TOP_PAD:
      do_set_top_pad (value);
      break;

    case M_MMAP_THRESHOLD:
      res = do_set_mmap_threshold (value);
      break;

    case M_MMAP_MAX:
      do_set_mmaps_max (value);
      break;

    case M_CHECK_ACTION:
      do_set_mallopt_check (value);
      break;

    case M_PERTURB:
      do_set_perturb_byte (value);
      break;

    case M_ARENA_TEST:
      if (value > 0)
        do_set_arena_test (value);
      break;

    case M_ARENA_MAX:
      if (value > 0)
        do_set_arena_max (value);
      break;
    }
  __libc_lock_unlock (av->mutex);
  return res;
}
libc_hidden_def (__libc_mallopt)
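
/*
   Example (not part of the library): tuning through the public mallopt
   entry point.  A minimal sketch; note that setting a threshold
   disables the dynamic mmap/trim threshold adjustment
   (no_dyn_threshold above).

     #include <malloc.h>

     void tune (void)
     {
       // Serve requests of 256 KiB and up with mmap; returns 0 if the
       // value is rejected (e.g. above HEAP_MAX_SIZE / 2).
       if (mallopt (M_MMAP_THRESHOLD, 256 * 1024) == 0)
         return;

       // Fill freed and allocated memory with a pattern derived from
       // this byte, to flush out use-after-free bugs.
       mallopt (M_PERTURB, 0x5a);
     }
 */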


/*
   -------------------- Alternative MORECORE functions --------------------
 */


/*
   General Requirements for MORECORE.

   The MORECORE function must have the following properties:

   If MORECORE_CONTIGUOUS is false:

 * MORECORE must allocate in multiples of pagesize. It will
      only be called with arguments that are multiples of pagesize.

 * MORECORE(0) must return an address that is at least
      MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)

   else (i.e. If MORECORE_CONTIGUOUS is true):

 * Consecutive calls to MORECORE with positive arguments
      return increasing addresses, indicating that space has been
      contiguously extended.

 * MORECORE need not allocate in multiples of pagesize.
      Calls to MORECORE need not have args of multiples of pagesize.

 * MORECORE need not page-align.

   In either case:

 * MORECORE may allocate more memory than requested. (Or even less,
      but this will generally result in a malloc failure.)

 * MORECORE must not allocate memory when given argument zero, but
      instead return one past the end address of memory from previous
      nonzero call. This malloc does NOT call MORECORE(0)
      until at least one call with positive arguments is made, so
      the initial value returned is not important.

 * Even though consecutive calls to MORECORE need not return contiguous
      addresses, it must be OK for malloc'ed chunks to span multiple
      regions in those cases where they do happen to be contiguous.

 * MORECORE need not handle negative arguments -- it may instead
      just return MORECORE_FAILURE when given negative arguments.
      Negative arguments are always multiples of pagesize. MORECORE
      must not misinterpret negative args as large positive unsigned
      args. You can suppress all such calls from even occurring by
      defining MORECORE_CANNOT_TRIM.

   There is some variation across systems about the type of the
   argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
   actually be size_t, because sbrk supports negative args, so it is
   normally the signed type of the same width as size_t (sometimes
   declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
   matter though. Internally, we use "long" as arguments, which should
   work across all reasonable possibilities.

   Additionally, if MORECORE ever returns failure for a positive
   request, then mmap is used as a noncontiguous system allocator. This
   is a useful backup strategy for systems with holes in address spaces
   -- in this case sbrk cannot contiguously expand the heap, but mmap
   may be able to map noncontiguous space.

   If you'd like mmap to ALWAYS be used, you can define MORECORE to be
   a function that always returns MORECORE_FAILURE.

   If you are using this malloc with something other than sbrk (or its
   emulation) to supply memory regions, you probably want to set
   MORECORE_CONTIGUOUS as false. As an example, here is a custom
   allocator kindly contributed for pre-OSX macOS. It uses virtually
   but not necessarily physically contiguous non-paged memory (locked
   in, present and won't get swapped out). You can use it by
   uncommenting this section, adding some #includes, and setting up the
   appropriate defines above:

 *#define MORECORE osMoreCore
 *#define MORECORE_CONTIGUOUS 0

   There is also a shutdown routine that should somehow be called for
   cleanup upon program exit.

 *#define MAX_POOL_ENTRIES 100
 *#define MINIMUM_MORECORE_SIZE  (64 * 1024)
   static int next_os_pool;
   void *our_os_pools[MAX_POOL_ENTRIES];

   void *osMoreCore(int size)
   {
     void *ptr = 0;
     static void *sbrk_top = 0;

     if (size > 0)
       {
         if (size < MINIMUM_MORECORE_SIZE)
           size = MINIMUM_MORECORE_SIZE;
         if (CurrentExecutionLevel() == kTaskLevel)
           ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
         if (ptr == 0)
           {
             return (void *) MORECORE_FAILURE;
           }
         // save ptrs so they can be freed during cleanup
         our_os_pools[next_os_pool] = ptr;
         next_os_pool++;
         ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
         sbrk_top = (char *) ptr + size;
         return ptr;
       }
     else if (size < 0)
       {
         // we don't currently support shrink behavior
         return (void *) MORECORE_FAILURE;
       }
     else
       {
         return sbrk_top;
       }
   }

   // cleanup any allocated memory pools
   // called as last thing before shutting down driver

   void osCleanupMem(void)
   {
     void **ptr;

     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
       if (*ptr)
         {
           PoolDeallocate(*ptr);
           *ptr = 0;
         }
   }

 */
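
/*
   Example (not part of the library): as noted above, defining MORECORE
   to a function that always fails forces this malloc to fall back to
   mmap for all system allocation.  A minimal sketch under that
   assumption; "failMoreCore" is an illustrative name.

     void *failMoreCore (ptrdiff_t size)
     {
       (void) size;                      // never grants memory
       return (void *) MORECORE_FAILURE;
     }

     #define MORECORE failMoreCore
     #define MORECORE_CONTIGUOUS 0
 */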


/* Helper code.  */

extern char **__libc_argv attribute_hidden;

static void
malloc_printerr (const char *str)
{
  __libc_message (do_abort, "%s\n", str);
  __builtin_unreachable ();
}

/* We need a wrapper function for one of the additions of POSIX.  */
int
__posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* Test whether the SIZE argument is valid.  It must be a power of
     two multiple of sizeof (void *).  */
  if (alignment % sizeof (void *) != 0
      || !powerof2 (alignment / sizeof (void *))
      || alignment == 0)
    return EINVAL;


  void *address = RETURN_ADDRESS (0);
  mem = _mid_memalign (alignment, size, address);

  if (mem != NULL)
    {
      *memptr = mem;
      return 0;
    }

  return ENOMEM;
}
weak_alias (__posix_memalign, posix_memalign)
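
/*
   Example (not part of the library): the POSIX calling convention
   wrapped above -- errors come back as return values rather than
   through errno, and the alignment must be a power-of-two multiple of
   sizeof (void *).  A minimal sketch.

     #include <stdlib.h>

     void *aligned_block (size_t size)
     {
       void *mem = NULL;
       // 64 is a power-of-two multiple of sizeof (void *), so it
       // passes the validity test in __posix_memalign.
       if (posix_memalign (&mem, 64, size) != 0)
         return NULL;   // EINVAL or ENOMEM
       return mem;      // free () it as usual
     }
 */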


int
__malloc_info (int options, FILE *fp)
{
  /* For now, at least.  */
  if (options != 0)
    return EINVAL;

  int n = 0;
  size_t total_nblocks = 0;
  size_t total_nfastblocks = 0;
  size_t total_avail = 0;
  size_t total_fastavail = 0;
  size_t total_system = 0;
  size_t total_max_system = 0;
  size_t total_aspace = 0;
  size_t total_aspace_mprotect = 0;



  if (__malloc_initialized < 0)
    ptmalloc_init ();

  fputs ("<malloc version=\"1\">\n", fp);

  /* Iterate over all arenas currently in use.  */
  mstate ar_ptr = &main_arena;
  do
    {
      fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);

      size_t nblocks = 0;
      size_t nfastblocks = 0;
      size_t avail = 0;
      size_t fastavail = 0;
      struct
      {
        size_t from;
        size_t to;
        size_t total;
        size_t count;
      } sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))

      __libc_lock_lock (ar_ptr->mutex);

      for (size_t i = 0; i < NFASTBINS; ++i)
        {
          mchunkptr p = fastbin (ar_ptr, i);
          if (p != NULL)
            {
              size_t nthissize = 0;
              size_t thissize = chunksize (p);

              while (p != NULL)
                {
                  ++nthissize;
                  p = p->fd;
                }

              fastavail += nthissize * thissize;
              nfastblocks += nthissize;
              sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
              sizes[i].to = thissize;
              sizes[i].count = nthissize;
            }
          else
            sizes[i].from = sizes[i].to = sizes[i].count = 0;

          sizes[i].total = sizes[i].count * sizes[i].to;
        }


      mbinptr bin;
      struct malloc_chunk *r;

      for (size_t i = 1; i < NBINS; ++i)
        {
          bin = bin_at (ar_ptr, i);
          r = bin->fd;
          sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
          sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
            = sizes[NFASTBINS - 1 + i].count = 0;

          if (r != NULL)
            while (r != bin)
              {
                size_t r_size = chunksize_nomask (r);
                ++sizes[NFASTBINS - 1 + i].count;
                sizes[NFASTBINS - 1 + i].total += r_size;
                sizes[NFASTBINS - 1 + i].from
                  = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
                sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
                                                   r_size);

                r = r->fd;
              }

          if (sizes[NFASTBINS - 1 + i].count == 0)
            sizes[NFASTBINS - 1 + i].from = 0;
          nblocks += sizes[NFASTBINS - 1 + i].count;
          avail += sizes[NFASTBINS - 1 + i].total;
        }

      size_t heap_size = 0;
      size_t heap_mprotect_size = 0;
      size_t heap_count = 0;
      if (ar_ptr != &main_arena)
        {
          /* Iterate over the arena heaps from back to front.  */
          heap_info *heap = heap_for_ptr (top (ar_ptr));
          do
            {
              heap_size += heap->size;
              heap_mprotect_size += heap->mprotect_size;
              heap = heap->prev;
              ++heap_count;
            }
          while (heap != NULL);
        }

      __libc_lock_unlock (ar_ptr->mutex);

      total_nfastblocks += nfastblocks;
      total_fastavail += fastavail;

      total_nblocks += nblocks;
      total_avail += avail;

      for (size_t i = 0; i < nsizes; ++i)
        if (sizes[i].count != 0 && i != NFASTBINS)
          fprintf (fp, "\
  <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                   sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);

      if (sizes[NFASTBINS].count != 0)
        fprintf (fp, "\
  <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
                 sizes[NFASTBINS].total, sizes[NFASTBINS].count);

      total_system += ar_ptr->system_mem;
      total_max_system += ar_ptr->max_system_mem;

      fprintf (fp,
               "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
               "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
               "<system type=\"current\" size=\"%zu\"/>\n"
               "<system type=\"max\" size=\"%zu\"/>\n",
               nfastblocks, fastavail, nblocks, avail,
               ar_ptr->system_mem, ar_ptr->max_system_mem);

      if (ar_ptr != &main_arena)
        {
          fprintf (fp,
                   "<aspace type=\"total\" size=\"%zu\"/>\n"
                   "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
                   "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
                   heap_size, heap_mprotect_size, heap_count);
          total_aspace += heap_size;
          total_aspace_mprotect += heap_mprotect_size;
        }
      else
        {
          fprintf (fp,
                   "<aspace type=\"total\" size=\"%zu\"/>\n"
                   "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
                   ar_ptr->system_mem, ar_ptr->system_mem);
          total_aspace += ar_ptr->system_mem;
          total_aspace_mprotect += ar_ptr->system_mem;
        }

      fputs ("</heap>\n", fp);
      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  fprintf (fp,
           "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
           "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
           "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
           "<system type=\"current\" size=\"%zu\"/>\n"
           "<system type=\"max\" size=\"%zu\"/>\n"
           "<aspace type=\"total\" size=\"%zu\"/>\n"
           "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
           "</malloc>\n",
           total_nfastblocks, total_fastavail, total_nblocks, total_avail,
           mp_.n_mmaps, mp_.mmapped_mem,
           total_system, total_max_system,
           total_aspace, total_aspace_mprotect);

  return 0;
}
weak_alias (__malloc_info, malloc_info)
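
/*
   Example (not part of the library): emitting the XML document
   produced above.  A minimal sketch; options must be 0 or the call
   fails with a nonzero return value.

     #include <malloc.h>
     #include <stdio.h>

     void dump_xml (void)
     {
       if (malloc_info (0, stdout) != 0)
         fprintf (stderr, "malloc_info failed\n");
     }
 */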


strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
strong_alias (__libc_memalign, __memalign)
weak_alias (__libc_memalign, memalign)
strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
strong_alias (__libc_mallinfo, __mallinfo)
weak_alias (__libc_mallinfo, mallinfo)
strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)

weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
#endif

/* ------------------------------------------------------------
   History:

   [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]

 */
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */