/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 1996-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>
   and Doug Lea <dl@cs.oswego.edu>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

/*
  This is a version (aka ptmalloc2) of malloc/free/realloc written by
  Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.

  There have been substantial changes made after the integration into
  glibc in all parts of the code.  Do not look for much commonality
  with the ptmalloc2 version.

* Version ptmalloc2-20011215
  based on:
  VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)

* Quickstart

  In order to compile this implementation, a Makefile is provided with
  the ptmalloc2 distribution, which has pre-defined targets for some
  popular systems (e.g. "make posix" for Posix threads).  All that is
  typically required with regard to compiler flags is the selection of
  the thread package via defining one out of USE_PTHREADS, USE_THR or
  USE_SPROC.  Check the thread-m.h file for what effects this has.
  Many/most systems will additionally require USE_TSD_DATA_HACK to be
  defined, so this is the default for "make posix".

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written.  However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator for malloc-intensive programs.

  The main properties of the algorithms are:
  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
    with ties normally decided via FIFO (i.e. least recently used).
  * For small (<= 64 bytes by default) requests, it is a caching
    allocator, that maintains pools of quickly recycled chunks.
  * In between, and for combinations of large and small requests, it does
    the best it can trying to meet both goals at once.
  * For very large requests (>= 128KB by default), it relies on system
    memory mapping facilities, if supported.

  For a longer but slightly out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

  You may already by default be using a C library containing a malloc
  that is based on some version of this malloc (for example in
  linux).  You might still want to use the one in this file in order to
  customize settings or to avoid overheads associated with library
  versions.

* Contents, described in more detail in "description of public routines" below.

  Standard (ANSI/SVID/...)  functions:
    malloc(size_t n);
    calloc(size_t n_elements, size_t element_size);
    free(void* p);
    realloc(void* p, size_t n);
    memalign(size_t alignment, size_t n);
    valloc(size_t n);
    mallinfo()
    mallopt(int parameter_number, int parameter_value)

  Additional functions:
    independent_calloc(size_t n_elements, size_t size, void* chunks[]);
    independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
    pvalloc(size_t n);
    malloc_trim(size_t pad);
    malloc_usable_size(void* p);
    malloc_stats();

* Vital statistics:

  Supported pointer representation:       4 or 8 bytes
  Supported size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.
       You can adjust this by defining INTERNAL_SIZE_T

  Alignment:                              2 * sizeof(size_t) (default)
       (i.e., 8 byte alignment with 4byte size_t).  This suffices for
       nearly all current machines and C compilers.  However, you can
       define MALLOC_ALIGNMENT to be wider than this if necessary.

  Minimum overhead per allocated chunk:   4 or 8 bytes
       Each malloced chunk has a hidden word of overhead holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field and 8 (16) bytes for
       free list pointers.  Thus, the minimum allocatable size is
       16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

       The maximum overhead wastage (i.e., number of extra bytes
       allocated than were requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is 2 *
       sizeof(size_t) bytes plus the remainder from a system page (the
       minimal mmap unit); typically 4096 or 8192 bytes.

  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
                           8-byte size_t: 2^64 minus about two pages

       It is assumed that (possibly signed) size_t values suffice to
       represent chunk sizes.  `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type.  The ISO C standard says that it must be
       unsigned, but a few systems are known not to adhere to this.
       Additionally, even when size_t is unsigned, sbrk (which is by
       default used to obtain memory from system) accepts signed
       arguments, and may not be able to handle size_t-wide arguments
       with negative sign bit.  Generally, values that would
       appear as negative after accounting for overhead and alignment
       are supported only via mmap(), which does not have this
       limitation.

       Requests for sizes outside the allowed range will perform an
       optional failure action and then return null.  (Requests may
       also fail because a system is out of memory.)

  Thread-safety: thread-safe

  Compliance: I believe it is compliant with the 1997 Single Unix Specification
       Also SVID/XPG, ANSI C, and probably others as well.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below.  It has been tested most extensively on Solaris and Linux.
    People also report using it in stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  It is not
    at all modular.  (Sorry!)  It uses a lot of macros.  To be at all
    usable, this code should be compiled using an optimizing compiler
    (for example gcc -O3) that can simplify expressions and control
    paths.  (FAQ: some macros import variables as arguments rather than
    declare locals because people reported that some debuggers
    otherwise get confused.)

    OPTION                     DEFAULT VALUE

    Compilation Environment options:

    HAVE_MREMAP                0

    Changing default word sizes:

    INTERNAL_SIZE_T            size_t

    Configuration and functionality options:

    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
    USE_MALLOC_LOCK            NOT defined
    MALLOC_DEBUG               NOT defined
    REALLOC_ZERO_BYTES_FREES   1
    TRIM_FASTBINS              0

    Options for customizing MORECORE:

    MORECORE                   sbrk
    MORECORE_FAILURE           -1
    MORECORE_CONTIGUOUS        1
    MORECORE_CANNOT_TRIM       NOT defined
    MORECORE_CLEARS            1
    MMAP_AS_MORECORE_SIZE      (1024 * 1024)

    Tuning options that are also dynamically changeable via mallopt:

    DEFAULT_MXFAST             64 (for 32bit), 128 (for 64bit)
    DEFAULT_TRIM_THRESHOLD     128 * 1024
    DEFAULT_TOP_PAD            0
    DEFAULT_MMAP_THRESHOLD     128 * 1024
    DEFAULT_MMAP_MAX           65536

    There are several other #defined constants and macros that you
    probably don't want to touch unless you are extending or adapting malloc.  */

/*
  void* is the pointer type that malloc should say it returns
*/

#ifndef void
#define void      void
#endif /*void*/

#include <stddef.h>   /* for size_t */
#include <stdlib.h>   /* for getenv(), abort() */
#include <unistd.h>   /* for __libc_enable_secure */

#include <atomic.h>
#include <_itoa.h>
#include <bits/wordsize.h>
#include <sys/sysinfo.h>

#include <ldsodefs.h>

#include <unistd.h>
#include <stdio.h>    /* needed for malloc_stats */
#include <errno.h>
#include <assert.h>

#include <shlib-compat.h>

/* For uintptr_t.  */
#include <stdint.h>

/* For va_arg, va_start, va_end.  */
#include <stdarg.h>

/* For MIN, MAX, powerof2.  */
#include <sys/param.h>

/* For ALIGN_UP et. al.  */
#include <libc-pointer-arith.h>

/* For DIAG_PUSH/POP_NEEDS_COMMENT et al.  */
#include <libc-diag.h>

#include <malloc/malloc-internal.h>

/* For SINGLE_THREAD_P.  */
#include <sysdep-cancel.h>

/*
  Debugging:

  Because freed chunks may be overwritten with bookkeeping fields, this
  malloc will often die when freed memory is overwritten by user
  programs.  This can be very effective (albeit in an annoying way)
  in helping track down dangling pointers.

  If you compile with -DMALLOC_DEBUG, a number of assertion checks are
  enabled that will catch more memory errors.  You probably won't be
  able to make much sense of the actual assertion errors, but they
  should help you locate incorrectly overwritten memory.  The checking
  is fairly extensive, and will slow down execution
  noticeably.  Calling malloc_stats or mallinfo with MALLOC_DEBUG set
  will attempt to check every non-mmapped allocated and free chunk in
  the course of computing the summaries.  (By nature, mmapped regions
  cannot be checked very much automatically.)

  Setting MALLOC_DEBUG may also be helpful if you are trying to modify
  this code.  The assertions in the check routines spell out in more
  detail the assumptions and invariants underlying the algorithms.

  Setting MALLOC_DEBUG does NOT provide an automated mechanism for
  checking that all accesses to malloced memory stay within their
  bounds.  However, there are several add-ons and adaptations of this
  or other mallocs available that do this.
*/

#ifndef MALLOC_DEBUG
#define MALLOC_DEBUG 0
#endif

#ifndef NDEBUG
# define __assert_fail(assertion, file, line, function)   \
         __malloc_assert(assertion, file, line, function)

extern const char *__progname;

static void
__malloc_assert (const char *assertion, const char *file, unsigned int line,
                 const char *function)
{
  (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
                     __progname, __progname[0] ? ": " : "",
                     file, line,
                     function ? function : "", function ? ": " : "",
                     assertion);
  fflush (stderr);
  abort ();
}
#endif

#if USE_TCACHE
/* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
# define TCACHE_MAX_BINS	64
# define MAX_TCACHE_SIZE	tidx2usize (TCACHE_MAX_BINS-1)

/* Only used to pre-fill the tunables.  */
# define tidx2usize(idx)	(((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)

/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))

/* With rounding and alignment, the bins are...
   idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
   idx 1   bytes 25..40 or 13..20
   idx 2   bytes 41..56 or 21..28
   etc.  */

/* This is another arbitrary limit, which tunables can change.  Each
   tcache bin will hold at most this number of chunks.  */
# define TCACHE_FILL_COUNT 7

/* Maximum chunks in tcache bins for tunables.  This value must fit the range
   of tcache->counts[] entries, else they may overflow.  */
# define MAX_TCACHE_COUNT UINT16_MAX
#endif


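/* Illustrative sketch, not part of the original source: how the macros
   above map sizes on a common 64-bit configuration, where SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16 and MINSIZE == 32.  The numbers restate the
   bin table given above.

     tidx2usize (0)  ==  0 * 16 + 32 - 8  ==  24
     csize2tidx (32) == (32 - 32 + 15) / 16 == 0
     csize2tidx (48) == (48 - 32 + 15) / 16 == 1
     usize2tidx (25) == csize2tidx (request2size (25)) == csize2tidx (48) == 1

   so a 25-byte user request lands in tcache bin 1, matching the
   "idx 1   bytes 25..40" row above.  */
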
/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  This is required by the C standard.  Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/

#ifndef REALLOC_ZERO_BYTES_FREES
#define REALLOC_ZERO_BYTES_FREES 1
#endif

/*
  TRIM_FASTBINS controls whether free() of a very small chunk can
  immediately lead to trimming.  Setting to true (1) can reduce memory
  footprint, but will almost always slow down programs that use a lot
  of small chunks.

  Define this only if you are willing to give up some speed to more
  aggressively reduce system-level memory footprint when releasing
  memory in programs that use many small chunks.  You can get
  essentially the same effect by setting MXFAST to 0, but this can
  lead to even greater slowdowns in programs using many small chunks.
  TRIM_FASTBINS is an in-between compile-time option, that disables
  only those chunks bordering topmost memory from being placed in
  fastbins.
*/

#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS  0
#endif


/* Definition for getting more memory from the OS.  */
#define MORECORE         (*__morecore)
#define MORECORE_FAILURE 0
void * __default_morecore (ptrdiff_t);
void *(*__morecore)(ptrdiff_t) = __default_morecore;


#include <string.h>

/*
  MORECORE-related declarations.  By default, rely on sbrk
*/


/*
  MORECORE is the name of the routine to call to obtain more memory
  from the system.  See below for general guidance on writing
  alternative MORECORE functions, as well as a version for WIN32 and a
  sample version for pre-OSX macos.
*/

#ifndef MORECORE
#define MORECORE sbrk
#endif

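/* Illustrative sketch, not part of the original source: the shape an
   alternative MORECORE routine is expected to have.  Like sbrk, it
   takes a ptrdiff_t increment, returns the start of the newly obtained
   region (the current end when the increment is 0), and returns
   MORECORE_FAILURE when it cannot extend.  This toy version, with
   hypothetical names, carves space out of a fixed static arena and can
   never trim:

     static char example_arena[16 * 1024 * 1024];
     static size_t example_used;

     static void *
     example_morecore (ptrdiff_t increment)
     {
       if (increment < 0
           || (size_t) increment > sizeof (example_arena) - example_used)
         return (void *) MORECORE_FAILURE;
       void *old_end = example_arena + example_used;
       example_used += increment;
       return old_end;
     }

   A real replacement also has to respect MORECORE_CONTIGUOUS,
   MORECORE_CLEARS and the trimming conventions described below.  */
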
/*
  MORECORE_FAILURE is the value returned upon failure of MORECORE
  as well as mmap.  Since it cannot be an otherwise valid memory address,
  and must reflect values of standard sys calls, you probably ought not
  try to redefine it.
*/

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif

/*
  If MORECORE_CONTIGUOUS is true, take advantage of fact that
  consecutive calls to MORECORE with positive arguments always return
  contiguous increasing addresses.  This is true of unix sbrk.  Even
  if not defined, when regions happen to be contiguous, malloc will
  permit allocations spanning regions obtained from different
  calls.  But defining this when applicable enables some stronger
  consistency checks and space efficiencies.
*/

#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  Define MORECORE_CANNOT_TRIM if your version of MORECORE
  cannot release space back to the system when given negative
  arguments.  This is generally necessary only if you are using
  a hand-crafted MORECORE function that cannot handle negative arguments.
*/

/* #define MORECORE_CANNOT_TRIM */

/*  MORECORE_CLEARS           (default 1)
     The degree to which the routine mapped to MORECORE zeroes out
     memory: never (0), only for newly allocated space (1) or always
     (2).  The distinction between (1) and (2) is necessary because on
     some systems, if the application first decrements and then
     increments the break value, the contents of the reallocated space
     are unspecified.
 */

#ifndef MORECORE_CLEARS
# define MORECORE_CLEARS 1
#endif


/*
   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
   sbrk fails, and mmap is used as a backup.  The value must be a
   multiple of page size.  This backup strategy generally applies only
   when systems have "holes" in address space, so sbrk cannot perform
   contiguous expansion, but there is still space available on system.
   On systems for which this is known to be useful (i.e. most linux
   kernels), this occurs only when programs allocate huge amounts of
   memory.  Between this, and the fact that mmap regions tend to be
   limited, the size should be large, to avoid too many mmap calls and
   thus avoid running out of kernel resources.  */

#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.
*/

#ifndef HAVE_MREMAP
#define HAVE_MREMAP 0
#endif

/* We may need to support __malloc_initialize_hook for backwards
   compatibility.  */

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
# define HAVE_MALLOC_INIT_HOOK 1
#else
# define HAVE_MALLOC_INIT_HOOK 0
#endif


/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics.  It should work on any SVID/XPG compliant system that has
  a /usr/include/malloc.h defining struct mallinfo.  (If you'd like to
  install such a thing yourself, cut out the preliminary declarations
  as described above and below and save them in a malloc.h file.  But
  there's no compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields that are not even meaningful in this version of
  malloc.  These fields are instead filled by mallinfo() with
  other numbers that might be of interest.
*/


/* ---------- description of public routines ------------ */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or null
  if no space is available.  Additionally, on failure, errno is
  set to ENOMEM on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk.  (The minimum
  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  systems.)  On most systems, size_t is an unsigned type, so calls
  with negative arguments are interpreted as requests for huge amounts
  of space, which will often fail.  The maximum supported value of n
  differs across systems, but is in all cases less than the maximum
  representable value of a size_t.
*/
void*  __libc_malloc(size_t);
libc_hidden_proto (__libc_malloc)

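/* Illustrative caller-side sketch, not part of the original source:
   per the contract above, a failed malloc returns null with errno set
   to ENOMEM, so callers can report the reason directly.

     char *buf = malloc (4096);
     if (buf == NULL)
       {
         perror ("malloc");
         return -1;
       }
*/
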
/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null.  It can have arbitrary (i.e., bad!)
  effects if p has already been freed.

  Unless disabled (using mallopt), freeing very large spaces will,
  when possible, automatically trigger operations that give
  back unused memory to the system, thus reducing program footprint.
*/
void     __libc_free(void*);
libc_hidden_proto (__libc_free)

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void*  __libc_calloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p.  The algorithm
  prefers extending p when possible, otherwise it employs the
  equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  Unless the #define
  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  zero (re)allocates a minimum-sized chunk.

  Large chunks that were internally obtained via mmap will always be
  grown using malloc-copy-free sequences unless the system supports
  MREMAP (currently only linux).

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void*  __libc_realloc(void*, size_t);
libc_hidden_proto (__libc_realloc)

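/* Illustrative caller-side sketch, not part of the original source:
   because a failed realloc leaves p allocated, assigning the result
   straight back to p would leak it.  Keep the old pointer until the
   call is known to have succeeded:

     char *tmp = realloc (p, new_size);
     if (tmp == NULL)
       {
         free (p);
         return -1;
       }
     p = tmp;
*/
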
/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two.  If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void*  __libc_memalign(size_t, size_t);
libc_hidden_proto (__libc_memalign)

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system.  If the pagesize is unknown, 4096 is used.
*/
void*  __libc_valloc(size_t);



/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  in this malloc.  The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  so setting them has no effect.  But this malloc also supports four
  other options in mallopt.  See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #   default    allowed param values
  M_MXFAST          1         64         0-80  (0 disables fastbins)
  M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
  M_TOP_PAD        -2         0          any
  M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
*/
int      __libc_mallopt(int, int);
libc_hidden_proto (__libc_mallopt)


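/* Illustrative caller-side sketch, not part of the original source,
   using the parameters tabulated above.  Each call returns 1 on
   success and 0 if the value is rejected:

     mallopt (M_MXFAST, 0);
     mallopt (M_TRIM_THRESHOLD, 256 * 1024);
     mallopt (M_MMAP_THRESHOLD, 1024 * 1024);

   Setting M_MXFAST to 0 disables fastbins entirely, as described in
   the tuning section below.  */
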
/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    the number of fastbin blocks (i.e., small chunks that
               have been freed but not reused or consolidated)
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   always 0
  fsmblks:   total bytes held in fastbin blocks
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim.  ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
struct mallinfo __libc_mallinfo(void);


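/* Illustrative caller-side sketch, not part of the original source:

     struct mallinfo mi = mallinfo ();
     printf ("arena %d bytes, %d free chunks, %d mmapped bytes\n",
             mi.arena, mi.ordblks, mi.hblkhd);

   keeping in mind that the int fields may wrap for large heaps, as
   noted above.  */
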
/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
 */
void*  __libc_pvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool.  You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program.  However, it cannot guarantee to reduce memory.  Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed.  If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less).  Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int      __malloc_trim(size_t);

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects.  This is not a particularly great
  programming practice.  malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);

*/
size_t   __malloc_usable_size(void*);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed.  Note that this is the number of bytes allocated, not the
  number requested.  It will be larger than the number requested
  because of alignment and bookkeeping overhead.  Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.

*/
void     __malloc_stats(void);

/*
  posix_memalign(void **memptr, size_t alignment, size_t size);

  POSIX wrapper like memalign(), checking for validity of size.
*/
int      __posix_memalign(void **, size_t, size_t);

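/* Illustrative caller-side sketch, not part of the original source:
   unlike memalign, posix_memalign reports failure through its return
   value rather than through a null result.  The alignment must be a
   power of two multiple of sizeof(void *):

     void *p;
     int err = posix_memalign (&p, 64, 1024);
     if (err != 0)
       return err;
*/
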
/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces.  This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default.  The maximum supported value of MXFAST
  is 80.  You wouldn't want it any higher than this anyway.  Fastbins
  are designed especially for use with many small structs, objects or
  strings -- the default handles structs/objects/arrays with sizes up
  to 8 4byte fields, or small strings representing words, tokens,
  etc.  Using fastbins for larger objects normally worsens
  fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units.  It is internally used in
  chunksize units, which adds padding and alignment.  You can reduce
  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/


/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
#endif


/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

  Automatic trimming is mainly useful in long-lived programs.
  Because trimming via sbrk can be slow on some systems, and can
  sometimes be wasteful (in cases where programs immediately
  afterward allocate more large chunks) the value should be high
  enough so that your overall system performance would improve by
  releasing this much memory.

  The trim threshold and the mmap control parameters (see below)
  can be traded off with one another.  Trimming and mmapping are
  two different ways of releasing unused memory back to the
  system.  Between these two, it is often possible to keep
  system-level demands of a long-lived program down to a bare
  minimum.  For example, in one test suite of sessions measuring
  the XF86 X server on Linux, using a trim threshold of 128K and a
  mmap threshold of 192K led to near-minimal long term resource
  consumption.

  If you are using this malloc in a long-lived program, it should
  pay to experiment with these values.  As a rough guide, you
  might set to a value close to the average size of a process
  (program) running on your system.  Releasing this much memory
  would allow such a process to run in memory.  Generally, it's
  worth it to tune for trimming rather than memory mapping when a
  program undergoes phases where several large chunks are
  allocated and released in ways that can reuse each other's
  storage, perhaps mixed with phases where there are no such
  chunks at all.  And in well-behaved long-lived programs,
  controlling release of large blocks via trimming versus mapping
  is usually faster.

  However, in most programs, these parameters serve mainly as
  protection against the system-level effects of carrying around
  massive amounts of unneeded memory.  Since frequent calls to
  sbrk, mmap, and munmap otherwise degrade performance, the default
  parameters are set to relatively high values that serve only as
  safeguards.

  The trim value must be greater than page size to have any useful
  effect.  To disable trimming completely, you can set to
  (unsigned long)(-1)

  Trim settings interact with fastbin (MXFAST) settings: Unless
  TRIM_FASTBINS is defined, automatic trimming never takes place upon
  freeing a chunk with size less than or equal to MXFAST.  Trimming is
  instead delayed until subsequent freeing of larger chunks.  However,
  you can still force an attempted trim by calling malloc_trim.

  Also, trimming is not generally possible in cases where
  the main arena is obtained via mmap.

  Note that the trick some people use of mallocing a huge space and
  then freeing it at program startup, in an attempt to reserve system
  memory, doesn't have the intended effect under automatic trimming,
  since that memory will immediately be returned to the system.
*/

#define M_TRIM_THRESHOLD       -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
  M_TOP_PAD is the amount of extra `padding' space to allocate or
  retain whenever sbrk is called.  It is used in two ways internally:

  * When sbrk is called to extend the top of the arena to satisfy
    a new malloc request, this much padding is added to the sbrk
    request.

  * When malloc_trim is called automatically from free(),
    it is used as the `pad' argument.

  In both cases, the actual amount of padding is rounded
  so that the end of the arena is always a system page boundary.

  The main reason for using padding is to avoid calling sbrk so
  often.  Having even a small pad greatly reduces the likelihood
  that nearly every malloc request during program start-up (or
  after trimming) will invoke sbrk, which needlessly wastes
  time.

  Automatic rounding-up to page-size units is normally sufficient
  to avoid measurable overhead, so the default is 0.  However, in
  systems where sbrk is relatively slow, it can pay to increase
  this value, at the expense of carrying around more memory than
  the program needs.
*/

#define M_TOP_PAD              -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
  MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
  adjusted MMAP_THRESHOLD.
*/

#ifndef DEFAULT_MMAP_THRESHOLD_MIN
#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
#endif

#ifndef DEFAULT_MMAP_THRESHOLD_MAX
  /* For 32-bit platforms we cannot increase the maximum mmap
     threshold much because it is also the minimum value for the
     maximum heap size and its alignment.  Going above 512k (i.e., 1M
     for new heaps) wastes too much address space.  */
# if __WORDSIZE == 32
#  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
# else
#  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
# endif
#endif

/*
  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  to service a request.  Requests of at least this size that cannot
  be allocated using already-existing space will be serviced via mmap.
  (If enough normal freed space already exists it is used instead.)

  Using mmap segregates relatively large chunks of memory so that
  they can be individually obtained and released from the host
  system.  A request serviced through mmap is never reused by any
  other request (at least not directly; the system may just so
  happen to remap successive requests to the same locations).

  Segregating space in this way has the benefits that:

  1. Mmapped space can ALWAYS be individually released back
     to the system, which helps keep the system level memory
     demands of a long-lived program low.
  2. Mapped memory can never become `locked' between
     other chunks, as can happen with normally allocated chunks, which
     means that even trimming via malloc_trim would not release them.
  3. On some systems with "holes" in address spaces, mmap can obtain
     memory that sbrk cannot.

  However, it has the disadvantages that:

  1. The space cannot be reclaimed, consolidated, and then
     used to service later requests, as happens with normal chunks.
  2. It can lead to more wastage because of mmap page alignment
     requirements
  3. It causes malloc performance to be more dependent on host
     system memory management support routines which may vary in
     implementation quality and may impose arbitrary
     limitations.  Generally, servicing a request via normal
     malloc steps is faster than going through a system's mmap.

  The advantages of mmap nearly always outweigh disadvantages for
  "large" chunks, but the value of "large" varies across systems.  The
  default is an empirically derived value that works well in most
  systems.


  Update in 2006:
  The above was written in 2001.  Since then the world has changed a lot.
  Memory got bigger.  Applications got bigger.  The virtual address space
  layout in 32 bit linux changed.

  In the new situation, brk() and mmap space is shared and there are no
  artificial limits on brk size imposed by the kernel.  What is more,
  applications have started using transient allocations larger than the
  128Kb as was imagined in 2001.

  The price for mmap is also high now; each time glibc mmaps from the
  kernel, the kernel is forced to zero out the memory it gives to the
  application.  Zeroing memory is expensive and eats a lot of cache and
  memory bandwidth.  This has nothing to do with the efficiency of the
  virtual memory system, by doing mmap the kernel just has no choice but
  to zero.

  In 2001, the kernel had a maximum size for brk() which was about 800
  megabytes on 32 bit x86, at that point brk() would hit the first
  mmaped shared libraries and couldn't expand anymore.  With current 2.6
  kernels, the VA space layout is different and brk() and mmap
  both can span the entire heap at will.

  Rather than using a static threshold for the brk/mmap tradeoff,
  we are now using a simple dynamic one.  The goal is still to avoid
  fragmentation.  The old goals we kept are
  1) try to get the long lived large allocations to use mmap()
  2) really large allocations should always use mmap()
  and we're adding now:
  3) transient allocations should use brk() to avoid forcing the kernel
     having to zero memory over and over again

  The implementation works with a sliding threshold, which is by default
  limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
  out at 128Kb as per the 2001 default.

  This allows us to satisfy requirement 1) under the assumption that long
  lived allocations are made early in the process' lifespan, before it has
  started doing dynamic allocations of the same size (which will
  increase the threshold).

  The upperbound on the threshold satisfies requirement 2)

  The threshold goes up in value when the application frees memory that was
  allocated with the mmap allocator.  The idea is that once the application
  starts freeing memory of a certain size, it's highly probable that this is
  a size the application uses for transient allocations.  This estimator
  is there to satisfy the new third requirement.

*/

#define M_MMAP_THRESHOLD      -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
#endif

/*
  M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap.  This parameter exists because
  some systems have a limited number of internal tables for
  use by mmap, and using more than a few of them may degrade
  performance.

  The default is set to a value that serves only as a safeguard.
  Setting to 0 disables use of mmap for servicing large requests.
*/

#define M_MMAP_MAX             -4

#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX       (65536)
#endif

#include <malloc.h>

#ifndef RETURN_ADDRESS
#define RETURN_ADDRESS(X_) (NULL)
#endif

/* Forward declarations.  */
struct malloc_chunk;
typedef struct malloc_chunk* mchunkptr;

/* Internal routines.  */

static void*  _int_malloc(mstate, size_t);
static void     _int_free(mstate, mchunkptr, int);
static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
                           INTERNAL_SIZE_T);
static void*  _int_memalign(mstate, size_t, size_t);
static void*  _mid_memalign(size_t, size_t, void *);

static void malloc_printerr(const char *str) __attribute__ ((noreturn));

static void* mem2mem_check(void *p, size_t sz);
static void top_check(void);
static void munmap_chunk(mchunkptr p);
#if HAVE_MREMAP
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
#endif

static void*   malloc_check(size_t sz, const void *caller);
static void      free_check(void* mem, const void *caller);
static void*   realloc_check(void* oldmem, size_t bytes,
                             const void *caller);
static void*   memalign_check(size_t alignment, size_t bytes,
                              const void *caller);

/* ------------------ MMAP support ------------------ */


#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_NORESERVE
# define MAP_NORESERVE 0
#endif

#define MMAP(addr, size, prot, flags) \
 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)


/*
  -----------------------  Chunk representations -----------------------
*/


/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base.  See explanation below.
*/

struct malloc_chunk {

  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};


/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                     |A|M|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_size() bytes)                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             (size of chunk, but used for application data)    |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|1|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                     |A|0|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|0|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.  If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

    The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
    main arena, described by the main_arena variable.  When additional
    threads are spawned, each thread receives its own arena (up to a
    configurable limit, after which arenas are reused for multiple
    threads), and the chunks in these arenas have the A bit set.  To
    find the arena for a chunk on such a non-main arena, heap_for_ptr
    performs a bit mask operation and indirection through the ar_ptr
    member of the per-heap header heap_info (see arena.c).

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk.  This makes it easier to
    deal with alignments etc but can be very confusing when trying
    to extend or adapt this code.

    The three exceptions to all this are:

     1. The special chunk `top' doesn't bother using the
        trailing size field since there is no next contiguous chunk
        that would have to index off it.  After initialization, `top'
        is forced to always exist.  If it would become less than
        MINSIZE bytes long, it is replenished.

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit M (IS_MMAPPED) set in their size fields.  Because they are
        allocated one-by-one, each must contain its own trailing size
        field.  If the M bit is set, the other bits are ignored
        (because mmapped chunks are neither in an arena, nor adjacent
        to a freed chunk).  The M bit is also used for chunks which
        originally came from a dumped heap via malloc_set_state in
        hooks.c.

     3. Chunks in fastbins are treated as allocated chunks from the
        point of view of the chunk allocator.  They are consolidated
        with their neighbors only in bulk, in malloc_consolidate.
*/

/*
   ---------- Size and alignment checks and conversions ----------
*/

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((void*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))

/* The smallest size we can malloc is an aligned minimal chunk */

#define MINSIZE  \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */

#define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)

#define misaligned_chunk(p) \
  ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
   & MALLOC_ALIGN_MASK)


/*
   Check if a request is so large that it would wrap around zero when
   padded and aligned.  To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
 */

#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long) (req) >=                                       \
   (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */

#define request2size(req)                                         \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/* Same, except also perform an argument and result check.  First, we check
   that the padding done by request2size didn't result in an integer
   overflow.  Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
   size isn't so large that a later alignment would lead to another integer
   overflow.  */
#define checked_request2size(req, sz) \
({                                    \
  (sz) = request2size (req);          \
  if (((sz) < (req))                  \
      || REQUEST_OUT_OF_RANGE (sz))   \
    {                                 \
      __set_errno (ENOMEM);           \
      return 0;                       \
    }                                 \
})

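/* Worked example, not part of the original source, assuming the common
   64-bit configuration with SIZE_SZ == 8, MALLOC_ALIGNMENT == 16,
   MALLOC_ALIGN_MASK == 15 and MINSIZE == 32:

     request2size (0)   == 32    (0 + 8 + 15 is below MINSIZE)
     request2size (24)  == 32    (24 + 8 + 15) & ~15
     request2size (25)  == 48    (25 + 8 + 15) & ~15
     request2size (100) == 112   (100 + 8 + 15) & ~15

   so the chunk always has room for the request plus its size word,
   rounded up to the alignment.  */
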
1228/*
6c8dbf00
OB
1229 --------------- Physical chunk operations ---------------
1230 */
f65fd747 1231
10dc2a90 1232
fa8d436c
UD
1233/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1234#define PREV_INUSE 0x1
f65fd747 1235
fa8d436c 1236/* extract inuse bit of previous chunk */
e9c4fe93 1237#define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
f65fd747 1238
f65fd747 1239
fa8d436c
UD
1240/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1241#define IS_MMAPPED 0x2
f65fd747 1242
fa8d436c 1243/* check for mmap()'ed chunk */
e9c4fe93 1244#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
f65fd747 1245
f65fd747 1246
fa8d436c
UD
1247/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1248 from a non-main arena. This is only set immediately before handing
1249 the chunk to the user, if necessary. */
1250#define NON_MAIN_ARENA 0x4
f65fd747 1251
ae9166f2 1252/* Check for chunk from main arena. */
e9c4fe93
FW
1253#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1254
1255/* Mark a chunk as not being on the main arena. */
1256#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
f65fd747
UD
1257
1258
a9177ff5 1259/*
6c8dbf00 1260 Bits to mask off when extracting size
f65fd747 1261
6c8dbf00
OB
1262 Note: IS_MMAPPED is intentionally not masked off from size field in
1263 macros for which mmapped chunks should never be seen. This should
1264 cause helpful core dumps to occur if it is tried by accident by
1265 people extending or adapting this malloc.
1266 */
1267#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
f65fd747 1268
fa8d436c 1269/* Get size, ignoring use bits */
e9c4fe93 1270#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
f65fd747 1271
e9c4fe93
FW
1272/* Like chunksize, but do not mask SIZE_BITS. */
1273#define chunksize_nomask(p) ((p)->mchunk_size)
f65fd747 1274
fa8d436c 1275/* Ptr to next physical malloc_chunk. */
e9c4fe93
FW
1276#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1277
229855e5 1278/* Size of the chunk below P. Only valid if !prev_inuse (P). */
e9c4fe93
FW
1279#define prev_size(p) ((p)->mchunk_prev_size)
1280
229855e5 1281/* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
e9c4fe93 1282#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
f65fd747 1283
229855e5 1284/* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
e9c4fe93 1285#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
f65fd747 1286
fa8d436c 1287/* Treat space at ptr + offset as a chunk */
6c8dbf00 1288#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
fa8d436c
UD
1289
1290/* extract p's inuse bit */
6c8dbf00 1291#define inuse(p) \
e9c4fe93 1292 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
f65fd747 1293
fa8d436c 1294/* set/clear chunk as being inuse without otherwise disturbing */
6c8dbf00 1295#define set_inuse(p) \
e9c4fe93 1296 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
f65fd747 1297
6c8dbf00 1298#define clear_inuse(p) \
e9c4fe93 1299 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
f65fd747
UD
1300
1301
fa8d436c 1302/* check/set/clear inuse bits in known places */
6c8dbf00 1303#define inuse_bit_at_offset(p, s) \
e9c4fe93 1304 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
f65fd747 1305
6c8dbf00 1306#define set_inuse_bit_at_offset(p, s) \
e9c4fe93 1307 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
f65fd747 1308
6c8dbf00 1309#define clear_inuse_bit_at_offset(p, s) \
e9c4fe93 1310 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
f65fd747 1311
f65fd747 1312
fa8d436c 1313/* Set size at head, without disturbing its use bit */
e9c4fe93 1314#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
f65fd747 1315
fa8d436c 1316/* Set size/use field */
e9c4fe93 1317#define set_head(p, s) ((p)->mchunk_size = (s))
f65fd747 1318
fa8d436c 1319/* Set size at footer (only when chunk is not in use) */
e9c4fe93 1320#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
f65fd747
UD
1321
1322
e9c4fe93
FW
1323#pragma GCC poison mchunk_size
1324#pragma GCC poison mchunk_prev_size
1325
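/* Illustrative sketch only, not part of the allocator: how the accessor
   macros above compose when walking physically adjacent chunks.  The
   function name and the unused attribute are ours; it assumes P is a
   valid, non-mmapped, in-use chunk that is not the top chunk.  */
static void
__attribute__ ((unused))
example_walk_neighbors (mchunkptr p)
{
  /* Size with PREV_INUSE/IS_MMAPPED/NON_MAIN_ARENA masked off.  */
  INTERNAL_SIZE_T sz = chunksize (p);

  /* The next physical chunk starts exactly SZ bytes after P.  */
  mchunkptr nxt = next_chunk (p);

  /* P's own in-use status is stored in the *next* chunk's header.  */
  int p_in_use = inuse (p) != 0;

  /* prev_chunk/prev_size are only meaningful when the chunk below P is
     free, i.e. when P's PREV_INUSE bit is clear.  */
  if (!prev_inuse (p))
    {
      mchunkptr prv = prev_chunk (p);
      INTERNAL_SIZE_T prv_sz = prev_size (p); /* the free chunk's footer */
      (void) prv;
      (void) prv_sz;
    }

  (void) sz;
  (void) nxt;
  (void) p_in_use;
}
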
fa8d436c 1326/*
6c8dbf00 1327 -------------------- Internal data structures --------------------
fa8d436c
UD
1328
1329 All internal state is held in an instance of malloc_state defined
1330 below. There are no other static variables, except in two optional
a9177ff5 1331 cases:
6c8dbf00
OB
1332 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1333 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
22a89187 1334 for mmap.
fa8d436c
UD
1335
1336 Beware of lots of tricks that minimize the total bookkeeping space
1337 requirements. The result is a little over 1K bytes (for 4-byte
1338 pointers and size_t.)
6c8dbf00 1339 */
f65fd747
UD
1340
1341/*
6c8dbf00 1342 Bins
fa8d436c
UD
1343
1344 An array of bin headers for free chunks. Each bin is doubly
1345 linked. The bins are approximately proportionally (log) spaced.
1346 There are a lot of these bins (128). This may look excessive, but
1347 works very well in practice. Most bins hold sizes that are
1348 unusual as malloc request sizes, but are more usual for fragments
1349 and consolidated sets of chunks, which is what these bins hold, so
1350 they can be found quickly. All procedures maintain the invariant
1351 that no consolidated chunk physically borders another one, so each
1352 chunk in a list is known to be preceded and followed by either
1353 inuse chunks or the ends of memory.
1354
1355 Chunks in bins are kept in size order, with ties going to the
1356 approximately least recently used chunk. Ordering isn't needed
1357 for the small bins, which all contain the same-sized chunks, but
1358 facilitates best-fit allocation for larger chunks. These lists
1359 are just sequential. Keeping them in order almost never requires
1360 enough traversal to warrant using fancier ordered data
a9177ff5 1361 structures.
fa8d436c
UD
1362
1363 Chunks of the same size are linked with the most
1364 recently freed at the front, and allocations are taken from the
1365 back. This results in LRU (FIFO) allocation order, which tends
1366 to give each chunk an equal opportunity to be consolidated with
1367 adjacent freed chunks, resulting in larger free chunks and less
1368 fragmentation.
1369
1370 To simplify use in double-linked lists, each bin header acts
1371 as a malloc_chunk. This avoids special-casing for headers.
1372 But to conserve space and improve locality, we allocate
1373 only the fd/bk pointers of bins, and then use repositioning tricks
a9177ff5 1374 to treat these as the fields of a malloc_chunk*.
6c8dbf00 1375 */
f65fd747 1376
6c8dbf00 1377typedef struct malloc_chunk *mbinptr;
f65fd747 1378
fa8d436c 1379/* addressing -- note that bin_at(0) does not exist */
41999a1a
UD
1380#define bin_at(m, i) \
1381 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
6c8dbf00 1382 - offsetof (struct malloc_chunk, fd))
f65fd747 1383
fa8d436c 1384/* analog of ++bin */
6c8dbf00 1385#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
f65fd747 1386
fa8d436c
UD
1387/* Reminders about list directionality within bins */
1388#define first(b) ((b)->fd)
1389#define last(b) ((b)->bk)
f65fd747 1390
fa8d436c 1391/*
6c8dbf00 1392 Indexing
fa8d436c
UD
1393
1394 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1395 8 bytes apart. Larger bins are approximately logarithmically spaced:
f65fd747 1396
fa8d436c
UD
1397 64 bins of size 8
1398 32 bins of size 64
1399 16 bins of size 512
1400 8 bins of size 4096
1401 4 bins of size 32768
1402 2 bins of size 262144
1403 1 bin of size what's left
f65fd747 1404
fa8d436c
UD
1405 There is actually a little bit of slop in the numbers in bin_index
1406 for the sake of speed. This makes no difference elsewhere.
f65fd747 1407
fa8d436c
UD
1408 The bins top out around 1MB because we expect to service large
1409 requests via mmap.
b5a2bbe6
L
1410
1411 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1412 a valid chunk size the small bins are bumped up one.
6c8dbf00 1413 */
f65fd747 1414
fa8d436c
UD
1415#define NBINS 128
1416#define NSMALLBINS 64
1d47e92f 1417#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
b5a2bbe6
L
1418#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
1419#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
f65fd747 1420
fa8d436c 1421#define in_smallbin_range(sz) \
6c8dbf00 1422 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
f65fd747 1423
1d47e92f 1424#define smallbin_index(sz) \
6c8dbf00 1425 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
b5a2bbe6 1426 + SMALLBIN_CORRECTION)
f65fd747 1427
1d47e92f 1428#define largebin_index_32(sz) \
6c8dbf00
OB
1429 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1430 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1431 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1432 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1433 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1434 126)
f65fd747 1435
b5a2bbe6 1436#define largebin_index_32_big(sz) \
6c8dbf00
OB
1437 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1438 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1439 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1440 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1441 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1442 126)
b5a2bbe6 1443
1d47e92f
UD
1444// XXX It remains to be seen whether it is good to keep the widths of
1445// XXX the buckets the same or whether it should be scaled by a factor
1446// XXX of two as well.
1447#define largebin_index_64(sz) \
6c8dbf00
OB
1448 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1449 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1450 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1451 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1452 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1453 126)
1d47e92f
UD
1454
1455#define largebin_index(sz) \
b5a2bbe6
L
1456 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1457 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1458 : largebin_index_32 (sz))
1d47e92f 1459
fa8d436c 1460#define bin_index(sz) \
6c8dbf00 1461 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
f65fd747 1462
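/* Illustrative sketch only, not part of the allocator: mapping a user
   request to its bin index.  The function name is ours; bin_index
   expects a chunk size, so the request is first converted with
   request2size (defined earlier in this file).  */
static unsigned int
__attribute__ ((unused))
example_bin_index_for_request (size_t bytes)
{
  /* Add overhead and round up to a multiple of MALLOC_ALIGNMENT.  */
  size_t nb = request2size (bytes);

  /* Small sizes map linearly, one bin per MALLOC_ALIGNMENT step; larger
     sizes fall into the logarithmically spaced bins above.  E.g. on a
     typical 64-bit build a 100-byte request becomes a 112-byte chunk
     and lands in small bin 7.  */
  return bin_index (nb);
}
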
1ecba1fa
FW
1463/* Take a chunk off a bin list. */
1464static void
1465unlink_chunk (mstate av, mchunkptr p)
1466{
1467 if (chunksize (p) != prev_size (next_chunk (p)))
1468 malloc_printerr ("corrupted size vs. prev_size");
1469
1470 mchunkptr fd = p->fd;
1471 mchunkptr bk = p->bk;
1472
1473 if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
1474 malloc_printerr ("corrupted double-linked list");
1475
1476 fd->bk = bk;
1477 bk->fd = fd;
1478 if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
1479 {
1480 if (p->fd_nextsize->bk_nextsize != p
1481 || p->bk_nextsize->fd_nextsize != p)
1482 malloc_printerr ("corrupted double-linked list (not small)");
1483
1484 if (fd->fd_nextsize == NULL)
1485 {
1486 if (p->fd_nextsize == p)
1487 fd->fd_nextsize = fd->bk_nextsize = fd;
1488 else
1489 {
1490 fd->fd_nextsize = p->fd_nextsize;
1491 fd->bk_nextsize = p->bk_nextsize;
1492 p->fd_nextsize->bk_nextsize = fd;
1493 p->bk_nextsize->fd_nextsize = fd;
1494 }
1495 }
1496 else
1497 {
1498 p->fd_nextsize->bk_nextsize = p->bk_nextsize;
1499 p->bk_nextsize->fd_nextsize = p->fd_nextsize;
1500 }
1501 }
1502}
f65fd747
UD
1503
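/* Illustrative sketch only, not part of the allocator: the inverse of
   unlink_chunk, pushing P onto the head of bin B the way _int_free and
   _int_malloc do for unsorted/small bins.  Names are ours; the real
   insertion code also maintains the fd_nextsize/bk_nextsize links for
   large bins.  */
static void
__attribute__ ((unused))
example_bin_insert_head (mbinptr b, mchunkptr p)
{
  mchunkptr fwd = first (b);    /* the old head (or B itself if empty) */

  p->fd = fwd;
  p->bk = b;
  fwd->bk = p;
  b->fd = p;
}
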
1504/*
6c8dbf00 1505 Unsorted chunks
fa8d436c
UD
1506
1507 All remainders from chunk splits, as well as all returned chunks,
1508 are first placed in the "unsorted" bin. They are then placed
1509 in regular bins after malloc gives them ONE chance to be used before
1510 binning. So, basically, the unsorted_chunks list acts as a queue,
1511 with chunks being placed on it in free (and malloc_consolidate),
1512 and taken off (to be either used or placed in bins) in malloc.
1513
1514 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1515 does not have to be taken into account in size comparisons.
6c8dbf00 1516 */
f65fd747 1517
fa8d436c 1518/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
6c8dbf00 1519#define unsorted_chunks(M) (bin_at (M, 1))
f65fd747 1520
fa8d436c 1521/*
6c8dbf00 1522 Top
fa8d436c
UD
1523
1524 The top-most available chunk (i.e., the one bordering the end of
1525 available memory) is treated specially. It is never included in
1526 any bin, is used only if no other chunk is available, and is
1527 released back to the system if it is very large (see
1528 M_TRIM_THRESHOLD). Because top initially
1529 points to its own bin with initial zero size, thus forcing
1530 extension on the first malloc request, we avoid having any special
1531 code in malloc to check whether it even exists yet. But we still
1532 need to do so when getting memory from the system, so we make
1533 initial_top treat the bin as a legal but unusable chunk during the
1534 interval between initialization and the first call to
3b49edc0 1535 sysmalloc. (This is somewhat delicate, since it relies on
fa8d436c 1536 the 2 preceding words to be zero during this interval as well.)
6c8dbf00 1537 */
f65fd747 1538
fa8d436c 1539/* Conveniently, the unsorted bin can be used as dummy top on first call */
6c8dbf00 1540#define initial_top(M) (unsorted_chunks (M))
f65fd747 1541
fa8d436c 1542/*
6c8dbf00 1543 Binmap
f65fd747 1544
fa8d436c
UD
1545 To help compensate for the large number of bins, a one-level index
1546 structure is used for bin-by-bin searching. `binmap' is a
1547 bitvector recording whether bins are definitely empty so they can
1548 be skipped over during traversals. The bits are NOT always
1549 cleared as soon as bins are empty, but instead only
1550 when they are noticed to be empty during traversal in malloc.
6c8dbf00 1551 */
f65fd747 1552
fa8d436c
UD
1553/* Conservatively use 32 bits per map word, even if on 64bit system */
1554#define BINMAPSHIFT 5
1555#define BITSPERMAP (1U << BINMAPSHIFT)
1556#define BINMAPSIZE (NBINS / BITSPERMAP)
f65fd747 1557
fa8d436c 1558#define idx2block(i) ((i) >> BINMAPSHIFT)
6c8dbf00 1559#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
f65fd747 1560
6c8dbf00
OB
1561#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1562#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1563#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
f65fd747 1564
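/* Illustrative sketch only, not part of the allocator: locating the
   binmap bit for a bin index.  The function name is ours.  For example,
   bin 70 lives in binmap word idx2block (70) == 2 as bit
   idx2bit (70) == 1U << 6.  */
static unsigned int
__attribute__ ((unused))
example_binmap_bit (unsigned int bin_idx)
{
  unsigned int block = idx2block (bin_idx); /* which 32-bit map word */
  unsigned int bit = idx2bit (bin_idx);     /* which bit within that word */

  (void) block;
  return bit;
}
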
fa8d436c 1565/*
6c8dbf00 1566 Fastbins
fa8d436c
UD
1567
1568 An array of lists holding recently freed small chunks. Fastbins
1569 are not doubly linked. It is faster to single-link them, and
1570 since chunks are never removed from the middles of these lists,
1571 double linking is not necessary. Also, unlike regular bins, they
1572 are not even processed in FIFO order (they use faster LIFO) since
1573 ordering doesn't much matter in the transient contexts in which
1574 fastbins are normally used.
1575
1576 Chunks in fastbins keep their inuse bit set, so they cannot
1577 be consolidated with other free chunks. malloc_consolidate
1578 releases all chunks in fastbins and consolidates them with
a9177ff5 1579 other free chunks.
6c8dbf00 1580 */
f65fd747 1581
6c8dbf00 1582typedef struct malloc_chunk *mfastbinptr;
425ce2ed 1583#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
f65fd747 1584
fa8d436c 1585/* offset 2 to use otherwise unindexable first 2 bins */
425ce2ed 1586#define fastbin_index(sz) \
6c8dbf00 1587 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
425ce2ed 1588
f65fd747 1589
fa8d436c 1590/* The maximum fastbin request size we support */
425ce2ed 1591#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
f65fd747 1592
6c8dbf00 1593#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
f65fd747
UD
1594
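/* Illustrative sketch only, not part of the allocator: how a request
   size reaches a fastbin slot.  The function name is ours.  E.g. on a
   typical 64-bit build a 24-byte request becomes a 32-byte chunk and
   fastbin_index yields slot 0; callers additionally check the size
   against the dynamic max_fast limit before using the slot.  */
static unsigned int
__attribute__ ((unused))
example_fastbin_slot (size_t bytes)
{
  size_t nb = request2size (bytes);
  return fastbin_index (nb);
}
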
1595/*
6c8dbf00
OB
1596 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1597 that triggers automatic consolidation of possibly-surrounding
1598 fastbin chunks. This is a heuristic, so the exact value should not
1599 matter too much. It is defined at half the default trim threshold as a
1600 compromise heuristic to only attempt consolidation if it is likely
1601 to lead to trimming. However, it is not dynamically tunable, since
1602 consolidation reduces fragmentation surrounding large chunks even
1603 if trimming is not used.
1604 */
f65fd747 1605
fa8d436c 1606#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
f65fd747 1607
f65fd747 1608/*
6c8dbf00
OB
1609 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1610 regions. Otherwise, contiguity is exploited in merging together,
1611 when possible, results from consecutive MORECORE calls.
f65fd747 1612
6c8dbf00
OB
1613 The initial value comes from MORECORE_CONTIGUOUS, but is
1614 changed dynamically if mmap is ever used as an sbrk substitute.
1615 */
f65fd747 1616
fa8d436c 1617#define NONCONTIGUOUS_BIT (2U)
f65fd747 1618
6c8dbf00
OB
1619#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1620#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1621#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
9bf248c6 1622#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
f65fd747 1623
eac43cbb
FW
1624/* Maximum size of memory handled in fastbins. */
1625static INTERNAL_SIZE_T global_max_fast;
1626
a9177ff5
RM
1627/*
1628 Set value of max_fast.
fa8d436c 1629 Use impossibly small value if 0.
3381be5c
WD
1630 Precondition: there are no existing fastbin chunks in the main arena.
1631 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1632 before changing max_fast. Note other arenas will leak their fast bin
1633 entries if max_fast is reduced.
6c8dbf00 1634 */
f65fd747 1635
9bf248c6 1636#define set_max_fast(s) \
991eda1e 1637 global_max_fast = (((s) == 0) \
6c8dbf00 1638 ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
f65fd747 1639
eac43cbb
FW
1640static inline INTERNAL_SIZE_T
1641get_max_fast (void)
1642{
1643 /* Tell the GCC optimizers that global_max_fast is never larger
1644 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1645 _int_malloc after constant propagation of the size parameter.
1646 (The code never executes because malloc preserves the
1647 global_max_fast invariant, but the optimizers may not recognize
1648 this.) */
1649 if (global_max_fast > MAX_FAST_SIZE)
1650 __builtin_unreachable ();
1651 return global_max_fast;
1652}
f65fd747
UD
1653
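/* Illustrative sketch only, not part of the allocator: the size test
   the fastbin allocation path applies, relating a user request to the
   dynamic max_fast limit read via get_max_fast above.  The function
   name is ours.  */
static int
__attribute__ ((unused))
example_request_is_fastbin_sized (size_t bytes)
{
  return (unsigned long) request2size (bytes)
         <= (unsigned long) get_max_fast ();
}
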
1654/*
fa8d436c 1655 ----------- Internal state representation and initialization -----------
6c8dbf00 1656 */
f65fd747 1657
e956075a
WD
1658/*
1659 have_fastchunks indicates that there are probably some fastbin chunks.
1660 It is set true on entering a chunk into any fastbin, and cleared early in
1661 malloc_consolidate. The value is approximate since it may be set when there
1662 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1663 available. Given its sole purpose is to reduce the number of redundant calls to
1664 malloc_consolidate, it does not affect correctness. As a result we can safely
1665 use relaxed atomic accesses.
1666 */
1667
1668
6c8dbf00
OB
1669struct malloc_state
1670{
fa8d436c 1671 /* Serialize access. */
cbb47fa1 1672 __libc_lock_define (, mutex);
9bf248c6
UD
1673
1674 /* Flags (formerly in max_fast). */
1675 int flags;
f65fd747 1676
e956075a 1677 /* Set if the fastbin chunks contain recently inserted free blocks. */
2c2245b9
WD
1678 /* Note this is a bool but not all targets support atomics on booleans. */
1679 int have_fastchunks;
e956075a 1680
fa8d436c 1681 /* Fastbins */
6c8dbf00 1682 mfastbinptr fastbinsY[NFASTBINS];
f65fd747 1683
fa8d436c 1684 /* Base of the topmost chunk -- not otherwise kept in a bin */
6c8dbf00 1685 mchunkptr top;
f65fd747 1686
fa8d436c 1687 /* The remainder from the most recent split of a small request */
6c8dbf00 1688 mchunkptr last_remainder;
f65fd747 1689
fa8d436c 1690 /* Normal bins packed as described above */
6c8dbf00 1691 mchunkptr bins[NBINS * 2 - 2];
f65fd747 1692
fa8d436c 1693 /* Bitmap of bins */
6c8dbf00 1694 unsigned int binmap[BINMAPSIZE];
f65fd747 1695
fa8d436c
UD
1696 /* Linked list */
1697 struct malloc_state *next;
f65fd747 1698
a62719ba 1699 /* Linked list for free arenas. Access to this field is serialized
90c400bd 1700 by free_list_lock in arena.c. */
425ce2ed 1701 struct malloc_state *next_free;
425ce2ed 1702
a62719ba 1703 /* Number of threads attached to this arena. 0 if the arena is on
90c400bd
FW
1704 the free list. Access to this field is serialized by
1705 free_list_lock in arena.c. */
a62719ba
FW
1706 INTERNAL_SIZE_T attached_threads;
1707
fa8d436c
UD
1708 /* Memory allocated from the system in this arena. */
1709 INTERNAL_SIZE_T system_mem;
1710 INTERNAL_SIZE_T max_system_mem;
1711};
f65fd747 1712
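/* Illustrative sketch only, not part of the allocator: the relaxed
   have_fastchunks protocol described above.  The function name is ours;
   the real producer side lives in _int_free and the consumer side at
   the malloc_consolidate call sites.  */
static void
__attribute__ ((unused))
example_have_fastchunks_protocol (mstate av)
{
  /* A free that links a chunk into a fastbin advertises the fact ...  */
  atomic_store_relaxed (&av->have_fastchunks, true);

  /* ... and a later pass reads the flag only to skip redundant work;
     a stale value in either direction is harmless.  */
  if (atomic_load_relaxed (&av->have_fastchunks))
    {
      atomic_store_relaxed (&av->have_fastchunks, false);
      /* ... walk fastbinsY[] and consolidate chunks here ...  */
    }
}
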
6c8dbf00
OB
1713struct malloc_par
1714{
fa8d436c 1715 /* Tunable parameters */
6c8dbf00
OB
1716 unsigned long trim_threshold;
1717 INTERNAL_SIZE_T top_pad;
1718 INTERNAL_SIZE_T mmap_threshold;
1719 INTERNAL_SIZE_T arena_test;
1720 INTERNAL_SIZE_T arena_max;
fa8d436c
UD
1721
1722 /* Memory map support */
6c8dbf00
OB
1723 int n_mmaps;
1724 int n_mmaps_max;
1725 int max_n_mmaps;
1d05c2fb
UD
1726 /* the mmap_threshold is dynamic, until the user sets
1727 it manually, at which point we need to disable any
1728 dynamic behavior. */
6c8dbf00 1729 int no_dyn_threshold;
fa8d436c 1730
fa8d436c 1731 /* Statistics */
6c8dbf00 1732 INTERNAL_SIZE_T mmapped_mem;
6c8dbf00 1733 INTERNAL_SIZE_T max_mmapped_mem;
fa8d436c
UD
1734
1735 /* First address handed out by MORECORE/sbrk. */
6c8dbf00 1736 char *sbrk_base;
d5c3fafc
DD
1737
1738#if USE_TCACHE
1739 /* Maximum number of buckets to use. */
1740 size_t tcache_bins;
1741 size_t tcache_max_bytes;
1742 /* Maximum number of chunks in each bucket. */
1743 size_t tcache_count;
1744 /* Maximum number of chunks to remove from the unsorted list, which
1745 aren't used to prefill the cache. */
1746 size_t tcache_unsorted_limit;
1747#endif
fa8d436c 1748};
f65fd747 1749
fa8d436c
UD
1750/* There are several instances of this struct ("arenas") in this
1751 malloc. If you are adapting this malloc in a way that does NOT use
1752 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1753 before using. This malloc relies on the property that malloc_state
1754 is initialized to all zeroes (as is true of C statics). */
f65fd747 1755
02d46fc4 1756static struct malloc_state main_arena =
6c8dbf00 1757{
400e1226 1758 .mutex = _LIBC_LOCK_INITIALIZER,
a62719ba
FW
1759 .next = &main_arena,
1760 .attached_threads = 1
6c8dbf00 1761};
f65fd747 1762
4cf6c72f
FW
1763/* These variables are used for undumping support. Chunks are marked
1764 as using mmap, but we leave them alone if they fall into this
1e8a8875
FW
1765 range. NB: The chunk size for these chunks only includes the
1766 initial size field (of SIZE_SZ bytes), there is no trailing size
1767 field (unlike with regular mmapped chunks). */
4cf6c72f
FW
1768static mchunkptr dumped_main_arena_start; /* Inclusive. */
1769static mchunkptr dumped_main_arena_end; /* Exclusive. */
1770
1771/* True if the pointer falls into the dumped arena. Use this after
1772 chunk_is_mmapped indicates a chunk is mmapped. */
1773#define DUMPED_MAIN_ARENA_CHUNK(p) \
1774 ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
1775
fa8d436c 1776/* There is only one instance of the malloc parameters. */
f65fd747 1777
02d46fc4 1778static struct malloc_par mp_ =
6c8dbf00
OB
1779{
1780 .top_pad = DEFAULT_TOP_PAD,
1781 .n_mmaps_max = DEFAULT_MMAP_MAX,
1782 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1783 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1784#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1785 .arena_test = NARENAS_FROM_NCORES (1)
d5c3fafc
DD
1786#if USE_TCACHE
1787 ,
1788 .tcache_count = TCACHE_FILL_COUNT,
1789 .tcache_bins = TCACHE_MAX_BINS,
1790 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1791 .tcache_unsorted_limit = 0 /* No limit. */
1792#endif
6c8dbf00 1793};
f65fd747 1794
fa8d436c 1795/*
6c8dbf00 1796 Initialize a malloc_state struct.
f65fd747 1797
3381be5c
WD
1798 This is called from ptmalloc_init () or from _int_new_arena ()
1799 when creating a new arena.
6c8dbf00 1800 */
f65fd747 1801
6c8dbf00
OB
1802static void
1803malloc_init_state (mstate av)
fa8d436c 1804{
6c8dbf00 1805 int i;
fa8d436c 1806 mbinptr bin;
a9177ff5 1807
fa8d436c 1808 /* Establish circular links for normal bins */
6c8dbf00
OB
1809 for (i = 1; i < NBINS; ++i)
1810 {
1811 bin = bin_at (av, i);
1812 bin->fd = bin->bk = bin;
1813 }
f65fd747 1814
fa8d436c
UD
1815#if MORECORE_CONTIGUOUS
1816 if (av != &main_arena)
1817#endif
6c8dbf00 1818 set_noncontiguous (av);
9bf248c6 1819 if (av == &main_arena)
6c8dbf00 1820 set_max_fast (DEFAULT_MXFAST);
e956075a 1821 atomic_store_relaxed (&av->have_fastchunks, false);
f65fd747 1822
6c8dbf00 1823 av->top = initial_top (av);
fa8d436c 1824}
e9b3e3c5 1825
a9177ff5 1826/*
fa8d436c 1827 Other internal utilities operating on mstates
6c8dbf00 1828 */
f65fd747 1829
6c8dbf00
OB
1830static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1831static int systrim (size_t, mstate);
1832static void malloc_consolidate (mstate);
7e3be507 1833
404d4cef
RM
1834
1835/* -------------- Early definitions for debugging hooks ---------------- */
1836
1837/* Define and initialize the hook variables. These weak definitions must
1838 appear before any use of the variables in a function (arena.c uses one). */
1839#ifndef weak_variable
404d4cef
RM
1840/* In GNU libc we want the hook variables to be weak definitions to
1841 avoid a problem with Emacs. */
22a89187 1842# define weak_variable weak_function
404d4cef
RM
1843#endif
1844
1845/* Forward declarations. */
6c8dbf00
OB
1846static void *malloc_hook_ini (size_t sz,
1847 const void *caller) __THROW;
1848static void *realloc_hook_ini (void *ptr, size_t sz,
1849 const void *caller) __THROW;
1850static void *memalign_hook_ini (size_t alignment, size_t sz,
1851 const void *caller) __THROW;
404d4cef 1852
2ba3cfa1 1853#if HAVE_MALLOC_INIT_HOOK
92e1ab0e
FW
1854void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1855compat_symbol (libc, __malloc_initialize_hook,
1856 __malloc_initialize_hook, GLIBC_2_0);
2ba3cfa1
FW
1857#endif
1858
a222d91a 1859void weak_variable (*__free_hook) (void *__ptr,
6c8dbf00 1860 const void *) = NULL;
a222d91a 1861void *weak_variable (*__malloc_hook)
6c8dbf00 1862 (size_t __size, const void *) = malloc_hook_ini;
a222d91a 1863void *weak_variable (*__realloc_hook)
6c8dbf00
OB
1864 (void *__ptr, size_t __size, const void *)
1865 = realloc_hook_ini;
a222d91a 1866void *weak_variable (*__memalign_hook)
6c8dbf00
OB
1867 (size_t __alignment, size_t __size, const void *)
1868 = memalign_hook_ini;
06d6611a 1869void weak_variable (*__after_morecore_hook) (void) = NULL;
404d4cef 1870
0a947e06
FW
1871/* This function is called from the arena shutdown hook, to free the
1872 thread cache (if it exists). */
1873static void tcache_thread_shutdown (void);
404d4cef 1874
854278df
UD
1875/* ------------------ Testing support ----------------------------------*/
1876
1877static int perturb_byte;
1878
af102d95 1879static void
e8349efd
OB
1880alloc_perturb (char *p, size_t n)
1881{
1882 if (__glibc_unlikely (perturb_byte))
1883 memset (p, perturb_byte ^ 0xff, n);
1884}
1885
af102d95 1886static void
e8349efd
OB
1887free_perturb (char *p, size_t n)
1888{
1889 if (__glibc_unlikely (perturb_byte))
1890 memset (p, perturb_byte, n);
1891}
1892
854278df
UD
1893
1894
3ea5be54
AO
1895#include <stap-probe.h>
1896
fa8d436c
UD
1897/* ------------------- Support for multiple arenas -------------------- */
1898#include "arena.c"
f65fd747 1899
fa8d436c 1900/*
6c8dbf00 1901 Debugging support
f65fd747 1902
6c8dbf00
OB
1903 These routines make a number of assertions about the states
1904 of data structures that should be true at all times. If any
1905 are not true, it's very likely that a user program has somehow
1906 trashed memory. (It's also possible that there is a coding error
1907 in malloc. In which case, please report it!)
1908 */
ee74a442 1909
6c8dbf00 1910#if !MALLOC_DEBUG
d8f00d46 1911
6c8dbf00
OB
1912# define check_chunk(A, P)
1913# define check_free_chunk(A, P)
1914# define check_inuse_chunk(A, P)
1915# define check_remalloced_chunk(A, P, N)
1916# define check_malloced_chunk(A, P, N)
1917# define check_malloc_state(A)
d8f00d46 1918
fa8d436c 1919#else
ca34d7a7 1920
6c8dbf00
OB
1921# define check_chunk(A, P) do_check_chunk (A, P)
1922# define check_free_chunk(A, P) do_check_free_chunk (A, P)
1923# define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
1924# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
1925# define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
1926# define check_malloc_state(A) do_check_malloc_state (A)
ca34d7a7 1927
fa8d436c 1928/*
6c8dbf00
OB
1929 Properties of all chunks
1930 */
ca34d7a7 1931
6c8dbf00
OB
1932static void
1933do_check_chunk (mstate av, mchunkptr p)
ca34d7a7 1934{
6c8dbf00 1935 unsigned long sz = chunksize (p);
fa8d436c 1936 /* min and max possible addresses assuming contiguous allocation */
6c8dbf00
OB
1937 char *max_address = (char *) (av->top) + chunksize (av->top);
1938 char *min_address = max_address - av->system_mem;
fa8d436c 1939
6c8dbf00
OB
1940 if (!chunk_is_mmapped (p))
1941 {
1942 /* Has legal address ... */
1943 if (p != av->top)
1944 {
1945 if (contiguous (av))
1946 {
1947 assert (((char *) p) >= min_address);
1948 assert (((char *) p + sz) <= ((char *) (av->top)));
1949 }
1950 }
1951 else
1952 {
1953 /* top size is always at least MINSIZE */
1954 assert ((unsigned long) (sz) >= MINSIZE);
1955 /* top predecessor always marked inuse */
1956 assert (prev_inuse (p));
1957 }
fa8d436c 1958 }
4cf6c72f 1959 else if (!DUMPED_MAIN_ARENA_CHUNK (p))
6c8dbf00
OB
1960 {
1961 /* address is outside main heap */
1962 if (contiguous (av) && av->top != initial_top (av))
1963 {
1964 assert (((char *) p) < min_address || ((char *) p) >= max_address);
1965 }
1966 /* chunk is page-aligned */
e9c4fe93 1967 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
6c8dbf00
OB
1968 /* mem is aligned */
1969 assert (aligned_OK (chunk2mem (p)));
fa8d436c 1970 }
eb406346
UD
1971}
1972
fa8d436c 1973/*
6c8dbf00
OB
1974 Properties of free chunks
1975 */
ee74a442 1976
6c8dbf00
OB
1977static void
1978do_check_free_chunk (mstate av, mchunkptr p)
67c94753 1979{
3381be5c 1980 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
6c8dbf00 1981 mchunkptr next = chunk_at_offset (p, sz);
67c94753 1982
6c8dbf00 1983 do_check_chunk (av, p);
67c94753 1984
fa8d436c 1985 /* Chunk must claim to be free ... */
6c8dbf00
OB
1986 assert (!inuse (p));
1987 assert (!chunk_is_mmapped (p));
67c94753 1988
fa8d436c 1989 /* Unless a special marker, must have OK fields */
6c8dbf00
OB
1990 if ((unsigned long) (sz) >= MINSIZE)
1991 {
1992 assert ((sz & MALLOC_ALIGN_MASK) == 0);
1993 assert (aligned_OK (chunk2mem (p)));
1994 /* ... matching footer field */
3381be5c 1995 assert (prev_size (next_chunk (p)) == sz);
6c8dbf00
OB
1996 /* ... and is fully consolidated */
1997 assert (prev_inuse (p));
1998 assert (next == av->top || inuse (next));
1999
2000 /* ... and has minimally sane links */
2001 assert (p->fd->bk == p);
2002 assert (p->bk->fd == p);
2003 }
fa8d436c 2004 else /* markers are always of size SIZE_SZ */
6c8dbf00 2005 assert (sz == SIZE_SZ);
67c94753 2006}
67c94753 2007
fa8d436c 2008/*
6c8dbf00
OB
2009 Properties of inuse chunks
2010 */
fa8d436c 2011
6c8dbf00
OB
2012static void
2013do_check_inuse_chunk (mstate av, mchunkptr p)
f65fd747 2014{
fa8d436c 2015 mchunkptr next;
f65fd747 2016
6c8dbf00 2017 do_check_chunk (av, p);
f65fd747 2018
6c8dbf00 2019 if (chunk_is_mmapped (p))
fa8d436c 2020 return; /* mmapped chunks have no next/prev */
ca34d7a7 2021
fa8d436c 2022 /* Check whether it claims to be in use ... */
6c8dbf00 2023 assert (inuse (p));
10dc2a90 2024
6c8dbf00 2025 next = next_chunk (p);
10dc2a90 2026
fa8d436c 2027 /* ... and is surrounded by OK chunks.
6c8dbf00
OB
2028 Since more things can be checked with free chunks than inuse ones,
2029 if an inuse chunk borders them and debug is on, it's worth doing them.
2030 */
2031 if (!prev_inuse (p))
2032 {
2033 /* Note that we cannot even look at prev unless it is not inuse */
2034 mchunkptr prv = prev_chunk (p);
2035 assert (next_chunk (prv) == p);
2036 do_check_free_chunk (av, prv);
2037 }
fa8d436c 2038
6c8dbf00
OB
2039 if (next == av->top)
2040 {
2041 assert (prev_inuse (next));
2042 assert (chunksize (next) >= MINSIZE);
2043 }
2044 else if (!inuse (next))
2045 do_check_free_chunk (av, next);
10dc2a90
UD
2046}
2047
fa8d436c 2048/*
6c8dbf00
OB
2049 Properties of chunks recycled from fastbins
2050 */
fa8d436c 2051
6c8dbf00
OB
2052static void
2053do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2054{
3381be5c 2055 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
fa8d436c 2056
6c8dbf00
OB
2057 if (!chunk_is_mmapped (p))
2058 {
2059 assert (av == arena_for_chunk (p));
e9c4fe93 2060 if (chunk_main_arena (p))
6c8dbf00 2061 assert (av == &main_arena);
e9c4fe93
FW
2062 else
2063 assert (av != &main_arena);
6c8dbf00 2064 }
fa8d436c 2065
6c8dbf00 2066 do_check_inuse_chunk (av, p);
fa8d436c
UD
2067
2068 /* Legal size ... */
6c8dbf00
OB
2069 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2070 assert ((unsigned long) (sz) >= MINSIZE);
fa8d436c 2071 /* ... and alignment */
6c8dbf00 2072 assert (aligned_OK (chunk2mem (p)));
fa8d436c 2073 /* chunk is less than MINSIZE more than request */
6c8dbf00
OB
2074 assert ((long) (sz) - (long) (s) >= 0);
2075 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
10dc2a90
UD
2076}
2077
fa8d436c 2078/*
6c8dbf00
OB
2079 Properties of nonrecycled chunks at the point they are malloced
2080 */
fa8d436c 2081
6c8dbf00
OB
2082static void
2083do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2084{
fa8d436c 2085 /* same as recycled case ... */
6c8dbf00 2086 do_check_remalloced_chunk (av, p, s);
10dc2a90 2087
fa8d436c 2088 /*
6c8dbf00
OB
2089 ... plus, must obey implementation invariant that prev_inuse is
2090 always true of any allocated chunk; i.e., that each allocated
2091 chunk borders either a previously allocated and still in-use
2092 chunk, or the base of its memory arena. This is ensured
2093 by making all allocations from the `lowest' part of any found
2094 chunk. This does not necessarily hold however for chunks
2095 recycled via fastbins.
2096 */
2097
2098 assert (prev_inuse (p));
fa8d436c 2099}
10dc2a90 2100
f65fd747 2101
fa8d436c 2102/*
6c8dbf00 2103 Properties of malloc_state.
f65fd747 2104
6c8dbf00
OB
2105 This may be useful for debugging malloc, as well as detecting user
2106 programmer errors that somehow write into malloc_state.
f65fd747 2107
6c8dbf00
OB
2108 If you are extending or experimenting with this malloc, you can
2109 probably figure out how to hack this routine to print out or
2110 display chunk addresses, sizes, bins, and other instrumentation.
2111 */
f65fd747 2112
6c8dbf00
OB
2113static void
2114do_check_malloc_state (mstate av)
fa8d436c
UD
2115{
2116 int i;
2117 mchunkptr p;
2118 mchunkptr q;
2119 mbinptr b;
fa8d436c
UD
2120 unsigned int idx;
2121 INTERNAL_SIZE_T size;
2122 unsigned long total = 0;
2123 int max_fast_bin;
f65fd747 2124
fa8d436c 2125 /* internal size_t must be no wider than pointer type */
6c8dbf00 2126 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
f65fd747 2127
fa8d436c 2128 /* alignment is a power of 2 */
6c8dbf00 2129 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
f65fd747 2130
3381be5c
WD
2131 /* Check the arena is initialized. */
2132 assert (av->top != 0);
2133
2134 /* No memory has been allocated yet, so doing more tests is not possible. */
2135 if (av->top == initial_top (av))
fa8d436c 2136 return;
f65fd747 2137
fa8d436c 2138 /* pagesize is a power of 2 */
8a35c3fe 2139 assert (powerof2(GLRO (dl_pagesize)));
f65fd747 2140
fa8d436c 2141 /* A contiguous main_arena is consistent with sbrk_base. */
6c8dbf00
OB
2142 if (av == &main_arena && contiguous (av))
2143 assert ((char *) mp_.sbrk_base + av->system_mem ==
2144 (char *) av->top + chunksize (av->top));
fa8d436c
UD
2145
2146 /* properties of fastbins */
2147
2148 /* max_fast is in allowed range */
6c8dbf00
OB
2149 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2150
2151 max_fast_bin = fastbin_index (get_max_fast ());
2152
2153 for (i = 0; i < NFASTBINS; ++i)
2154 {
2155 p = fastbin (av, i);
2156
2157 /* The following test can only be performed for the main arena.
2158 While mallopt calls malloc_consolidate to get rid of all fast
2159 bins (especially those larger than the new maximum) this does
2160 only happen for the main arena. Trying to do this for any
2161 other arena would mean those arenas have to be locked and
2162 malloc_consolidate be called for them. This is excessive. And
2163 even if this is acceptable to somebody it still cannot solve
2164 the problem completely since if the arena is locked a
2165 concurrent malloc call might create a new arena which then
2166 could use the newly invalid fast bins. */
2167
2168 /* all bins past max_fast are empty */
2169 if (av == &main_arena && i > max_fast_bin)
2170 assert (p == 0);
2171
2172 while (p != 0)
2173 {
2174 /* each chunk claims to be inuse */
2175 do_check_inuse_chunk (av, p);
2176 total += chunksize (p);
2177 /* chunk belongs in this bin */
2178 assert (fastbin_index (chunksize (p)) == i);
2179 p = p->fd;
2180 }
fa8d436c 2181 }
fa8d436c 2182
fa8d436c 2183 /* check normal bins */
6c8dbf00
OB
2184 for (i = 1; i < NBINS; ++i)
2185 {
2186 b = bin_at (av, i);
2187
2188 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2189 if (i >= 2)
2190 {
2191 unsigned int binbit = get_binmap (av, i);
2192 int empty = last (b) == b;
2193 if (!binbit)
2194 assert (empty);
2195 else if (!empty)
2196 assert (binbit);
2197 }
2198
2199 for (p = last (b); p != b; p = p->bk)
2200 {
2201 /* each chunk claims to be free */
2202 do_check_free_chunk (av, p);
2203 size = chunksize (p);
2204 total += size;
2205 if (i >= 2)
2206 {
2207 /* chunk belongs in bin */
2208 idx = bin_index (size);
2209 assert (idx == i);
2210 /* lists are sorted */
2211 assert (p->bk == b ||
2212 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2213
2214 if (!in_smallbin_range (size))
2215 {
2216 if (p->fd_nextsize != NULL)
2217 {
2218 if (p->fd_nextsize == p)
2219 assert (p->bk_nextsize == p);
2220 else
2221 {
2222 if (p->fd_nextsize == first (b))
2223 assert (chunksize (p) < chunksize (p->fd_nextsize));
2224 else
2225 assert (chunksize (p) > chunksize (p->fd_nextsize));
2226
2227 if (p == first (b))
2228 assert (chunksize (p) > chunksize (p->bk_nextsize));
2229 else
2230 assert (chunksize (p) < chunksize (p->bk_nextsize));
2231 }
2232 }
2233 else
2234 assert (p->bk_nextsize == NULL);
2235 }
2236 }
2237 else if (!in_smallbin_range (size))
2238 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2239 /* chunk is followed by a legal chain of inuse chunks */
2240 for (q = next_chunk (p);
2241 (q != av->top && inuse (q) &&
2242 (unsigned long) (chunksize (q)) >= MINSIZE);
2243 q = next_chunk (q))
2244 do_check_inuse_chunk (av, q);
2245 }
fa8d436c 2246 }
f65fd747 2247
fa8d436c 2248 /* top chunk is OK */
6c8dbf00 2249 check_chunk (av, av->top);
fa8d436c
UD
2250}
2251#endif
2252
2253
2254/* ----------------- Support for debugging hooks -------------------- */
2255#include "hooks.c"
2256
2257
2258/* ----------- Routines dealing with system allocation -------------- */
2259
2260/*
6c8dbf00
OB
2261 sysmalloc handles malloc cases requiring more memory from the system.
2262 On entry, it is assumed that av->top does not have enough
2263 space to service request for nb bytes, thus requiring that av->top
2264 be extended or replaced.
2265 */
fa8d436c 2266
6c8dbf00
OB
2267static void *
2268sysmalloc (INTERNAL_SIZE_T nb, mstate av)
f65fd747 2269{
6c8dbf00 2270 mchunkptr old_top; /* incoming value of av->top */
fa8d436c 2271 INTERNAL_SIZE_T old_size; /* its size */
6c8dbf00 2272 char *old_end; /* its end address */
f65fd747 2273
6c8dbf00
OB
2274 long size; /* arg to first MORECORE or mmap call */
2275 char *brk; /* return value from MORECORE */
f65fd747 2276
6c8dbf00
OB
2277 long correction; /* arg to 2nd MORECORE call */
2278 char *snd_brk; /* 2nd return val */
f65fd747 2279
fa8d436c
UD
2280 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2281 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
6c8dbf00 2282 char *aligned_brk; /* aligned offset into brk */
f65fd747 2283
6c8dbf00
OB
2284 mchunkptr p; /* the allocated/returned chunk */
2285 mchunkptr remainder; /* remainder from allocation */
2286 unsigned long remainder_size; /* its size */
fa8d436c 2287
fa8d436c 2288
8a35c3fe 2289 size_t pagesize = GLRO (dl_pagesize);
6c8dbf00 2290 bool tried_mmap = false;
fa8d436c
UD
2291
2292
fa8d436c 2293 /*
6c8dbf00
OB
2294 If have mmap, and the request size meets the mmap threshold, and
2295 the system supports mmap, and there are few enough currently
2296 allocated mmapped regions, try to directly map this request
2297 rather than expanding top.
2298 */
2299
fff94fa2
SP
2300 if (av == NULL
2301 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2302 && (mp_.n_mmaps < mp_.n_mmaps_max)))
6c8dbf00
OB
2303 {
2304 char *mm; /* return value from mmap call*/
a9177ff5 2305
6c8dbf00
OB
2306 try_mmap:
2307 /*
2308 Round up size to nearest page. For mmapped chunks, the overhead
2309 is one SIZE_SZ unit larger than for normal chunks, because there
2310 is no following chunk whose prev_size field could be used.
2311
2312 See the front_misalign handling below; for glibc there is no
2313 need for further alignments unless we have high alignment.
2314 */
2315 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
8a35c3fe 2316 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
6c8dbf00 2317 else
8a35c3fe 2318 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
6c8dbf00
OB
2319 tried_mmap = true;
2320
2321 /* Don't try if size wraps around 0 */
2322 if ((unsigned long) (size) > (unsigned long) (nb))
2323 {
2324 mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2325
2326 if (mm != MAP_FAILED)
2327 {
2328 /*
2329 The offset to the start of the mmapped region is stored
2330 in the prev_size field of the chunk. This allows us to adjust
2331 returned start address to meet alignment requirements here
2332 and in memalign(), and still be able to compute proper
2333 address argument for later munmap in free() and realloc().
2334 */
2335
2336 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2337 {
2338 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2339 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2340 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2341 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2342 front_misalign = 0;
2343 }
2344 else
2345 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2346 if (front_misalign > 0)
2347 {
2348 correction = MALLOC_ALIGNMENT - front_misalign;
2349 p = (mchunkptr) (mm + correction);
e9c4fe93 2350 set_prev_size (p, correction);
6c8dbf00
OB
2351 set_head (p, (size - correction) | IS_MMAPPED);
2352 }
2353 else
2354 {
2355 p = (mchunkptr) mm;
681421f3 2356 set_prev_size (p, 0);
6c8dbf00
OB
2357 set_head (p, size | IS_MMAPPED);
2358 }
2359
2360 /* update statistics */
2361
2362 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2363 atomic_max (&mp_.max_n_mmaps, new);
2364
2365 unsigned long sum;
2366 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2367 atomic_max (&mp_.max_mmapped_mem, sum);
2368
2369 check_chunk (av, p);
2370
2371 return chunk2mem (p);
2372 }
2373 }
fa8d436c 2374 }
fa8d436c 2375
fff94fa2
SP
2376 /* There are no usable arenas and mmap also failed. */
2377 if (av == NULL)
2378 return 0;
2379
fa8d436c
UD
2380 /* Record incoming configuration of top */
2381
6c8dbf00
OB
2382 old_top = av->top;
2383 old_size = chunksize (old_top);
2384 old_end = (char *) (chunk_at_offset (old_top, old_size));
fa8d436c 2385
6c8dbf00 2386 brk = snd_brk = (char *) (MORECORE_FAILURE);
fa8d436c 2387
a9177ff5 2388 /*
fa8d436c
UD
2389 If not the first time through, we require old_size to be
2390 at least MINSIZE and to have prev_inuse set.
6c8dbf00 2391 */
fa8d436c 2392
6c8dbf00
OB
2393 assert ((old_top == initial_top (av) && old_size == 0) ||
2394 ((unsigned long) (old_size) >= MINSIZE &&
2395 prev_inuse (old_top) &&
8a35c3fe 2396 ((unsigned long) old_end & (pagesize - 1)) == 0));
fa8d436c
UD
2397
2398 /* Precondition: not enough current space to satisfy nb request */
6c8dbf00 2399 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
a9177ff5 2400
72f90263 2401
6c8dbf00
OB
2402 if (av != &main_arena)
2403 {
2404 heap_info *old_heap, *heap;
2405 size_t old_heap_size;
2406
2407 /* First try to extend the current heap. */
2408 old_heap = heap_for_ptr (old_top);
2409 old_heap_size = old_heap->size;
2410 if ((long) (MINSIZE + nb - old_size) > 0
2411 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2412 {
2413 av->system_mem += old_heap->size - old_heap_size;
6c8dbf00
OB
2414 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2415 | PREV_INUSE);
2416 }
2417 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2418 {
2419 /* Use a newly allocated heap. */
2420 heap->ar_ptr = av;
2421 heap->prev = old_heap;
2422 av->system_mem += heap->size;
6c8dbf00
OB
2423 /* Set up the new top. */
2424 top (av) = chunk_at_offset (heap, sizeof (*heap));
2425 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2426
2427 /* Setup fencepost and free the old top chunk with a multiple of
2428 MALLOC_ALIGNMENT in size. */
2429 /* The fencepost takes at least MINSIZE bytes, because it might
2430 become the top chunk again later. Note that a footer is set
2431 up, too, although the chunk is marked in use. */
2432 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2433 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
2434 if (old_size >= MINSIZE)
2435 {
2436 set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
2437 set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
2438 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2439 _int_free (av, old_top, 1);
2440 }
2441 else
2442 {
2443 set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
2444 set_foot (old_top, (old_size + 2 * SIZE_SZ));
2445 }
2446 }
2447 else if (!tried_mmap)
2448 /* We can at least try to use mmap memory. */
2449 goto try_mmap;
fa8d436c 2450 }
6c8dbf00 2451 else /* av == main_arena */
fa8d436c 2452
fa8d436c 2453
6c8dbf00
OB
2454 { /* Request enough space for nb + pad + overhead */
2455 size = nb + mp_.top_pad + MINSIZE;
a9177ff5 2456
6c8dbf00
OB
2457 /*
2458 If contiguous, we can subtract out existing space that we hope to
2459 combine with new space. We add it back later only if
2460 we don't actually get contiguous space.
2461 */
a9177ff5 2462
6c8dbf00
OB
2463 if (contiguous (av))
2464 size -= old_size;
fa8d436c 2465
6c8dbf00
OB
2466 /*
2467 Round to a multiple of page size.
2468 If MORECORE is not contiguous, this ensures that we only call it
2469 with whole-page arguments. And if MORECORE is contiguous and
2470 this is not first time through, this preserves page-alignment of
2471 previous calls. Otherwise, we correct to page-align below.
2472 */
fa8d436c 2473
8a35c3fe 2474 size = ALIGN_UP (size, pagesize);
fa8d436c 2475
6c8dbf00
OB
2476 /*
2477 Don't try to call MORECORE if argument is so big as to appear
2478 negative. Note that since mmap takes size_t arg, it may succeed
2479 below even if we cannot call MORECORE.
2480 */
2481
2482 if (size > 0)
2483 {
2484 brk = (char *) (MORECORE (size));
2485 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2486 }
2487
2488 if (brk != (char *) (MORECORE_FAILURE))
2489 {
2490 /* Call the `morecore' hook if necessary. */
2491 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2492 if (__builtin_expect (hook != NULL, 0))
2493 (*hook)();
2494 }
2495 else
2496 {
2497 /*
2498 If have mmap, try using it as a backup when MORECORE fails or
2499 cannot be used. This is worth doing on systems that have "holes" in
2500 address space, so sbrk cannot extend to give contiguous space, but
2501 space is available elsewhere. Note that we ignore mmap max count
2502 and threshold limits, since the space will not be used as a
2503 segregated mmap region.
2504 */
2505
2506 /* Cannot merge with old top, so add its size back in */
2507 if (contiguous (av))
8a35c3fe 2508 size = ALIGN_UP (size + old_size, pagesize);
6c8dbf00
OB
2509
2510 /* If we are relying on mmap as backup, then use larger units */
2511 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2512 size = MMAP_AS_MORECORE_SIZE;
2513
2514 /* Don't try if size wraps around 0 */
2515 if ((unsigned long) (size) > (unsigned long) (nb))
2516 {
2517 char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2518
2519 if (mbrk != MAP_FAILED)
2520 {
2521 /* We do not need, and cannot use, another sbrk call to find end */
2522 brk = mbrk;
2523 snd_brk = brk + size;
2524
2525 /*
2526 Record that we no longer have a contiguous sbrk region.
2527 After the first time mmap is used as backup, we do not
2528 ever rely on contiguous space since this could incorrectly
2529 bridge regions.
2530 */
2531 set_noncontiguous (av);
2532 }
2533 }
2534 }
2535
2536 if (brk != (char *) (MORECORE_FAILURE))
2537 {
2538 if (mp_.sbrk_base == 0)
2539 mp_.sbrk_base = brk;
2540 av->system_mem += size;
2541
2542 /*
2543 If MORECORE extends previous space, we can likewise extend top size.
2544 */
2545
2546 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2547 set_head (old_top, (size + old_size) | PREV_INUSE);
2548
2549 else if (contiguous (av) && old_size && brk < old_end)
ac3ed168
FW
2550 /* Oops! Someone else killed our space.. Can't touch anything. */
2551 malloc_printerr ("break adjusted to free malloc space");
6c8dbf00
OB
2552
2553 /*
2554 Otherwise, make adjustments:
2555
2556 * If the first time through or noncontiguous, we need to call sbrk
2557 just to find out where the end of memory lies.
2558
2559 * We need to ensure that all returned chunks from malloc will meet
2560 MALLOC_ALIGNMENT
2561
2562 * If there was an intervening foreign sbrk, we need to adjust sbrk
2563 request size to account for fact that we will not be able to
2564 combine new space with existing space in old_top.
2565
2566 * Almost all systems internally allocate whole pages at a time, in
2567 which case we might as well use the whole last page of request.
2568 So we allocate enough more memory to hit a page boundary now,
2569 which in turn causes future contiguous calls to page-align.
2570 */
2571
2572 else
2573 {
2574 front_misalign = 0;
2575 end_misalign = 0;
2576 correction = 0;
2577 aligned_brk = brk;
2578
2579 /* handle contiguous cases */
2580 if (contiguous (av))
2581 {
2582 /* Count foreign sbrk as system_mem. */
2583 if (old_size)
2584 av->system_mem += brk - old_end;
2585
2586 /* Guarantee alignment of first new chunk made from this space */
2587
2588 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2589 if (front_misalign > 0)
2590 {
2591 /*
2592 Skip over some bytes to arrive at an aligned position.
2593 We don't need to specially mark these wasted front bytes.
2594 They will never be accessed anyway because
2595 prev_inuse of av->top (and any chunk created from its start)
2596 is always true after initialization.
2597 */
2598
2599 correction = MALLOC_ALIGNMENT - front_misalign;
2600 aligned_brk += correction;
2601 }
2602
2603 /*
2604 If this isn't adjacent to existing space, then we will not
2605 be able to merge with old_top space, so must add to 2nd request.
2606 */
2607
2608 correction += old_size;
2609
2610 /* Extend the end address to hit a page boundary */
2611 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
8a35c3fe 2612 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
6c8dbf00
OB
2613
2614 assert (correction >= 0);
2615 snd_brk = (char *) (MORECORE (correction));
2616
2617 /*
2618 If can't allocate correction, try to at least find out current
2619 brk. It might be enough to proceed without failing.
2620
2621 Note that if second sbrk did NOT fail, we assume that space
2622 is contiguous with first sbrk. This is a safe assumption unless
2623 program is multithreaded but doesn't use locks and a foreign sbrk
2624 occurred between our first and second calls.
2625 */
2626
2627 if (snd_brk == (char *) (MORECORE_FAILURE))
2628 {
2629 correction = 0;
2630 snd_brk = (char *) (MORECORE (0));
2631 }
2632 else
2633 {
2634 /* Call the `morecore' hook if necessary. */
2635 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2636 if (__builtin_expect (hook != NULL, 0))
2637 (*hook)();
2638 }
2639 }
2640
2641 /* handle non-contiguous cases */
2642 else
2643 {
2644 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2645 /* MORECORE/mmap must correctly align */
2646 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2647 else
2648 {
2649 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2650 if (front_misalign > 0)
2651 {
2652 /*
2653 Skip over some bytes to arrive at an aligned position.
2654 We don't need to specially mark these wasted front bytes.
2655 They will never be accessed anyway because
2656 prev_inuse of av->top (and any chunk created from its start)
2657 is always true after initialization.
2658 */
2659
2660 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2661 }
2662 }
2663
2664 /* Find out current end of memory */
2665 if (snd_brk == (char *) (MORECORE_FAILURE))
2666 {
2667 snd_brk = (char *) (MORECORE (0));
2668 }
2669 }
2670
2671 /* Adjust top based on results of second sbrk */
2672 if (snd_brk != (char *) (MORECORE_FAILURE))
2673 {
2674 av->top = (mchunkptr) aligned_brk;
2675 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2676 av->system_mem += correction;
2677
2678 /*
2679 If not the first time through, we either have a
2680 gap due to foreign sbrk or a non-contiguous region. Insert a
2681 double fencepost at old_top to prevent consolidation with space
2682 we don't own. These fenceposts are artificial chunks that are
2683 marked as inuse and are in any case too small to use. We need
2684 two to make sizes and alignments work out.
2685 */
2686
2687 if (old_size != 0)
2688 {
2689 /*
2690 Shrink old_top to insert fenceposts, keeping size a
2691 multiple of MALLOC_ALIGNMENT. We know there is at least
2692 enough space in old_top to do this.
2693 */
2694 old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2695 set_head (old_top, old_size | PREV_INUSE);
2696
2697 /*
2698 Note that the following assignments completely overwrite
2699 old_top when old_size was previously MINSIZE. This is
2700 intentional. We need the fencepost, even if old_top otherwise gets
2701 lost.
2702 */
e9c4fe93
FW
2703 set_head (chunk_at_offset (old_top, old_size),
2704 (2 * SIZE_SZ) | PREV_INUSE);
2705 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
2706 (2 * SIZE_SZ) | PREV_INUSE);
6c8dbf00
OB
2707
2708 /* If possible, release the rest. */
2709 if (old_size >= MINSIZE)
2710 {
2711 _int_free (av, old_top, 1);
2712 }
2713 }
2714 }
2715 }
2716 }
2717 } /* if (av != &main_arena) */
2718
2719 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
fa8d436c 2720 av->max_system_mem = av->system_mem;
6c8dbf00 2721 check_malloc_state (av);
a9177ff5 2722
fa8d436c
UD
2723 /* finally, do the allocation */
2724 p = av->top;
6c8dbf00 2725 size = chunksize (p);
fa8d436c
UD
2726
2727 /* check that one of the above allocation paths succeeded */
6c8dbf00
OB
2728 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2729 {
2730 remainder_size = size - nb;
2731 remainder = chunk_at_offset (p, nb);
2732 av->top = remainder;
2733 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2734 set_head (remainder, remainder_size | PREV_INUSE);
2735 check_malloced_chunk (av, p, nb);
2736 return chunk2mem (p);
2737 }
fa8d436c
UD
2738
2739 /* catch all failure paths */
8e58439c 2740 __set_errno (ENOMEM);
fa8d436c
UD
2741 return 0;
2742}
2743
2744
2745/*
6c8dbf00
OB
2746 systrim is an inverse of sorts to sysmalloc. It gives memory back
2747 to the system (via negative arguments to sbrk) if there is unused
2748 memory at the `high' end of the malloc pool. It is called
2749 automatically by free() when top space exceeds the trim
2750 threshold. It is also called by the public malloc_trim routine. It
2751 returns 1 if it actually released any memory, else 0.
2752 */
fa8d436c 2753
6c8dbf00
OB
2754static int
2755systrim (size_t pad, mstate av)
fa8d436c 2756{
6c8dbf00
OB
2757 long top_size; /* Amount of top-most memory */
2758 long extra; /* Amount to release */
2759 long released; /* Amount actually released */
2760 char *current_brk; /* address returned by pre-check sbrk call */
2761 char *new_brk; /* address returned by post-check sbrk call */
8a35c3fe 2762 size_t pagesize;
6c8dbf00 2763 long top_area;
fa8d436c 2764
8a35c3fe 2765 pagesize = GLRO (dl_pagesize);
6c8dbf00 2766 top_size = chunksize (av->top);
a9177ff5 2767
4b5b548c
FS
2768 top_area = top_size - MINSIZE - 1;
2769 if (top_area <= pad)
2770 return 0;
2771
ca6be165
CD
2772 /* Release in pagesize units and round down to the nearest page. */
2773 extra = ALIGN_DOWN(top_area - pad, pagesize);
a9177ff5 2774
51a7380b
WN
2775 if (extra == 0)
2776 return 0;
2777
4b5b548c 2778 /*
6c8dbf00
OB
2779 Only proceed if end of memory is where we last set it.
2780 This avoids problems if there were foreign sbrk calls.
2781 */
2782 current_brk = (char *) (MORECORE (0));
2783 if (current_brk == (char *) (av->top) + top_size)
2784 {
2785 /*
2786 Attempt to release memory. We ignore MORECORE return value,
2787 and instead call again to find out where new end of memory is.
2788 This avoids problems if the first call releases less than we asked,
2789 or if failure somehow altered the brk value. (We could still
2790 encounter problems if it altered brk in some very bad way,
2791 but the only thing we can do is adjust anyway, which will cause
2792 some downstream failure.)
2793 */
2794
2795 MORECORE (-extra);
2796 /* Call the `morecore' hook if necessary. */
2797 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2798 if (__builtin_expect (hook != NULL, 0))
2799 (*hook)();
2800 new_brk = (char *) (MORECORE (0));
2801
2802 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2803
2804 if (new_brk != (char *) MORECORE_FAILURE)
2805 {
2806 released = (long) (current_brk - new_brk);
2807
2808 if (released != 0)
2809 {
2810 /* Success. Adjust top. */
2811 av->system_mem -= released;
2812 set_head (av->top, (top_size - released) | PREV_INUSE);
2813 check_malloc_state (av);
2814 return 1;
2815 }
2816 }
fa8d436c 2817 }
fa8d436c 2818 return 0;
f65fd747
UD
2819}
2820
431c33c0 2821static void
6c8dbf00 2822munmap_chunk (mchunkptr p)
f65fd747 2823{
c0e82f11 2824 size_t pagesize = GLRO (dl_pagesize);
6c8dbf00 2825 INTERNAL_SIZE_T size = chunksize (p);
f65fd747 2826
6c8dbf00 2827 assert (chunk_is_mmapped (p));
8e635611 2828
4cf6c72f
FW
2829 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
2830 main arena. We never free this memory. */
2831 if (DUMPED_MAIN_ARENA_CHUNK (p))
2832 return;
2833
c0e82f11 2834 uintptr_t mem = (uintptr_t) chunk2mem (p);
e9c4fe93
FW
2835 uintptr_t block = (uintptr_t) p - prev_size (p);
2836 size_t total_size = prev_size (p) + size;
8e635611
UD
2837 /* Unfortunately we have to do the compiler's job by hand here. Normally
2838 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2839 page size. But gcc does not recognize the optimization possibility
2840 (at the moment, at least) so we combine the two values into one before
2841 the bit test. */
c0e82f11
IK
2842 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
2843 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
ac3ed168 2844 malloc_printerr ("munmap_chunk(): invalid pointer");
f65fd747 2845
c6e4925d
OB
2846 atomic_decrement (&mp_.n_mmaps);
2847 atomic_add (&mp_.mmapped_mem, -total_size);
f65fd747 2848
6ef76f3b
UD
2849 /* If munmap failed, the process virtual memory address space is in a
2850 bad shape. Just leave the block hanging around; the process will
2851 terminate shortly anyway since not much can be done. */
6c8dbf00 2852 __munmap ((char *) block, total_size);
f65fd747
UD
2853}
2854
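/* Illustrative sketch, not part of the original source: the combined
   page-alignment test used in munmap_chunk above.  OR-ing the block
   start and the total size lets a single mask test check both values
   at once, which is the hand optimization the comment refers to.
   pagesize is assumed to be a power of two.  */
#include <stdbool.h>
#include <stdint.h>

static bool
example_block_is_page_aligned (uintptr_t block, size_t total_size,
                               size_t pagesize)
{
  /* Equivalent to block % pagesize == 0 && total_size % pagesize == 0.  */
  return ((block | total_size) & (pagesize - 1)) == 0;
}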
2855#if HAVE_MREMAP
2856
431c33c0 2857static mchunkptr
6c8dbf00 2858mremap_chunk (mchunkptr p, size_t new_size)
f65fd747 2859{
8a35c3fe 2860 size_t pagesize = GLRO (dl_pagesize);
e9c4fe93 2861 INTERNAL_SIZE_T offset = prev_size (p);
6c8dbf00 2862 INTERNAL_SIZE_T size = chunksize (p);
f65fd747
UD
2863 char *cp;
2864
6c8dbf00 2865 assert (chunk_is_mmapped (p));
ebe544bf
IK
2866
2867 uintptr_t block = (uintptr_t) p - offset;
2868 uintptr_t mem = (uintptr_t) chunk2mem(p);
2869 size_t total_size = offset + size;
2870 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
2871 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
2872 malloc_printerr("mremap_chunk(): invalid pointer");
f65fd747
UD
2873
2874 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
8a35c3fe 2875 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
f65fd747 2876
68f3802d 2877 /* No need to remap if the number of pages does not change. */
ebe544bf 2878 if (total_size == new_size)
68f3802d
UD
2879 return p;
2880
ebe544bf 2881 cp = (char *) __mremap ((char *) block, total_size, new_size,
6c8dbf00 2882 MREMAP_MAYMOVE);
f65fd747 2883
6c8dbf00
OB
2884 if (cp == MAP_FAILED)
2885 return 0;
f65fd747 2886
6c8dbf00 2887 p = (mchunkptr) (cp + offset);
f65fd747 2888
6c8dbf00 2889 assert (aligned_OK (chunk2mem (p)));
f65fd747 2890
e9c4fe93 2891 assert (prev_size (p) == offset);
6c8dbf00 2892 set_head (p, (new_size - offset) | IS_MMAPPED);
f65fd747 2893
c6e4925d
OB
2894 INTERNAL_SIZE_T new;
2895 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
6c8dbf00 2896 + new_size - size - offset;
c6e4925d 2897 atomic_max (&mp_.max_mmapped_mem, new);
f65fd747
UD
2898 return p;
2899}
f65fd747
UD
2900#endif /* HAVE_MREMAP */
2901
fa8d436c 2902/*------------------------ Public wrappers. --------------------------------*/
f65fd747 2903
d5c3fafc
DD
2904#if USE_TCACHE
2905
2906/* We overlay this structure on the user-data portion of a chunk when
2907 the chunk is stored in the per-thread cache. */
2908typedef struct tcache_entry
2909{
2910 struct tcache_entry *next;
bcdaad21
DD
2911 /* This field exists to detect double frees. */
2912 struct tcache_perthread_struct *key;
d5c3fafc
DD
2913} tcache_entry;
2914
2915/* There is one of these for each thread, which contains the
2916 per-thread cache (hence "tcache_perthread_struct"). Keeping
2917 overall size low is mildly important. Note that COUNTS and ENTRIES
2918 are redundant (we could have just counted the linked list each
2919 time); this is for performance reasons. */
2920typedef struct tcache_perthread_struct
2921{
0ad788fa 2922 uint16_t counts[TCACHE_MAX_BINS];
d5c3fafc
DD
2923 tcache_entry *entries[TCACHE_MAX_BINS];
2924} tcache_perthread_struct;
2925
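/* Illustrative sketch, not part of the original source: tcache bins are
   indexed by chunk size in fixed MALLOC_ALIGNMENT steps.  The 32-byte
   minimum chunk and 16-byte alignment below match a typical 64-bit
   build; they are assumptions here, not values taken from this file.  */
#include <stddef.h>

static size_t
example_csize2tidx (size_t chunk_size)
{
  const size_t minsize = 32, alignment = 16;
  return (chunk_size - minsize + alignment - 1) / alignment;
}
/* e.g. example_csize2tidx (32) == 0 and example_csize2tidx (48) == 1,
   so each tcache bin holds exactly one chunk size class.  */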
1e26d351 2926static __thread bool tcache_shutting_down = false;
d5c3fafc
DD
2927static __thread tcache_perthread_struct *tcache = NULL;
2928
2929/* Caller must ensure that we know tc_idx is valid and there's room
2930 for more chunks. */
e4dd4ace 2931static __always_inline void
d5c3fafc
DD
2932tcache_put (mchunkptr chunk, size_t tc_idx)
2933{
2934 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
bcdaad21
DD
2935
2936 /* Mark this chunk as "in the tcache" so the test in _int_free will
2937 detect a double free. */
2938 e->key = tcache;
2939
d5c3fafc
DD
2940 e->next = tcache->entries[tc_idx];
2941 tcache->entries[tc_idx] = e;
2942 ++(tcache->counts[tc_idx]);
2943}
2944
2945/* Caller must ensure that we know tc_idx is valid and there's
2946 available chunks to remove. */
e4dd4ace 2947static __always_inline void *
d5c3fafc
DD
2948tcache_get (size_t tc_idx)
2949{
2950 tcache_entry *e = tcache->entries[tc_idx];
d5c3fafc
DD
2951 tcache->entries[tc_idx] = e->next;
2952 --(tcache->counts[tc_idx]);
bcdaad21 2953 e->key = NULL;
d5c3fafc
DD
2954 return (void *) e;
2955}
2956
0a947e06
FW
2957static void
2958tcache_thread_shutdown (void)
d5c3fafc
DD
2959{
2960 int i;
2961 tcache_perthread_struct *tcache_tmp = tcache;
2962
2963 if (!tcache)
2964 return;
2965
1e26d351 2966 /* Disable the tcache and prevent it from being reinitialized. */
d5c3fafc 2967 tcache = NULL;
1e26d351 2968 tcache_shutting_down = true;
d5c3fafc 2969
1e26d351
CD
2970 /* Free all of the entries and the tcache itself back to the arena
2971 heap for coalescing. */
d5c3fafc
DD
2972 for (i = 0; i < TCACHE_MAX_BINS; ++i)
2973 {
2974 while (tcache_tmp->entries[i])
2975 {
2976 tcache_entry *e = tcache_tmp->entries[i];
2977 tcache_tmp->entries[i] = e->next;
2978 __libc_free (e);
2979 }
2980 }
2981
2982 __libc_free (tcache_tmp);
d5c3fafc 2983}
d5c3fafc
DD
2984
2985static void
2986tcache_init(void)
2987{
2988 mstate ar_ptr;
2989 void *victim = 0;
2990 const size_t bytes = sizeof (tcache_perthread_struct);
2991
2992 if (tcache_shutting_down)
2993 return;
2994
2995 arena_get (ar_ptr, bytes);
2996 victim = _int_malloc (ar_ptr, bytes);
2997 if (!victim && ar_ptr != NULL)
2998 {
2999 ar_ptr = arena_get_retry (ar_ptr, bytes);
3000 victim = _int_malloc (ar_ptr, bytes);
3001 }
3002
3003
3004 if (ar_ptr != NULL)
3005 __libc_lock_unlock (ar_ptr->mutex);
3006
3007 /* In a low memory situation, we may not be able to allocate memory
3008 - in which case, we just keep trying later. However, we
3009 typically do this very early, so either there is sufficient
3010 memory, or there isn't enough memory to do non-trivial
3011 allocations anyway. */
3012 if (victim)
3013 {
3014 tcache = (tcache_perthread_struct *) victim;
3015 memset (tcache, 0, sizeof (tcache_perthread_struct));
3016 }
3017
3018}
3019
0a947e06 3020# define MAYBE_INIT_TCACHE() \
d5c3fafc
DD
3021 if (__glibc_unlikely (tcache == NULL)) \
3022 tcache_init();
3023
0a947e06
FW
3024#else /* !USE_TCACHE */
3025# define MAYBE_INIT_TCACHE()
3026
3027static void
3028tcache_thread_shutdown (void)
3029{
3030 /* Nothing to do if there is no thread cache. */
3031}
3032
3033#endif /* !USE_TCACHE */
d5c3fafc 3034
6c8dbf00
OB
3035void *
3036__libc_malloc (size_t bytes)
fa8d436c
UD
3037{
3038 mstate ar_ptr;
22a89187 3039 void *victim;
f65fd747 3040
a222d91a 3041 void *(*hook) (size_t, const void *)
f3eeb3fc 3042 = atomic_forced_read (__malloc_hook);
bfacf1af 3043 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3044 return (*hook)(bytes, RETURN_ADDRESS (0));
d5c3fafc
DD
3045#if USE_TCACHE
3046 /* int_free also calls request2size, so be careful not to pad twice. */
34697694
AS
3047 size_t tbytes;
3048 checked_request2size (bytes, tbytes);
d5c3fafc
DD
3049 size_t tc_idx = csize2tidx (tbytes);
3050
3051 MAYBE_INIT_TCACHE ();
3052
3053 DIAG_PUSH_NEEDS_COMMENT;
3054 if (tc_idx < mp_.tcache_bins
d5c3fafc 3055 && tcache
0ad788fa 3056 && tcache->counts[tc_idx] > 0)
d5c3fafc
DD
3057 {
3058 return tcache_get (tc_idx);
3059 }
3060 DIAG_POP_NEEDS_COMMENT;
3061#endif
f65fd747 3062
3f6bb8a3
WD
3063 if (SINGLE_THREAD_P)
3064 {
3065 victim = _int_malloc (&main_arena, bytes);
3066 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3067 &main_arena == arena_for_chunk (mem2chunk (victim)));
3068 return victim;
3069 }
3070
94c5a52a 3071 arena_get (ar_ptr, bytes);
425ce2ed 3072
6c8dbf00 3073 victim = _int_malloc (ar_ptr, bytes);
fff94fa2
SP
3074 /* Retry with another arena only if we were able to find a usable arena
3075 before. */
3076 if (!victim && ar_ptr != NULL)
6c8dbf00
OB
3077 {
3078 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3079 ar_ptr = arena_get_retry (ar_ptr, bytes);
fff94fa2 3080 victim = _int_malloc (ar_ptr, bytes);
60f0e64b 3081 }
fff94fa2
SP
3082
3083 if (ar_ptr != NULL)
4bf5f222 3084 __libc_lock_unlock (ar_ptr->mutex);
fff94fa2 3085
6c8dbf00
OB
3086 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3087 ar_ptr == arena_for_chunk (mem2chunk (victim)));
fa8d436c 3088 return victim;
f65fd747 3089}
6c8dbf00 3090libc_hidden_def (__libc_malloc)
f65fd747 3091
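/* Illustrative sketch, not part of the original source: what the
   checked_request2size call in __libc_malloc above does to a user
   request before any bin logic runs.  The constants assume a typical
   64-bit build (SIZE_SZ == 8, MALLOC_ALIGN_MASK == 15, MINSIZE == 32);
   the real macro additionally rejects requests that would wrap.  */
#include <stddef.h>

static size_t
example_request2size (size_t req)
{
  const size_t size_sz = 8, align_mask = 15, minsize = 32;
  size_t padded = req + size_sz + align_mask;
  if (padded < minsize)
    return minsize;                    /* never smaller than MINSIZE */
  return padded & ~align_mask;         /* round to MALLOC_ALIGNMENT */
}
/* e.g. example_request2size (24) == 32 and example_request2size (100) == 112,
   which is why the tcache path must not apply the padding a second time.  */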
fa8d436c 3092void
6c8dbf00 3093__libc_free (void *mem)
f65fd747 3094{
fa8d436c
UD
3095 mstate ar_ptr;
3096 mchunkptr p; /* chunk corresponding to mem */
3097
a222d91a 3098 void (*hook) (void *, const void *)
f3eeb3fc 3099 = atomic_forced_read (__free_hook);
6c8dbf00
OB
3100 if (__builtin_expect (hook != NULL, 0))
3101 {
3102 (*hook)(mem, RETURN_ADDRESS (0));
3103 return;
3104 }
f65fd747 3105
fa8d436c
UD
3106 if (mem == 0) /* free(0) has no effect */
3107 return;
f65fd747 3108
6c8dbf00 3109 p = mem2chunk (mem);
f65fd747 3110
6c8dbf00
OB
3111 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3112 {
4cf6c72f
FW
3113 /* See if the dynamic brk/mmap threshold needs adjusting.
3114 Dumped fake mmapped chunks do not affect the threshold. */
6c8dbf00 3115 if (!mp_.no_dyn_threshold
e9c4fe93
FW
3116 && chunksize_nomask (p) > mp_.mmap_threshold
3117 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
4cf6c72f 3118 && !DUMPED_MAIN_ARENA_CHUNK (p))
6c8dbf00
OB
3119 {
3120 mp_.mmap_threshold = chunksize (p);
3121 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3122 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3123 mp_.mmap_threshold, mp_.trim_threshold);
3124 }
3125 munmap_chunk (p);
3126 return;
3127 }
f65fd747 3128
d5c3fafc
DD
3129 MAYBE_INIT_TCACHE ();
3130
6c8dbf00
OB
3131 ar_ptr = arena_for_chunk (p);
3132 _int_free (ar_ptr, p, 0);
f65fd747 3133}
3b49edc0 3134libc_hidden_def (__libc_free)
f65fd747 3135
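/* Illustrative sketch, not part of the original source: the dynamic
   threshold adjustment __libc_free performs above when a large mmapped
   chunk is released.  The cap is passed in because
   DEFAULT_MMAP_THRESHOLD_MAX depends on the configuration.  */
#include <stddef.h>

static void
example_update_thresholds (size_t freed_size, size_t cap,
                           size_t *mmap_threshold, size_t *trim_threshold)
{
  if (freed_size > *mmap_threshold && freed_size <= cap)
    {
      /* Future requests of this size are served from the heap instead
         of mmap, and the heap may grow twice as large before trimming.  */
      *mmap_threshold = freed_size;
      *trim_threshold = 2 * *mmap_threshold;
    }
}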
6c8dbf00
OB
3136void *
3137__libc_realloc (void *oldmem, size_t bytes)
f65fd747 3138{
fa8d436c 3139 mstate ar_ptr;
6c8dbf00 3140 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 3141
6c8dbf00 3142 void *newp; /* chunk to return */
f65fd747 3143
a222d91a 3144 void *(*hook) (void *, size_t, const void *) =
f3eeb3fc 3145 atomic_forced_read (__realloc_hook);
bfacf1af 3146 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3147 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
f65fd747 3148
fa8d436c 3149#if REALLOC_ZERO_BYTES_FREES
6c8dbf00
OB
3150 if (bytes == 0 && oldmem != NULL)
3151 {
3152 __libc_free (oldmem); return 0;
3153 }
f65fd747 3154#endif
f65fd747 3155
fa8d436c 3156 /* realloc of null is supposed to be same as malloc */
6c8dbf00
OB
3157 if (oldmem == 0)
3158 return __libc_malloc (bytes);
f65fd747 3159
78ac92ad 3160 /* chunk corresponding to oldmem */
6c8dbf00 3161 const mchunkptr oldp = mem2chunk (oldmem);
78ac92ad 3162 /* its size */
6c8dbf00 3163 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
f65fd747 3164
fff94fa2
SP
3165 if (chunk_is_mmapped (oldp))
3166 ar_ptr = NULL;
3167 else
d5c3fafc
DD
3168 {
3169 MAYBE_INIT_TCACHE ();
3170 ar_ptr = arena_for_chunk (oldp);
3171 }
fff94fa2 3172
4cf6c72f
FW
3173 /* Little security check which won't hurt performance: the allocator
3174 never wraps around at the end of the address space. Therefore
3175 we can exclude some size values which might appear here by
3176 accident or by "design" from some intruder. We need to bypass
3177 this check for dumped fake mmap chunks from the old main arena
3178 because the new malloc may provide additional alignment. */
3179 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3180 || __builtin_expect (misaligned_chunk (oldp), 0))
3181 && !DUMPED_MAIN_ARENA_CHUNK (oldp))
ac3ed168 3182 malloc_printerr ("realloc(): invalid pointer");
dc165f7b 3183
6c8dbf00 3184 checked_request2size (bytes, nb);
f65fd747 3185
6c8dbf00
OB
3186 if (chunk_is_mmapped (oldp))
3187 {
4cf6c72f
FW
3188 /* If this is a faked mmapped chunk from the dumped main arena,
3189 always make a copy (and do not free the old chunk). */
3190 if (DUMPED_MAIN_ARENA_CHUNK (oldp))
3191 {
3192 /* Must alloc, copy, free. */
3193 void *newmem = __libc_malloc (bytes);
3194 if (newmem == 0)
3195 return NULL;
3196 /* Copy as many bytes as are available from the old chunk
1e8a8875
FW
3197 and fit into the new size. NB: The overhead for faked
3198 mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
3199 regular mmapped chunks. */
3200 if (bytes > oldsize - SIZE_SZ)
3201 bytes = oldsize - SIZE_SZ;
4cf6c72f
FW
3202 memcpy (newmem, oldmem, bytes);
3203 return newmem;
3204 }
3205
6c8dbf00 3206 void *newmem;
f65fd747 3207
fa8d436c 3208#if HAVE_MREMAP
6c8dbf00
OB
3209 newp = mremap_chunk (oldp, nb);
3210 if (newp)
3211 return chunk2mem (newp);
f65fd747 3212#endif
6c8dbf00
OB
3213 /* Note the extra SIZE_SZ overhead. */
3214 if (oldsize - SIZE_SZ >= nb)
3215 return oldmem; /* do nothing */
3216
3217 /* Must alloc, copy, free. */
3218 newmem = __libc_malloc (bytes);
3219 if (newmem == 0)
3220 return 0; /* propagate failure */
fa8d436c 3221
6c8dbf00
OB
3222 memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
3223 munmap_chunk (oldp);
3224 return newmem;
3225 }
3226
3f6bb8a3
WD
3227 if (SINGLE_THREAD_P)
3228 {
3229 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3230 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3231 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3232
3233 return newp;
3234 }
3235
4bf5f222 3236 __libc_lock_lock (ar_ptr->mutex);
f65fd747 3237
6c8dbf00 3238 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
f65fd747 3239
4bf5f222 3240 __libc_lock_unlock (ar_ptr->mutex);
6c8dbf00
OB
3241 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3242 ar_ptr == arena_for_chunk (mem2chunk (newp)));
07014fca
UD
3243
3244 if (newp == NULL)
3245 {
3246 /* Try harder to allocate memory in other arenas. */
35fed6f1 3247 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
6c8dbf00 3248 newp = __libc_malloc (bytes);
07014fca 3249 if (newp != NULL)
6c8dbf00
OB
3250 {
3251 memcpy (newp, oldmem, oldsize - SIZE_SZ);
3252 _int_free (ar_ptr, oldp, 0);
3253 }
07014fca
UD
3254 }
3255
fa8d436c
UD
3256 return newp;
3257}
3b49edc0 3258libc_hidden_def (__libc_realloc)
f65fd747 3259
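/* Illustrative sketch, not part of the original source: the wrap-around
   check __libc_realloc applies above.  A real chunk cannot extend past
   the end of the address space, so an (address, size) pair whose sum
   would wrap is rejected as invalid.  */
#include <stdbool.h>
#include <stdint.h>

static bool
example_size_would_wrap (uintptr_t chunk_addr, uintptr_t chunk_size)
{
  /* (uintptr_t) -chunk_size is the highest address at which a chunk of
     this size could start without running off the address space.  */
  return chunk_addr > (uintptr_t) -chunk_size;
}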
6c8dbf00
OB
3260void *
3261__libc_memalign (size_t alignment, size_t bytes)
10ad46bc
OB
3262{
3263 void *address = RETURN_ADDRESS (0);
3264 return _mid_memalign (alignment, bytes, address);
3265}
3266
3267static void *
3268_mid_memalign (size_t alignment, size_t bytes, void *address)
fa8d436c
UD
3269{
3270 mstate ar_ptr;
22a89187 3271 void *p;
f65fd747 3272
a222d91a 3273 void *(*hook) (size_t, size_t, const void *) =
f3eeb3fc 3274 atomic_forced_read (__memalign_hook);
bfacf1af 3275 if (__builtin_expect (hook != NULL, 0))
10ad46bc 3276 return (*hook)(alignment, bytes, address);
f65fd747 3277
10ad46bc 3278 /* If we need less alignment than we give anyway, just relay to malloc. */
6c8dbf00
OB
3279 if (alignment <= MALLOC_ALIGNMENT)
3280 return __libc_malloc (bytes);
1228ed5c 3281
fa8d436c 3282 /* Otherwise, ensure that it is at least a minimum chunk size */
6c8dbf00
OB
3283 if (alignment < MINSIZE)
3284 alignment = MINSIZE;
f65fd747 3285
a56ee40b
WN
3286 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3287 power of 2 and will cause overflow in the check below. */
3288 if (alignment > SIZE_MAX / 2 + 1)
3289 {
3290 __set_errno (EINVAL);
3291 return 0;
3292 }
3293
b73ed247
WN
3294 /* Check for overflow. */
3295 if (bytes > SIZE_MAX - alignment - MINSIZE)
3296 {
3297 __set_errno (ENOMEM);
3298 return 0;
3299 }
3300
10ad46bc
OB
3301
3302 /* Make sure alignment is power of 2. */
6c8dbf00
OB
3303 if (!powerof2 (alignment))
3304 {
3305 size_t a = MALLOC_ALIGNMENT * 2;
3306 while (a < alignment)
3307 a <<= 1;
3308 alignment = a;
3309 }
10ad46bc 3310
3f6bb8a3
WD
3311 if (SINGLE_THREAD_P)
3312 {
3313 p = _int_memalign (&main_arena, alignment, bytes);
3314 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3315 &main_arena == arena_for_chunk (mem2chunk (p)));
3316
3317 return p;
3318 }
3319
6c8dbf00 3320 arena_get (ar_ptr, bytes + alignment + MINSIZE);
6c8dbf00
OB
3321
3322 p = _int_memalign (ar_ptr, alignment, bytes);
fff94fa2 3323 if (!p && ar_ptr != NULL)
6c8dbf00
OB
3324 {
3325 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3326 ar_ptr = arena_get_retry (ar_ptr, bytes);
fff94fa2 3327 p = _int_memalign (ar_ptr, alignment, bytes);
f65fd747 3328 }
fff94fa2
SP
3329
3330 if (ar_ptr != NULL)
4bf5f222 3331 __libc_lock_unlock (ar_ptr->mutex);
fff94fa2 3332
6c8dbf00
OB
3333 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3334 ar_ptr == arena_for_chunk (mem2chunk (p)));
fa8d436c 3335 return p;
f65fd747 3336}
380d7e87 3337/* For ISO C11. */
3b49edc0
UD
3338weak_alias (__libc_memalign, aligned_alloc)
3339libc_hidden_def (__libc_memalign)
f65fd747 3340
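/* Illustrative sketch, not part of the original source: how
   _mid_memalign above rounds a non-power-of-two alignment request up
   to the next power of two.  The starting value of 32 assumes
   MALLOC_ALIGNMENT == 16.  */
#include <stddef.h>

static size_t
example_round_alignment (size_t alignment)
{
  size_t a = 32;                /* MALLOC_ALIGNMENT * 2, assumed 16 * 2 */
  while (a < alignment)
    a <<= 1;
  return a;
}
/* e.g. example_round_alignment (48) == 64; callers have already rejected
   alignments large enough to make the shift overflow.  */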
6c8dbf00
OB
3341void *
3342__libc_valloc (size_t bytes)
fa8d436c 3343{
6c8dbf00 3344 if (__malloc_initialized < 0)
fa8d436c 3345 ptmalloc_init ();
8088488d 3346
10ad46bc 3347 void *address = RETURN_ADDRESS (0);
8a35c3fe
CD
3348 size_t pagesize = GLRO (dl_pagesize);
3349 return _mid_memalign (pagesize, bytes, address);
fa8d436c 3350}
f65fd747 3351
6c8dbf00
OB
3352void *
3353__libc_pvalloc (size_t bytes)
fa8d436c 3354{
6c8dbf00 3355 if (__malloc_initialized < 0)
fa8d436c 3356 ptmalloc_init ();
8088488d 3357
10ad46bc 3358 void *address = RETURN_ADDRESS (0);
8a35c3fe
CD
3359 size_t pagesize = GLRO (dl_pagesize);
3360 size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
dba38551 3361
1159a193 3362 /* Check for overflow. */
8a35c3fe 3363 if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
1159a193
WN
3364 {
3365 __set_errno (ENOMEM);
3366 return 0;
3367 }
3368
8a35c3fe 3369 return _mid_memalign (pagesize, rounded_bytes, address);
fa8d436c 3370}
f65fd747 3371
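/* Illustrative sketch, not part of the original source: __libc_pvalloc
   above rounds the request up to a whole number of pages before
   delegating to _mid_memalign; ALIGN_UP is written out here for a
   power-of-two pagesize.  */
#include <stddef.h>

static size_t
example_pvalloc_bytes (size_t bytes, size_t pagesize)
{
  return (bytes + pagesize - 1) & ~(pagesize - 1);
}
/* e.g. with 4096-byte pages, example_pvalloc_bytes (1, 4096) == 4096.  */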
6c8dbf00
OB
3372void *
3373__libc_calloc (size_t n, size_t elem_size)
f65fd747 3374{
d6285c9f
CD
3375 mstate av;
3376 mchunkptr oldtop, p;
3377 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
6c8dbf00 3378 void *mem;
d6285c9f
CD
3379 unsigned long clearsize;
3380 unsigned long nclears;
3381 INTERNAL_SIZE_T *d;
0950889b
UD
3382
3383 /* size_t is unsigned so the behavior on overflow is defined. */
3384 bytes = n * elem_size;
d9af917d
UD
3385#define HALF_INTERNAL_SIZE_T \
3386 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
6c8dbf00
OB
3387 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
3388 {
3389 if (elem_size != 0 && bytes / elem_size != n)
3390 {
3391 __set_errno (ENOMEM);
3392 return 0;
3393 }
d9af917d 3394 }
0950889b 3395
a222d91a 3396 void *(*hook) (size_t, const void *) =
f3eeb3fc 3397 atomic_forced_read (__malloc_hook);
6c8dbf00
OB
3398 if (__builtin_expect (hook != NULL, 0))
3399 {
d6285c9f
CD
3400 sz = bytes;
3401 mem = (*hook)(sz, RETURN_ADDRESS (0));
3402 if (mem == 0)
3403 return 0;
3404
3405 return memset (mem, 0, sz);
7799b7b3 3406 }
f65fd747 3407
d6285c9f
CD
3408 sz = bytes;
3409
d5c3fafc
DD
3410 MAYBE_INIT_TCACHE ();
3411
3f6bb8a3
WD
3412 if (SINGLE_THREAD_P)
3413 av = &main_arena;
3414 else
3415 arena_get (av, sz);
3416
fff94fa2
SP
3417 if (av)
3418 {
3419 /* Check if we hand out the top chunk, in which case there may be no
3420 need to clear. */
d6285c9f 3421#if MORECORE_CLEARS
fff94fa2
SP
3422 oldtop = top (av);
3423 oldtopsize = chunksize (top (av));
d6285c9f 3424# if MORECORE_CLEARS < 2
fff94fa2
SP
3425 /* Only newly allocated memory is guaranteed to be cleared. */
3426 if (av == &main_arena &&
3427 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3428 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
d6285c9f 3429# endif
fff94fa2
SP
3430 if (av != &main_arena)
3431 {
3432 heap_info *heap = heap_for_ptr (oldtop);
3433 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3434 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3435 }
3436#endif
3437 }
3438 else
d6285c9f 3439 {
fff94fa2
SP
3440 /* No usable arenas. */
3441 oldtop = 0;
3442 oldtopsize = 0;
d6285c9f 3443 }
d6285c9f
CD
3444 mem = _int_malloc (av, sz);
3445
d6285c9f
CD
3446 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3447 av == arena_for_chunk (mem2chunk (mem)));
3448
3f6bb8a3 3449 if (!SINGLE_THREAD_P)
d6285c9f 3450 {
3f6bb8a3
WD
3451 if (mem == 0 && av != NULL)
3452 {
3453 LIBC_PROBE (memory_calloc_retry, 1, sz);
3454 av = arena_get_retry (av, sz);
3455 mem = _int_malloc (av, sz);
3456 }
fff94fa2 3457
3f6bb8a3
WD
3458 if (av != NULL)
3459 __libc_lock_unlock (av->mutex);
3460 }
fff94fa2
SP
3461
3462 /* Allocation failed even after a retry. */
3463 if (mem == 0)
3464 return 0;
3465
d6285c9f
CD
3466 p = mem2chunk (mem);
3467
3468 /* Two optional cases in which clearing is not necessary */
3469 if (chunk_is_mmapped (p))
3470 {
3471 if (__builtin_expect (perturb_byte, 0))
3472 return memset (mem, 0, sz);
3473
3474 return mem;
3475 }
3476
3477 csz = chunksize (p);
3478
3479#if MORECORE_CLEARS
3480 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3481 {
3482 /* clear only the bytes from non-freshly-sbrked memory */
3483 csz = oldtopsize;
3484 }
3485#endif
3486
3487 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3488 contents have an odd number of INTERNAL_SIZE_T-sized words;
3489 minimally 3. */
3490 d = (INTERNAL_SIZE_T *) mem;
3491 clearsize = csz - SIZE_SZ;
3492 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3493 assert (nclears >= 3);
3494
3495 if (nclears > 9)
3496 return memset (d, 0, clearsize);
3497
3498 else
3499 {
3500 *(d + 0) = 0;
3501 *(d + 1) = 0;
3502 *(d + 2) = 0;
3503 if (nclears > 4)
3504 {
3505 *(d + 3) = 0;
3506 *(d + 4) = 0;
3507 if (nclears > 6)
3508 {
3509 *(d + 5) = 0;
3510 *(d + 6) = 0;
3511 if (nclears > 8)
3512 {
3513 *(d + 7) = 0;
3514 *(d + 8) = 0;
3515 }
3516 }
3517 }
3518 }
3519
3520 return mem;
fa8d436c 3521}
f65fd747 3522
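/* Illustrative sketch, not part of the original source: the calloc
   overflow test above.  The division is only paid for when one of the
   operands has a bit in its upper half set, i.e. when the product
   could actually have overflowed.  */
#include <stdbool.h>
#include <stddef.h>

static bool
example_calloc_overflows (size_t n, size_t elem_size, size_t *bytes)
{
  const size_t half = (size_t) 1 << (8 * sizeof (size_t) / 2);
  *bytes = n * elem_size;            /* unsigned wrap is well defined */
  if ((n | elem_size) >= half)
    return elem_size != 0 && *bytes / elem_size != n;
  return false;                      /* both operands small: cannot overflow */
}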
f65fd747 3523/*
6c8dbf00
OB
3524 ------------------------------ malloc ------------------------------
3525 */
f65fd747 3526
6c8dbf00
OB
3527static void *
3528_int_malloc (mstate av, size_t bytes)
f65fd747 3529{
fa8d436c 3530 INTERNAL_SIZE_T nb; /* normalized request size */
6c8dbf00
OB
3531 unsigned int idx; /* associated bin index */
3532 mbinptr bin; /* associated bin */
f65fd747 3533
6c8dbf00 3534 mchunkptr victim; /* inspected/selected chunk */
fa8d436c 3535 INTERNAL_SIZE_T size; /* its size */
6c8dbf00 3536 int victim_index; /* its bin index */
f65fd747 3537
6c8dbf00
OB
3538 mchunkptr remainder; /* remainder from a split */
3539 unsigned long remainder_size; /* its size */
8a4b65b4 3540
6c8dbf00
OB
3541 unsigned int block; /* bit map traverser */
3542 unsigned int bit; /* bit map traverser */
3543 unsigned int map; /* current word of binmap */
8a4b65b4 3544
6c8dbf00
OB
3545 mchunkptr fwd; /* misc temp for linking */
3546 mchunkptr bck; /* misc temp for linking */
8a4b65b4 3547
d5c3fafc
DD
3548#if USE_TCACHE
3549 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3550#endif
3551
fa8d436c 3552 /*
6c8dbf00
OB
3553 Convert request size to internal form by adding SIZE_SZ bytes
3554 overhead plus possibly more to obtain necessary alignment and/or
3555 to obtain a size of at least MINSIZE, the smallest allocatable
3556 size. Also, checked_request2size traps (returning 0) request sizes
3557 that are so large that they wrap around zero when padded and
3558 aligned.
3559 */
f65fd747 3560
6c8dbf00 3561 checked_request2size (bytes, nb);
f65fd747 3562
fff94fa2
SP
3563 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3564 mmap. */
3565 if (__glibc_unlikely (av == NULL))
3566 {
3567 void *p = sysmalloc (nb, av);
3568 if (p != NULL)
3569 alloc_perturb (p, bytes);
3570 return p;
3571 }
3572
fa8d436c 3573 /*
6c8dbf00
OB
3574 If the size qualifies as a fastbin, first check corresponding bin.
3575 This code is safe to execute even if av is not yet initialized, so we
3576 can try it without checking, which saves some time on this fast path.
3577 */
f65fd747 3578
71effcea
FW
3579#define REMOVE_FB(fb, victim, pp) \
3580 do \
3581 { \
3582 victim = pp; \
3583 if (victim == NULL) \
3584 break; \
3585 } \
3586 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
3587 != victim); \
3588
6c8dbf00
OB
3589 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3590 {
3591 idx = fastbin_index (nb);
3592 mfastbinptr *fb = &fastbin (av, idx);
71effcea
FW
3593 mchunkptr pp;
3594 victim = *fb;
3595
905a7725
WD
3596 if (victim != NULL)
3597 {
71effcea
FW
3598 if (SINGLE_THREAD_P)
3599 *fb = victim->fd;
3600 else
3601 REMOVE_FB (fb, pp, victim);
3602 if (__glibc_likely (victim != NULL))
6923f6db 3603 {
71effcea
FW
3604 size_t victim_idx = fastbin_index (chunksize (victim));
3605 if (__builtin_expect (victim_idx != idx, 0))
3606 malloc_printerr ("malloc(): memory corruption (fast)");
3607 check_remalloced_chunk (av, victim, nb);
3608#if USE_TCACHE
3609 /* While we're here, if we see other chunks of the same size,
3610 stash them in the tcache. */
3611 size_t tc_idx = csize2tidx (nb);
3612 if (tcache && tc_idx < mp_.tcache_bins)
d5c3fafc 3613 {
71effcea
FW
3614 mchunkptr tc_victim;
3615
3616 /* While bin not empty and tcache not full, copy chunks. */
3617 while (tcache->counts[tc_idx] < mp_.tcache_count
3618 && (tc_victim = *fb) != NULL)
3619 {
3620 if (SINGLE_THREAD_P)
3621 *fb = tc_victim->fd;
3622 else
3623 {
3624 REMOVE_FB (fb, pp, tc_victim);
3625 if (__glibc_unlikely (tc_victim == NULL))
3626 break;
3627 }
3628 tcache_put (tc_victim, tc_idx);
3629 }
d5c3fafc 3630 }
6923f6db 3631#endif
71effcea
FW
3632 void *p = chunk2mem (victim);
3633 alloc_perturb (p, bytes);
3634 return p;
3635 }
905a7725 3636 }
fa8d436c 3637 }
f65fd747 3638
fa8d436c 3639 /*
6c8dbf00
OB
3640 If a small request, check regular bin. Since these "smallbins"
3641 hold one size each, no searching within bins is necessary.
3642 (For a large request, we need to wait until unsorted chunks are
3643 processed to find best fit. But for small ones, fits are exact
3644 anyway, so we can check now, which is faster.)
3645 */
3646
3647 if (in_smallbin_range (nb))
3648 {
3649 idx = smallbin_index (nb);
3650 bin = bin_at (av, idx);
3651
3652 if ((victim = last (bin)) != bin)
3653 {
3381be5c
WD
3654 bck = victim->bk;
3655 if (__glibc_unlikely (bck->fd != victim))
3656 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3657 set_inuse_bit_at_offset (victim, nb);
3658 bin->bk = bck;
3659 bck->fd = bin;
3660
3661 if (av != &main_arena)
3662 set_non_main_arena (victim);
3663 check_malloced_chunk (av, victim, nb);
d5c3fafc
DD
3664#if USE_TCACHE
3665 /* While we're here, if we see other chunks of the same size,
3666 stash them in the tcache. */
3667 size_t tc_idx = csize2tidx (nb);
3668 if (tcache && tc_idx < mp_.tcache_bins)
3669 {
3670 mchunkptr tc_victim;
3671
3672 /* While bin not empty and tcache not full, copy chunks over. */
3673 while (tcache->counts[tc_idx] < mp_.tcache_count
3674 && (tc_victim = last (bin)) != bin)
3675 {
3676 if (tc_victim != 0)
3677 {
3678 bck = tc_victim->bk;
3679 set_inuse_bit_at_offset (tc_victim, nb);
3680 if (av != &main_arena)
3681 set_non_main_arena (tc_victim);
3682 bin->bk = bck;
3683 bck->fd = bin;
3684
3685 tcache_put (tc_victim, tc_idx);
3686 }
3687 }
3688 }
3689#endif
3381be5c
WD
3690 void *p = chunk2mem (victim);
3691 alloc_perturb (p, bytes);
3692 return p;
6c8dbf00 3693 }
fa8d436c 3694 }
f65fd747 3695
a9177ff5 3696 /*
fa8d436c
UD
3697 If this is a large request, consolidate fastbins before continuing.
3698 While it might look excessive to kill all fastbins before
3699 even seeing if there is space available, this avoids
3700 fragmentation problems normally associated with fastbins.
3701 Also, in practice, programs tend to have runs of either small or
a9177ff5 3702 large requests, but less often mixtures, so consolidation is not
fa8d436c
UD
3703 invoked all that often in most programs. And the programs in which
3704 it is called frequently would otherwise tend to fragment.
6c8dbf00 3705 */
7799b7b3 3706
6c8dbf00
OB
3707 else
3708 {
3709 idx = largebin_index (nb);
e956075a 3710 if (atomic_load_relaxed (&av->have_fastchunks))
6c8dbf00
OB
3711 malloc_consolidate (av);
3712 }
f65fd747 3713
fa8d436c 3714 /*
6c8dbf00
OB
3715 Process recently freed or remaindered chunks, taking one only if
3716 it is exact fit, or, if this a small request, the chunk is remainder from
3717 the most recent non-exact fit. Place other traversed chunks in
3718 bins. Note that this step is the only place in any routine where
3719 chunks are placed in bins.
3720
3721 The outer loop here is needed because we might not realize until
3722 near the end of malloc that we should have consolidated, so must
3723 do so and retry. This happens at most once, and only when we would
3724 otherwise need to expand memory to service a "small" request.
3725 */
3726
d5c3fafc
DD
3727#if USE_TCACHE
3728 INTERNAL_SIZE_T tcache_nb = 0;
3729 size_t tc_idx = csize2tidx (nb);
3730 if (tcache && tc_idx < mp_.tcache_bins)
3731 tcache_nb = nb;
3732 int return_cached = 0;
3733
3734 tcache_unsorted_count = 0;
3735#endif
3736
6c8dbf00
OB
3737 for (;; )
3738 {
3739 int iters = 0;
3740 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3741 {
3742 bck = victim->bk;
6c8dbf00 3743 size = chunksize (victim);
b90ddd08
IK
3744 mchunkptr next = chunk_at_offset (victim, size);
3745
3746 if (__glibc_unlikely (size <= 2 * SIZE_SZ)
3747 || __glibc_unlikely (size > av->system_mem))
3748 malloc_printerr ("malloc(): invalid size (unsorted)");
3749 if (__glibc_unlikely (chunksize_nomask (next) < 2 * SIZE_SZ)
3750 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
3751 malloc_printerr ("malloc(): invalid next size (unsorted)");
3752 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
3753 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
3754 if (__glibc_unlikely (bck->fd != victim)
3755 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
3756 malloc_printerr ("malloc(): unsorted double linked list corrupted");
35cfefd9 3757 if (__glibc_unlikely (prev_inuse (next)))
b90ddd08 3758 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
6c8dbf00
OB
3759
3760 /*
3761 If a small request, try to use last remainder if it is the
3762 only chunk in unsorted bin. This helps promote locality for
3763 runs of consecutive small requests. This is the only
3764 exception to best-fit, and applies only when there is
3765 no exact fit for a small chunk.
3766 */
3767
3768 if (in_smallbin_range (nb) &&
3769 bck == unsorted_chunks (av) &&
3770 victim == av->last_remainder &&
3771 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3772 {
3773 /* split and reattach remainder */
3774 remainder_size = size - nb;
3775 remainder = chunk_at_offset (victim, nb);
3776 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3777 av->last_remainder = remainder;
3778 remainder->bk = remainder->fd = unsorted_chunks (av);
3779 if (!in_smallbin_range (remainder_size))
3780 {
3781 remainder->fd_nextsize = NULL;
3782 remainder->bk_nextsize = NULL;
3783 }
3784
3785 set_head (victim, nb | PREV_INUSE |
3786 (av != &main_arena ? NON_MAIN_ARENA : 0));
3787 set_head (remainder, remainder_size | PREV_INUSE);
3788 set_foot (remainder, remainder_size);
3789
3790 check_malloced_chunk (av, victim, nb);
3791 void *p = chunk2mem (victim);
3792 alloc_perturb (p, bytes);
3793 return p;
3794 }
3795
3796 /* remove from unsorted list */
bdc3009b
FG
3797 if (__glibc_unlikely (bck->fd != victim))
3798 malloc_printerr ("malloc(): corrupted unsorted chunks 3");
6c8dbf00
OB
3799 unsorted_chunks (av)->bk = bck;
3800 bck->fd = unsorted_chunks (av);
3801
3802 /* Take now instead of binning if exact fit */
3803
3804 if (size == nb)
3805 {
3806 set_inuse_bit_at_offset (victim, size);
3807 if (av != &main_arena)
e9c4fe93 3808 set_non_main_arena (victim);
d5c3fafc
DD
3809#if USE_TCACHE
3810 /* Fill cache first; return to user only if cache fills.
3811 We may return one of these chunks later. */
3812 if (tcache_nb
3813 && tcache->counts[tc_idx] < mp_.tcache_count)
3814 {
3815 tcache_put (victim, tc_idx);
3816 return_cached = 1;
3817 continue;
3818 }
3819 else
3820 {
3821#endif
6c8dbf00
OB
3822 check_malloced_chunk (av, victim, nb);
3823 void *p = chunk2mem (victim);
3824 alloc_perturb (p, bytes);
3825 return p;
d5c3fafc
DD
3826#if USE_TCACHE
3827 }
3828#endif
6c8dbf00
OB
3829 }
3830
3831 /* place chunk in bin */
3832
3833 if (in_smallbin_range (size))
3834 {
3835 victim_index = smallbin_index (size);
3836 bck = bin_at (av, victim_index);
3837 fwd = bck->fd;
3838 }
3839 else
3840 {
3841 victim_index = largebin_index (size);
3842 bck = bin_at (av, victim_index);
3843 fwd = bck->fd;
3844
3845 /* maintain large bins in sorted order */
3846 if (fwd != bck)
3847 {
3848 /* Or with inuse bit to speed comparisons */
3849 size |= PREV_INUSE;
3850 /* if smaller than smallest, bypass loop below */
e9c4fe93
FW
3851 assert (chunk_main_arena (bck->bk));
3852 if ((unsigned long) (size)
3853 < (unsigned long) chunksize_nomask (bck->bk))
6c8dbf00
OB
3854 {
3855 fwd = bck;
3856 bck = bck->bk;
3857
3858 victim->fd_nextsize = fwd->fd;
3859 victim->bk_nextsize = fwd->fd->bk_nextsize;
3860 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3861 }
3862 else
3863 {
e9c4fe93
FW
3864 assert (chunk_main_arena (fwd));
3865 while ((unsigned long) size < chunksize_nomask (fwd))
6c8dbf00
OB
3866 {
3867 fwd = fwd->fd_nextsize;
e9c4fe93 3868 assert (chunk_main_arena (fwd));
6c8dbf00
OB
3869 }
3870
e9c4fe93
FW
3871 if ((unsigned long) size
3872 == (unsigned long) chunksize_nomask (fwd))
6c8dbf00
OB
3873 /* Always insert in the second position. */
3874 fwd = fwd->fd;
3875 else
3876 {
3877 victim->fd_nextsize = fwd;
3878 victim->bk_nextsize = fwd->bk_nextsize;
52b7cd6e
AM
3879 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
3880 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
6c8dbf00
OB
3881 fwd->bk_nextsize = victim;
3882 victim->bk_nextsize->fd_nextsize = victim;
3883 }
3884 bck = fwd->bk;
52b7cd6e
AM
3885 if (bck->fd != fwd)
3886 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
6c8dbf00
OB
3887 }
3888 }
3889 else
3890 victim->fd_nextsize = victim->bk_nextsize = victim;
3891 }
3892
3893 mark_bin (av, victim_index);
3894 victim->bk = bck;
3895 victim->fd = fwd;
3896 fwd->bk = victim;
3897 bck->fd = victim;
3898
d5c3fafc
DD
3899#if USE_TCACHE
3900 /* If we've processed as many chunks as we're allowed while
3901 filling the cache, return one of the cached ones. */
3902 ++tcache_unsorted_count;
3903 if (return_cached
3904 && mp_.tcache_unsorted_limit > 0
3905 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
3906 {
3907 return tcache_get (tc_idx);
3908 }
3909#endif
3910
6c8dbf00
OB
3911#define MAX_ITERS 10000
3912 if (++iters >= MAX_ITERS)
3913 break;
3914 }
fa8d436c 3915
d5c3fafc
DD
3916#if USE_TCACHE
3917 /* If all the small chunks we found ended up cached, return one now. */
3918 if (return_cached)
3919 {
3920 return tcache_get (tc_idx);
3921 }
3922#endif
3923
a9177ff5 3924 /*
6c8dbf00
OB
3925 If a large request, scan through the chunks of current bin in
3926 sorted order to find smallest that fits. Use the skip list for this.
3927 */
3928
3929 if (!in_smallbin_range (nb))
3930 {
3931 bin = bin_at (av, idx);
3932
3933 /* skip scan if empty or largest chunk is too small */
e9c4fe93
FW
3934 if ((victim = first (bin)) != bin
3935 && (unsigned long) chunksize_nomask (victim)
3936 >= (unsigned long) (nb))
6c8dbf00
OB
3937 {
3938 victim = victim->bk_nextsize;
3939 while (((unsigned long) (size = chunksize (victim)) <
3940 (unsigned long) (nb)))
3941 victim = victim->bk_nextsize;
3942
3943 /* Avoid removing the first entry for a size so that the skip
3944 list does not have to be rerouted. */
e9c4fe93
FW
3945 if (victim != last (bin)
3946 && chunksize_nomask (victim)
3947 == chunksize_nomask (victim->fd))
6c8dbf00
OB
3948 victim = victim->fd;
3949
3950 remainder_size = size - nb;
1ecba1fa 3951 unlink_chunk (av, victim);
6c8dbf00
OB
3952
3953 /* Exhaust */
3954 if (remainder_size < MINSIZE)
3955 {
3956 set_inuse_bit_at_offset (victim, size);
3957 if (av != &main_arena)
e9c4fe93 3958 set_non_main_arena (victim);
6c8dbf00
OB
3959 }
3960 /* Split */
3961 else
3962 {
3963 remainder = chunk_at_offset (victim, nb);
3964 /* We cannot assume the unsorted list is empty and therefore
3965 have to perform a complete insert here. */
3966 bck = unsorted_chunks (av);
3967 fwd = bck->fd;
ac3ed168
FW
3968 if (__glibc_unlikely (fwd->bk != bck))
3969 malloc_printerr ("malloc(): corrupted unsorted chunks");
6c8dbf00
OB
3970 remainder->bk = bck;
3971 remainder->fd = fwd;
3972 bck->fd = remainder;
3973 fwd->bk = remainder;
3974 if (!in_smallbin_range (remainder_size))
3975 {
3976 remainder->fd_nextsize = NULL;
3977 remainder->bk_nextsize = NULL;
3978 }
3979 set_head (victim, nb | PREV_INUSE |
3980 (av != &main_arena ? NON_MAIN_ARENA : 0));
3981 set_head (remainder, remainder_size | PREV_INUSE);
3982 set_foot (remainder, remainder_size);
3983 }
3984 check_malloced_chunk (av, victim, nb);
3985 void *p = chunk2mem (victim);
3986 alloc_perturb (p, bytes);
3987 return p;
3988 }
3989 }
f65fd747 3990
6c8dbf00
OB
3991 /*
3992 Search for a chunk by scanning bins, starting with next largest
3993 bin. This search is strictly by best-fit; i.e., the smallest
3994 (with ties going to approximately the least recently used) chunk
3995 that fits is selected.
3996
3997 The bitmap avoids needing to check that most blocks are nonempty.
3998 The particular case of skipping all bins during warm-up phases
3999 when no chunks have been returned yet is faster than it might look.
4000 */
4001
4002 ++idx;
4003 bin = bin_at (av, idx);
4004 block = idx2block (idx);
4005 map = av->binmap[block];
4006 bit = idx2bit (idx);
4007
4008 for (;; )
4009 {
4010 /* Skip rest of block if there are no more set bits in this block. */
4011 if (bit > map || bit == 0)
4012 {
4013 do
4014 {
4015 if (++block >= BINMAPSIZE) /* out of bins */
4016 goto use_top;
4017 }
4018 while ((map = av->binmap[block]) == 0);
4019
4020 bin = bin_at (av, (block << BINMAPSHIFT));
4021 bit = 1;
4022 }
4023
4024 /* Advance to bin with set bit. There must be one. */
4025 while ((bit & map) == 0)
4026 {
4027 bin = next_bin (bin);
4028 bit <<= 1;
4029 assert (bit != 0);
4030 }
4031
4032 /* Inspect the bin. It is likely to be non-empty */
4033 victim = last (bin);
4034
4035 /* If a false alarm (empty bin), clear the bit. */
4036 if (victim == bin)
4037 {
4038 av->binmap[block] = map &= ~bit; /* Write through */
4039 bin = next_bin (bin);
4040 bit <<= 1;
4041 }
4042
4043 else
4044 {
4045 size = chunksize (victim);
4046
4047 /* We know the first chunk in this bin is big enough to use. */
4048 assert ((unsigned long) (size) >= (unsigned long) (nb));
4049
4050 remainder_size = size - nb;
4051
4052 /* unlink */
1ecba1fa 4053 unlink_chunk (av, victim);
6c8dbf00
OB
4054
4055 /* Exhaust */
4056 if (remainder_size < MINSIZE)
4057 {
4058 set_inuse_bit_at_offset (victim, size);
4059 if (av != &main_arena)
e9c4fe93 4060 set_non_main_arena (victim);
6c8dbf00
OB
4061 }
4062
4063 /* Split */
4064 else
4065 {
4066 remainder = chunk_at_offset (victim, nb);
4067
4068 /* We cannot assume the unsorted list is empty and therefore
4069 have to perform a complete insert here. */
4070 bck = unsorted_chunks (av);
4071 fwd = bck->fd;
ac3ed168
FW
4072 if (__glibc_unlikely (fwd->bk != bck))
4073 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
6c8dbf00
OB
4074 remainder->bk = bck;
4075 remainder->fd = fwd;
4076 bck->fd = remainder;
4077 fwd->bk = remainder;
4078
4079 /* advertise as last remainder */
4080 if (in_smallbin_range (nb))
4081 av->last_remainder = remainder;
4082 if (!in_smallbin_range (remainder_size))
4083 {
4084 remainder->fd_nextsize = NULL;
4085 remainder->bk_nextsize = NULL;
4086 }
4087 set_head (victim, nb | PREV_INUSE |
4088 (av != &main_arena ? NON_MAIN_ARENA : 0));
4089 set_head (remainder, remainder_size | PREV_INUSE);
4090 set_foot (remainder, remainder_size);
4091 }
4092 check_malloced_chunk (av, victim, nb);
4093 void *p = chunk2mem (victim);
4094 alloc_perturb (p, bytes);
4095 return p;
4096 }
4097 }
4098
4099 use_top:
4100 /*
4101 If large enough, split off the chunk bordering the end of memory
4102 (held in av->top). Note that this is in accord with the best-fit
4103 search rule. In effect, av->top is treated as larger (and thus
4104 less well fitting) than any other available chunk since it can
4105 be extended to be as large as necessary (up to system
4106 limitations).
4107
4108 We require that av->top always exists (i.e., has size >=
4109 MINSIZE) after initialization, so if it would otherwise be
4110 exhausted by current request, it is replenished. (The main
4111 reason for ensuring it exists is that we may need MINSIZE space
4112 to put in fenceposts in sysmalloc.)
4113 */
4114
4115 victim = av->top;
4116 size = chunksize (victim);
4117
30a17d8c
PC
4118 if (__glibc_unlikely (size > av->system_mem))
4119 malloc_printerr ("malloc(): corrupted top size");
4120
6c8dbf00
OB
4121 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4122 {
4123 remainder_size = size - nb;
4124 remainder = chunk_at_offset (victim, nb);
4125 av->top = remainder;
4126 set_head (victim, nb | PREV_INUSE |
4127 (av != &main_arena ? NON_MAIN_ARENA : 0));
4128 set_head (remainder, remainder_size | PREV_INUSE);
4129
4130 check_malloced_chunk (av, victim, nb);
4131 void *p = chunk2mem (victim);
4132 alloc_perturb (p, bytes);
4133 return p;
4134 }
4135
4136 /* When we are using atomic ops to free fast chunks we can get
4137 here for all block sizes. */
e956075a 4138 else if (atomic_load_relaxed (&av->have_fastchunks))
6c8dbf00
OB
4139 {
4140 malloc_consolidate (av);
4141 /* restore original bin index */
4142 if (in_smallbin_range (nb))
4143 idx = smallbin_index (nb);
4144 else
4145 idx = largebin_index (nb);
4146 }
f65fd747 4147
6c8dbf00
OB
4148 /*
4149 Otherwise, relay to handle system-dependent cases
4150 */
425ce2ed 4151 else
6c8dbf00
OB
4152 {
4153 void *p = sysmalloc (nb, av);
4154 if (p != NULL)
4155 alloc_perturb (p, bytes);
4156 return p;
4157 }
425ce2ed 4158 }
fa8d436c 4159}
f65fd747 4160
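/* Illustrative sketch, not part of the original source: the binmap scan
   in the best-fit search of _int_malloc above.  Each bin owns one bit;
   a whole 32-bit word is skipped when it is zero, so empty regions of
   the 128 bins cost almost nothing.  The 32/128 figures are the usual
   values but are assumptions here.  */
static int
example_next_nonempty_bin (const unsigned int binmap[4], int idx)
{
  enum { BITS_PER_WORD = 32, NWORDS = 4 };
  int block = idx / BITS_PER_WORD;
  unsigned int map = binmap[block];
  unsigned int bit = 1U << (idx % BITS_PER_WORD);

  /* Skip whole words with no set bit at or above the current index.  */
  while (bit > map || bit == 0)
    {
      if (++block >= NWORDS)
        return -1;                 /* out of bins: fall back to av->top */
      map = binmap[block];
      bit = 1;
      idx = block * BITS_PER_WORD;
    }

  /* Advance to the set bit within this word; there must be one.  */
  while ((bit & map) == 0)
    {
      bit <<= 1;
      ++idx;
    }
  return idx;
}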
fa8d436c 4161/*
6c8dbf00
OB
4162 ------------------------------ free ------------------------------
4163 */
f65fd747 4164
78ac92ad 4165static void
6c8dbf00 4166_int_free (mstate av, mchunkptr p, int have_lock)
f65fd747 4167{
fa8d436c 4168 INTERNAL_SIZE_T size; /* its size */
6c8dbf00
OB
4169 mfastbinptr *fb; /* associated fastbin */
4170 mchunkptr nextchunk; /* next contiguous chunk */
fa8d436c 4171 INTERNAL_SIZE_T nextsize; /* its size */
6c8dbf00 4172 int nextinuse; /* true if nextchunk is used */
fa8d436c 4173 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
6c8dbf00
OB
4174 mchunkptr bck; /* misc temp for linking */
4175 mchunkptr fwd; /* misc temp for linking */
fa8d436c 4176
6c8dbf00 4177 size = chunksize (p);
f65fd747 4178
37fa1953
UD
4179 /* Little security check which won't hurt performance: the
4180 allocator never wraps around at the end of the address space.
4181 Therefore we can exclude some size values which might appear
4182 here by accident or by "design" from some intruder. */
dc165f7b 4183 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
073f560e 4184 || __builtin_expect (misaligned_chunk (p), 0))
ac3ed168 4185 malloc_printerr ("free(): invalid pointer");
347c92e9
L
4186 /* We know that each chunk is at least MINSIZE bytes in size or a
4187 multiple of MALLOC_ALIGNMENT. */
a1ffb40e 4188 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
ac3ed168 4189 malloc_printerr ("free(): invalid size");
f65fd747 4190
37fa1953 4191 check_inuse_chunk(av, p);
f65fd747 4192
d5c3fafc
DD
4193#if USE_TCACHE
4194 {
4195 size_t tc_idx = csize2tidx (size);
affec03b 4196 if (tcache != NULL && tc_idx < mp_.tcache_bins)
d5c3fafc 4197 {
affec03b
FW
4198 /* Check to see if it's already in the tcache. */
4199 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4200
4201 /* This test succeeds on double free. However, we don't 100%
4202 trust it (it also matches random payload data at a 1 in
4203 2^<size_t> chance), so verify it's not an unlikely
4204 coincidence before aborting. */
4205 if (__glibc_unlikely (e->key == tcache))
4206 {
4207 tcache_entry *tmp;
4208 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4209 for (tmp = tcache->entries[tc_idx];
4210 tmp;
4211 tmp = tmp->next)
4212 if (tmp == e)
4213 malloc_printerr ("free(): double free detected in tcache 2");
4214 /* If we get here, it was a coincidence. We've wasted a
4215 few cycles, but don't abort. */
4216 }
4217
4218 if (tcache->counts[tc_idx] < mp_.tcache_count)
4219 {
4220 tcache_put (p, tc_idx);
4221 return;
4222 }
d5c3fafc
DD
4223 }
4224 }
4225#endif
4226
37fa1953
UD
4227 /*
4228 If eligible, place chunk on a fastbin so it can be found
4229 and used quickly in malloc.
4230 */
6bf4302e 4231
9bf248c6 4232 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
6bf4302e 4233
37fa1953
UD
4234#if TRIM_FASTBINS
4235 /*
4236 If TRIM_FASTBINS set, don't place chunks
4237 bordering top into fastbins
4238 */
4239 && (chunk_at_offset(p, size) != av->top)
4240#endif
4241 ) {
fa8d436c 4242
e9c4fe93
FW
4243 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4244 <= 2 * SIZE_SZ, 0)
893e6098
UD
4245 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4246 >= av->system_mem, 0))
4247 {
d74e6f6c 4248 bool fail = true;
bec466d9 4249 /* We might not have a lock at this point and concurrent modifications
d74e6f6c
WD
4250 of system_mem might result in a false positive. Redo the test after
4251 getting the lock. */
4252 if (!have_lock)
4253 {
4254 __libc_lock_lock (av->mutex);
4255 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
4256 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4257 __libc_lock_unlock (av->mutex);
4258 }
4259
4260 if (fail)
ac3ed168 4261 malloc_printerr ("free(): invalid next size (fast)");
893e6098
UD
4262 }
4263
e8349efd 4264 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
425ce2ed 4265
e956075a 4266 atomic_store_relaxed (&av->have_fastchunks, true);
90a3055e
UD
4267 unsigned int idx = fastbin_index(size);
4268 fb = &fastbin (av, idx);
425ce2ed 4269
362b47fe 4270 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
71effcea
FW
4271 mchunkptr old = *fb, old2;
4272
4273 if (SINGLE_THREAD_P)
4274 {
4275 /* Check that the top of the bin is not the record we are going to
4276 add (i.e., double free). */
4277 if (__builtin_expect (old == p, 0))
4278 malloc_printerr ("double free or corruption (fasttop)");
4279 p->fd = old;
4280 *fb = p;
4281 }
4282 else
4283 do
4284 {
4285 /* Check that the top of the bin is not the record we are going to
4286 add (i.e., double free). */
4287 if (__builtin_expect (old == p, 0))
4288 malloc_printerr ("double free or corruption (fasttop)");
4289 p->fd = old2 = old;
4290 }
4291 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4292 != old2);
a15d53e2
WD
4293
4294 /* Check that size of fastbin chunk at the top is the same as
4295 size of the chunk that we are adding. We can dereference OLD
4296 only if we have the lock, otherwise it might have already been
4297 allocated again. */
4298 if (have_lock && old != NULL
4299 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
ac3ed168 4300 malloc_printerr ("invalid fastbin entry (free)");
37fa1953 4301 }
f65fd747 4302
37fa1953
UD
4303 /*
4304 Consolidate other non-mmapped chunks as they arrive.
4305 */
fa8d436c 4306
37fa1953 4307 else if (!chunk_is_mmapped(p)) {
a15d53e2
WD
4308
4309 /* If we're single-threaded, don't lock the arena. */
4310 if (SINGLE_THREAD_P)
4311 have_lock = true;
4312
24cffce7 4313 if (!have_lock)
4bf5f222 4314 __libc_lock_lock (av->mutex);
425ce2ed 4315
37fa1953 4316 nextchunk = chunk_at_offset(p, size);
fa8d436c 4317
37fa1953
UD
4318 /* Lightweight tests: check whether the block is already the
4319 top block. */
a1ffb40e 4320 if (__glibc_unlikely (p == av->top))
ac3ed168 4321 malloc_printerr ("double free or corruption (top)");
37fa1953
UD
4322 /* Or whether the next chunk is beyond the boundaries of the arena. */
4323 if (__builtin_expect (contiguous (av)
4324 && (char *) nextchunk
4325 >= ((char *) av->top + chunksize(av->top)), 0))
ac3ed168 4326 malloc_printerr ("double free or corruption (out)");
37fa1953 4327 /* Or whether the block is actually not marked used. */
a1ffb40e 4328 if (__glibc_unlikely (!prev_inuse(nextchunk)))
ac3ed168 4329 malloc_printerr ("double free or corruption (!prev)");
fa8d436c 4330
37fa1953 4331 nextsize = chunksize(nextchunk);
e9c4fe93 4332 if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
893e6098 4333 || __builtin_expect (nextsize >= av->system_mem, 0))
ac3ed168 4334 malloc_printerr ("free(): invalid next size (normal)");
fa8d436c 4335
e8349efd 4336 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
854278df 4337
37fa1953
UD
4338 /* consolidate backward */
4339 if (!prev_inuse(p)) {
e9c4fe93 4340 prevsize = prev_size (p);
37fa1953
UD
4341 size += prevsize;
4342 p = chunk_at_offset(p, -((long) prevsize));
d6db68e6
ME
4343 if (__glibc_unlikely (chunksize(p) != prevsize))
4344 malloc_printerr ("corrupted size vs. prev_size while consolidating");
1ecba1fa 4345 unlink_chunk (av, p);
37fa1953 4346 }
a9177ff5 4347
37fa1953
UD
4348 if (nextchunk != av->top) {
4349 /* get and clear inuse bit */
4350 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4351
4352 /* consolidate forward */
4353 if (!nextinuse) {
1ecba1fa 4354 unlink_chunk (av, nextchunk);
37fa1953
UD
4355 size += nextsize;
4356 } else
4357 clear_inuse_bit_at_offset(nextchunk, 0);
10dc2a90 4358
fa8d436c 4359 /*
37fa1953
UD
4360 Place the chunk in unsorted chunk list. Chunks are
4361 not placed into regular bins until after they have
4362 been given one chance to be used in malloc.
fa8d436c 4363 */
f65fd747 4364
37fa1953
UD
4365 bck = unsorted_chunks(av);
4366 fwd = bck->fd;
a1ffb40e 4367 if (__glibc_unlikely (fwd->bk != bck))
ac3ed168 4368 malloc_printerr ("free(): corrupted unsorted chunks");
37fa1953 4369 p->fd = fwd;
7ecfbd38
UD
4370 p->bk = bck;
4371 if (!in_smallbin_range(size))
4372 {
4373 p->fd_nextsize = NULL;
4374 p->bk_nextsize = NULL;
4375 }
37fa1953
UD
4376 bck->fd = p;
4377 fwd->bk = p;
8a4b65b4 4378
37fa1953
UD
4379 set_head(p, size | PREV_INUSE);
4380 set_foot(p, size);
4381
4382 check_free_chunk(av, p);
4383 }
4384
4385 /*
4386 If the chunk borders the current high end of memory,
4387 consolidate into top
4388 */
4389
4390 else {
4391 size += nextsize;
4392 set_head(p, size | PREV_INUSE);
4393 av->top = p;
4394 check_chunk(av, p);
4395 }
4396
4397 /*
4398 If freeing a large space, consolidate possibly-surrounding
4399 chunks. Then, if the total unused topmost memory exceeds trim
4400 threshold, ask malloc_trim to reduce top.
4401
4402 Unless max_fast is 0, we don't know if there are fastbins
4403 bordering top, so we cannot tell for sure whether threshold
4404 has been reached unless fastbins are consolidated. But we
4405 don't want to consolidate on each free. As a compromise,
4406 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4407 is reached.
4408 */
fa8d436c 4409
37fa1953 4410 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
e956075a 4411 if (atomic_load_relaxed (&av->have_fastchunks))
37fa1953 4412 malloc_consolidate(av);
fa8d436c 4413
37fa1953 4414 if (av == &main_arena) {
a9177ff5 4415#ifndef MORECORE_CANNOT_TRIM
37fa1953
UD
4416 if ((unsigned long)(chunksize(av->top)) >=
4417 (unsigned long)(mp_.trim_threshold))
3b49edc0 4418 systrim(mp_.top_pad, av);
fa8d436c 4419#endif
37fa1953
UD
4420 } else {
4421 /* Always try heap_trim(), even if the top chunk is not
4422 large, because the corresponding heap might go away. */
4423 heap_info *heap = heap_for_ptr(top(av));
fa8d436c 4424
37fa1953
UD
4425 assert(heap->ar_ptr == av);
4426 heap_trim(heap, mp_.top_pad);
fa8d436c 4427 }
fa8d436c 4428 }
10dc2a90 4429
24cffce7 4430 if (!have_lock)
4bf5f222 4431 __libc_lock_unlock (av->mutex);
37fa1953
UD
4432 }
4433 /*
22a89187 4434 If the chunk was allocated via mmap, release via munmap().
37fa1953
UD
4435 */
4436
4437 else {
c120d94d 4438 munmap_chunk (p);
fa8d436c 4439 }
10dc2a90
UD
4440}
4441
fa8d436c
UD
4442/*
4443 ------------------------- malloc_consolidate -------------------------
4444
4445 malloc_consolidate is a specialized version of free() that tears
4446 down chunks held in fastbins. Free itself cannot be used for this
4447 purpose since, among other things, it might place chunks back onto
4448 fastbins. So, instead, we need to use a minor variant of the same
4449 code.
fa8d436c
UD
4450*/
4451
fa8d436c 4452static void malloc_consolidate(mstate av)
10dc2a90 4453{
fa8d436c
UD
4454 mfastbinptr* fb; /* current fastbin being consolidated */
4455 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4456 mchunkptr p; /* current chunk being consolidated */
4457 mchunkptr nextp; /* next chunk to consolidate */
4458 mchunkptr unsorted_bin; /* bin header */
4459 mchunkptr first_unsorted; /* chunk to link to */
4460
4461 /* These have same use as in free() */
4462 mchunkptr nextchunk;
4463 INTERNAL_SIZE_T size;
4464 INTERNAL_SIZE_T nextsize;
4465 INTERNAL_SIZE_T prevsize;
4466 int nextinuse;
10dc2a90 4467
3381be5c 4468 atomic_store_relaxed (&av->have_fastchunks, false);
10dc2a90 4469
3381be5c 4470 unsorted_bin = unsorted_chunks(av);
a9177ff5 4471
3381be5c
WD
4472 /*
4473 Remove each chunk from fast bin and consolidate it, placing it
4474 then in unsorted bin. Among other reasons for doing this,
4475 placing in unsorted bin avoids needing to calculate actual bins
4476 until malloc is sure that chunks aren't immediately going to be
4477 reused anyway.
4478 */
72f90263 4479
3381be5c
WD
4480 maxfb = &fastbin (av, NFASTBINS - 1);
4481 fb = &fastbin (av, 0);
4482 do {
71effcea 4483 p = atomic_exchange_acq (fb, NULL);
3381be5c
WD
4484 if (p != 0) {
4485 do {
249a5895
IK
4486 {
4487 unsigned int idx = fastbin_index (chunksize (p));
4488 if ((&fastbin (av, idx)) != fb)
4489 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4490 }
4491
3381be5c
WD
4492 check_inuse_chunk(av, p);
4493 nextp = p->fd;
4494
4495 /* Slightly streamlined version of consolidation code in free() */
4496 size = chunksize (p);
4497 nextchunk = chunk_at_offset(p, size);
4498 nextsize = chunksize(nextchunk);
4499
4500 if (!prev_inuse(p)) {
4501 prevsize = prev_size (p);
4502 size += prevsize;
4503 p = chunk_at_offset(p, -((long) prevsize));
d6db68e6
ME
4504 if (__glibc_unlikely (chunksize(p) != prevsize))
4505 malloc_printerr ("corrupted size vs. prev_size in fastbins");
1ecba1fa 4506 unlink_chunk (av, p);
3381be5c 4507 }
72f90263 4508
3381be5c
WD
4509 if (nextchunk != av->top) {
4510 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
a9177ff5 4511
3381be5c
WD
4512 if (!nextinuse) {
4513 size += nextsize;
1ecba1fa 4514 unlink_chunk (av, nextchunk);
3381be5c
WD
4515 } else
4516 clear_inuse_bit_at_offset(nextchunk, 0);
a9177ff5 4517
3381be5c
WD
4518 first_unsorted = unsorted_bin->fd;
4519 unsorted_bin->fd = p;
4520 first_unsorted->bk = p;
7ecfbd38 4521
3381be5c
WD
4522 if (!in_smallbin_range (size)) {
4523 p->fd_nextsize = NULL;
4524 p->bk_nextsize = NULL;
72f90263 4525 }
a9177ff5 4526
3381be5c
WD
4527 set_head(p, size | PREV_INUSE);
4528 p->bk = unsorted_bin;
4529 p->fd = first_unsorted;
4530 set_foot(p, size);
4531 }
a9177ff5 4532
3381be5c
WD
4533 else {
4534 size += nextsize;
4535 set_head(p, size | PREV_INUSE);
4536 av->top = p;
4537 }
a9177ff5 4538
3381be5c
WD
4539 } while ( (p = nextp) != 0);
4540
4541 }
4542 } while (fb++ != maxfb);
fa8d436c 4543}
10dc2a90 4544
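/* Illustrative sketch, not part of the original source: malloc_consolidate
   above detaches each fastbin by atomically swapping its head with NULL
   and then walks the private list.  C11 atomics stand in here for the
   internal atomic_exchange_acq macro.  */
#include <stdatomic.h>
#include <stddef.h>

struct example_fast { struct example_fast *fd; };

static void
example_drain_fastbin (_Atomic (struct example_fast *) *fb,
                       void (*consolidate_one) (struct example_fast *))
{
  /* After the exchange no other thread can pop these chunks again.  */
  struct example_fast *p
    = atomic_exchange_explicit (fb, NULL, memory_order_acquire);
  while (p != NULL)
    {
      struct example_fast *next = p->fd;   /* read before p is recycled */
      consolidate_one (p);
      p = next;
    }
}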
fa8d436c
UD
4545/*
4546 ------------------------------ realloc ------------------------------
4547*/
f65fd747 4548
22a89187 4549void*
4c8b8cc3
UD
4550_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4551 INTERNAL_SIZE_T nb)
fa8d436c 4552{
fa8d436c
UD
4553 mchunkptr newp; /* chunk to return */
4554 INTERNAL_SIZE_T newsize; /* its size */
22a89187 4555 void* newmem; /* corresponding user mem */
f65fd747 4556
fa8d436c 4557 mchunkptr next; /* next contiguous chunk after oldp */
f65fd747 4558
fa8d436c
UD
4559 mchunkptr remainder; /* extra space at end of newp */
4560 unsigned long remainder_size; /* its size */
f65fd747 4561
6dd6a580 4562 /* oldmem size */
e9c4fe93 4563 if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
76761b63 4564 || __builtin_expect (oldsize >= av->system_mem, 0))
ac3ed168 4565 malloc_printerr ("realloc(): invalid old size");
76761b63 4566
6c8dbf00 4567 check_inuse_chunk (av, oldp);
f65fd747 4568
4c8b8cc3 4569 /* All callers already filter out mmap'ed chunks. */
6c8dbf00 4570 assert (!chunk_is_mmapped (oldp));
f65fd747 4571
6c8dbf00
OB
4572 next = chunk_at_offset (oldp, oldsize);
4573 INTERNAL_SIZE_T nextsize = chunksize (next);
e9c4fe93 4574 if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
22a89187 4575 || __builtin_expect (nextsize >= av->system_mem, 0))
ac3ed168 4576 malloc_printerr ("realloc(): invalid next size");
22a89187 4577
6c8dbf00
OB
4578 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4579 {
4580 /* already big enough; split below */
fa8d436c 4581 newp = oldp;
6c8dbf00 4582 newsize = oldsize;
7799b7b3 4583 }
f65fd747 4584
6c8dbf00
OB
4585 else
4586 {
4587 /* Try to expand forward into top */
4588 if (next == av->top &&
4589 (unsigned long) (newsize = oldsize + nextsize) >=
4590 (unsigned long) (nb + MINSIZE))
4591 {
4592 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4593 av->top = chunk_at_offset (oldp, nb);
4594 set_head (av->top, (newsize - nb) | PREV_INUSE);
4595 check_inuse_chunk (av, oldp);
4596 return chunk2mem (oldp);
4597 }
4598
4599 /* Try to expand forward into next chunk; split off remainder below */
4600 else if (next != av->top &&
4601 !inuse (next) &&
4602 (unsigned long) (newsize = oldsize + nextsize) >=
4603 (unsigned long) (nb))
4604 {
4605 newp = oldp;
1ecba1fa 4606 unlink_chunk (av, next);
6c8dbf00
OB
4607 }
4608
4609 /* allocate, copy, free */
4610 else
4611 {
4612 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4613 if (newmem == 0)
4614 return 0; /* propagate failure */
4615
4616 newp = mem2chunk (newmem);
4617 newsize = chunksize (newp);
4618
4619 /*
4620 Avoid copy if newp is next chunk after oldp.
4621 */
4622 if (newp == next)
4623 {
4624 newsize += oldsize;
4625 newp = oldp;
4626 }
4627 else
4628 {
b50dd3bc 4629 memcpy (newmem, chunk2mem (oldp), oldsize - SIZE_SZ);
6c8dbf00
OB
4630 _int_free (av, oldp, 1);
4631 check_inuse_chunk (av, newp);
4632 return chunk2mem (newp);
4633 }
4634 }
fa8d436c 4635 }
f65fd747 4636
22a89187 4637 /* If possible, free extra space in old or extended chunk */
f65fd747 4638
6c8dbf00 4639 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
f65fd747 4640
22a89187 4641 remainder_size = newsize - nb;
10dc2a90 4642
6c8dbf00
OB
4643 if (remainder_size < MINSIZE) /* not enough extra to split off */
4644 {
4645 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4646 set_inuse_bit_at_offset (newp, newsize);
4647 }
4648 else /* split remainder */
4649 {
4650 remainder = chunk_at_offset (newp, nb);
4651 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4652 set_head (remainder, remainder_size | PREV_INUSE |
4653 (av != &main_arena ? NON_MAIN_ARENA : 0));
4654 /* Mark remainder as inuse so free() won't complain */
4655 set_inuse_bit_at_offset (remainder, remainder_size);
4656 _int_free (av, remainder, 1);
4657 }
22a89187 4658
6c8dbf00
OB
4659 check_inuse_chunk (av, newp);
4660 return chunk2mem (newp);
fa8d436c
UD
4661}
4662
4663/*
6c8dbf00
OB
4664 ------------------------------ memalign ------------------------------
4665 */
fa8d436c 4666
6c8dbf00
OB
4667static void *
4668_int_memalign (mstate av, size_t alignment, size_t bytes)
fa8d436c
UD
4669{
4670 INTERNAL_SIZE_T nb; /* padded request size */
6c8dbf00
OB
4671 char *m; /* memory returned by malloc call */
4672 mchunkptr p; /* corresponding chunk */
4673 char *brk; /* alignment point within p */
4674 mchunkptr newp; /* chunk to return */
fa8d436c
UD
4675 INTERNAL_SIZE_T newsize; /* its size */
4676 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
6c8dbf00
OB
4677 mchunkptr remainder; /* spare room at end to split off */
4678 unsigned long remainder_size; /* its size */
fa8d436c 4679 INTERNAL_SIZE_T size;
f65fd747 4680
f65fd747 4681
f65fd747 4682
6c8dbf00 4683 checked_request2size (bytes, nb);
fa8d436c
UD
4684
4685 /*
6c8dbf00
OB
4686 Strategy: find a spot within that chunk that meets the alignment
4687 request, and then possibly free the leading and trailing space.
4688 */
fa8d436c
UD
4689
4690
8e448310
AS
4691 /* Check for overflow. */
4692 if (nb > SIZE_MAX - alignment - MINSIZE)
4693 {
4694 __set_errno (ENOMEM);
4695 return 0;
4696 }
4697
fa8d436c
UD
4698 /* Call malloc with worst case padding to hit alignment. */
4699
6c8dbf00
OB
4700 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4701
4702 if (m == 0)
4703 return 0; /* propagate failure */
4704
4705 p = mem2chunk (m);
4706
4707 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4708
4709 { /*
4710 Find an aligned spot inside chunk. Since we need to give back
4711 leading space in a chunk of at least MINSIZE, if the first
4712 calculation places us at a spot with less than MINSIZE leader,
4713 we can move to the next aligned spot -- we've allocated enough
4714 total room so that this is always possible.
4715 */
4716 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
4717 - ((signed long) alignment));
4718 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4719 brk += alignment;
4720
4721 newp = (mchunkptr) brk;
4722 leadsize = brk - (char *) (p);
4723 newsize = chunksize (p) - leadsize;
4724
4725 /* For mmapped chunks, just adjust offset */
4726 if (chunk_is_mmapped (p))
4727 {
e9c4fe93 4728 set_prev_size (newp, prev_size (p) + leadsize);
6c8dbf00
OB
4729 set_head (newp, newsize | IS_MMAPPED);
4730 return chunk2mem (newp);
4731 }
4732
4733 /* Otherwise, give back leader, use the rest */
4734 set_head (newp, newsize | PREV_INUSE |
4735 (av != &main_arena ? NON_MAIN_ARENA : 0));
4736 set_inuse_bit_at_offset (newp, newsize);
4737 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4738 _int_free (av, p, 1);
4739 p = newp;
4740
4741 assert (newsize >= nb &&
4742 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
f65fd747 4743 }
f65fd747 4744
f65fd747 4745 /* Also give back spare room at the end */
6c8dbf00
OB
4746 if (!chunk_is_mmapped (p))
4747 {
4748 size = chunksize (p);
4749 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4750 {
4751 remainder_size = size - nb;
4752 remainder = chunk_at_offset (p, nb);
4753 set_head (remainder, remainder_size | PREV_INUSE |
4754 (av != &main_arena ? NON_MAIN_ARENA : 0));
4755 set_head_size (p, nb);
4756 _int_free (av, remainder, 1);
4757 }
fa8d436c 4758 }
f65fd747 4759
6c8dbf00
OB
4760 check_inuse_chunk (av, p);
4761 return chunk2mem (p);
f65fd747
UD
4762}
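/*
   Illustrative sketch (not part of the implementation): the
   "misaligned" branch above rounds the malloc'ed address up to the
   next multiple of the (power-of-two) alignment using the usual mask
   trick, which this stand-alone helper reproduces:

     #include <stdint.h>

     // Round ADDR up to a multiple of ALIGN; ALIGN must be a power of two.
     static inline uintptr_t
     align_up (uintptr_t addr, uintptr_t align)
     {
       return (addr + align - 1) & -align;
     }

   _int_memalign over-allocates by alignment + MINSIZE so that, if the
   first aligned spot would leave less than MINSIZE of leading space,
   it can step forward by one more alignment unit and still hand the
   leader back as a free chunk.
 */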
4763
f65fd747 4764
fa8d436c 4765/*
6c8dbf00
OB
4766 ------------------------------ malloc_trim ------------------------------
4767 */
8a4b65b4 4768
6c8dbf00
OB
4769static int
4770mtrim (mstate av, size_t pad)
f65fd747 4771{
3381be5c 4772 /* Ensure all blocks are consolidated. */
68631c8e
UD
4773 malloc_consolidate (av);
4774
6c8dbf00 4775 const size_t ps = GLRO (dl_pagesize);
68631c8e
UD
4776 int psindex = bin_index (ps);
4777 const size_t psm1 = ps - 1;
4778
4779 int result = 0;
4780 for (int i = 1; i < NBINS; ++i)
4781 if (i == 1 || i >= psindex)
4782 {
6c8dbf00 4783 mbinptr bin = bin_at (av, i);
68631c8e 4784
6c8dbf00
OB
4785 for (mchunkptr p = last (bin); p != bin; p = p->bk)
4786 {
4787 INTERNAL_SIZE_T size = chunksize (p);
68631c8e 4788
6c8dbf00
OB
4789 if (size > psm1 + sizeof (struct malloc_chunk))
4790 {
4791 /* See whether the chunk contains at least one unused page. */
4792 char *paligned_mem = (char *) (((uintptr_t) p
4793 + sizeof (struct malloc_chunk)
4794 + psm1) & ~psm1);
68631c8e 4795
6c8dbf00
OB
4796 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
4797 assert ((char *) p + size > paligned_mem);
68631c8e 4798
6c8dbf00
OB
4799 /* This is the size we could potentially free. */
4800 size -= paligned_mem - (char *) p;
68631c8e 4801
6c8dbf00
OB
4802 if (size > psm1)
4803 {
439bda32 4804#if MALLOC_DEBUG
6c8dbf00
OB
4805 /* When debugging we simulate destroying the memory
4806 content. */
4807 memset (paligned_mem, 0x89, size & ~psm1);
68631c8e 4808#endif
6c8dbf00 4809 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
68631c8e 4810
6c8dbf00
OB
4811 result = 1;
4812 }
4813 }
4814 }
68631c8e 4815 }
8a4b65b4 4816
a9177ff5 4817#ifndef MORECORE_CANNOT_TRIM
3b49edc0 4818 return result | (av == &main_arena ? systrim (pad, av) : 0);
6c8dbf00 4819
8a4b65b4 4820#else
68631c8e 4821 return result;
f65fd747 4822#endif
f65fd747
UD
4823}
4824
f65fd747 4825
3b49edc0 4826int
6c8dbf00 4827__malloc_trim (size_t s)
3b49edc0
UD
4828{
4829 int result = 0;
4830
6c8dbf00 4831 if (__malloc_initialized < 0)
3b49edc0
UD
4832 ptmalloc_init ();
4833
4834 mstate ar_ptr = &main_arena;
4835 do
4836 {
4bf5f222 4837 __libc_lock_lock (ar_ptr->mutex);
3b49edc0 4838 result |= mtrim (ar_ptr, s);
4bf5f222 4839 __libc_lock_unlock (ar_ptr->mutex);
3b49edc0
UD
4840
4841 ar_ptr = ar_ptr->next;
4842 }
4843 while (ar_ptr != &main_arena);
4844
4845 return result;
4846}
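/*
   Usage sketch (illustrative, not part of the implementation): the
   public entry point is malloc_trim from <malloc.h>.  A program that
   has just released a large amount of heap memory can ask every arena
   to return unused pages to the system:

     #include <malloc.h>

     void
     release_cached_memory (void)
     {
       // Keep no extra padding at the top of the heap; the return value
       // is nonzero if any memory was actually released.
       int released = malloc_trim (0);
       (void) released;
     }

   Because the per-bin scan above uses __madvise (MADV_DONTNEED),
   trimming can release whole pages inside large free chunks in any
   arena, in addition to shrinking the top of the main arena via
   systrim.
 */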
4847
4848
f65fd747 4849/*
6c8dbf00
OB
4850 ------------------------- malloc_usable_size -------------------------
4851 */
f65fd747 4852
3b49edc0 4853static size_t
6c8dbf00 4854musable (void *mem)
f65fd747
UD
4855{
4856 mchunkptr p;
6c8dbf00
OB
4857 if (mem != 0)
4858 {
4859 p = mem2chunk (mem);
4860
4861 if (__builtin_expect (using_malloc_checking == 1, 0))
4862 return malloc_check_get_size (p);
4863
4864 if (chunk_is_mmapped (p))
073f8214
FW
4865 {
4866 if (DUMPED_MAIN_ARENA_CHUNK (p))
4867 return chunksize (p) - SIZE_SZ;
4868 else
4869 return chunksize (p) - 2 * SIZE_SZ;
4870 }
6c8dbf00
OB
4871 else if (inuse (p))
4872 return chunksize (p) - SIZE_SZ;
4873 }
fa8d436c 4874 return 0;
f65fd747
UD
4875}
4876
3b49edc0
UD
4877
4878size_t
6c8dbf00 4879__malloc_usable_size (void *m)
3b49edc0
UD
4880{
4881 size_t result;
4882
6c8dbf00 4883 result = musable (m);
3b49edc0
UD
4884 return result;
4885}
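/*
   Usage sketch (illustrative, not part of the implementation):
   malloc_usable_size reports the number of bytes actually available in
   a block, which may exceed the requested size because of alignment
   and the minimum chunk size:

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int
     main (void)
     {
       void *p = malloc (5);
       if (p != NULL)
         {
           // Typically prints a value >= 5, e.g. 24 on 64-bit systems.
           printf ("usable size: %zu\n", malloc_usable_size (p));
           free (p);
         }
       return 0;
     }
 */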
4886
fa8d436c 4887/*
6c8dbf00
OB
4888 ------------------------------ mallinfo ------------------------------
4889 Accumulate malloc statistics for arena AV into M.
4890 */
f65fd747 4891
bedee953 4892static void
6c8dbf00 4893int_mallinfo (mstate av, struct mallinfo *m)
f65fd747 4894{
6dd67bd5 4895 size_t i;
f65fd747
UD
4896 mbinptr b;
4897 mchunkptr p;
f65fd747 4898 INTERNAL_SIZE_T avail;
fa8d436c
UD
4899 INTERNAL_SIZE_T fastavail;
4900 int nblocks;
4901 int nfastblocks;
f65fd747 4902
6c8dbf00 4903 check_malloc_state (av);
8a4b65b4 4904
fa8d436c 4905 /* Account for top */
6c8dbf00 4906 avail = chunksize (av->top);
fa8d436c 4907 nblocks = 1; /* top always exists */
f65fd747 4908
fa8d436c
UD
4909 /* traverse fastbins */
4910 nfastblocks = 0;
4911 fastavail = 0;
4912
6c8dbf00
OB
4913 for (i = 0; i < NFASTBINS; ++i)
4914 {
4915 for (p = fastbin (av, i); p != 0; p = p->fd)
4916 {
4917 ++nfastblocks;
4918 fastavail += chunksize (p);
4919 }
fa8d436c 4920 }
fa8d436c
UD
4921
4922 avail += fastavail;
f65fd747 4923
fa8d436c 4924 /* traverse regular bins */
6c8dbf00
OB
4925 for (i = 1; i < NBINS; ++i)
4926 {
4927 b = bin_at (av, i);
4928 for (p = last (b); p != b; p = p->bk)
4929 {
4930 ++nblocks;
4931 avail += chunksize (p);
4932 }
fa8d436c 4933 }
f65fd747 4934
bedee953
PP
4935 m->smblks += nfastblocks;
4936 m->ordblks += nblocks;
4937 m->fordblks += avail;
4938 m->uordblks += av->system_mem - avail;
4939 m->arena += av->system_mem;
4940 m->fsmblks += fastavail;
4941 if (av == &main_arena)
4942 {
4943 m->hblks = mp_.n_mmaps;
4944 m->hblkhd = mp_.mmapped_mem;
ca135f82 4945 m->usmblks = 0;
6c8dbf00 4946 m->keepcost = chunksize (av->top);
bedee953 4947 }
fa8d436c 4948}
f65fd747 4949
3b49edc0 4950
6c8dbf00 4951struct mallinfo
9dd346ff 4952__libc_mallinfo (void)
3b49edc0
UD
4953{
4954 struct mallinfo m;
bedee953 4955 mstate ar_ptr;
3b49edc0 4956
6c8dbf00 4957 if (__malloc_initialized < 0)
3b49edc0 4958 ptmalloc_init ();
bedee953 4959
6c8dbf00 4960 memset (&m, 0, sizeof (m));
bedee953 4961 ar_ptr = &main_arena;
6c8dbf00
OB
4962 do
4963 {
4bf5f222 4964 __libc_lock_lock (ar_ptr->mutex);
6c8dbf00 4965 int_mallinfo (ar_ptr, &m);
4bf5f222 4966 __libc_lock_unlock (ar_ptr->mutex);
bedee953 4967
6c8dbf00
OB
4968 ar_ptr = ar_ptr->next;
4969 }
4970 while (ar_ptr != &main_arena);
bedee953 4971
3b49edc0
UD
4972 return m;
4973}
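/*
   Usage sketch (illustrative, not part of the implementation): the
   per-arena statistics accumulated above are exposed to applications
   through mallinfo from <malloc.h>.  Its fields are plain ints, so the
   reported values can wrap in programs using very large heaps:

     #include <malloc.h>
     #include <stdio.h>

     void
     print_heap_stats (void)
     {
       struct mallinfo mi = mallinfo ();
       printf ("total arena bytes:   %d\n", mi.arena);
       printf ("in-use bytes:        %d\n", mi.uordblks);
       printf ("free bytes:          %d\n", mi.fordblks);
       printf ("free fastbin bytes:  %d\n", mi.fsmblks);
       printf ("trimmable top bytes: %d\n", mi.keepcost);
     }
 */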
4974
fa8d436c 4975/*
6c8dbf00
OB
4976 ------------------------------ malloc_stats ------------------------------
4977 */
f65fd747 4978
3b49edc0 4979void
60d2f8f3 4980__malloc_stats (void)
f65fd747 4981{
8a4b65b4 4982 int i;
fa8d436c 4983 mstate ar_ptr;
fa8d436c 4984 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
8a4b65b4 4985
6c8dbf00 4986 if (__malloc_initialized < 0)
a234e27d 4987 ptmalloc_init ();
8dab36a1 4988 _IO_flockfile (stderr);
9964a145
ZW
4989 int old_flags2 = stderr->_flags2;
4990 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
6c8dbf00
OB
4991 for (i = 0, ar_ptr = &main_arena;; i++)
4992 {
4993 struct mallinfo mi;
4994
4995 memset (&mi, 0, sizeof (mi));
4bf5f222 4996 __libc_lock_lock (ar_ptr->mutex);
6c8dbf00
OB
4997 int_mallinfo (ar_ptr, &mi);
4998 fprintf (stderr, "Arena %d:\n", i);
4999 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5000 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
fa8d436c 5001#if MALLOC_DEBUG > 1
6c8dbf00
OB
5002 if (i > 0)
5003 dump_heap (heap_for_ptr (top (ar_ptr)));
fa8d436c 5004#endif
6c8dbf00
OB
5005 system_b += mi.arena;
5006 in_use_b += mi.uordblks;
4bf5f222 5007 __libc_lock_unlock (ar_ptr->mutex);
6c8dbf00
OB
5008 ar_ptr = ar_ptr->next;
5009 if (ar_ptr == &main_arena)
5010 break;
5011 }
5012 fprintf (stderr, "Total (incl. mmap):\n");
5013 fprintf (stderr, "system bytes = %10u\n", system_b);
5014 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5015 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5016 fprintf (stderr, "max mmap bytes = %10lu\n",
5017 (unsigned long) mp_.max_mmapped_mem);
9964a145 5018 stderr->_flags2 = old_flags2;
8dab36a1 5019 _IO_funlockfile (stderr);
f65fd747
UD
5020}
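/*
   Usage note (illustrative, not part of the implementation):
   malloc_stats is the simplest of the reporting interfaces; it takes
   no arguments and writes the per-arena summary directly to stderr:

     #include <malloc.h>

     void
     dump_malloc_stats (void)
     {
       malloc_stats ();   // prints one "Arena N:" block per arena plus a total
     }
 */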
5021
f65fd747
UD
5022
5023/*
6c8dbf00
OB
5024 ------------------------------ mallopt ------------------------------
5025 */
be7991c0
SP
5026static inline int
5027__always_inline
5028do_set_trim_threshold (size_t value)
5029{
5030 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5031 mp_.no_dyn_threshold);
5032 mp_.trim_threshold = value;
5033 mp_.no_dyn_threshold = 1;
5034 return 1;
5035}
5036
5037static inline int
5038__always_inline
5039do_set_top_pad (size_t value)
5040{
5041 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5042 mp_.no_dyn_threshold);
5043 mp_.top_pad = value;
5044 mp_.no_dyn_threshold = 1;
5045 return 1;
5046}
5047
5048static inline int
5049__always_inline
5050do_set_mmap_threshold (size_t value)
5051{
5052 /* Forbid setting the threshold too high. */
5053 if (value <= HEAP_MAX_SIZE / 2)
5054 {
5055 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5056 mp_.no_dyn_threshold);
5057 mp_.mmap_threshold = value;
5058 mp_.no_dyn_threshold = 1;
5059 return 1;
5060 }
5061 return 0;
5062}
5063
5064static inline int
5065__always_inline
5066do_set_mmaps_max (int32_t value)
5067{
5068 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5069 mp_.no_dyn_threshold);
5070 mp_.n_mmaps_max = value;
5071 mp_.no_dyn_threshold = 1;
5072 return 1;
5073}
5074
5075static inline int
5076__always_inline
5077do_set_mallopt_check (int32_t value)
5078{
be7991c0
SP
5079 return 1;
5080}
5081
5082static inline int
5083__always_inline
5084do_set_perturb_byte (int32_t value)
5085{
5086 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5087 perturb_byte = value;
5088 return 1;
5089}
5090
5091static inline int
5092__always_inline
5093do_set_arena_test (size_t value)
5094{
5095 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5096 mp_.arena_test = value;
5097 return 1;
5098}
5099
5100static inline int
5101__always_inline
5102do_set_arena_max (size_t value)
5103{
5104 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5105 mp_.arena_max = value;
5106 return 1;
5107}
5108
d5c3fafc
DD
5109#if USE_TCACHE
5110static inline int
5111__always_inline
5112do_set_tcache_max (size_t value)
5113{
5114 if (value >= 0 && value <= MAX_TCACHE_SIZE)
5115 {
5116 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5117 mp_.tcache_max_bytes = value;
5118 mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
5119 }
5120 return 1;
5121}
5122
5123static inline int
5124__always_inline
5125do_set_tcache_count (size_t value)
5126{
95d66fec
WD
5127 if (value <= MAX_TCACHE_COUNT)
5128 {
5129 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5130 mp_.tcache_count = value;
5131 }
d5c3fafc
DD
5132 return 1;
5133}
5134
5135static inline int
5136__always_inline
5137do_set_tcache_unsorted_limit (size_t value)
5138{
5139 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5140 mp_.tcache_unsorted_limit = value;
5141 return 1;
5142}
5143#endif
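/*
   Illustrative note (assuming the standard glibc tunable names): the
   do_set_tcache_* helpers above are normally reached through the
   GLIBC_TUNABLES environment variable rather than through mallopt,
   for example:

     GLIBC_TUNABLES=glibc.malloc.tcache_count=0 ./app     (disable per-thread caching)
     GLIBC_TUNABLES=glibc.malloc.tcache_max=1024 ./app    (cache chunks up to 1024 bytes)
 */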
f65fd747 5144
4618f1ff
DD
5145static inline int
5146__always_inline
5147do_set_mxfast (size_t value)
5148{
5149 if (value >= 0 && value <= MAX_FAST_SIZE)
5150 {
5151 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5152 set_max_fast (value);
5153 return 1;
5154 }
5155 return 0;
5156}
5157
6c8dbf00
OB
5158int
5159__libc_mallopt (int param_number, int value)
f65fd747 5160{
fa8d436c
UD
5161 mstate av = &main_arena;
5162 int res = 1;
f65fd747 5163
6c8dbf00 5164 if (__malloc_initialized < 0)
0cb71e02 5165 ptmalloc_init ();
4bf5f222 5166 __libc_lock_lock (av->mutex);
2f6d1f1b 5167
3ea5be54
AO
5168 LIBC_PROBE (memory_mallopt, 2, param_number, value);
5169
3381be5c
WD
5170 /* We must consolidate main arena before changing max_fast
5171 (see definition of set_max_fast). */
5172 malloc_consolidate (av);
5173
6c8dbf00
OB
5174 switch (param_number)
5175 {
5176 case M_MXFAST:
4618f1ff 5177 do_set_mxfast (value);
6c8dbf00
OB
5178 break;
5179
5180 case M_TRIM_THRESHOLD:
be7991c0 5181 do_set_trim_threshold (value);
6c8dbf00
OB
5182 break;
5183
5184 case M_TOP_PAD:
be7991c0 5185 do_set_top_pad (value);
6c8dbf00
OB
5186 break;
5187
5188 case M_MMAP_THRESHOLD:
be7991c0 5189 res = do_set_mmap_threshold (value);
6c8dbf00
OB
5190 break;
5191
5192 case M_MMAP_MAX:
be7991c0 5193 do_set_mmaps_max (value);
6c8dbf00
OB
5194 break;
5195
5196 case M_CHECK_ACTION:
be7991c0 5197 do_set_mallopt_check (value);
6c8dbf00
OB
5198 break;
5199
5200 case M_PERTURB:
be7991c0 5201 do_set_perturb_byte (value);
6c8dbf00
OB
5202 break;
5203
5204 case M_ARENA_TEST:
5205 if (value > 0)
be7991c0 5206 do_set_arena_test (value);
6c8dbf00
OB
5207 break;
5208
5209 case M_ARENA_MAX:
5210 if (value > 0)
62222284 5211 do_set_arena_max (value);
6c8dbf00
OB
5212 break;
5213 }
4bf5f222 5214 __libc_lock_unlock (av->mutex);
fa8d436c 5215 return res;
b22fc5f5 5216}
3b49edc0 5217libc_hidden_def (__libc_mallopt)
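/*
   Usage sketch (illustrative, not part of the implementation): mallopt
   from <malloc.h> adjusts the same parameters that the do_set_*
   helpers above implement.  The M_MXFAST case, for instance, changes
   the size limit below which requests are served from the fastbins:

     #include <malloc.h>

     void
     tune_allocator (void)
     {
       // Treat requests up to 80 bytes as fastbin requests.
       mallopt (M_MXFAST, 80);

       // Never use mmap to satisfy individual large requests.
       mallopt (M_MMAP_MAX, 0);
     }

   The same knobs are reachable without code changes through the
   GLIBC_TUNABLES environment variable (e.g. glibc.malloc.mxfast,
   glibc.malloc.mmap_max), assuming the standard tunable names.
 */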
b22fc5f5 5218
10dc2a90 5219
a9177ff5 5220/*
6c8dbf00
OB
5221 -------------------- Alternative MORECORE functions --------------------
5222 */
10dc2a90 5223
b22fc5f5 5224
fa8d436c 5225/*
6c8dbf00 5226 General Requirements for MORECORE.
b22fc5f5 5227
6c8dbf00 5228 The MORECORE function must have the following properties:
b22fc5f5 5229
6c8dbf00 5230 If MORECORE_CONTIGUOUS is false:
10dc2a90 5231
6c8dbf00 5232 * MORECORE must allocate in multiples of pagesize. It will
fa8d436c 5233 only be called with arguments that are multiples of pagesize.
10dc2a90 5234
6c8dbf00 5235 * MORECORE(0) must return an address that is at least
fa8d436c 5236 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
10dc2a90 5237
6c8dbf00 5238 else (i.e. If MORECORE_CONTIGUOUS is true):
10dc2a90 5239
6c8dbf00 5240 * Consecutive calls to MORECORE with positive arguments
fa8d436c
UD
5241 return increasing addresses, indicating that space has been
5242 contiguously extended.
10dc2a90 5243
6c8dbf00 5244 * MORECORE need not allocate in multiples of pagesize.
fa8d436c 5245 Calls to MORECORE need not have args of multiples of pagesize.
10dc2a90 5246
6c8dbf00 5247 * MORECORE need not page-align.
10dc2a90 5248
6c8dbf00 5249 In either case:
10dc2a90 5250
6c8dbf00 5251 * MORECORE may allocate more memory than requested. (Or even less,
fa8d436c 5252 but this will generally result in a malloc failure.)
10dc2a90 5253
6c8dbf00 5254 * MORECORE must not allocate memory when given argument zero, but
fa8d436c
UD
5255 instead return one past the end address of memory from previous
5256 nonzero call. This malloc does NOT call MORECORE(0)
5257 until at least one call with positive arguments is made, so
5258 the initial value returned is not important.
10dc2a90 5259
6c8dbf00 5260 * Even though consecutive calls to MORECORE need not return contiguous
fa8d436c
UD
5261 addresses, it must be OK for malloc'ed chunks to span multiple
5262 regions in those cases where they do happen to be contiguous.
10dc2a90 5263
6c8dbf00 5264 * MORECORE need not handle negative arguments -- it may instead
fa8d436c
UD
5265 just return MORECORE_FAILURE when given negative arguments.
5266 Negative arguments are always multiples of pagesize. MORECORE
5267 must not misinterpret negative args as large positive unsigned
5268 args. You can suppress all such calls from even occurring by defining
5269 MORECORE_CANNOT_TRIM.
10dc2a90 5270
6c8dbf00
OB
5271 There is some variation across systems about the type of the
5272 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5273 actually be size_t, because sbrk supports negative args, so it is
5274 normally the signed type of the same width as size_t (sometimes
5275 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5276 matter though. Internally, we use "long" as arguments, which should
5277 work across all reasonable possibilities.
5278
5279 Additionally, if MORECORE ever returns failure for a positive
5280 request, then mmap is used as a noncontiguous system allocator. This
5281 is a useful backup strategy for systems with holes in address spaces
5282 -- in this case sbrk cannot contiguously expand the heap, but mmap
5283 may be able to map noncontiguous space.
5284
5285 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5286 a function that always returns MORECORE_FAILURE.
5287
5288 If you are using this malloc with something other than sbrk (or its
5289 emulation) to supply memory regions, you probably want to set
5290 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5291 allocator kindly contributed for pre-OSX macOS. It uses virtually
5292 but not necessarily physically contiguous non-paged memory (locked
5293 in, present and won't get swapped out). You can use it by
5294 uncommenting this section, adding some #includes, and setting up the
5295 appropriate defines above:
5296
5297 *#define MORECORE osMoreCore
5298 *#define MORECORE_CONTIGUOUS 0
5299
5300 There is also a shutdown routine that should somehow be called for
5301 cleanup upon program exit.
5302
5303 *#define MAX_POOL_ENTRIES 100
5304 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5305 static int next_os_pool;
5306 void *our_os_pools[MAX_POOL_ENTRIES];
5307
5308 void *osMoreCore(int size)
5309 {
fa8d436c
UD
5310 void *ptr = 0;
5311 static void *sbrk_top = 0;
ca34d7a7 5312
fa8d436c
UD
5313 if (size > 0)
5314 {
5315 if (size < MINIMUM_MORECORE_SIZE)
6c8dbf00 5316 size = MINIMUM_MORECORE_SIZE;
fa8d436c 5317 if (CurrentExecutionLevel() == kTaskLevel)
6c8dbf00 5318 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
fa8d436c
UD
5319 if (ptr == 0)
5320 {
6c8dbf00 5321 return (void *) MORECORE_FAILURE;
fa8d436c
UD
5322 }
5323 // save ptrs so they can be freed during cleanup
5324 our_os_pools[next_os_pool] = ptr;
5325 next_os_pool++;
5326 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5327 sbrk_top = (char *) ptr + size;
5328 return ptr;
5329 }
5330 else if (size < 0)
5331 {
5332 // we don't currently support shrink behavior
5333 return (void *) MORECORE_FAILURE;
5334 }
5335 else
5336 {
5337 return sbrk_top;
431c33c0 5338 }
6c8dbf00 5339 }
ca34d7a7 5340
6c8dbf00
OB
5341 // cleanup any allocated memory pools
5342 // called as last thing before shutting down driver
ca34d7a7 5343
6c8dbf00
OB
5344 void osCleanupMem(void)
5345 {
fa8d436c 5346 void **ptr;
ca34d7a7 5347
fa8d436c
UD
5348 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5349 if (*ptr)
5350 {
6c8dbf00
OB
5351 PoolDeallocate(*ptr);
5352 * ptr = 0;
fa8d436c 5353 }
6c8dbf00 5354 }
ee74a442 5355
6c8dbf00 5356 */
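/*
   Minimal sketch (illustrative, under the assumptions spelled out in
   the comment above): to make this malloc obtain all of its memory via
   mmap, MORECORE can be defined as a function that always fails:

     static void *
     fail_morecore (ptrdiff_t increment)
     {
       return (void *) MORECORE_FAILURE;
     }

     #define MORECORE fail_morecore
     #define MORECORE_CONTIGUOUS 0
 */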
f65fd747 5357
7e3be507 5358
3e030bd5
UD
5359/* Helper code. */
5360
ae7f5313
UD
5361extern char **__libc_argv attribute_hidden;
5362
3e030bd5 5363static void
ac3ed168 5364malloc_printerr (const char *str)
3e030bd5 5365{
ec2c1fce
FW
5366 __libc_message (do_abort, "%s\n", str);
5367 __builtin_unreachable ();
3e030bd5
UD
5368}
5369
a204dbb2
UD
5370/* We need a wrapper function for one of the additions of POSIX. */
5371int
5372__posix_memalign (void **memptr, size_t alignment, size_t size)
5373{
5374 void *mem;
5375
5376 /* Test whether the ALIGNMENT argument is valid. It must be a power of
5377 two multiple of sizeof (void *). */
de02bd05 5378 if (alignment % sizeof (void *) != 0
fc56e970 5379 || !powerof2 (alignment / sizeof (void *))
de02bd05 5380 || alignment == 0)
a204dbb2
UD
5381 return EINVAL;
5382
10ad46bc
OB
5383
5384 void *address = RETURN_ADDRESS (0);
5385 mem = _mid_memalign (alignment, size, address);
a204dbb2 5386
6c8dbf00
OB
5387 if (mem != NULL)
5388 {
5389 *memptr = mem;
5390 return 0;
5391 }
a204dbb2
UD
5392
5393 return ENOMEM;
5394}
5395weak_alias (__posix_memalign, posix_memalign)
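/*
   Usage sketch (illustrative, not part of the implementation):
   posix_memalign reports errors through its return value rather than
   through errno, and the alignment must be a power-of-two multiple of
   sizeof (void *):

     #include <stdlib.h>

     void *
     alloc_page_aligned (size_t size)
     {
       void *p = NULL;
       int err = posix_memalign (&p, 4096, size);
       if (err != 0)         // EINVAL for a bad alignment, ENOMEM on failure
         return NULL;
       return p;             // released with free () as usual
     }
 */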
5396
20c13899
OB
5397
5398int
c52ff39e 5399__malloc_info (int options, FILE *fp)
bb066545 5400{
20c13899
OB
5401 /* For now, at least. */
5402 if (options != 0)
5403 return EINVAL;
bb066545 5404
20c13899
OB
5405 int n = 0;
5406 size_t total_nblocks = 0;
5407 size_t total_nfastblocks = 0;
5408 size_t total_avail = 0;
5409 size_t total_fastavail = 0;
5410 size_t total_system = 0;
5411 size_t total_max_system = 0;
5412 size_t total_aspace = 0;
5413 size_t total_aspace_mprotect = 0;
bb066545 5414
6c8dbf00 5415
6c8dbf00 5416
987c0269
OB
5417 if (__malloc_initialized < 0)
5418 ptmalloc_init ();
bb066545 5419
987c0269 5420 fputs ("<malloc version=\"1\">\n", fp);
bb066545 5421
987c0269
OB
5422 /* Iterate over all arenas currently in use. */
5423 mstate ar_ptr = &main_arena;
5424 do
5425 {
5426 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
8b35e35d 5427
987c0269
OB
5428 size_t nblocks = 0;
5429 size_t nfastblocks = 0;
5430 size_t avail = 0;
5431 size_t fastavail = 0;
5432 struct
5433 {
5434 size_t from;
5435 size_t to;
5436 size_t total;
5437 size_t count;
5438 } sizes[NFASTBINS + NBINS - 1];
5439#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
6c8dbf00 5440
4bf5f222 5441 __libc_lock_lock (ar_ptr->mutex);
bb066545 5442
c6e4c319
NH
5443 /* Account for top chunk. The top-most available chunk is
5444 treated specially and is never in any bin. See "initial_top"
5445 comments. */
5446 avail = chunksize (ar_ptr->top);
5447 nblocks = 1; /* Top always exists. */
5448
987c0269
OB
5449 for (size_t i = 0; i < NFASTBINS; ++i)
5450 {
5451 mchunkptr p = fastbin (ar_ptr, i);
5452 if (p != NULL)
5453 {
5454 size_t nthissize = 0;
5455 size_t thissize = chunksize (p);
5456
5457 while (p != NULL)
5458 {
5459 ++nthissize;
5460 p = p->fd;
5461 }
5462
5463 fastavail += nthissize * thissize;
5464 nfastblocks += nthissize;
5465 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5466 sizes[i].to = thissize;
5467 sizes[i].count = nthissize;
5468 }
5469 else
5470 sizes[i].from = sizes[i].to = sizes[i].count = 0;
bb066545 5471
987c0269
OB
5472 sizes[i].total = sizes[i].count * sizes[i].to;
5473 }
bb066545 5474
bb066545 5475
987c0269
OB
5476 mbinptr bin;
5477 struct malloc_chunk *r;
bb066545 5478
987c0269
OB
5479 for (size_t i = 1; i < NBINS; ++i)
5480 {
5481 bin = bin_at (ar_ptr, i);
5482 r = bin->fd;
5483 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5484 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5485 = sizes[NFASTBINS - 1 + i].count = 0;
5486
5487 if (r != NULL)
5488 while (r != bin)
5489 {
e9c4fe93 5490 size_t r_size = chunksize_nomask (r);
987c0269 5491 ++sizes[NFASTBINS - 1 + i].count;
e9c4fe93 5492 sizes[NFASTBINS - 1 + i].total += r_size;
987c0269 5493 sizes[NFASTBINS - 1 + i].from
e9c4fe93 5494 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
987c0269 5495 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
e9c4fe93 5496 r_size);
987c0269
OB
5497
5498 r = r->fd;
5499 }
5500
5501 if (sizes[NFASTBINS - 1 + i].count == 0)
5502 sizes[NFASTBINS - 1 + i].from = 0;
5503 nblocks += sizes[NFASTBINS - 1 + i].count;
5504 avail += sizes[NFASTBINS - 1 + i].total;
5505 }
bb066545 5506
7a9368a1
FW
5507 size_t heap_size = 0;
5508 size_t heap_mprotect_size = 0;
34eb4157 5509 size_t heap_count = 0;
7a9368a1
FW
5510 if (ar_ptr != &main_arena)
5511 {
34eb4157 5512 /* Iterate over the arena heaps from back to front. */
7a9368a1 5513 heap_info *heap = heap_for_ptr (top (ar_ptr));
34eb4157
FW
5514 do
5515 {
5516 heap_size += heap->size;
5517 heap_mprotect_size += heap->mprotect_size;
5518 heap = heap->prev;
5519 ++heap_count;
5520 }
5521 while (heap != NULL);
7a9368a1
FW
5522 }
5523
4bf5f222 5524 __libc_lock_unlock (ar_ptr->mutex);
da2d2fb6 5525
987c0269
OB
5526 total_nfastblocks += nfastblocks;
5527 total_fastavail += fastavail;
0588a9cb 5528
987c0269
OB
5529 total_nblocks += nblocks;
5530 total_avail += avail;
0588a9cb 5531
987c0269
OB
5532 for (size_t i = 0; i < nsizes; ++i)
5533 if (sizes[i].count != 0 && i != NFASTBINS)
a0a551d2 5534 fprintf (fp, "\
987c0269
OB
5535 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5536 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
fdfd175d 5537
987c0269
OB
5538 if (sizes[NFASTBINS].count != 0)
5539 fprintf (fp, "\
5540 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5541 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5542 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
fdfd175d 5543
987c0269
OB
5544 total_system += ar_ptr->system_mem;
5545 total_max_system += ar_ptr->max_system_mem;
bb066545 5546
987c0269
OB
5547 fprintf (fp,
5548 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5549 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5550 "<system type=\"current\" size=\"%zu\"/>\n"
5551 "<system type=\"max\" size=\"%zu\"/>\n",
5552 nfastblocks, fastavail, nblocks, avail,
5553 ar_ptr->system_mem, ar_ptr->max_system_mem);
346bc35c 5554
987c0269
OB
5555 if (ar_ptr != &main_arena)
5556 {
987c0269
OB
5557 fprintf (fp,
5558 "<aspace type=\"total\" size=\"%zu\"/>\n"
34eb4157
FW
5559 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5560 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5561 heap_size, heap_mprotect_size, heap_count);
7a9368a1
FW
5562 total_aspace += heap_size;
5563 total_aspace_mprotect += heap_mprotect_size;
987c0269
OB
5564 }
5565 else
5566 {
5567 fprintf (fp,
5568 "<aspace type=\"total\" size=\"%zu\"/>\n"
5569 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5570 ar_ptr->system_mem, ar_ptr->system_mem);
5571 total_aspace += ar_ptr->system_mem;
5572 total_aspace_mprotect += ar_ptr->system_mem;
5573 }
bb066545 5574
987c0269 5575 fputs ("</heap>\n", fp);
bb066545
UD
5576 ar_ptr = ar_ptr->next;
5577 }
5578 while (ar_ptr != &main_arena);
5579
5580 fprintf (fp,
62a58816
SP
5581 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5582 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
9fa76613 5583 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
62a58816
SP
5584 "<system type=\"current\" size=\"%zu\"/>\n"
5585 "<system type=\"max\" size=\"%zu\"/>\n"
5586 "<aspace type=\"total\" size=\"%zu\"/>\n"
5587 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5588 "</malloc>\n",
5589 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
4d653a59 5590 mp_.n_mmaps, mp_.mmapped_mem,
62a58816
SP
5591 total_system, total_max_system,
5592 total_aspace, total_aspace_mprotect);
bb066545
UD
5593
5594 return 0;
5595}
c52ff39e 5596weak_alias (__malloc_info, malloc_info)
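/*
   Usage sketch (illustrative, not part of the implementation):
   malloc_info writes the XML document built above to any stdio
   stream; as the code notes, options must currently be 0:

     #include <malloc.h>
     #include <stdio.h>

     int
     dump_malloc_info (const char *path)
     {
       FILE *fp = fopen (path, "w");
       if (fp == NULL)
         return -1;
       int rc = malloc_info (0, fp);   // 0 on success, nonzero if options != 0
       fclose (fp);
       return rc;
     }
 */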
bb066545
UD
5597
5598
eba19d2b 5599strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
eba19d2b
UD
5600strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5601strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5602strong_alias (__libc_memalign, __memalign)
5603weak_alias (__libc_memalign, memalign)
5604strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5605strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5606strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5607strong_alias (__libc_mallinfo, __mallinfo)
5608weak_alias (__libc_mallinfo, mallinfo)
5609strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
7e3be507
UD
5610
5611weak_alias (__malloc_stats, malloc_stats)
5612weak_alias (__malloc_usable_size, malloc_usable_size)
5613weak_alias (__malloc_trim, malloc_trim)
7e3be507 5614
025b33ae
FW
5615#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
5616compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
5617#endif
f65fd747 5618
fa8d436c 5619/* ------------------------------------------------------------
6c8dbf00 5620 History:
f65fd747 5621
6c8dbf00 5622 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
f65fd747 5623
6c8dbf00 5624 */
fa8d436c
UD
5625/*
5626 * Local variables:
5627 * c-basic-offset: 2
5628 * End:
5629 */