/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 1996-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>
   and Doug Lea <dl@cs.oswego.edu>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
/*
  This is a version (aka ptmalloc2) of malloc/free/realloc written by
  Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.

  There have been substantial changes made after the integration into
  glibc in all parts of the code.  Do not look for much commonality
  with the ptmalloc2 version.

* Version ptmalloc2-20011215
  based on:
  VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)

* Quickstart

  In order to compile this implementation, a Makefile is provided with
  the ptmalloc2 distribution, which has pre-defined targets for some
  popular systems (e.g. "make posix" for Posix threads).  All that is
  typically required with regard to compiler flags is the selection of
  the thread package via defining one out of USE_PTHREADS, USE_THR or
  USE_SPROC.  Check the thread-m.h file for what effects this has.
  Many/most systems will additionally require USE_TSD_DATA_HACK to be
  defined, so this is the default for "make posix".

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written.  However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator for malloc-intensive programs.

  The main properties of the algorithms are:
  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
    with ties normally decided via FIFO (i.e. least recently used).
  * For small (<= 64 bytes by default) requests, it is a caching
    allocator, that maintains pools of quickly recycled chunks.
  * In between, and for combinations of large and small requests, it does
    the best it can trying to meet both goals at once.
  * For very large requests (>= 128KB by default), it relies on system
    memory mapping facilities, if supported.

  For a longer but slightly out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

  You may already by default be using a C library containing a malloc
  that is based on some version of this malloc (for example in
  linux).  You might still want to use the one in this file in order to
  customize settings or to avoid overheads associated with library
  versions.

* Contents, described in more detail in "description of public routines" below.

  Standard (ANSI/SVID/...) functions:
    malloc(size_t n);
    calloc(size_t n_elements, size_t element_size);
    free(void* p);
    realloc(void* p, size_t n);
    memalign(size_t alignment, size_t n);
    valloc(size_t n);
    mallinfo()
    mallopt(int parameter_number, int parameter_value)

  Additional functions:
    independent_calloc(size_t n_elements, size_t size, void* chunks[]);
    independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
    pvalloc(size_t n);
    malloc_trim(size_t pad);
    malloc_usable_size(void* p);
    malloc_stats();

* Vital statistics:

  Supported pointer representation:       4 or 8 bytes
  Supported size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.
       You can adjust this by defining INTERNAL_SIZE_T

  Alignment:                              2 * sizeof(size_t) (default)
       (i.e., 8 byte alignment with 4byte size_t).  This suffices for
       nearly all current machines and C compilers.  However, you can
       define MALLOC_ALIGNMENT to be wider than this if necessary.

  Minimum overhead per allocated chunk:   4 or 8 bytes
       Each malloced chunk has a hidden word of overhead holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field and 8 (16) bytes for
       free list pointers.  Thus, the minimum allocatable size is
       16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

       The maximum overhead wastage (i.e., number of extra bytes
       allocated than were requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is 2 *
       sizeof(size_t) bytes plus the remainder from a system page (the
       minimal mmap unit); typically 4096 or 8192 bytes.

  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
                           8-byte size_t: 2^64 minus about two pages

       It is assumed that (possibly signed) size_t values suffice to
       represent chunk sizes.  `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type.  The ISO C standard says that it must be
       unsigned, but a few systems are known not to adhere to this.
       Additionally, even when size_t is unsigned, sbrk (which is by
       default used to obtain memory from system) accepts signed
       arguments, and may not be able to handle size_t-wide arguments
       with negative sign bit.  Generally, values that would
       appear as negative after accounting for overhead and alignment
       are supported only via mmap(), which does not have this
       limitation.

       Requests for sizes outside the allowed range will perform an optional
       failure action and then return null.  (Requests may also
       fail because a system is out of memory.)

  Thread-safety: thread-safe

  Compliance: I believe it is compliant with the 1997 Single Unix Specification
       Also SVID/XPG, ANSI C, and probably others as well.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below.  It has been tested most extensively on Solaris and Linux.
    People also report using it in stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  It is not
    at all modular. (Sorry!)  It uses a lot of macros.  To be at all
    usable, this code should be compiled using an optimizing compiler
    (for example gcc -O3) that can simplify expressions and control
    paths. (FAQ: some macros import variables as arguments rather than
    declare locals because people reported that some debuggers
    otherwise get confused.)

    OPTION                     DEFAULT VALUE

    Compilation Environment options:

    HAVE_MREMAP                0

    Changing default word sizes:

    INTERNAL_SIZE_T            size_t

    Configuration and functionality options:

    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
    USE_MALLOC_LOCK            NOT defined
    MALLOC_DEBUG               NOT defined
    REALLOC_ZERO_BYTES_FREES   1
    TRIM_FASTBINS              0

    Options for customizing MORECORE:

    MORECORE                   sbrk
    MORECORE_FAILURE           -1
    MORECORE_CONTIGUOUS        1
    MORECORE_CANNOT_TRIM       NOT defined
    MORECORE_CLEARS            1
    MMAP_AS_MORECORE_SIZE      (1024 * 1024)

    Tuning options that are also dynamically changeable via mallopt:

    DEFAULT_MXFAST             64 (for 32bit), 128 (for 64bit)
    DEFAULT_TRIM_THRESHOLD     128 * 1024
    DEFAULT_TOP_PAD            0
    DEFAULT_MMAP_THRESHOLD     128 * 1024
    DEFAULT_MMAP_MAX           65536

    There are several other #defined constants and macros that you
    probably don't want to touch unless you are extending or adapting malloc.  */
/*
  void* is the pointer type that malloc should say it returns
*/

#ifndef void
#define void void
#endif /*void*/

#include <stddef.h>   /* for size_t */
#include <stdlib.h>   /* for getenv(), abort() */
#include <unistd.h>   /* for __libc_enable_secure */

#include <atomic.h>
#include <_itoa.h>
#include <bits/wordsize.h>
#include <sys/sysinfo.h>

#include <ldsodefs.h>

#include <unistd.h>
#include <stdio.h>    /* needed for malloc_stats */
#include <errno.h>

#include <shlib-compat.h>

/* For uintptr_t.  */
#include <stdint.h>

/* For va_arg, va_start, va_end.  */
#include <stdarg.h>

/* For MIN, MAX, powerof2.  */
#include <sys/param.h>

/* For ALIGN_UP et. al.  */
#include <libc-pointer-arith.h>

/* For DIAG_PUSH/POP_NEEDS_COMMENT et al.  */
#include <libc-diag.h>

#include <malloc/malloc-internal.h>

/*
  Debugging:

  Because freed chunks may be overwritten with bookkeeping fields, this
  malloc will often die when freed memory is overwritten by user
  programs.  This can be very effective (albeit in an annoying way)
  in helping track down dangling pointers.

  If you compile with -DMALLOC_DEBUG, a number of assertion checks are
  enabled that will catch more memory errors.  You probably won't be
  able to make much sense of the actual assertion errors, but they
  should help you locate incorrectly overwritten memory.  The checking
  is fairly extensive, and will slow down execution
  noticeably.  Calling malloc_stats or mallinfo with MALLOC_DEBUG set
  will attempt to check every non-mmapped allocated and free chunk in
  the course of computing the summaries.  (By nature, mmapped regions
  cannot be checked very much automatically.)

  Setting MALLOC_DEBUG may also be helpful if you are trying to modify
  this code.  The assertions in the check routines spell out in more
  detail the assumptions and invariants underlying the algorithms.

  Setting MALLOC_DEBUG does NOT provide an automated mechanism for
  checking that all accesses to malloced memory stay within their
  bounds.  However, there are several add-ons and adaptations of this
  or other mallocs available that do this.
*/

#ifndef MALLOC_DEBUG
#define MALLOC_DEBUG 0
#endif

#ifdef NDEBUG
# define assert(expr) ((void) 0)
#else
# define assert(expr) \
  ((expr)                                                       \
   ? ((void) 0)                                                 \
   : __malloc_assert (#expr, __FILE__, __LINE__, __func__))

extern const char *__progname;

static void
__malloc_assert (const char *assertion, const char *file, unsigned int line,
                 const char *function)
{
  (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
                     __progname, __progname[0] ? ": " : "",
                     file, line,
                     function ? function : "", function ? ": " : "",
                     assertion);
  fflush (stderr);
  abort ();
}
#endif
#if USE_TCACHE
/* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
# define TCACHE_MAX_BINS                64
# define MAX_TCACHE_SIZE        tidx2usize (TCACHE_MAX_BINS-1)

/* Only used to pre-fill the tunables.  */
# define tidx2usize(idx)        (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)

/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))

/* With rounding and alignment, the bins are...
   idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
   idx 1   bytes 25..40 or 13..20
   idx 2   bytes 41..56 or 21..28
   etc.  */

/* This is another arbitrary limit, which tunables can change.  Each
   tcache bin will hold at most this number of chunks.  */
# define TCACHE_FILL_COUNT 7
#endif
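/* A worked example of the index mapping above (illustrative sketch
   only, assuming a 64-bit build where SIZE_SZ is 8, MALLOC_ALIGNMENT is
   16 and MINSIZE is 32):

     request2size (24) == 32,  csize2tidx (32) == (32 - 32 + 15) / 16 == 0
     request2size (25) == 48,  csize2tidx (48) == (48 - 32 + 15) / 16 == 1

   so user requests of 0..24 bytes land in tcache bin 0 and 25..40 in
   bin 1, matching the table above; tidx2usize (0) == 0 * 16 + 32 - 8
   == 24 gives back the largest user size that still maps to bin 0.  */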
/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  This is required by the C standard.  Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/

#ifndef REALLOC_ZERO_BYTES_FREES
#define REALLOC_ZERO_BYTES_FREES 1
#endif

/*
  TRIM_FASTBINS controls whether free() of a very small chunk can
  immediately lead to trimming.  Setting to true (1) can reduce memory
  footprint, but will almost always slow down programs that use a lot
  of small chunks.

  Define this only if you are willing to give up some speed to more
  aggressively reduce system-level memory footprint when releasing
  memory in programs that use many small chunks.  You can get
  essentially the same effect by setting MXFAST to 0, but this can
  lead to even greater slowdowns in programs using many small chunks.
  TRIM_FASTBINS is an in-between compile-time option, that disables
  only those chunks bordering topmost memory from being placed in
  fastbins.
*/

#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS  0
#endif


/* Definition for getting more memory from the OS.  */
#define MORECORE         (*__morecore)
#define MORECORE_FAILURE 0
void * __default_morecore (ptrdiff_t);
void *(*__morecore)(ptrdiff_t) = __default_morecore;


#include <string.h>

/*
  MORECORE-related declarations.  By default, rely on sbrk
*/


/*
  MORECORE is the name of the routine to call to obtain more memory
  from the system.  See below for general guidance on writing
  alternative MORECORE functions, as well as a version for WIN32 and a
  sample version for pre-OSX macos.
*/

#ifndef MORECORE
#define MORECORE sbrk
#endif

/*
  MORECORE_FAILURE is the value returned upon failure of MORECORE
  as well as mmap.  Since it cannot be an otherwise valid memory address,
  and must reflect values of standard sys calls, you probably ought not
  try to redefine it.
*/

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif

/*
  If MORECORE_CONTIGUOUS is true, take advantage of fact that
  consecutive calls to MORECORE with positive arguments always return
  contiguous increasing addresses.  This is true of unix sbrk.  Even
  if not defined, when regions happen to be contiguous, malloc will
  permit allocations spanning regions obtained from different
  calls.  But defining this when applicable enables some stronger
  consistency checks and space efficiencies.
*/

#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  Define MORECORE_CANNOT_TRIM if your version of MORECORE
  cannot release space back to the system when given negative
  arguments.  This is generally necessary only if you are using
  a hand-crafted MORECORE function that cannot handle negative arguments.
*/

/* #define MORECORE_CANNOT_TRIM */

/* MORECORE_CLEARS           (default 1)
     The degree to which the routine mapped to MORECORE zeroes out
     memory: never (0), only for newly allocated space (1) or always
     (2).  The distinction between (1) and (2) is necessary because on
     some systems, if the application first decrements and then
     increments the break value, the contents of the reallocated space
     are unspecified.
 */

#ifndef MORECORE_CLEARS
# define MORECORE_CLEARS 1
#endif


/*
   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
   sbrk fails, and mmap is used as a backup.  The value must be a
   multiple of page size.  This backup strategy generally applies only
   when systems have "holes" in address space, so sbrk cannot perform
   contiguous expansion, but there is still space available on system.
   On systems for which this is known to be useful (i.e. most linux
   kernels), this occurs only when programs allocate huge amounts of
   memory.  Between this, and the fact that mmap regions tend to be
   limited, the size should be large, to avoid too many mmap calls and
   thus avoid running out of kernel resources.  */

#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.
*/

#ifndef HAVE_MREMAP
#define HAVE_MREMAP 0
#endif

/* We may need to support __malloc_initialize_hook for backwards
   compatibility.  */

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
# define HAVE_MALLOC_INIT_HOOK 1
#else
# define HAVE_MALLOC_INIT_HOOK 0
#endif


/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics.  It should work on any SVID/XPG compliant system that has
  a /usr/include/malloc.h defining struct mallinfo.  (If you'd like to
  install such a thing yourself, cut out the preliminary declarations
  as described above and below and save them in a malloc.h file.  But
  there's no compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields that are not even meaningful in this version of
  malloc.  These fields are instead filled by mallinfo() with
  other numbers that might be of interest.
*/


/* ---------- description of public routines ------------ */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or null
  if no space is available.  Additionally, on failure, errno is
  set to ENOMEM on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk.  (The minimum
  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
  systems.)  On most systems, size_t is an unsigned type, so calls
  with negative arguments are interpreted as requests for huge amounts
  of space, which will often fail.  The maximum supported value of n
  differs across systems, but is in all cases less than the maximum
  representable value of a size_t.
*/
void*  __libc_malloc(size_t);
libc_hidden_proto (__libc_malloc)
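/* Illustrative sketch only (not part of the implementation): a caller
   relying on the failure behaviour documented above might write

     void *p = malloc (n);
     if (p == NULL)
       perror ("malloc");

   since errno has already been set to ENOMEM when NULL is returned.  */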

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null.  It can have arbitrary (i.e., bad!)
  effects if p has already been freed.

  Unless disabled (using mallopt), freeing very large spaces will,
  when possible, automatically trigger operations that give
  back unused memory to the system, thus reducing program footprint.
*/
void     __libc_free(void*);
libc_hidden_proto (__libc_free)

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void*  __libc_calloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p.  The algorithm
  prefers extending p when possible, otherwise it employs the
  equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  Unless the #define
  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
  zero (re)allocates a minimum-sized chunk.

  Large chunks that were internally obtained via mmap will always be
  grown using malloc-copy-free sequences unless the system supports
  MREMAP (currently only linux).

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void*  __libc_realloc(void*, size_t);
libc_hidden_proto (__libc_realloc)
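/* Illustrative sketch only (not part of the implementation): because a
   failed realloc leaves the old block allocated, callers normally keep
   the old pointer until the call succeeds:

     void *q = realloc (p, n);
     if (q == NULL)
       free (p);
     else
       p = q;
*/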

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two.  If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void*  __libc_memalign(size_t, size_t);
libc_hidden_proto (__libc_memalign)

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system.  If the pagesize is unknown, 4096 is used.
*/
void*  __libc_valloc(size_t);



/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
  in this malloc.  The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
  so setting them has no effect.  But this malloc also supports four
  other options in mallopt.  See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #   default    allowed param values
  M_MXFAST          1         64         0-80  (0 disables fastbins)
  M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
  M_TOP_PAD        -2         0          any
  M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
*/
int      __libc_mallopt(int, int);
libc_hidden_proto (__libc_mallopt)
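/* Illustrative sketch only (not part of the implementation): using the
   parameters from the table above, an application could disable
   fastbins and raise the trim threshold with

     if (!mallopt (M_MXFAST, 0))
       fprintf (stderr, "M_MXFAST request not honored\n");
     mallopt (M_TRIM_THRESHOLD, 256 * 1024);

   remembering that mallopt returns 1 on success and 0 otherwise.  */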


/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    the number of fastbin blocks (i.e., small chunks that
               have been freed but not yet reused or consolidated)
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   always 0
  fsmblks:   total bytes held in fastbin blocks
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim. ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
struct mallinfo __libc_mallinfo(void);


/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
 */
void*  __libc_pvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool.  You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program.  However, it cannot guarantee to reduce memory.  Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed.  If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less).  Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
  On systems that do not support "negative sbrks", it will always
  return 0.
*/
int      __malloc_trim(size_t);
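/* Illustrative sketch only (not part of the implementation): a
   long-lived program that has just freed a large working set might ask
   for an immediate trim and check whether anything was returned:

     if (malloc_trim (0) == 0)
       puts ("no memory could be released to the system");

   passing pad == 0 so that only the bookkeeping minimum is kept.  */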

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects.  This is not a particularly great
  programming practice.  malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);

*/
size_t   __malloc_usable_size(void*);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed.  Note that this is the number of bytes allocated, not the
  number requested.  It will be larger than the number requested
  because of alignment and bookkeeping overhead.  Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.

*/
void     __malloc_stats(void);

/*
  malloc_get_state(void);

  Returns the state of all malloc variables in an opaque data
  structure.
*/
void*  __malloc_get_state(void);

/*
  malloc_set_state(void* state);

  Restore the state of all malloc variables from data obtained with
  malloc_get_state().
*/
int      __malloc_set_state(void*);

/*
  posix_memalign(void **memptr, size_t alignment, size_t size);

  POSIX wrapper like memalign(), checking for validity of size.
*/
int __posix_memalign(void **, size_t, size_t);
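/* Illustrative sketch only (not part of the implementation): unlike
   memalign, the POSIX wrapper reports failure through its return value,
   so a typical call looks like

     void *buf;
     int err = posix_memalign (&buf, 64, 1024);
     if (err != 0)
       abort ();

   where 64 must be a power of two that is a multiple of sizeof (void *).  */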

/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces.  This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default.  The maximum supported value of MXFAST
  is 80.  You wouldn't want it any higher than this anyway.  Fastbins
  are designed especially for use with many small structs, objects or
  strings -- the default handles structs/objects/arrays with sizes up
  to 8 4byte fields, or small strings representing words, tokens,
  etc.  Using fastbins for larger objects normally worsens
  fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units.  It is internally used in
  chunksize units, which adds padding and alignment.  You can reduce
  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/


/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
#endif


/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

  Automatic trimming is mainly useful in long-lived programs.
  Because trimming via sbrk can be slow on some systems, and can
  sometimes be wasteful (in cases where programs immediately
  afterward allocate more large chunks) the value should be high
  enough so that your overall system performance would improve by
  releasing this much memory.

  The trim threshold and the mmap control parameters (see below)
  can be traded off with one another.  Trimming and mmapping are
  two different ways of releasing unused memory back to the
  system.  Between these two, it is often possible to keep
  system-level demands of a long-lived program down to a bare
  minimum.  For example, in one test suite of sessions measuring
  the XF86 X server on Linux, using a trim threshold of 128K and a
  mmap threshold of 192K led to near-minimal long term resource
  consumption.

  If you are using this malloc in a long-lived program, it should
  pay to experiment with these values.  As a rough guide, you
  might set to a value close to the average size of a process
  (program) running on your system.  Releasing this much memory
  would allow such a process to run in memory.  Generally, it's
  worth it to tune for trimming rather than memory mapping when a
  program undergoes phases where several large chunks are
  allocated and released in ways that can reuse each other's
  storage, perhaps mixed with phases where there are no such
  chunks at all.  And in well-behaved long-lived programs,
  controlling release of large blocks via trimming versus mapping
  is usually faster.

  However, in most programs, these parameters serve mainly as
  protection against the system-level effects of carrying around
  massive amounts of unneeded memory.  Since frequent calls to
  sbrk, mmap, and munmap otherwise degrade performance, the default
  parameters are set to relatively high values that serve only as
  safeguards.

  The trim value must be greater than page size to have any useful
  effect.  To disable trimming completely, you can set to
  (unsigned long)(-1)

  Trim settings interact with fastbin (MXFAST) settings: Unless
  TRIM_FASTBINS is defined, automatic trimming never takes place upon
  freeing a chunk with size less than or equal to MXFAST.  Trimming is
  instead delayed until subsequent freeing of larger chunks.  However,
  you can still force an attempted trim by calling malloc_trim.

  Also, trimming is not generally possible in cases where
  the main arena is obtained via mmap.

  Note that the trick some people use of mallocing a huge space and
  then freeing it at program startup, in an attempt to reserve system
  memory, doesn't have the intended effect under automatic trimming,
  since that memory will immediately be returned to the system.
*/

#define M_TRIM_THRESHOLD       -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
  M_TOP_PAD is the amount of extra `padding' space to allocate or
  retain whenever sbrk is called.  It is used in two ways internally:

  * When sbrk is called to extend the top of the arena to satisfy
    a new malloc request, this much padding is added to the sbrk
    request.

  * When malloc_trim is called automatically from free(),
    it is used as the `pad' argument.

  In both cases, the actual amount of padding is rounded
  so that the end of the arena is always a system page boundary.

  The main reason for using padding is to avoid calling sbrk so
  often.  Having even a small pad greatly reduces the likelihood
  that nearly every malloc request during program start-up (or
  after trimming) will invoke sbrk, which needlessly wastes
  time.

  Automatic rounding-up to page-size units is normally sufficient
  to avoid measurable overhead, so the default is 0.  However, in
  systems where sbrk is relatively slow, it can pay to increase
  this value, at the expense of carrying around more memory than
  the program needs.
*/

#define M_TOP_PAD              -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
  MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
  adjusted MMAP_THRESHOLD.
*/

#ifndef DEFAULT_MMAP_THRESHOLD_MIN
#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
#endif

#ifndef DEFAULT_MMAP_THRESHOLD_MAX
  /* For 32-bit platforms we cannot increase the maximum mmap
     threshold much because it is also the minimum value for the
     maximum heap size and its alignment.  Going above 512k (i.e., 1M
     for new heaps) wastes too much address space.  */
# if __WORDSIZE == 32
#  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
# else
#  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
# endif
#endif

/*
  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  to service a request.  Requests of at least this size that cannot
  be allocated using already-existing space will be serviced via mmap.
  (If enough normal freed space already exists it is used instead.)

  Using mmap segregates relatively large chunks of memory so that
  they can be individually obtained and released from the host
  system.  A request serviced through mmap is never reused by any
  other request (at least not directly; the system may just so
  happen to remap successive requests to the same locations).

  Segregating space in this way has the benefits that:

  1. Mmapped space can ALWAYS be individually released back
     to the system, which helps keep the system level memory
     demands of a long-lived program low.
  2. Mapped memory can never become `locked' between
     other chunks, as can happen with normally allocated chunks, which
     means that even trimming via malloc_trim would not release them.
  3. On some systems with "holes" in address spaces, mmap can obtain
     memory that sbrk cannot.

  However, it has the disadvantages that:

  1. The space cannot be reclaimed, consolidated, and then
     used to service later requests, as happens with normal chunks.
  2. It can lead to more wastage because of mmap page alignment
     requirements
  3. It causes malloc performance to be more dependent on host
     system memory management support routines which may vary in
     implementation quality and may impose arbitrary
     limitations.  Generally, servicing a request via normal
     malloc steps is faster than going through a system's mmap.

  The advantages of mmap nearly always outweigh disadvantages for
  "large" chunks, but the value of "large" varies across systems.  The
  default is an empirically derived value that works well in most
  systems.


  Update in 2006:
  The above was written in 2001.  Since then the world has changed a lot.
  Memory got bigger.  Applications got bigger.  The virtual address space
  layout in 32 bit linux changed.

  In the new situation, brk() and mmap space is shared and there are no
  artificial limits on brk size imposed by the kernel.  What is more,
  applications have started using transient allocations larger than the
  128Kb as was imagined in 2001.

  The price for mmap is also high now; each time glibc mmaps from the
  kernel, the kernel is forced to zero out the memory it gives to the
  application.  Zeroing memory is expensive and eats a lot of cache and
  memory bandwidth.  This has nothing to do with the efficiency of the
  virtual memory system, by doing mmap the kernel just has no choice but
  to zero.

  In 2001, the kernel had a maximum size for brk() which was about 800
  megabytes on 32 bit x86, at that point brk() would hit the first
  mmapped shared libraries and couldn't expand anymore.  With current 2.6
  kernels, the VA space layout is different and brk() and mmap
  both can span the entire heap at will.

  Rather than using a static threshold for the brk/mmap tradeoff,
  we are now using a simple dynamic one.  The goal is still to avoid
  fragmentation.  The old goals we kept are
  1) try to get the long lived large allocations to use mmap()
  2) really large allocations should always use mmap()
  and we're adding now:
  3) transient allocations should use brk() to avoid forcing the kernel
     having to zero memory over and over again

  The implementation works with a sliding threshold, which is by default
  limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
  out at 128Kb as per the 2001 default.

  This allows us to satisfy requirement 1) under the assumption that long
  lived allocations are made early in the process' lifespan, before it has
  started doing dynamic allocations of the same size (which will
  increase the threshold).

  The upperbound on the threshold satisfies requirement 2)

  The threshold goes up in value when the application frees memory that was
  allocated with the mmap allocator.  The idea is that once the application
  starts freeing memory of a certain size, it's highly probable that this is
  a size the application uses for transient allocations.  This estimator
  is there to satisfy the new third requirement.

*/

#define M_MMAP_THRESHOLD      -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
#endif

/*
  M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap.  This parameter exists because
  some systems have a limited number of internal tables for
  use by mmap, and using more than a few of them may degrade
  performance.

  The default is set to a value that serves only as a safeguard.
  Setting to 0 disables use of mmap for servicing large requests.
*/

#define M_MMAP_MAX             -4

#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX       (65536)
#endif

#include <malloc.h>

#ifndef RETURN_ADDRESS
#define RETURN_ADDRESS(X_) (NULL)
#endif

/* On some platforms we can compile internal, not exported functions better.
   Let the environment provide a macro and define it to be empty if it
   is not available.  */
#ifndef internal_function
# define internal_function
#endif

/* Forward declarations.  */
struct malloc_chunk;
typedef struct malloc_chunk* mchunkptr;

/* Internal routines.  */

static void*  _int_malloc(mstate, size_t);
static void     _int_free(mstate, mchunkptr, int);
static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
                           INTERNAL_SIZE_T);
static void*  _int_memalign(mstate, size_t, size_t);
static void*  _mid_memalign(size_t, size_t, void *);

static void malloc_printerr(int action, const char *str, void *ptr, mstate av);

static void* internal_function mem2mem_check(void *p, size_t sz);
static int internal_function top_check(void);
static void internal_function munmap_chunk(mchunkptr p);
#if HAVE_MREMAP
static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
#endif

static void*   malloc_check(size_t sz, const void *caller);
static void      free_check(void* mem, const void *caller);
static void*   realloc_check(void* oldmem, size_t bytes,
                             const void *caller);
static void*   memalign_check(size_t alignment, size_t bytes,
                              const void *caller);
/* ------------------ MMAP support ------------------ */


#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_NORESERVE
# define MAP_NORESERVE 0
#endif

#define MMAP(addr, size, prot, flags) \
 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)

/*
  -----------------------  Chunk representations -----------------------
*/


/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base.  See explanation below.
*/

struct malloc_chunk {

  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};


/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                     |A|M|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_size() bytes)                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             (size of chunk, but used for application data)    |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|1|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                     |A|0|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|0|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory. If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

    The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
    main arena, described by the main_arena variable.  When additional
    threads are spawned, each thread receives its own arena (up to a
    configurable limit, after which arenas are reused for multiple
    threads), and the chunks in these arenas have the A bit set.  To
    find the arena for a chunk on such a non-main arena, heap_for_ptr
    performs a bit mask operation and indirection through the ar_ptr
    member of the per-heap header heap_info (see arena.c).

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk.  This makes it easier to
    deal with alignments etc but can be very confusing when trying
    to extend or adapt this code.

    The three exceptions to all this are:

     1. The special chunk `top' doesn't bother using the
        trailing size field since there is no next contiguous chunk
        that would have to index off it.  After initialization, `top'
        is forced to always exist.  If it would become less than
        MINSIZE bytes long, it is replenished.

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit M (IS_MMAPPED) set in their size fields.  Because they are
        allocated one-by-one, each must contain its own trailing size
        field.  If the M bit is set, the other bits are ignored
        (because mmapped chunks are neither in an arena, nor adjacent
        to a freed chunk).  The M bit is also used for chunks which
        originally came from a dumped heap via malloc_set_state in
        hooks.c.

     3. Chunks in fastbins are treated as allocated chunks from the
        point of view of the chunk allocator.  They are consolidated
        with their neighbors only in bulk, in malloc_consolidate.
*/

/*
  ---------- Size and alignment checks and conversions ----------
*/

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((void*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))

/* The smallest size we can malloc is an aligned minimal chunk */

#define MINSIZE  \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
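/* Illustrative numbers only (assuming a 64-bit build, i.e. 8-byte
   INTERNAL_SIZE_T and 16-byte MALLOC_ALIGNMENT): chunk2mem skips the
   two leading size words, so mem == (char *) chunk + 16, and mem2chunk
   inverts that.  MIN_CHUNK_SIZE is offsetof (struct malloc_chunk,
   fd_nextsize) == 32, and MINSIZE rounds that up to the alignment,
   which leaves it at 32.  */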

/* Check if m has acceptable alignment */

#define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)

#define misaligned_chunk(p) \
  ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
   & MALLOC_ALIGN_MASK)


/*
   Check if a request is so large that it would wrap around zero when
   padded and aligned.  To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
 */

#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long) (req) >=                                       \
   (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */

#define request2size(req)                                         \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/*  Same, except also perform argument check */

#define checked_request2size(req, sz)                             \
  if (REQUEST_OUT_OF_RANGE (req)) {                               \
      __set_errno (ENOMEM);                                       \
      return 0;                                                   \
    }                                                             \
  (sz) = request2size (req);
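/* Worked example (illustrative only; 64-bit values SIZE_SZ == 8,
   MALLOC_ALIGN_MASK == 15, MINSIZE == 32):

     request2size (0)  == 32   (padded size would fall below MINSIZE)
     request2size (24) == 32   ((24 + 8 + 15) & ~15)
     request2size (25) == 48   ((25 + 8 + 15) & ~15)

   i.e. a request pays one size word of overhead and is then rounded up
   to a multiple of MALLOC_ALIGNMENT, but never below MINSIZE.  */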

/*
   --------------- Physical chunk operations ---------------
 */


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* extract inuse bit of previous chunk */
#define prev_inuse(p)       ((p)->mchunk_size & PREV_INUSE)


/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)


/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
   from a non-main arena.  This is only set immediately before handing
   the chunk to the user, if necessary.  */
#define NON_MAIN_ARENA 0x4

/* Check for chunk from main arena.  */
#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)

/* Mark a chunk as not being on the main arena.  */
#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)


/*
   Bits to mask off when extracting size

   Note: IS_MMAPPED is intentionally not masked off from size field in
   macros for which mmapped chunks should never be seen.  This should
   cause helpful core dumps to occur if it is tried by accident by
   people extending or adapting this malloc.
 */
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

/* Get size, ignoring use bits */
#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))

/* Like chunksize, but do not mask SIZE_BITS.  */
#define chunksize_nomask(p)         ((p)->mchunk_size)

/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))

/* Size of the chunk below P.  Only valid if prev_inuse (P).  */
#define prev_size(p) ((p)->mchunk_prev_size)

/* Set the size of the chunk below P.  Only valid if prev_inuse (P).  */
#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))

/* Ptr to previous physical malloc_chunk.  Only valid if prev_inuse (P).  */
#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))

/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s)  ((mchunkptr) (((char *) (p)) + (s)))

/* extract p's inuse bit */
#define inuse(p)                                                              \
  ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)

/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p)                                                          \
  ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE

#define clear_inuse(p)                                                        \
  ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)


/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s)                                             \
  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)                                         \
  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)                                       \
  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))


/* Set size at head, without disturbing its use bit */
#define set_head_size(p, s)  ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))

/* Set size/use field */
#define set_head(p, s)       ((p)->mchunk_size = (s))

/* Set size at footer (only when chunk is not in use) */
#define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
f65fd747
UD
1333
1334
e9c4fe93
FW
1335#pragma GCC poison mchunk_size
1336#pragma GCC poison mchunk_prev_size
1337
fa8d436c 1338/*
6c8dbf00 1339 -------------------- Internal data structures --------------------
fa8d436c
UD
1340
1341 All internal state is held in an instance of malloc_state defined
1342 below. There are no other static variables, except in two optional
a9177ff5 1343 cases:
6c8dbf00
OB
1344 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1345 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
22a89187 1346 for mmap.
fa8d436c
UD
1347
1348 Beware of lots of tricks that minimize the total bookkeeping space
1349 requirements. The result is a little over 1K bytes (for 4-byte
1350 pointers and size_t.)
6c8dbf00 1351 */
f65fd747
UD
1352
1353/*
6c8dbf00 1354 Bins
fa8d436c
UD
1355
1356 An array of bin headers for free chunks. Each bin is doubly
1357 linked. The bins are approximately proportionally (log) spaced.
1358 There are a lot of these bins (128). This may look excessive, but
1359 works very well in practice. Most bins hold sizes that are
1360 unusual as malloc request sizes, but are more usual for fragments
1361 and consolidated sets of chunks, which is what these bins hold, so
1362 they can be found quickly. All procedures maintain the invariant
1363 that no consolidated chunk physically borders another one, so each
1364 chunk in a list is known to be preceded and followed by either
1365 inuse chunks or the ends of memory.
1366
1367 Chunks in bins are kept in size order, with ties going to the
1368 approximately least recently used chunk. Ordering isn't needed
1369 for the small bins, which all contain the same-sized chunks, but
1370 facilitates best-fit allocation for larger chunks. These lists
1371 are just sequential. Keeping them in order almost never requires
1372 enough traversal to warrant using fancier ordered data
a9177ff5 1373 structures.
fa8d436c
UD
1374
1375 Chunks of the same size are linked with the most
1376 recently freed at the front, and allocations are taken from the
1377 back. This results in LRU (FIFO) allocation order, which tends
1378 to give each chunk an equal opportunity to be consolidated with
1379 adjacent freed chunks, resulting in larger free chunks and less
1380 fragmentation.
1381
1382 To simplify use in double-linked lists, each bin header acts
1383 as a malloc_chunk. This avoids special-casing for headers.
1384 But to conserve space and improve locality, we allocate
1385 only the fd/bk pointers of bins, and then use repositioning tricks
a9177ff5 1386 to treat these as the fields of a malloc_chunk*.
6c8dbf00 1387 */
f65fd747 1388
6c8dbf00 1389typedef struct malloc_chunk *mbinptr;
f65fd747 1390
fa8d436c 1391/* addressing -- note that bin_at(0) does not exist */
41999a1a
UD
1392#define bin_at(m, i) \
1393 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
6c8dbf00 1394 - offsetof (struct malloc_chunk, fd))
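/* Illustrative sketch (not part of the original sources): only the fd/bk
   pair of each bin lives in av->bins[], and bin_at() backs the pointer up
   by offsetof (struct malloc_chunk, fd) so that b->fd and b->bk line up
   with that stored pair.  A hypothetical empty-bin check might read:

     mbinptr b = bin_at (av, i);
     int bin_is_empty = (b->fd == b);    (fd points back at the header)

   Fields of b other than fd/bk must never be touched; they would fall
   outside the bins[] array.  */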
f65fd747 1395
fa8d436c 1396/* analog of ++bin */
6c8dbf00 1397#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
f65fd747 1398
fa8d436c
UD
1399/* Reminders about list directionality within bins */
1400#define first(b) ((b)->fd)
1401#define last(b) ((b)->bk)
f65fd747 1402
fa8d436c 1403/* Take a chunk off a bin list */
fff94fa2 1404#define unlink(AV, P, BK, FD) { \
17f487b7
DD
1405 if (__builtin_expect (chunksize(P) != prev_size (next_chunk(P)), 0)) \
1406 malloc_printerr (check_action, "corrupted size vs. prev_size", P, AV); \
6c8dbf00
OB
1407 FD = P->fd; \
1408 BK = P->bk; \
1409 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
fff94fa2 1410 malloc_printerr (check_action, "corrupted double-linked list", P, AV); \
6c8dbf00
OB
1411 else { \
1412 FD->bk = BK; \
1413 BK->fd = FD; \
e9c4fe93 1414 if (!in_smallbin_range (chunksize_nomask (P)) \
6c8dbf00 1415 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
52ffbdf2
FW
1416 if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0) \
1417 || __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0)) \
1418 malloc_printerr (check_action, \
fff94fa2
SP
1419 "corrupted double-linked list (not small)", \
1420 P, AV); \
6c8dbf00
OB
1421 if (FD->fd_nextsize == NULL) { \
1422 if (P->fd_nextsize == P) \
1423 FD->fd_nextsize = FD->bk_nextsize = FD; \
1424 else { \
1425 FD->fd_nextsize = P->fd_nextsize; \
1426 FD->bk_nextsize = P->bk_nextsize; \
1427 P->fd_nextsize->bk_nextsize = FD; \
1428 P->bk_nextsize->fd_nextsize = FD; \
1429 } \
1430 } else { \
1431 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
1432 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
1433 } \
1434 } \
1435 } \
fa8d436c 1436}
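/* Illustrative sketch (not part of the original sources): unlink is a
   statement-like macro, so callers pass scratch variables that receive
   the neighbours of P.  A typical use, for a victim chunk already known
   to sit on a normal bin list, looks roughly like:

     mchunkptr bck, fwd;
     unlink (av, victim, bck, fwd);
     (victim is now off its list; bck/fwd hold its former neighbours)

   The size and fd/bk consistency checks above report corruption via
   malloc_printerr instead of silently corrupting the lists further.  */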
f65fd747 1437
fa8d436c 1438/*
6c8dbf00 1439 Indexing
fa8d436c
UD
1440
1441 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1442 8 bytes apart. Larger bins are approximately logarithmically spaced:
f65fd747 1443
fa8d436c
UD
1444 64 bins of size 8
1445 32 bins of size 64
1446 16 bins of size 512
1447 8 bins of size 4096
1448 4 bins of size 32768
1449 2 bins of size 262144
1450 1 bin of size what's left
f65fd747 1451
fa8d436c
UD
1452 There is actually a little bit of slop in the numbers in bin_index
1453 for the sake of speed. This makes no difference elsewhere.
f65fd747 1454
fa8d436c
UD
1455 The bins top out around 1MB because we expect to service large
1456 requests via mmap.
b5a2bbe6
L
1457
1458 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1459 a valid chunk size the small bins are bumped up one.
6c8dbf00 1460 */
f65fd747 1461
fa8d436c
UD
1462#define NBINS 128
1463#define NSMALLBINS 64
1d47e92f 1464#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
b5a2bbe6
L
1465#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
1466#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
f65fd747 1467
fa8d436c 1468#define in_smallbin_range(sz) \
6c8dbf00 1469 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
f65fd747 1470
1d47e92f 1471#define smallbin_index(sz) \
6c8dbf00 1472 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
b5a2bbe6 1473 + SMALLBIN_CORRECTION)
f65fd747 1474
1d47e92f 1475#define largebin_index_32(sz) \
6c8dbf00
OB
1476 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1477 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1478 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1479 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1480 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1481 126)
f65fd747 1482
b5a2bbe6 1483#define largebin_index_32_big(sz) \
6c8dbf00
OB
1484 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1485 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1486 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1487 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1488 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1489 126)
b5a2bbe6 1490
1d47e92f
UD
1491// XXX It remains to be seen whether it is good to keep the widths of
1492// XXX the buckets the same or whether it should be scaled by a factor
1493// XXX of two as well.
1494#define largebin_index_64(sz) \
6c8dbf00
OB
1495 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1496 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1497 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1498 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1499 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1500 126)
1d47e92f
UD
1501
1502#define largebin_index(sz) \
b5a2bbe6
L
1503 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1504 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1505 : largebin_index_32 (sz))
1d47e92f 1506
fa8d436c 1507#define bin_index(sz) \
6c8dbf00 1508 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
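/* Illustrative sketch (not part of the original sources), assuming a
   64-bit build (SIZE_SZ == 8, SMALLBIN_WIDTH == 16, MIN_LARGE_SIZE == 1024):

     bin_index (48)   == smallbin_index (48)     == 48 >> 4          == 3
     bin_index (512)  == smallbin_index (512)    == 512 >> 4         == 32
     bin_index (2000) == largebin_index_64 (2000) == 48 + (2000 >> 6) == 79

   i.e. chunks below MIN_LARGE_SIZE map onto exactly spaced small bins,
   larger ones onto the logarithmically spaced large bins.  */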
f65fd747 1509
f65fd747
UD
1510
1511/*
6c8dbf00 1512 Unsorted chunks
fa8d436c
UD
1513
1514 All remainders from chunk splits, as well as all returned chunks,
1515 are first placed in the "unsorted" bin. They are then placed
1516 in regular bins after malloc gives them ONE chance to be used before
1517 binning. So, basically, the unsorted_chunks list acts as a queue,
1518 with chunks being placed on it in free (and malloc_consolidate),
1519 and taken off (to be either used or placed in bins) in malloc.
1520
1521 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1522 does not have to be taken into account in size comparisons.
6c8dbf00 1523 */
f65fd747 1524
fa8d436c 1525/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
6c8dbf00 1526#define unsorted_chunks(M) (bin_at (M, 1))
f65fd747 1527
fa8d436c 1528/*
6c8dbf00 1529 Top
fa8d436c
UD
1530
1531 The top-most available chunk (i.e., the one bordering the end of
1532 available memory) is treated specially. It is never included in
1533 any bin, is used only if no other chunk is available, and is
1534 released back to the system if it is very large (see
1535 M_TRIM_THRESHOLD). Because top initially
1536 points to its own bin with initial zero size, thus forcing
1537 extension on the first malloc request, we avoid having any special
1538 code in malloc to check whether it even exists yet. But we still
1539 need to do so when getting memory from system, so we make
1540 initial_top treat the bin as a legal but unusable chunk during the
1541 interval between initialization and the first call to
3b49edc0 1542 sysmalloc. (This is somewhat delicate, since it relies on
fa8d436c 1543 the 2 preceding words to be zero during this interval as well.)
6c8dbf00 1544 */
f65fd747 1545
fa8d436c 1546/* Conveniently, the unsorted bin can be used as dummy top on first call */
6c8dbf00 1547#define initial_top(M) (unsorted_chunks (M))
f65fd747 1548
fa8d436c 1549/*
6c8dbf00 1550 Binmap
f65fd747 1551
fa8d436c
UD
1552 To help compensate for the large number of bins, a one-level index
1553 structure is used for bin-by-bin searching. `binmap' is a
1554 bitvector recording whether bins are definitely empty so they can
1555 be skipped over during traversals. The bits are NOT always
1556 cleared as soon as bins are empty, but instead only
1557 when they are noticed to be empty during traversal in malloc.
6c8dbf00 1558 */
f65fd747 1559
fa8d436c
UD
1560/* Conservatively use 32 bits per map word, even if on 64bit system */
1561#define BINMAPSHIFT 5
1562#define BITSPERMAP (1U << BINMAPSHIFT)
1563#define BINMAPSIZE (NBINS / BITSPERMAP)
f65fd747 1564
fa8d436c 1565#define idx2block(i) ((i) >> BINMAPSHIFT)
6c8dbf00 1566#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
f65fd747 1567
6c8dbf00
OB
1568#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1569#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1570#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
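/* Illustrative sketch (not part of the original sources): bin 70 lives in
   map word idx2block (70) == 70 >> 5 == 2, under the bit
   idx2bit (70) == 1 << (70 & 31) == 1 << 6 == 0x40.  mark_bin (m, 70)
   therefore ORs 0x40 into m->binmap[2], and get_binmap (m, 70) tests it.  */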
f65fd747 1571
fa8d436c 1572/*
6c8dbf00 1573 Fastbins
fa8d436c
UD
1574
1575 An array of lists holding recently freed small chunks. Fastbins
1576 are not doubly linked. It is faster to single-link them, and
1577 since chunks are never removed from the middles of these lists,
1578 double linking is not necessary. Also, unlike regular bins, they
1579 are not even processed in FIFO order (they use faster LIFO) since
1580 ordering doesn't much matter in the transient contexts in which
1581 fastbins are normally used.
1582
1583 Chunks in fastbins keep their inuse bit set, so they cannot
1584 be consolidated with other free chunks. malloc_consolidate
1585 releases all chunks in fastbins and consolidates them with
a9177ff5 1586 other free chunks.
6c8dbf00 1587 */
f65fd747 1588
6c8dbf00 1589typedef struct malloc_chunk *mfastbinptr;
425ce2ed 1590#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
f65fd747 1591
fa8d436c 1592/* offset 2 to use otherwise unindexable first 2 bins */
425ce2ed 1593#define fastbin_index(sz) \
6c8dbf00 1594 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
425ce2ed 1595
f65fd747 1596
fa8d436c 1597/* The maximum fastbin request size we support */
425ce2ed 1598#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
f65fd747 1599
6c8dbf00 1600#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
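/* Illustrative sketch (not part of the original sources), for a 64-bit
   build (SIZE_SZ == 8): MAX_FAST_SIZE works out to 160, a 32-byte chunk is
   served from fastbin_index (32) == (32 >> 4) - 2 == 0 (the first
   fastbin), and a 0x80-byte chunk maps to (0x80 >> 4) - 2 == 6.  */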
f65fd747
UD
1601
1602/*
6c8dbf00
OB
1603 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1604 that triggers automatic consolidation of possibly-surrounding
1605 fastbin chunks. This is a heuristic, so the exact value should not
1606 matter too much. It is defined at half the default trim threshold as a
1607 compromise heuristic to only attempt consolidation if it is likely
1608 to lead to trimming. However, it is not dynamically tunable, since
1609 consolidation reduces fragmentation surrounding large chunks even
1610 if trimming is not used.
1611 */
f65fd747 1612
fa8d436c 1613#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
f65fd747
UD
1614
1615/*
6c8dbf00
OB
1616 Since the lowest 2 bits in max_fast don't matter in size comparisons,
1617 they are used as flags.
1618 */
f65fd747 1619
fa8d436c 1620/*
6c8dbf00
OB
1621 FASTCHUNKS_BIT held in max_fast indicates that there are probably
1622 some fastbin chunks. It is set true on entering a chunk into any
1623 fastbin, and cleared only in malloc_consolidate.
f65fd747 1624
6c8dbf00
OB
1625 The truth value is inverted so that have_fastchunks will be true
1626 upon startup (since statics are zero-filled), simplifying
1627 initialization checks.
1628 */
f65fd747 1629
fa8d436c 1630#define FASTCHUNKS_BIT (1U)
f65fd747 1631
6c8dbf00 1632#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
425ce2ed
UD
1633#define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
1634#define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
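/* Illustrative sketch (not part of the original sources): because the
   flag is inverted, a freshly zero-filled arena (flags == 0) already
   reports have_fastchunks (M) as true, which, per the note above, lets the
   first allocation fall through to malloc_consolidate and hence to arena
   initialization.  Afterwards clear_fastchunks (in malloc_consolidate)
   sets the bit, and set_fastchunks (called when free places a chunk into a
   fastbin) clears it again.  */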
f65fd747
UD
1635
1636/*
6c8dbf00
OB
1637 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1638 regions. Otherwise, contiguity is exploited in merging together,
1639 when possible, results from consecutive MORECORE calls.
f65fd747 1640
6c8dbf00
OB
1641 The initial value comes from MORECORE_CONTIGUOUS, but is
1642 changed dynamically if mmap is ever used as an sbrk substitute.
1643 */
f65fd747 1644
fa8d436c 1645#define NONCONTIGUOUS_BIT (2U)
f65fd747 1646
6c8dbf00
OB
1647#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1648#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1649#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
9bf248c6 1650#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
f65fd747 1651
fff94fa2
SP
1652/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
1653 arena. Such an arena is no longer used to allocate chunks. Chunks
1654 allocated in that arena before detecting corruption are not freed. */
1655
1656#define ARENA_CORRUPTION_BIT (4U)
1657
1658#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))
1659#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)
1660
a9177ff5
RM
1661/*
1662 Set value of max_fast.
fa8d436c
UD
1663 Use impossibly small value if 0.
1664 Precondition: there are no existing fastbin chunks.
1665 Setting the value only updates global_max_fast; the fastchunk and
1666 noncontiguous bits now live in av->flags and are not touched here.
6c8dbf00 1666 */
f65fd747 1667
9bf248c6 1668#define set_max_fast(s) \
991eda1e 1669 global_max_fast = (((s) == 0) \
6c8dbf00 1670 ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
9bf248c6 1671#define get_max_fast() global_max_fast
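/* Illustrative sketch (not part of the original sources), for a 64-bit
   build (SIZE_SZ == 8, MALLOC_ALIGN_MASK == 15): set_max_fast (64) stores
   (64 + 8) & ~15 == 64 in global_max_fast, whereas set_max_fast (0) stores
   SMALLBIN_WIDTH (16), which is below any real chunk size and so
   effectively disables the fastbins.  */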
f65fd747 1672
f65fd747
UD
1673
1674/*
fa8d436c 1675 ----------- Internal state representation and initialization -----------
6c8dbf00 1676 */
f65fd747 1677
6c8dbf00
OB
1678struct malloc_state
1679{
fa8d436c 1680 /* Serialize access. */
cbb47fa1 1681 __libc_lock_define (, mutex);
9bf248c6
UD
1682
1683 /* Flags (formerly in max_fast). */
1684 int flags;
f65fd747 1685
fa8d436c 1686 /* Fastbins */
6c8dbf00 1687 mfastbinptr fastbinsY[NFASTBINS];
f65fd747 1688
fa8d436c 1689 /* Base of the topmost chunk -- not otherwise kept in a bin */
6c8dbf00 1690 mchunkptr top;
f65fd747 1691
fa8d436c 1692 /* The remainder from the most recent split of a small request */
6c8dbf00 1693 mchunkptr last_remainder;
f65fd747 1694
fa8d436c 1695 /* Normal bins packed as described above */
6c8dbf00 1696 mchunkptr bins[NBINS * 2 - 2];
f65fd747 1697
fa8d436c 1698 /* Bitmap of bins */
6c8dbf00 1699 unsigned int binmap[BINMAPSIZE];
f65fd747 1700
fa8d436c
UD
1701 /* Linked list */
1702 struct malloc_state *next;
f65fd747 1703
a62719ba 1704 /* Linked list for free arenas. Access to this field is serialized
90c400bd 1705 by free_list_lock in arena.c. */
425ce2ed 1706 struct malloc_state *next_free;
425ce2ed 1707
a62719ba 1708 /* Number of threads attached to this arena. 0 if the arena is on
90c400bd
FW
1709 the free list. Access to this field is serialized by
1710 free_list_lock in arena.c. */
a62719ba
FW
1711 INTERNAL_SIZE_T attached_threads;
1712
fa8d436c
UD
1713 /* Memory allocated from the system in this arena. */
1714 INTERNAL_SIZE_T system_mem;
1715 INTERNAL_SIZE_T max_system_mem;
1716};
f65fd747 1717
6c8dbf00
OB
1718struct malloc_par
1719{
fa8d436c 1720 /* Tunable parameters */
6c8dbf00
OB
1721 unsigned long trim_threshold;
1722 INTERNAL_SIZE_T top_pad;
1723 INTERNAL_SIZE_T mmap_threshold;
1724 INTERNAL_SIZE_T arena_test;
1725 INTERNAL_SIZE_T arena_max;
fa8d436c
UD
1726
1727 /* Memory map support */
6c8dbf00
OB
1728 int n_mmaps;
1729 int n_mmaps_max;
1730 int max_n_mmaps;
1d05c2fb
UD
1731 /* the mmap_threshold is dynamic, until the user sets
1732 it manually, at which point we need to disable any
1733 dynamic behavior. */
6c8dbf00 1734 int no_dyn_threshold;
fa8d436c 1735
fa8d436c 1736 /* Statistics */
6c8dbf00 1737 INTERNAL_SIZE_T mmapped_mem;
6c8dbf00 1738 INTERNAL_SIZE_T max_mmapped_mem;
fa8d436c
UD
1739
1740 /* First address handed out by MORECORE/sbrk. */
6c8dbf00 1741 char *sbrk_base;
d5c3fafc
DD
1742
1743#if USE_TCACHE
1744 /* Maximum number of buckets to use. */
1745 size_t tcache_bins;
1746 size_t tcache_max_bytes;
1747 /* Maximum number of chunks in each bucket. */
1748 size_t tcache_count;
1749 /* Maximum number of chunks to remove from the unsorted list, which
1750 aren't used to prefill the cache. */
1751 size_t tcache_unsorted_limit;
1752#endif
fa8d436c 1753};
f65fd747 1754
fa8d436c
UD
1755/* There are several instances of this struct ("arenas") in this
1756 malloc. If you are adapting this malloc in a way that does NOT use
1757 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1758 before using. This malloc relies on the property that malloc_state
1759 is initialized to all zeroes (as is true of C statics). */
f65fd747 1760
02d46fc4 1761static struct malloc_state main_arena =
6c8dbf00 1762{
400e1226 1763 .mutex = _LIBC_LOCK_INITIALIZER,
a62719ba
FW
1764 .next = &main_arena,
1765 .attached_threads = 1
6c8dbf00 1766};
f65fd747 1767
4cf6c72f
FW
1768/* These variables are used for undumping support. Chunks are marked
1769 as using mmap, but we leave them alone if they fall into this
1e8a8875
FW
1770 range. NB: The chunk size for these chunks only includes the
1771 initial size field (of SIZE_SZ bytes), there is no trailing size
1772 field (unlike with regular mmapped chunks). */
4cf6c72f
FW
1773static mchunkptr dumped_main_arena_start; /* Inclusive. */
1774static mchunkptr dumped_main_arena_end; /* Exclusive. */
1775
1776/* True if the pointer falls into the dumped arena. Use this after
1777 chunk_is_mmapped indicates a chunk is mmapped. */
1778#define DUMPED_MAIN_ARENA_CHUNK(p) \
1779 ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
1780
fa8d436c 1781/* There is only one instance of the malloc parameters. */
f65fd747 1782
02d46fc4 1783static struct malloc_par mp_ =
6c8dbf00
OB
1784{
1785 .top_pad = DEFAULT_TOP_PAD,
1786 .n_mmaps_max = DEFAULT_MMAP_MAX,
1787 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1788 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1789#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1790 .arena_test = NARENAS_FROM_NCORES (1)
d5c3fafc
DD
1791#if USE_TCACHE
1792 ,
1793 .tcache_count = TCACHE_FILL_COUNT,
1794 .tcache_bins = TCACHE_MAX_BINS,
1795 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1796 .tcache_unsorted_limit = 0 /* No limit. */
1797#endif
6c8dbf00 1798};
f65fd747 1799
9bf248c6
UD
1800/* Maximum size of memory handled in fastbins. */
1801static INTERNAL_SIZE_T global_max_fast;
1802
fa8d436c 1803/*
6c8dbf00 1804 Initialize a malloc_state struct.
f65fd747 1805
6c8dbf00
OB
1806 This is called only from within malloc_consolidate, which needs to
1807 be called in the same contexts anyway. It is never called directly
1808 outside of malloc_consolidate because some optimizing compilers try
1809 to inline it at all call points, which turns out not to be an
1810 optimization at all. (Inlining it in malloc_consolidate is fine though.)
1811 */
f65fd747 1812
6c8dbf00
OB
1813static void
1814malloc_init_state (mstate av)
fa8d436c 1815{
6c8dbf00 1816 int i;
fa8d436c 1817 mbinptr bin;
a9177ff5 1818
fa8d436c 1819 /* Establish circular links for normal bins */
6c8dbf00
OB
1820 for (i = 1; i < NBINS; ++i)
1821 {
1822 bin = bin_at (av, i);
1823 bin->fd = bin->bk = bin;
1824 }
f65fd747 1825
fa8d436c
UD
1826#if MORECORE_CONTIGUOUS
1827 if (av != &main_arena)
1828#endif
6c8dbf00 1829 set_noncontiguous (av);
9bf248c6 1830 if (av == &main_arena)
6c8dbf00 1831 set_max_fast (DEFAULT_MXFAST);
9bf248c6 1832 av->flags |= FASTCHUNKS_BIT;
f65fd747 1833
6c8dbf00 1834 av->top = initial_top (av);
fa8d436c 1835}
e9b3e3c5 1836
a9177ff5 1837/*
fa8d436c 1838 Other internal utilities operating on mstates
6c8dbf00 1839 */
f65fd747 1840
6c8dbf00
OB
1841static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1842static int systrim (size_t, mstate);
1843static void malloc_consolidate (mstate);
7e3be507 1844
404d4cef
RM
1845
1846/* -------------- Early definitions for debugging hooks ---------------- */
1847
1848/* Define and initialize the hook variables. These weak definitions must
1849 appear before any use of the variables in a function (arena.c uses one). */
1850#ifndef weak_variable
404d4cef
RM
1851/* In GNU libc we want the hook variables to be weak definitions to
1852 avoid a problem with Emacs. */
22a89187 1853# define weak_variable weak_function
404d4cef
RM
1854#endif
1855
1856/* Forward declarations. */
6c8dbf00
OB
1857static void *malloc_hook_ini (size_t sz,
1858 const void *caller) __THROW;
1859static void *realloc_hook_ini (void *ptr, size_t sz,
1860 const void *caller) __THROW;
1861static void *memalign_hook_ini (size_t alignment, size_t sz,
1862 const void *caller) __THROW;
404d4cef 1863
2ba3cfa1 1864#if HAVE_MALLOC_INIT_HOOK
92e1ab0e
FW
1865void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1866compat_symbol (libc, __malloc_initialize_hook,
1867 __malloc_initialize_hook, GLIBC_2_0);
2ba3cfa1
FW
1868#endif
1869
a222d91a 1870void weak_variable (*__free_hook) (void *__ptr,
6c8dbf00 1871 const void *) = NULL;
a222d91a 1872void *weak_variable (*__malloc_hook)
6c8dbf00 1873 (size_t __size, const void *) = malloc_hook_ini;
a222d91a 1874void *weak_variable (*__realloc_hook)
6c8dbf00
OB
1875 (void *__ptr, size_t __size, const void *)
1876 = realloc_hook_ini;
a222d91a 1877void *weak_variable (*__memalign_hook)
6c8dbf00
OB
1878 (size_t __alignment, size_t __size, const void *)
1879 = memalign_hook_ini;
06d6611a 1880void weak_variable (*__after_morecore_hook) (void) = NULL;
404d4cef
RM
1881
1882
3e030bd5
UD
1883/* ---------------- Error behavior ------------------------------------ */
1884
1885#ifndef DEFAULT_CHECK_ACTION
6c8dbf00 1886# define DEFAULT_CHECK_ACTION 3
3e030bd5
UD
1887#endif
1888
1889static int check_action = DEFAULT_CHECK_ACTION;
1890
1891
854278df
UD
1892/* ------------------ Testing support ----------------------------------*/
1893
1894static int perturb_byte;
1895
af102d95 1896static void
e8349efd
OB
1897alloc_perturb (char *p, size_t n)
1898{
1899 if (__glibc_unlikely (perturb_byte))
1900 memset (p, perturb_byte ^ 0xff, n);
1901}
1902
af102d95 1903static void
e8349efd
OB
1904free_perturb (char *p, size_t n)
1905{
1906 if (__glibc_unlikely (perturb_byte))
1907 memset (p, perturb_byte, n);
1908}
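/* Illustrative sketch (not part of the original sources): perturb_byte is
   normally zero; it can be set via mallopt (M_PERTURB, 0xaa) or the
   MALLOC_PERTURB_ environment variable (both defined outside this
   excerpt), after which freshly allocated user memory is filled with
   0xaa ^ 0xff == 0x55 and freed memory with 0xaa, making reads of
   uninitialized or freed blocks easier to spot.  */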
1909
854278df
UD
1910
1911
3ea5be54
AO
1912#include <stap-probe.h>
1913
fa8d436c
UD
1914/* ------------------- Support for multiple arenas -------------------- */
1915#include "arena.c"
f65fd747 1916
fa8d436c 1917/*
6c8dbf00 1918 Debugging support
f65fd747 1919
6c8dbf00
OB
1920 These routines make a number of assertions about the states
1921 of data structures that should be true at all times. If any
1922 are not true, it's very likely that a user program has somehow
1923 trashed memory. (It's also possible that there is a coding error
1924 in malloc. In which case, please report it!)
1925 */
ee74a442 1926
6c8dbf00 1927#if !MALLOC_DEBUG
d8f00d46 1928
6c8dbf00
OB
1929# define check_chunk(A, P)
1930# define check_free_chunk(A, P)
1931# define check_inuse_chunk(A, P)
1932# define check_remalloced_chunk(A, P, N)
1933# define check_malloced_chunk(A, P, N)
1934# define check_malloc_state(A)
d8f00d46 1935
fa8d436c 1936#else
ca34d7a7 1937
6c8dbf00
OB
1938# define check_chunk(A, P) do_check_chunk (A, P)
1939# define check_free_chunk(A, P) do_check_free_chunk (A, P)
1940# define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
1941# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
1942# define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
1943# define check_malloc_state(A) do_check_malloc_state (A)
ca34d7a7 1944
fa8d436c 1945/*
6c8dbf00
OB
1946 Properties of all chunks
1947 */
ca34d7a7 1948
6c8dbf00
OB
1949static void
1950do_check_chunk (mstate av, mchunkptr p)
ca34d7a7 1951{
6c8dbf00 1952 unsigned long sz = chunksize (p);
fa8d436c 1953 /* min and max possible addresses assuming contiguous allocation */
6c8dbf00
OB
1954 char *max_address = (char *) (av->top) + chunksize (av->top);
1955 char *min_address = max_address - av->system_mem;
fa8d436c 1956
6c8dbf00
OB
1957 if (!chunk_is_mmapped (p))
1958 {
1959 /* Has legal address ... */
1960 if (p != av->top)
1961 {
1962 if (contiguous (av))
1963 {
1964 assert (((char *) p) >= min_address);
1965 assert (((char *) p + sz) <= ((char *) (av->top)));
1966 }
1967 }
1968 else
1969 {
1970 /* top size is always at least MINSIZE */
1971 assert ((unsigned long) (sz) >= MINSIZE);
1972 /* top predecessor always marked inuse */
1973 assert (prev_inuse (p));
1974 }
fa8d436c 1975 }
4cf6c72f 1976 else if (!DUMPED_MAIN_ARENA_CHUNK (p))
6c8dbf00
OB
1977 {
1978 /* address is outside main heap */
1979 if (contiguous (av) && av->top != initial_top (av))
1980 {
1981 assert (((char *) p) < min_address || ((char *) p) >= max_address);
1982 }
1983 /* chunk is page-aligned */
e9c4fe93 1984 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
6c8dbf00
OB
1985 /* mem is aligned */
1986 assert (aligned_OK (chunk2mem (p)));
fa8d436c 1987 }
eb406346
UD
1988}
1989
fa8d436c 1990/*
6c8dbf00
OB
1991 Properties of free chunks
1992 */
ee74a442 1993
6c8dbf00
OB
1994static void
1995do_check_free_chunk (mstate av, mchunkptr p)
67c94753 1996{
6c8dbf00
OB
1997 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
1998 mchunkptr next = chunk_at_offset (p, sz);
67c94753 1999
6c8dbf00 2000 do_check_chunk (av, p);
67c94753 2001
fa8d436c 2002 /* Chunk must claim to be free ... */
6c8dbf00
OB
2003 assert (!inuse (p));
2004 assert (!chunk_is_mmapped (p));
67c94753 2005
fa8d436c 2006 /* Unless a special marker, must have OK fields */
6c8dbf00
OB
2007 if ((unsigned long) (sz) >= MINSIZE)
2008 {
2009 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2010 assert (aligned_OK (chunk2mem (p)));
2011 /* ... matching footer field */
e9c4fe93 2012 assert (prev_size (p) == sz);
6c8dbf00
OB
2013 /* ... and is fully consolidated */
2014 assert (prev_inuse (p));
2015 assert (next == av->top || inuse (next));
2016
2017 /* ... and has minimally sane links */
2018 assert (p->fd->bk == p);
2019 assert (p->bk->fd == p);
2020 }
fa8d436c 2021 else /* markers are always of size SIZE_SZ */
6c8dbf00 2022 assert (sz == SIZE_SZ);
67c94753 2023}
67c94753 2024
fa8d436c 2025/*
6c8dbf00
OB
2026 Properties of inuse chunks
2027 */
fa8d436c 2028
6c8dbf00
OB
2029static void
2030do_check_inuse_chunk (mstate av, mchunkptr p)
f65fd747 2031{
fa8d436c 2032 mchunkptr next;
f65fd747 2033
6c8dbf00 2034 do_check_chunk (av, p);
f65fd747 2035
6c8dbf00 2036 if (chunk_is_mmapped (p))
fa8d436c 2037 return; /* mmapped chunks have no next/prev */
ca34d7a7 2038
fa8d436c 2039 /* Check whether it claims to be in use ... */
6c8dbf00 2040 assert (inuse (p));
10dc2a90 2041
6c8dbf00 2042 next = next_chunk (p);
10dc2a90 2043
fa8d436c 2044 /* ... and is surrounded by OK chunks.
6c8dbf00
OB
2045 Since more things can be checked with free chunks than inuse ones,
2046 if an inuse chunk borders them and debug is on, it's worth doing them.
2047 */
2048 if (!prev_inuse (p))
2049 {
2050 /* Note that we cannot even look at prev unless it is not inuse */
2051 mchunkptr prv = prev_chunk (p);
2052 assert (next_chunk (prv) == p);
2053 do_check_free_chunk (av, prv);
2054 }
fa8d436c 2055
6c8dbf00
OB
2056 if (next == av->top)
2057 {
2058 assert (prev_inuse (next));
2059 assert (chunksize (next) >= MINSIZE);
2060 }
2061 else if (!inuse (next))
2062 do_check_free_chunk (av, next);
10dc2a90
UD
2063}
2064
fa8d436c 2065/*
6c8dbf00
OB
2066 Properties of chunks recycled from fastbins
2067 */
fa8d436c 2068
6c8dbf00
OB
2069static void
2070do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2071{
6c8dbf00 2072 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
fa8d436c 2073
6c8dbf00
OB
2074 if (!chunk_is_mmapped (p))
2075 {
2076 assert (av == arena_for_chunk (p));
e9c4fe93 2077 if (chunk_main_arena (p))
6c8dbf00 2078 assert (av == &main_arena);
e9c4fe93
FW
2079 else
2080 assert (av != &main_arena);
6c8dbf00 2081 }
fa8d436c 2082
6c8dbf00 2083 do_check_inuse_chunk (av, p);
fa8d436c
UD
2084
2085 /* Legal size ... */
6c8dbf00
OB
2086 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2087 assert ((unsigned long) (sz) >= MINSIZE);
fa8d436c 2088 /* ... and alignment */
6c8dbf00 2089 assert (aligned_OK (chunk2mem (p)));
fa8d436c 2090 /* chunk is less than MINSIZE more than request */
6c8dbf00
OB
2091 assert ((long) (sz) - (long) (s) >= 0);
2092 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
10dc2a90
UD
2093}
2094
fa8d436c 2095/*
6c8dbf00
OB
2096 Properties of nonrecycled chunks at the point they are malloced
2097 */
fa8d436c 2098
6c8dbf00
OB
2099static void
2100do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
10dc2a90 2101{
fa8d436c 2102 /* same as recycled case ... */
6c8dbf00 2103 do_check_remalloced_chunk (av, p, s);
10dc2a90 2104
fa8d436c 2105 /*
6c8dbf00
OB
2106 ... plus, must obey implementation invariant that prev_inuse is
2107 always true of any allocated chunk; i.e., that each allocated
2108 chunk borders either a previously allocated and still in-use
2109 chunk, or the base of its memory arena. This is ensured
2110 by making all allocations from the `lowest' part of any found
2111 chunk. This does not necessarily hold however for chunks
2112 recycled via fastbins.
2113 */
2114
2115 assert (prev_inuse (p));
fa8d436c 2116}
10dc2a90 2117
f65fd747 2118
fa8d436c 2119/*
6c8dbf00 2120 Properties of malloc_state.
f65fd747 2121
6c8dbf00
OB
2122 This may be useful for debugging malloc, as well as detecting user
2123 programmer errors that somehow write into malloc_state.
f65fd747 2124
6c8dbf00
OB
2125 If you are extending or experimenting with this malloc, you can
2126 probably figure out how to hack this routine to print out or
2127 display chunk addresses, sizes, bins, and other instrumentation.
2128 */
f65fd747 2129
6c8dbf00
OB
2130static void
2131do_check_malloc_state (mstate av)
fa8d436c
UD
2132{
2133 int i;
2134 mchunkptr p;
2135 mchunkptr q;
2136 mbinptr b;
fa8d436c
UD
2137 unsigned int idx;
2138 INTERNAL_SIZE_T size;
2139 unsigned long total = 0;
2140 int max_fast_bin;
f65fd747 2141
fa8d436c 2142 /* internal size_t must be no wider than pointer type */
6c8dbf00 2143 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
f65fd747 2144
fa8d436c 2145 /* alignment is a power of 2 */
6c8dbf00 2146 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
f65fd747 2147
fa8d436c 2148 /* cannot run remaining checks until fully initialized */
6c8dbf00 2149 if (av->top == 0 || av->top == initial_top (av))
fa8d436c 2150 return;
f65fd747 2151
fa8d436c 2152 /* pagesize is a power of 2 */
8a35c3fe 2153 assert (powerof2(GLRO (dl_pagesize)));
f65fd747 2154
fa8d436c 2155 /* A contiguous main_arena is consistent with sbrk_base. */
6c8dbf00
OB
2156 if (av == &main_arena && contiguous (av))
2157 assert ((char *) mp_.sbrk_base + av->system_mem ==
2158 (char *) av->top + chunksize (av->top));
fa8d436c
UD
2159
2160 /* properties of fastbins */
2161
2162 /* max_fast is in allowed range */
6c8dbf00
OB
2163 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2164
2165 max_fast_bin = fastbin_index (get_max_fast ());
2166
2167 for (i = 0; i < NFASTBINS; ++i)
2168 {
2169 p = fastbin (av, i);
2170
2171 /* The following test can only be performed for the main arena.
2172 While mallopt calls malloc_consolidate to get rid of all fast
2173 bins (especially those larger than the new maximum) this does
2174 only happen for the main arena. Trying to do this for any
2175 other arena would mean those arenas have to be locked and
2176 malloc_consolidate be called for them. This is excessive. And
2177 even if this is acceptable to somebody it still cannot solve
2178 the problem completely since if the arena is locked a
2179 concurrent malloc call might create a new arena which then
2180 could use the newly invalid fast bins. */
2181
2182 /* all bins past max_fast are empty */
2183 if (av == &main_arena && i > max_fast_bin)
2184 assert (p == 0);
2185
2186 while (p != 0)
2187 {
2188 /* each chunk claims to be inuse */
2189 do_check_inuse_chunk (av, p);
2190 total += chunksize (p);
2191 /* chunk belongs in this bin */
2192 assert (fastbin_index (chunksize (p)) == i);
2193 p = p->fd;
2194 }
fa8d436c 2195 }
fa8d436c
UD
2196
2197 if (total != 0)
6c8dbf00
OB
2198 assert (have_fastchunks (av));
2199 else if (!have_fastchunks (av))
2200 assert (total == 0);
fa8d436c
UD
2201
2202 /* check normal bins */
6c8dbf00
OB
2203 for (i = 1; i < NBINS; ++i)
2204 {
2205 b = bin_at (av, i);
2206
2207 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2208 if (i >= 2)
2209 {
2210 unsigned int binbit = get_binmap (av, i);
2211 int empty = last (b) == b;
2212 if (!binbit)
2213 assert (empty);
2214 else if (!empty)
2215 assert (binbit);
2216 }
2217
2218 for (p = last (b); p != b; p = p->bk)
2219 {
2220 /* each chunk claims to be free */
2221 do_check_free_chunk (av, p);
2222 size = chunksize (p);
2223 total += size;
2224 if (i >= 2)
2225 {
2226 /* chunk belongs in bin */
2227 idx = bin_index (size);
2228 assert (idx == i);
2229 /* lists are sorted */
2230 assert (p->bk == b ||
2231 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2232
2233 if (!in_smallbin_range (size))
2234 {
2235 if (p->fd_nextsize != NULL)
2236 {
2237 if (p->fd_nextsize == p)
2238 assert (p->bk_nextsize == p);
2239 else
2240 {
2241 if (p->fd_nextsize == first (b))
2242 assert (chunksize (p) < chunksize (p->fd_nextsize));
2243 else
2244 assert (chunksize (p) > chunksize (p->fd_nextsize));
2245
2246 if (p == first (b))
2247 assert (chunksize (p) > chunksize (p->bk_nextsize));
2248 else
2249 assert (chunksize (p) < chunksize (p->bk_nextsize));
2250 }
2251 }
2252 else
2253 assert (p->bk_nextsize == NULL);
2254 }
2255 }
2256 else if (!in_smallbin_range (size))
2257 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2258 /* chunk is followed by a legal chain of inuse chunks */
2259 for (q = next_chunk (p);
2260 (q != av->top && inuse (q) &&
2261 (unsigned long) (chunksize (q)) >= MINSIZE);
2262 q = next_chunk (q))
2263 do_check_inuse_chunk (av, q);
2264 }
fa8d436c 2265 }
f65fd747 2266
fa8d436c 2267 /* top chunk is OK */
6c8dbf00 2268 check_chunk (av, av->top);
fa8d436c
UD
2269}
2270#endif
2271
2272
2273/* ----------------- Support for debugging hooks -------------------- */
2274#include "hooks.c"
2275
2276
2277/* ----------- Routines dealing with system allocation -------------- */
2278
2279/*
6c8dbf00
OB
2280 sysmalloc handles malloc cases requiring more memory from the system.
2281 On entry, it is assumed that av->top does not have enough
2282 space to service request for nb bytes, thus requiring that av->top
2283 be extended or replaced.
2284 */
fa8d436c 2285
6c8dbf00
OB
2286static void *
2287sysmalloc (INTERNAL_SIZE_T nb, mstate av)
f65fd747 2288{
6c8dbf00 2289 mchunkptr old_top; /* incoming value of av->top */
fa8d436c 2290 INTERNAL_SIZE_T old_size; /* its size */
6c8dbf00 2291 char *old_end; /* its end address */
f65fd747 2292
6c8dbf00
OB
2293 long size; /* arg to first MORECORE or mmap call */
2294 char *brk; /* return value from MORECORE */
f65fd747 2295
6c8dbf00
OB
2296 long correction; /* arg to 2nd MORECORE call */
2297 char *snd_brk; /* 2nd return val */
f65fd747 2298
fa8d436c
UD
2299 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2300 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
6c8dbf00 2301 char *aligned_brk; /* aligned offset into brk */
f65fd747 2302
6c8dbf00
OB
2303 mchunkptr p; /* the allocated/returned chunk */
2304 mchunkptr remainder; /* remainder from allocation */
2305 unsigned long remainder_size; /* its size */
fa8d436c 2306
fa8d436c 2307
8a35c3fe 2308 size_t pagesize = GLRO (dl_pagesize);
6c8dbf00 2309 bool tried_mmap = false;
fa8d436c
UD
2310
2311
fa8d436c 2312 /*
6c8dbf00
OB
2313 If have mmap, and the request size meets the mmap threshold, and
2314 the system supports mmap, and there are few enough currently
2315 allocated mmapped regions, try to directly map this request
2316 rather than expanding top.
2317 */
2318
fff94fa2
SP
2319 if (av == NULL
2320 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2321 && (mp_.n_mmaps < mp_.n_mmaps_max)))
6c8dbf00
OB
2322 {
2323 char *mm; /* return value from mmap call*/
a9177ff5 2324
6c8dbf00
OB
2325 try_mmap:
2326 /*
2327 Round up size to nearest page. For mmapped chunks, the overhead
2328 is one SIZE_SZ unit larger than for normal chunks, because there
2329 is no following chunk whose prev_size field could be used.
2330
2331 See the front_misalign handling below, for glibc there is no
2332 need for further alignments unless we have high alignment.
2333 */
2334 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
8a35c3fe 2335 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
6c8dbf00 2336 else
8a35c3fe 2337 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
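          /* Worked example (illustrative, not part of the original
             sources): with SIZE_SZ == 8, pagesize == 4096 and
             nb == 0x20010, the normal-alignment branch above yields
             size = ALIGN_UP (0x20018, 0x1000) == 0x21000, i.e. the
             request plus the single extra size word, rounded up to a
             whole number of pages.  */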
6c8dbf00
OB
2338 tried_mmap = true;
2339
2340 /* Don't try if size wraps around 0 */
2341 if ((unsigned long) (size) > (unsigned long) (nb))
2342 {
2343 mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2344
2345 if (mm != MAP_FAILED)
2346 {
2347 /*
2348 The offset to the start of the mmapped region is stored
2349 in the prev_size field of the chunk. This allows us to adjust
2350 returned start address to meet alignment requirements here
2351 and in memalign(), and still be able to compute proper
2352 address argument for later munmap in free() and realloc().
2353 */
2354
2355 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2356 {
2357 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2358 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2359 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2360 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2361 front_misalign = 0;
2362 }
2363 else
2364 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2365 if (front_misalign > 0)
2366 {
2367 correction = MALLOC_ALIGNMENT - front_misalign;
2368 p = (mchunkptr) (mm + correction);
e9c4fe93 2369 set_prev_size (p, correction);
6c8dbf00
OB
2370 set_head (p, (size - correction) | IS_MMAPPED);
2371 }
2372 else
2373 {
2374 p = (mchunkptr) mm;
681421f3 2375 set_prev_size (p, 0);
6c8dbf00
OB
2376 set_head (p, size | IS_MMAPPED);
2377 }
2378
2379 /* update statistics */
2380
2381 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2382 atomic_max (&mp_.max_n_mmaps, new);
2383
2384 unsigned long sum;
2385 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2386 atomic_max (&mp_.max_mmapped_mem, sum);
2387
2388 check_chunk (av, p);
2389
2390 return chunk2mem (p);
2391 }
2392 }
fa8d436c 2393 }
fa8d436c 2394
fff94fa2
SP
2395 /* There are no usable arenas and mmap also failed. */
2396 if (av == NULL)
2397 return 0;
2398
fa8d436c
UD
2399 /* Record incoming configuration of top */
2400
6c8dbf00
OB
2401 old_top = av->top;
2402 old_size = chunksize (old_top);
2403 old_end = (char *) (chunk_at_offset (old_top, old_size));
fa8d436c 2404
6c8dbf00 2405 brk = snd_brk = (char *) (MORECORE_FAILURE);
fa8d436c 2406
a9177ff5 2407 /*
fa8d436c
UD
2408 If not the first time through, we require old_size to be
2409 at least MINSIZE and to have prev_inuse set.
6c8dbf00 2410 */
fa8d436c 2411
6c8dbf00
OB
2412 assert ((old_top == initial_top (av) && old_size == 0) ||
2413 ((unsigned long) (old_size) >= MINSIZE &&
2414 prev_inuse (old_top) &&
8a35c3fe 2415 ((unsigned long) old_end & (pagesize - 1)) == 0));
fa8d436c
UD
2416
2417 /* Precondition: not enough current space to satisfy nb request */
6c8dbf00 2418 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
a9177ff5 2419
72f90263 2420
6c8dbf00
OB
2421 if (av != &main_arena)
2422 {
2423 heap_info *old_heap, *heap;
2424 size_t old_heap_size;
2425
2426 /* First try to extend the current heap. */
2427 old_heap = heap_for_ptr (old_top);
2428 old_heap_size = old_heap->size;
2429 if ((long) (MINSIZE + nb - old_size) > 0
2430 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2431 {
2432 av->system_mem += old_heap->size - old_heap_size;
6c8dbf00
OB
2433 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2434 | PREV_INUSE);
2435 }
2436 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2437 {
2438 /* Use a newly allocated heap. */
2439 heap->ar_ptr = av;
2440 heap->prev = old_heap;
2441 av->system_mem += heap->size;
6c8dbf00
OB
2442 /* Set up the new top. */
2443 top (av) = chunk_at_offset (heap, sizeof (*heap));
2444 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2445
2446 /* Setup fencepost and free the old top chunk with a multiple of
2447 MALLOC_ALIGNMENT in size. */
2448 /* The fencepost takes at least MINSIZE bytes, because it might
2449 become the top chunk again later. Note that a footer is set
2450 up, too, although the chunk is marked in use. */
2451 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2452 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
2453 if (old_size >= MINSIZE)
2454 {
2455 set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
2456 set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
2457 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2458 _int_free (av, old_top, 1);
2459 }
2460 else
2461 {
2462 set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
2463 set_foot (old_top, (old_size + 2 * SIZE_SZ));
2464 }
2465 }
2466 else if (!tried_mmap)
2467 /* We can at least try to use mmap to get memory. */
2468 goto try_mmap;
fa8d436c 2469 }
6c8dbf00 2470 else /* av == main_arena */
fa8d436c 2471
fa8d436c 2472
6c8dbf00
OB
2473 { /* Request enough space for nb + pad + overhead */
2474 size = nb + mp_.top_pad + MINSIZE;
a9177ff5 2475
6c8dbf00
OB
2476 /*
2477 If contiguous, we can subtract out existing space that we hope to
2478 combine with new space. We add it back later only if
2479 we don't actually get contiguous space.
2480 */
a9177ff5 2481
6c8dbf00
OB
2482 if (contiguous (av))
2483 size -= old_size;
fa8d436c 2484
6c8dbf00
OB
2485 /*
2486 Round to a multiple of page size.
2487 If MORECORE is not contiguous, this ensures that we only call it
2488 with whole-page arguments. And if MORECORE is contiguous and
2489 this is not first time through, this preserves page-alignment of
2490 previous calls. Otherwise, we correct to page-align below.
2491 */
fa8d436c 2492
8a35c3fe 2493 size = ALIGN_UP (size, pagesize);
fa8d436c 2494
6c8dbf00
OB
2495 /*
2496 Don't try to call MORECORE if argument is so big as to appear
2497 negative. Note that since mmap takes size_t arg, it may succeed
2498 below even if we cannot call MORECORE.
2499 */
2500
2501 if (size > 0)
2502 {
2503 brk = (char *) (MORECORE (size));
2504 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2505 }
2506
2507 if (brk != (char *) (MORECORE_FAILURE))
2508 {
2509 /* Call the `morecore' hook if necessary. */
2510 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2511 if (__builtin_expect (hook != NULL, 0))
2512 (*hook)();
2513 }
2514 else
2515 {
2516 /*
2517 If have mmap, try using it as a backup when MORECORE fails or
2518 cannot be used. This is worth doing on systems that have "holes" in
2519 address space, so sbrk cannot extend to give contiguous space, but
2520 space is available elsewhere. Note that we ignore mmap max count
2521 and threshold limits, since the space will not be used as a
2522 segregated mmap region.
2523 */
2524
2525 /* Cannot merge with old top, so add its size back in */
2526 if (contiguous (av))
8a35c3fe 2527 size = ALIGN_UP (size + old_size, pagesize);
6c8dbf00
OB
2528
2529 /* If we are relying on mmap as backup, then use larger units */
2530 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2531 size = MMAP_AS_MORECORE_SIZE;
2532
2533 /* Don't try if size wraps around 0 */
2534 if ((unsigned long) (size) > (unsigned long) (nb))
2535 {
2536 char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2537
2538 if (mbrk != MAP_FAILED)
2539 {
2540 /* We do not need, and cannot use, another sbrk call to find end */
2541 brk = mbrk;
2542 snd_brk = brk + size;
2543
2544 /*
2545 Record that we no longer have a contiguous sbrk region.
2546 After the first time mmap is used as backup, we do not
2547 ever rely on contiguous space since this could incorrectly
2548 bridge regions.
2549 */
2550 set_noncontiguous (av);
2551 }
2552 }
2553 }
2554
2555 if (brk != (char *) (MORECORE_FAILURE))
2556 {
2557 if (mp_.sbrk_base == 0)
2558 mp_.sbrk_base = brk;
2559 av->system_mem += size;
2560
2561 /*
2562 If MORECORE extends previous space, we can likewise extend top size.
2563 */
2564
2565 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2566 set_head (old_top, (size + old_size) | PREV_INUSE);
2567
2568 else if (contiguous (av) && old_size && brk < old_end)
2569 {
2570 /* Oops! Someone else killed our space.. Can't touch anything. */
fff94fa2
SP
2571 malloc_printerr (3, "break adjusted to free malloc space", brk,
2572 av);
6c8dbf00
OB
2573 }
2574
2575 /*
2576 Otherwise, make adjustments:
2577
2578 * If the first time through or noncontiguous, we need to call sbrk
2579 just to find out where the end of memory lies.
2580
2581 * We need to ensure that all returned chunks from malloc will meet
2582 MALLOC_ALIGNMENT
2583
2584 * If there was an intervening foreign sbrk, we need to adjust sbrk
2585 request size to account for the fact that we will not be able to
2586 combine new space with existing space in old_top.
2587
2588 * Almost all systems internally allocate whole pages at a time, in
2589 which case we might as well use the whole last page of request.
2590 So we allocate enough more memory to hit a page boundary now,
2591 which in turn causes future contiguous calls to page-align.
2592 */
2593
2594 else
2595 {
2596 front_misalign = 0;
2597 end_misalign = 0;
2598 correction = 0;
2599 aligned_brk = brk;
2600
2601 /* handle contiguous cases */
2602 if (contiguous (av))
2603 {
2604 /* Count foreign sbrk as system_mem. */
2605 if (old_size)
2606 av->system_mem += brk - old_end;
2607
2608 /* Guarantee alignment of first new chunk made from this space */
2609
2610 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2611 if (front_misalign > 0)
2612 {
2613 /*
2614 Skip over some bytes to arrive at an aligned position.
2615 We don't need to specially mark these wasted front bytes.
2616 They will never be accessed anyway because
2617 prev_inuse of av->top (and any chunk created from its start)
2618 is always true after initialization.
2619 */
2620
2621 correction = MALLOC_ALIGNMENT - front_misalign;
2622 aligned_brk += correction;
2623 }
2624
2625 /*
2626 If this isn't adjacent to existing space, then we will not
2627 be able to merge with old_top space, so must add to 2nd request.
2628 */
2629
2630 correction += old_size;
2631
2632 /* Extend the end address to hit a page boundary */
2633 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
8a35c3fe 2634 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
6c8dbf00
OB
2635
2636 assert (correction >= 0);
2637 snd_brk = (char *) (MORECORE (correction));
2638
2639 /*
2640 If can't allocate correction, try to at least find out current
2641 brk. It might be enough to proceed without failing.
2642
2643 Note that if second sbrk did NOT fail, we assume that space
2644 is contiguous with first sbrk. This is a safe assumption unless
2645 program is multithreaded but doesn't use locks and a foreign sbrk
2646 occurred between our first and second calls.
2647 */
2648
2649 if (snd_brk == (char *) (MORECORE_FAILURE))
2650 {
2651 correction = 0;
2652 snd_brk = (char *) (MORECORE (0));
2653 }
2654 else
2655 {
2656 /* Call the `morecore' hook if necessary. */
2657 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2658 if (__builtin_expect (hook != NULL, 0))
2659 (*hook)();
2660 }
2661 }
2662
2663 /* handle non-contiguous cases */
2664 else
2665 {
2666 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2667 /* MORECORE/mmap must correctly align */
2668 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2669 else
2670 {
2671 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2672 if (front_misalign > 0)
2673 {
2674 /*
2675 Skip over some bytes to arrive at an aligned position.
2676 We don't need to specially mark these wasted front bytes.
2677 They will never be accessed anyway because
2678 prev_inuse of av->top (and any chunk created from its start)
2679 is always true after initialization.
2680 */
2681
2682 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2683 }
2684 }
2685
2686 /* Find out current end of memory */
2687 if (snd_brk == (char *) (MORECORE_FAILURE))
2688 {
2689 snd_brk = (char *) (MORECORE (0));
2690 }
2691 }
2692
2693 /* Adjust top based on results of second sbrk */
2694 if (snd_brk != (char *) (MORECORE_FAILURE))
2695 {
2696 av->top = (mchunkptr) aligned_brk;
2697 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2698 av->system_mem += correction;
2699
2700 /*
2701 If not the first time through, we either have a
2702 gap due to foreign sbrk or a non-contiguous region. Insert a
2703 double fencepost at old_top to prevent consolidation with space
2704 we don't own. These fenceposts are artificial chunks that are
2705 marked as inuse and are in any case too small to use. We need
2706 two to make sizes and alignments work out.
2707 */
2708
2709 if (old_size != 0)
2710 {
2711 /*
2712 Shrink old_top to insert fenceposts, keeping size a
2713 multiple of MALLOC_ALIGNMENT. We know there is at least
2714 enough space in old_top to do this.
2715 */
2716 old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2717 set_head (old_top, old_size | PREV_INUSE);
2718
2719 /*
2720 Note that the following assignments completely overwrite
2721 old_top when old_size was previously MINSIZE. This is
2722 intentional. We need the fencepost, even if old_top otherwise gets
2723 lost.
2724 */
e9c4fe93
FW
2725 set_head (chunk_at_offset (old_top, old_size),
2726 (2 * SIZE_SZ) | PREV_INUSE);
2727 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
2728 (2 * SIZE_SZ) | PREV_INUSE);
6c8dbf00
OB
2729
2730 /* If possible, release the rest. */
2731 if (old_size >= MINSIZE)
2732 {
2733 _int_free (av, old_top, 1);
2734 }
2735 }
2736 }
2737 }
2738 }
2739 } /* if (av != &main_arena) */
2740
2741 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
fa8d436c 2742 av->max_system_mem = av->system_mem;
6c8dbf00 2743 check_malloc_state (av);
a9177ff5 2744
fa8d436c
UD
2745 /* finally, do the allocation */
2746 p = av->top;
6c8dbf00 2747 size = chunksize (p);
fa8d436c
UD
2748
2749 /* check that one of the above allocation paths succeeded */
6c8dbf00
OB
2750 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2751 {
2752 remainder_size = size - nb;
2753 remainder = chunk_at_offset (p, nb);
2754 av->top = remainder;
2755 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2756 set_head (remainder, remainder_size | PREV_INUSE);
2757 check_malloced_chunk (av, p, nb);
2758 return chunk2mem (p);
2759 }
fa8d436c
UD
2760
2761 /* catch all failure paths */
8e58439c 2762 __set_errno (ENOMEM);
fa8d436c
UD
2763 return 0;
2764}
2765
2766
2767/*
6c8dbf00
OB
2768 systrim is an inverse of sorts to sysmalloc. It gives memory back
2769 to the system (via negative arguments to sbrk) if there is unused
2770 memory at the `high' end of the malloc pool. It is called
2771 automatically by free() when top space exceeds the trim
2772 threshold. It is also called by the public malloc_trim routine. It
2773 returns 1 if it actually released any memory, else 0.
2774 */
fa8d436c 2775
6c8dbf00
OB
2776static int
2777systrim (size_t pad, mstate av)
fa8d436c 2778{
6c8dbf00
OB
2779 long top_size; /* Amount of top-most memory */
2780 long extra; /* Amount to release */
2781 long released; /* Amount actually released */
2782 char *current_brk; /* address returned by pre-check sbrk call */
2783 char *new_brk; /* address returned by post-check sbrk call */
8a35c3fe 2784 size_t pagesize;
6c8dbf00 2785 long top_area;
fa8d436c 2786
8a35c3fe 2787 pagesize = GLRO (dl_pagesize);
6c8dbf00 2788 top_size = chunksize (av->top);
a9177ff5 2789
4b5b548c
FS
2790 top_area = top_size - MINSIZE - 1;
2791 if (top_area <= pad)
2792 return 0;
2793
ca6be165
CD
2794 /* Release in pagesize units and round down to the nearest page. */
2795 extra = ALIGN_DOWN(top_area - pad, pagesize);
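  /* Worked example (illustrative, not part of the original sources),
     assuming a 64-bit build with MINSIZE == 32 and pagesize == 4096: for
     top_size == 0x42000 and pad == 0x20000, top_area is 0x41fdf and
     extra == ALIGN_DOWN (0x21fdf, 4096) == 0x21000, so 0x21000 bytes at
     the top of the heap would be handed back via MORECORE below.  */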
a9177ff5 2796
51a7380b
WN
2797 if (extra == 0)
2798 return 0;
2799
4b5b548c 2800 /*
6c8dbf00
OB
2801 Only proceed if end of memory is where we last set it.
2802 This avoids problems if there were foreign sbrk calls.
2803 */
2804 current_brk = (char *) (MORECORE (0));
2805 if (current_brk == (char *) (av->top) + top_size)
2806 {
2807 /*
2808 Attempt to release memory. We ignore MORECORE return value,
2809 and instead call again to find out where new end of memory is.
2810 This avoids problems if first call releases less than we asked,
2811 or if failure somehow altered brk value. (We could still
2812 encounter problems if it altered brk in some very bad way,
2813 but the only thing we can do is adjust anyway, which will cause
2814 some downstream failure.)
2815 */
2816
2817 MORECORE (-extra);
2818 /* Call the `morecore' hook if necessary. */
2819 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2820 if (__builtin_expect (hook != NULL, 0))
2821 (*hook)();
2822 new_brk = (char *) (MORECORE (0));
2823
2824 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2825
2826 if (new_brk != (char *) MORECORE_FAILURE)
2827 {
2828 released = (long) (current_brk - new_brk);
2829
2830 if (released != 0)
2831 {
2832 /* Success. Adjust top. */
2833 av->system_mem -= released;
2834 set_head (av->top, (top_size - released) | PREV_INUSE);
2835 check_malloc_state (av);
2836 return 1;
2837 }
2838 }
fa8d436c 2839 }
fa8d436c 2840 return 0;
f65fd747
UD
2841}
2842
431c33c0
UD
2843static void
2844internal_function
6c8dbf00 2845munmap_chunk (mchunkptr p)
f65fd747 2846{
6c8dbf00 2847 INTERNAL_SIZE_T size = chunksize (p);
f65fd747 2848
6c8dbf00 2849 assert (chunk_is_mmapped (p));
8e635611 2850
4cf6c72f
FW
2851 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
2852 main arena. We never free this memory. */
2853 if (DUMPED_MAIN_ARENA_CHUNK (p))
2854 return;
2855
e9c4fe93
FW
2856 uintptr_t block = (uintptr_t) p - prev_size (p);
2857 size_t total_size = prev_size (p) + size;
8e635611
UD
2858 /* Unfortunately we have to do the compiler's job by hand here. Normally
2859 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2860 page size. But gcc does not recognize the optimization possibility
2861 (at the moment, at least), so we combine the two values into one before
2862 the bit test; a standalone sketch of this check follows this function. */
6c8dbf00 2863 if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
8e635611
UD
2864 {
2865 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
fff94fa2 2866 chunk2mem (p), NULL);
8e635611
UD
2867 return;
2868 }
f65fd747 2869
c6e4925d
OB
2870 atomic_decrement (&mp_.n_mmaps);
2871 atomic_add (&mp_.mmapped_mem, -total_size);
f65fd747 2872
6ef76f3b
UD
2873 /* If munmap failed the process virtual memory address space is in a
2874 bad shape. Just leave the block hanging around, the process will
2875 terminate shortly anyway since not much can be done. */
6c8dbf00 2876 __munmap ((char *) block, total_size);
f65fd747
UD
2877}
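/* Illustrative sketch (not part of glibc) of the combined page-alignment
   test used in munmap_chunk above.  OR-ing BLOCK and TOTAL_SIZE before
   masking is equivalent to testing each value separately, because any
   set low-order bit in either operand survives the OR.  PAGESIZE is
   assumed to be a power of two.  */
static int
example_page_aligned (uintptr_t block, size_t total_size, size_t pagesize)
{
  /* Same result as: block % pagesize == 0 && total_size % pagesize == 0.  */
  return ((block | total_size) & (pagesize - 1)) == 0;
}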
2878
2879#if HAVE_MREMAP
2880
431c33c0
UD
2881static mchunkptr
2882internal_function
6c8dbf00 2883mremap_chunk (mchunkptr p, size_t new_size)
f65fd747 2884{
8a35c3fe 2885 size_t pagesize = GLRO (dl_pagesize);
e9c4fe93 2886 INTERNAL_SIZE_T offset = prev_size (p);
6c8dbf00 2887 INTERNAL_SIZE_T size = chunksize (p);
f65fd747
UD
2888 char *cp;
2889
6c8dbf00
OB
2890 assert (chunk_is_mmapped (p));
2891 assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
f65fd747
UD
2892
2893 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
8a35c3fe 2894 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
f65fd747 2895
68f3802d
UD
2896 /* No need to remap if the number of pages does not change. */
2897 if (size + offset == new_size)
2898 return p;
2899
6c8dbf00
OB
2900 cp = (char *) __mremap ((char *) p - offset, size + offset, new_size,
2901 MREMAP_MAYMOVE);
f65fd747 2902
6c8dbf00
OB
2903 if (cp == MAP_FAILED)
2904 return 0;
f65fd747 2905
6c8dbf00 2906 p = (mchunkptr) (cp + offset);
f65fd747 2907
6c8dbf00 2908 assert (aligned_OK (chunk2mem (p)));
f65fd747 2909
e9c4fe93 2910 assert (prev_size (p) == offset);
6c8dbf00 2911 set_head (p, (new_size - offset) | IS_MMAPPED);
f65fd747 2912
c6e4925d
OB
2913 INTERNAL_SIZE_T new;
2914 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
6c8dbf00 2915 + new_size - size - offset;
c6e4925d 2916 atomic_max (&mp_.max_mmapped_mem, new);
f65fd747
UD
2917 return p;
2918}
f65fd747
UD
2919#endif /* HAVE_MREMAP */
2920
fa8d436c 2921/*------------------------ Public wrappers. --------------------------------*/
f65fd747 2922
d5c3fafc
DD
2923#if USE_TCACHE
2924
2925/* We overlay this structure on the user-data portion of a chunk when
2926 the chunk is stored in the per-thread cache. */
2927typedef struct tcache_entry
2928{
2929 struct tcache_entry *next;
2930} tcache_entry;
2931
2932/* There is one of these for each thread, which contains the
2933 per-thread cache (hence "tcache_perthread_struct"). Keeping
2934 overall size low is mildly important. Note that COUNTS and ENTRIES
2935 are redundant (we could have just counted the linked list each
2936 time); keeping the counts is purely a performance optimization. */
2937typedef struct tcache_perthread_struct
2938{
2939 char counts[TCACHE_MAX_BINS];
2940 tcache_entry *entries[TCACHE_MAX_BINS];
2941} tcache_perthread_struct;
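/* Illustrative sketch (not part of glibc) of the overlay technique
   described above: a freed chunk's user-data area is reused to hold the
   list link, so caching a block costs no additional memory.  All names
   below are hypothetical and only demonstrate the idea.  */
struct example_node
{
  struct example_node *next;
};

static struct example_node *example_cache_head;

static void
example_cache_push (void *user_mem)
{
  struct example_node *e = (struct example_node *) user_mem;
  e->next = example_cache_head;   /* link lives inside the free block */
  example_cache_head = e;
}

static void *
example_cache_pop (void)
{
  struct example_node *e = example_cache_head;
  if (e != NULL)
    example_cache_head = e->next;
  return e;
}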
2942
2943static __thread char tcache_shutting_down = 0;
2944static __thread tcache_perthread_struct *tcache = NULL;
2945
2946/* Caller must ensure that we know tc_idx is valid and there's room
2947 for more chunks. */
2948static void
2949tcache_put (mchunkptr chunk, size_t tc_idx)
2950{
2951 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
2952 assert (tc_idx < TCACHE_MAX_BINS);
2953 e->next = tcache->entries[tc_idx];
2954 tcache->entries[tc_idx] = e;
2955 ++(tcache->counts[tc_idx]);
2956}
2957
2958/* Caller must ensure that we know tc_idx is valid and there are
2959 chunks available to remove. */
2960static void *
2961tcache_get (size_t tc_idx)
2962{
2963 tcache_entry *e = tcache->entries[tc_idx];
2964 assert (tc_idx < TCACHE_MAX_BINS);
2965 assert (tcache->entries[tc_idx] > 0);
2966 tcache->entries[tc_idx] = e->next;
2967 --(tcache->counts[tc_idx]);
2968 return (void *) e;
2969}
2970
2971static void __attribute__ ((section ("__libc_thread_freeres_fn")))
2972tcache_thread_freeres (void)
2973{
2974 int i;
2975 tcache_perthread_struct *tcache_tmp = tcache;
2976
2977 if (!tcache)
2978 return;
2979
2980 tcache = NULL;
2981
2982 for (i = 0; i < TCACHE_MAX_BINS; ++i)
2983 {
2984 while (tcache_tmp->entries[i])
2985 {
2986 tcache_entry *e = tcache_tmp->entries[i];
2987 tcache_tmp->entries[i] = e->next;
2988 __libc_free (e);
2989 }
2990 }
2991
2992 __libc_free (tcache_tmp);
2993
2994 tcache_shutting_down = 1;
2995}
2996text_set_element (__libc_thread_subfreeres, tcache_thread_freeres);
2997
2998static void
2999tcache_init(void)
3000{
3001 mstate ar_ptr;
3002 void *victim = 0;
3003 const size_t bytes = sizeof (tcache_perthread_struct);
3004
3005 if (tcache_shutting_down)
3006 return;
3007
3008 arena_get (ar_ptr, bytes);
3009 victim = _int_malloc (ar_ptr, bytes);
3010 if (!victim && ar_ptr != NULL)
3011 {
3012 ar_ptr = arena_get_retry (ar_ptr, bytes);
3013 victim = _int_malloc (ar_ptr, bytes);
3014 }
3015
3016
3017 if (ar_ptr != NULL)
3018 __libc_lock_unlock (ar_ptr->mutex);
3019
3020 /* In a low memory situation, we may not be able to allocate memory
3021 - in which case, we just keep trying later. However, we
3022 typically do this very early, so either there is sufficient
3023 memory, or there isn't enough memory to do non-trivial
3024 allocations anyway. */
3025 if (victim)
3026 {
3027 tcache = (tcache_perthread_struct *) victim;
3028 memset (tcache, 0, sizeof (tcache_perthread_struct));
3029 }
3030
3031}
3032
3033#define MAYBE_INIT_TCACHE() \
3034 if (__glibc_unlikely (tcache == NULL)) \
3035 tcache_init();
3036
3037#else
3038#define MAYBE_INIT_TCACHE()
3039#endif
3040
6c8dbf00
OB
3041void *
3042__libc_malloc (size_t bytes)
fa8d436c
UD
3043{
3044 mstate ar_ptr;
22a89187 3045 void *victim;
f65fd747 3046
a222d91a 3047 void *(*hook) (size_t, const void *)
f3eeb3fc 3048 = atomic_forced_read (__malloc_hook);
bfacf1af 3049 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3050 return (*hook)(bytes, RETURN_ADDRESS (0));
d5c3fafc
DD
3051#if USE_TCACHE
3052 /* _int_free also calls request2size; be careful not to pad twice. */
3053 size_t tbytes = request2size (bytes);
3054 size_t tc_idx = csize2tidx (tbytes);
3055
3056 MAYBE_INIT_TCACHE ();
3057
3058 DIAG_PUSH_NEEDS_COMMENT;
3059 if (tc_idx < mp_.tcache_bins
3060 /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
3061 && tcache
3062 && tcache->entries[tc_idx] != NULL)
3063 {
3064 return tcache_get (tc_idx);
3065 }
3066 DIAG_POP_NEEDS_COMMENT;
3067#endif
f65fd747 3068
94c5a52a 3069 arena_get (ar_ptr, bytes);
425ce2ed 3070
6c8dbf00 3071 victim = _int_malloc (ar_ptr, bytes);
fff94fa2
SP
3072 /* Retry with another arena only if we were able to find a usable arena
3073 before. */
3074 if (!victim && ar_ptr != NULL)
6c8dbf00
OB
3075 {
3076 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3077 ar_ptr = arena_get_retry (ar_ptr, bytes);
fff94fa2 3078 victim = _int_malloc (ar_ptr, bytes);
60f0e64b 3079 }
fff94fa2
SP
3080
3081 if (ar_ptr != NULL)
4bf5f222 3082 __libc_lock_unlock (ar_ptr->mutex);
fff94fa2 3083
6c8dbf00
OB
3084 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3085 ar_ptr == arena_for_chunk (mem2chunk (victim)));
fa8d436c 3086 return victim;
f65fd747 3087}
6c8dbf00 3088libc_hidden_def (__libc_malloc)
f65fd747 3089
fa8d436c 3090void
6c8dbf00 3091__libc_free (void *mem)
f65fd747 3092{
fa8d436c
UD
3093 mstate ar_ptr;
3094 mchunkptr p; /* chunk corresponding to mem */
3095
a222d91a 3096 void (*hook) (void *, const void *)
f3eeb3fc 3097 = atomic_forced_read (__free_hook);
6c8dbf00
OB
3098 if (__builtin_expect (hook != NULL, 0))
3099 {
3100 (*hook)(mem, RETURN_ADDRESS (0));
3101 return;
3102 }
f65fd747 3103
fa8d436c
UD
3104 if (mem == 0) /* free(0) has no effect */
3105 return;
f65fd747 3106
6c8dbf00 3107 p = mem2chunk (mem);
f65fd747 3108
6c8dbf00
OB
3109 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3110 {
4cf6c72f
FW
3111 /* See if the dynamic brk/mmap threshold needs adjusting.
3112 Dumped fake mmapped chunks do not affect the threshold. */
6c8dbf00 3113 if (!mp_.no_dyn_threshold
e9c4fe93
FW
3114 && chunksize_nomask (p) > mp_.mmap_threshold
3115 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
4cf6c72f 3116 && !DUMPED_MAIN_ARENA_CHUNK (p))
6c8dbf00
OB
3117 {
3118 mp_.mmap_threshold = chunksize (p);
3119 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3120 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3121 mp_.mmap_threshold, mp_.trim_threshold);
3122 }
3123 munmap_chunk (p);
3124 return;
3125 }
f65fd747 3126
d5c3fafc
DD
3127 MAYBE_INIT_TCACHE ();
3128
6c8dbf00
OB
3129 ar_ptr = arena_for_chunk (p);
3130 _int_free (ar_ptr, p, 0);
f65fd747 3131}
3b49edc0 3132libc_hidden_def (__libc_free)
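/* Illustrative sketch (not part of glibc) of the dynamic threshold
   update performed in __libc_free above: when a chunk that was served
   by mmap is freed and it is larger than the current mmap threshold
   (but below a fixed cap), the threshold is raised to that size and the
   trim threshold to twice that, so similar future requests are carved
   from the heap instead of paying an mmap/munmap pair each time.  The
   names and starting values here are hypothetical.  */
static size_t example_mmap_threshold = 128 * 1024;
static size_t example_trim_threshold = 2 * 128 * 1024;

static void
example_adjust_thresholds (size_t freed_mmapped_size, size_t threshold_max)
{
  if (freed_mmapped_size > example_mmap_threshold
      && freed_mmapped_size <= threshold_max)
    {
      example_mmap_threshold = freed_mmapped_size;
      example_trim_threshold = 2 * example_mmap_threshold;
    }
}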
f65fd747 3133
6c8dbf00
OB
3134void *
3135__libc_realloc (void *oldmem, size_t bytes)
f65fd747 3136{
fa8d436c 3137 mstate ar_ptr;
6c8dbf00 3138 INTERNAL_SIZE_T nb; /* padded request size */
f65fd747 3139
6c8dbf00 3140 void *newp; /* chunk to return */
f65fd747 3141
a222d91a 3142 void *(*hook) (void *, size_t, const void *) =
f3eeb3fc 3143 atomic_forced_read (__realloc_hook);
bfacf1af 3144 if (__builtin_expect (hook != NULL, 0))
fa8d436c 3145 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
f65fd747 3146
fa8d436c 3147#if REALLOC_ZERO_BYTES_FREES
6c8dbf00
OB
3148 if (bytes == 0 && oldmem != NULL)
3149 {
3150 __libc_free (oldmem); return 0;
3151 }
f65fd747 3152#endif
f65fd747 3153
fa8d436c 3154 /* realloc of null is supposed to be same as malloc */
6c8dbf00
OB
3155 if (oldmem == 0)
3156 return __libc_malloc (bytes);
f65fd747 3157
78ac92ad 3158 /* chunk corresponding to oldmem */
6c8dbf00 3159 const mchunkptr oldp = mem2chunk (oldmem);
78ac92ad 3160 /* its size */
6c8dbf00 3161 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
f65fd747 3162
fff94fa2
SP
3163 if (chunk_is_mmapped (oldp))
3164 ar_ptr = NULL;
3165 else
d5c3fafc
DD
3166 {
3167 MAYBE_INIT_TCACHE ();
3168 ar_ptr = arena_for_chunk (oldp);
3169 }
fff94fa2 3170
4cf6c72f
FW
3171 /* Little security check which won't hurt performance: the allocator
3172 never wraps around at the end of the address space. Therefore
3173 we can exclude some size values which might appear here by
3174 accident or by "design" from some intruder. We need to bypass
3175 this check for dumped fake mmap chunks from the old main arena
3176 because the new malloc may provide additional alignment. */
3177 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3178 || __builtin_expect (misaligned_chunk (oldp), 0))
3179 && !DUMPED_MAIN_ARENA_CHUNK (oldp))
dc165f7b 3180 {
fff94fa2
SP
3181 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
3182 ar_ptr);
dc165f7b
UD
3183 return NULL;
3184 }
3185
6c8dbf00 3186 checked_request2size (bytes, nb);
f65fd747 3187
6c8dbf00
OB
3188 if (chunk_is_mmapped (oldp))
3189 {
4cf6c72f
FW
3190 /* If this is a faked mmapped chunk from the dumped main arena,
3191 always make a copy (and do not free the old chunk). */
3192 if (DUMPED_MAIN_ARENA_CHUNK (oldp))
3193 {
3194 /* Must alloc, copy, free. */
3195 void *newmem = __libc_malloc (bytes);
3196 if (newmem == 0)
3197 return NULL;
3198 /* Copy as many bytes as are available from the old chunk
1e8a8875
FW
3199 and fit into the new size. NB: The overhead for faked
3200 mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
3201 regular mmapped chunks. */
3202 if (bytes > oldsize - SIZE_SZ)
3203 bytes = oldsize - SIZE_SZ;
4cf6c72f
FW
3204 memcpy (newmem, oldmem, bytes);
3205 return newmem;
3206 }
3207
6c8dbf00 3208 void *newmem;
f65fd747 3209
fa8d436c 3210#if HAVE_MREMAP
6c8dbf00
OB
3211 newp = mremap_chunk (oldp, nb);
3212 if (newp)
3213 return chunk2mem (newp);
f65fd747 3214#endif
6c8dbf00
OB
3215 /* Note the extra SIZE_SZ overhead. */
3216 if (oldsize - SIZE_SZ >= nb)
3217 return oldmem; /* do nothing */
3218
3219 /* Must alloc, copy, free. */
3220 newmem = __libc_malloc (bytes);
3221 if (newmem == 0)
3222 return 0; /* propagate failure */
fa8d436c 3223
6c8dbf00
OB
3224 memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
3225 munmap_chunk (oldp);
3226 return newmem;
3227 }
3228
4bf5f222 3229 __libc_lock_lock (ar_ptr->mutex);
f65fd747 3230
6c8dbf00 3231 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
f65fd747 3232
4bf5f222 3233 __libc_lock_unlock (ar_ptr->mutex);
6c8dbf00
OB
3234 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3235 ar_ptr == arena_for_chunk (mem2chunk (newp)));
07014fca
UD
3236
3237 if (newp == NULL)
3238 {
3239 /* Try harder to allocate memory in other arenas. */
35fed6f1 3240 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
6c8dbf00 3241 newp = __libc_malloc (bytes);
07014fca 3242 if (newp != NULL)
6c8dbf00
OB
3243 {
3244 memcpy (newp, oldmem, oldsize - SIZE_SZ);
3245 _int_free (ar_ptr, oldp, 0);
3246 }
07014fca
UD
3247 }
3248
fa8d436c
UD
3249 return newp;
3250}
3b49edc0 3251libc_hidden_def (__libc_realloc)
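/* Illustrative usage sketch (not part of glibc) of the two special
   cases handled at the top of __libc_realloc above: realloc (NULL, n)
   behaves like malloc (n), and, with REALLOC_ZERO_BYTES_FREES defined,
   realloc (p, 0) frees p and returns NULL.  The function name is
   hypothetical; it only makes the contract concrete.  */
static int
example_realloc_special_cases (void)
{
  char *p = realloc (NULL, 16);   /* same as malloc (16) */
  if (p == NULL)
    return 1;                     /* allocation failure */
  p = realloc (p, 0);             /* with REALLOC_ZERO_BYTES_FREES: frees p */
  return p == NULL ? 0 : 1;       /* NULL is the expected result here */
}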
f65fd747 3252
6c8dbf00
OB
3253void *
3254__libc_memalign (size_t alignment, size_t bytes)
10ad46bc
OB
3255{
3256 void *address = RETURN_ADDRESS (0);
3257 return _mid_memalign (alignment, bytes, address);
3258}
3259
3260static void *
3261_mid_memalign (size_t alignment, size_t bytes, void *address)
fa8d436c
UD
3262{
3263 mstate ar_ptr;
22a89187 3264 void *p;
f65fd747 3265
a222d91a 3266 void *(*hook) (size_t, size_t, const void *) =
f3eeb3fc 3267 atomic_forced_read (__memalign_hook);
bfacf1af 3268 if (__builtin_expect (hook != NULL, 0))
10ad46bc 3269 return (*hook)(alignment, bytes, address);
f65fd747 3270
10ad46bc 3271 /* If we need less alignment than we give anyway, just relay to malloc. */
6c8dbf00
OB
3272 if (alignment <= MALLOC_ALIGNMENT)
3273 return __libc_malloc (bytes);
1228ed5c 3274
fa8d436c 3275 /* Otherwise, ensure that it is at least a minimum chunk size */
6c8dbf00
OB
3276 if (alignment < MINSIZE)
3277 alignment = MINSIZE;
f65fd747 3278
a56ee40b
WN
3279 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3280 power of 2 and will cause overflow in the check below. */
3281 if (alignment > SIZE_MAX / 2 + 1)
3282 {
3283 __set_errno (EINVAL);
3284 return 0;
3285 }
3286
b73ed247
WN
3287 /* Check for overflow. */
3288 if (bytes > SIZE_MAX - alignment - MINSIZE)
3289 {
3290 __set_errno (ENOMEM);
3291 return 0;
3292 }
3293
10ad46bc
OB
3294
3295 /* Make sure alignment is power of 2. */
6c8dbf00
OB
3296 if (!powerof2 (alignment))
3297 {
3298 size_t a = MALLOC_ALIGNMENT * 2;
3299 while (a < alignment)
3300 a <<= 1;
3301 alignment = a;
3302 }
10ad46bc 3303
6c8dbf00 3304 arena_get (ar_ptr, bytes + alignment + MINSIZE);
6c8dbf00
OB
3305
3306 p = _int_memalign (ar_ptr, alignment, bytes);
fff94fa2 3307 if (!p && ar_ptr != NULL)
6c8dbf00
OB
3308 {
3309 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3310 ar_ptr = arena_get_retry (ar_ptr, bytes);
fff94fa2 3311 p = _int_memalign (ar_ptr, alignment, bytes);
f65fd747 3312 }
fff94fa2
SP
3313
3314 if (ar_ptr != NULL)
4bf5f222 3315 __libc_lock_unlock (ar_ptr->mutex);
fff94fa2 3316
6c8dbf00
OB
3317 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3318 ar_ptr == arena_for_chunk (mem2chunk (p)));
fa8d436c 3319 return p;
f65fd747 3320}
380d7e87 3321/* For ISO C11. */
3b49edc0
UD
3322weak_alias (__libc_memalign, aligned_alloc)
3323libc_hidden_def (__libc_memalign)
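/* Illustrative sketch (not part of glibc) of the alignment fix-up in
   _mid_memalign above: a requested alignment that is not a power of two
   is rounded up to the next power of two before the real work starts.
   The starting value is assumed to be a power of two itself.  */
static size_t
example_round_up_pow2 (size_t alignment, size_t minimum)
{
  size_t a = minimum;
  while (a < alignment)
    a <<= 1;
  return a;
}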
f65fd747 3324
6c8dbf00
OB
3325void *
3326__libc_valloc (size_t bytes)
fa8d436c 3327{
6c8dbf00 3328 if (__malloc_initialized < 0)
fa8d436c 3329 ptmalloc_init ();
8088488d 3330
10ad46bc 3331 void *address = RETURN_ADDRESS (0);
8a35c3fe
CD
3332 size_t pagesize = GLRO (dl_pagesize);
3333 return _mid_memalign (pagesize, bytes, address);
fa8d436c 3334}
f65fd747 3335
6c8dbf00
OB
3336void *
3337__libc_pvalloc (size_t bytes)
fa8d436c 3338{
6c8dbf00 3339 if (__malloc_initialized < 0)
fa8d436c 3340 ptmalloc_init ();
8088488d 3341
10ad46bc 3342 void *address = RETURN_ADDRESS (0);
8a35c3fe
CD
3343 size_t pagesize = GLRO (dl_pagesize);
3344 size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
dba38551 3345
1159a193 3346 /* Check for overflow. */
8a35c3fe 3347 if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
1159a193
WN
3348 {
3349 __set_errno (ENOMEM);
3350 return 0;
3351 }
3352
8a35c3fe 3353 return _mid_memalign (pagesize, rounded_bytes, address);
fa8d436c 3354}
f65fd747 3355
6c8dbf00
OB
3356void *
3357__libc_calloc (size_t n, size_t elem_size)
f65fd747 3358{
d6285c9f
CD
3359 mstate av;
3360 mchunkptr oldtop, p;
3361 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
6c8dbf00 3362 void *mem;
d6285c9f
CD
3363 unsigned long clearsize;
3364 unsigned long nclears;
3365 INTERNAL_SIZE_T *d;
0950889b
UD
3366
3367 /* size_t is unsigned so the behavior on overflow is defined. */
3368 bytes = n * elem_size;
d9af917d
UD
3369#define HALF_INTERNAL_SIZE_T \
3370 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
6c8dbf00
OB
3371 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
3372 {
3373 if (elem_size != 0 && bytes / elem_size != n)
3374 {
3375 __set_errno (ENOMEM);
3376 return 0;
3377 }
d9af917d 3378 }
0950889b 3379
a222d91a 3380 void *(*hook) (size_t, const void *) =
f3eeb3fc 3381 atomic_forced_read (__malloc_hook);
6c8dbf00
OB
3382 if (__builtin_expect (hook != NULL, 0))
3383 {
d6285c9f
CD
3384 sz = bytes;
3385 mem = (*hook)(sz, RETURN_ADDRESS (0));
3386 if (mem == 0)
3387 return 0;
3388
3389 return memset (mem, 0, sz);
7799b7b3 3390 }
f65fd747 3391
d6285c9f
CD
3392 sz = bytes;
3393
d5c3fafc
DD
3394 MAYBE_INIT_TCACHE ();
3395
d6285c9f 3396 arena_get (av, sz);
fff94fa2
SP
3397 if (av)
3398 {
3399 /* Check if we hand out the top chunk, in which case there may be no
3400 need to clear. */
d6285c9f 3401#if MORECORE_CLEARS
fff94fa2
SP
3402 oldtop = top (av);
3403 oldtopsize = chunksize (top (av));
d6285c9f 3404# if MORECORE_CLEARS < 2
fff94fa2
SP
3405 /* Only newly allocated memory is guaranteed to be cleared. */
3406 if (av == &main_arena &&
3407 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3408 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
d6285c9f 3409# endif
fff94fa2
SP
3410 if (av != &main_arena)
3411 {
3412 heap_info *heap = heap_for_ptr (oldtop);
3413 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3414 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3415 }
3416#endif
3417 }
3418 else
d6285c9f 3419 {
fff94fa2
SP
3420 /* No usable arenas. */
3421 oldtop = 0;
3422 oldtopsize = 0;
d6285c9f 3423 }
d6285c9f
CD
3424 mem = _int_malloc (av, sz);
3425
3426
3427 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3428 av == arena_for_chunk (mem2chunk (mem)));
3429
fff94fa2 3430 if (mem == 0 && av != NULL)
d6285c9f
CD
3431 {
3432 LIBC_PROBE (memory_calloc_retry, 1, sz);
3433 av = arena_get_retry (av, sz);
fff94fa2 3434 mem = _int_malloc (av, sz);
d6285c9f 3435 }
fff94fa2
SP
3436
3437 if (av != NULL)
4bf5f222 3438 __libc_lock_unlock (av->mutex);
fff94fa2
SP
3439
3440 /* Allocation failed even after a retry. */
3441 if (mem == 0)
3442 return 0;
3443
d6285c9f
CD
3444 p = mem2chunk (mem);
3445
3446 /* Two optional cases in which clearing is not necessary */
3447 if (chunk_is_mmapped (p))
3448 {
3449 if (__builtin_expect (perturb_byte, 0))
3450 return memset (mem, 0, sz);
3451
3452 return mem;
3453 }
3454
3455 csz = chunksize (p);
3456
3457#if MORECORE_CLEARS
3458 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3459 {
3460 /* clear only the bytes from non-freshly-sbrked memory */
3461 csz = oldtopsize;
3462 }
3463#endif
3464
3465 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3466 contents have an odd number of INTERNAL_SIZE_T-sized words;
3467 minimally 3. */
3468 d = (INTERNAL_SIZE_T *) mem;
3469 clearsize = csz - SIZE_SZ;
3470 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3471 assert (nclears >= 3);
3472
3473 if (nclears > 9)
3474 return memset (d, 0, clearsize);
3475
3476 else
3477 {
3478 *(d + 0) = 0;
3479 *(d + 1) = 0;
3480 *(d + 2) = 0;
3481 if (nclears > 4)
3482 {
3483 *(d + 3) = 0;
3484 *(d + 4) = 0;
3485 if (nclears > 6)
3486 {
3487 *(d + 5) = 0;
3488 *(d + 6) = 0;
3489 if (nclears > 8)
3490 {
3491 *(d + 7) = 0;
3492 *(d + 8) = 0;
3493 }
3494 }
3495 }
3496 }
3497
3498 return mem;
fa8d436c 3499}
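/* Illustrative sketch (not part of glibc) of the multiplication
   overflow test used in __libc_calloc above.  If both factors fit in
   the lower half of size_t, the product cannot wrap, so the exact (and
   more expensive) division check is only needed when either factor has
   a bit set in its upper half.  */
static int
example_mul_overflows (size_t n, size_t elem_size)
{
  const size_t half = (size_t) 1 << (8 * sizeof (size_t) / 2);
  size_t bytes = n * elem_size;   /* unsigned multiply: wraps, never UB */

  if ((n | elem_size) >= half)
    return elem_size != 0 && bytes / elem_size != n;
  return 0;
}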
f65fd747 3500
f65fd747 3501/*
6c8dbf00
OB
3502 ------------------------------ malloc ------------------------------
3503 */
f65fd747 3504
6c8dbf00
OB
3505static void *
3506_int_malloc (mstate av, size_t bytes)
f65fd747 3507{
fa8d436c 3508 INTERNAL_SIZE_T nb; /* normalized request size */
6c8dbf00
OB
3509 unsigned int idx; /* associated bin index */
3510 mbinptr bin; /* associated bin */
f65fd747 3511
6c8dbf00 3512 mchunkptr victim; /* inspected/selected chunk */
fa8d436c 3513 INTERNAL_SIZE_T size; /* its size */
6c8dbf00 3514 int victim_index; /* its bin index */
f65fd747 3515
6c8dbf00
OB
3516 mchunkptr remainder; /* remainder from a split */
3517 unsigned long remainder_size; /* its size */
8a4b65b4 3518
6c8dbf00
OB
3519 unsigned int block; /* bit map traverser */
3520 unsigned int bit; /* bit map traverser */
3521 unsigned int map; /* current word of binmap */
8a4b65b4 3522
6c8dbf00
OB
3523 mchunkptr fwd; /* misc temp for linking */
3524 mchunkptr bck; /* misc temp for linking */
8a4b65b4 3525
d5c3fafc
DD
3526#if USE_TCACHE
3527 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3528#endif
3529
f6887a0d
UD
3530 const char *errstr = NULL;
3531
fa8d436c 3532 /*
6c8dbf00
OB
3533 Convert request size to internal form by adding SIZE_SZ bytes
3534 overhead plus possibly more to obtain necessary alignment and/or
3535 to obtain a size of at least MINSIZE, the smallest allocatable
3536 size. Also, checked_request2size traps (returning 0) request sizes
3537 that are so large that they wrap around zero when padded and
3538 aligned.
3539 */
f65fd747 3540
6c8dbf00 3541 checked_request2size (bytes, nb);
f65fd747 3542
fff94fa2
SP
3543 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3544 mmap. */
3545 if (__glibc_unlikely (av == NULL))
3546 {
3547 void *p = sysmalloc (nb, av);
3548 if (p != NULL)
3549 alloc_perturb (p, bytes);
3550 return p;
3551 }
3552
fa8d436c 3553 /*
6c8dbf00
OB
3554 If the size qualifies as a fastbin, first check corresponding bin.
3555 This code is safe to execute even if av is not yet initialized, so we
3556 can try it without checking, which saves some time on this fast path.
3557 */
f65fd747 3558
d5c3fafc
DD
3559#define REMOVE_FB(fb, victim, pp) \
3560 do \
3561 { \
3562 victim = pp; \
3563 if (victim == NULL) \
3564 break; \
3565 } \
3566 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
3567 != victim); \
3568
6c8dbf00
OB
3569 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3570 {
3571 idx = fastbin_index (nb);
3572 mfastbinptr *fb = &fastbin (av, idx);
3573 mchunkptr pp = *fb;
d5c3fafc 3574 REMOVE_FB (fb, victim, pp);
6c8dbf00
OB
3575 if (victim != 0)
3576 {
3577 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
3578 {
3579 errstr = "malloc(): memory corruption (fast)";
3580 errout:
fff94fa2 3581 malloc_printerr (check_action, errstr, chunk2mem (victim), av);
6c8dbf00
OB
3582 return NULL;
3583 }
3584 check_remalloced_chunk (av, victim, nb);
d5c3fafc
DD
3585#if USE_TCACHE
3586 /* While we're here, if we see other chunks of the same size,
3587 stash them in the tcache. */
3588 size_t tc_idx = csize2tidx (nb);
3589 if (tcache && tc_idx < mp_.tcache_bins)
3590 {
3591 mchunkptr tc_victim;
3592
3593 /* While bin not empty and tcache not full, copy chunks over. */
3594 while (tcache->counts[tc_idx] < mp_.tcache_count
3595 && (pp = *fb) != NULL)
3596 {
3597 REMOVE_FB (fb, tc_victim, pp);
3598 if (tc_victim != 0)
3599 {
3600 tcache_put (tc_victim, tc_idx);
3601 }
3602 }
3603 }
3604#endif
6c8dbf00
OB
3605 void *p = chunk2mem (victim);
3606 alloc_perturb (p, bytes);
3607 return p;
3608 }
fa8d436c 3609 }
f65fd747 3610
fa8d436c 3611 /*
6c8dbf00
OB
3612 If a small request, check regular bin. Since these "smallbins"
3613 hold one size each, no searching within bins is necessary.
3614 (For a large request, we need to wait until unsorted chunks are
3615 processed to find best fit. But for small ones, fits are exact
3616 anyway, so we can check now, which is faster.)
3617 */
3618
3619 if (in_smallbin_range (nb))
3620 {
3621 idx = smallbin_index (nb);
3622 bin = bin_at (av, idx);
3623
3624 if ((victim = last (bin)) != bin)
3625 {
3626 if (victim == 0) /* initialization check */
3627 malloc_consolidate (av);
3628 else
3629 {
3630 bck = victim->bk;
a1ffb40e 3631 if (__glibc_unlikely (bck->fd != victim))
6c8dbf00
OB
3632 {
3633 errstr = "malloc(): smallbin double linked list corrupted";
3634 goto errout;
3635 }
3636 set_inuse_bit_at_offset (victim, nb);
3637 bin->bk = bck;
3638 bck->fd = bin;
3639
3640 if (av != &main_arena)
e9c4fe93 3641 set_non_main_arena (victim);
6c8dbf00 3642 check_malloced_chunk (av, victim, nb);
d5c3fafc
DD
3643#if USE_TCACHE
3644 /* While we're here, if we see other chunks of the same size,
3645 stash them in the tcache. */
3646 size_t tc_idx = csize2tidx (nb);
3647 if (tcache && tc_idx < mp_.tcache_bins)
3648 {
3649 mchunkptr tc_victim;
3650
3651 /* While bin not empty and tcache not full, copy chunks over. */
3652 while (tcache->counts[tc_idx] < mp_.tcache_count
3653 && (tc_victim = last (bin)) != bin)
3654 {
3655 if (tc_victim != 0)
3656 {
3657 bck = tc_victim->bk;
3658 set_inuse_bit_at_offset (tc_victim, nb);
3659 if (av != &main_arena)
3660 set_non_main_arena (tc_victim);
3661 bin->bk = bck;
3662 bck->fd = bin;
3663
3664 tcache_put (tc_victim, tc_idx);
3665 }
3666 }
3667 }
3668#endif
6c8dbf00
OB
3669 void *p = chunk2mem (victim);
3670 alloc_perturb (p, bytes);
3671 return p;
3672 }
3673 }
fa8d436c 3674 }
f65fd747 3675
a9177ff5 3676 /*
fa8d436c
UD
3677 If this is a large request, consolidate fastbins before continuing.
3678 While it might look excessive to kill all fastbins before
3679 even seeing if there is space available, this avoids
3680 fragmentation problems normally associated with fastbins.
3681 Also, in practice, programs tend to have runs of either small or
a9177ff5 3682 large requests, but less often mixtures, so consolidation is not
fa8d436c
UD
3683 invoked all that often in most programs. And the programs in which
3684 it is called frequently tend to be ones that fragment anyway.
6c8dbf00 3685 */
7799b7b3 3686
6c8dbf00
OB
3687 else
3688 {
3689 idx = largebin_index (nb);
3690 if (have_fastchunks (av))
3691 malloc_consolidate (av);
3692 }
f65fd747 3693
fa8d436c 3694 /*
6c8dbf00
OB
3695 Process recently freed or remaindered chunks, taking one only if
3696 it is exact fit, or, if this a small request, the chunk is remainder from
3697 the most recent non-exact fit. Place other traversed chunks in
3698 bins. Note that this step is the only place in any routine where
3699 chunks are placed in bins.
3700
3701 The outer loop here is needed because we might not realize until
3702 near the end of malloc that we should have consolidated, so must
3703 do so and retry. This happens at most once, and only when we would
3704 otherwise need to expand memory to service a "small" request.
3705 */
3706
d5c3fafc
DD
3707#if USE_TCACHE
3708 INTERNAL_SIZE_T tcache_nb = 0;
3709 size_t tc_idx = csize2tidx (nb);
3710 if (tcache && tc_idx < mp_.tcache_bins)
3711 tcache_nb = nb;
3712 int return_cached = 0;
3713
3714 tcache_unsorted_count = 0;
3715#endif
3716
6c8dbf00
OB
3717 for (;; )
3718 {
3719 int iters = 0;
3720 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3721 {
3722 bck = victim->bk;
e9c4fe93
FW
3723 if (__builtin_expect (chunksize_nomask (victim) <= 2 * SIZE_SZ, 0)
3724 || __builtin_expect (chunksize_nomask (victim)
3725 > av->system_mem, 0))
6c8dbf00 3726 malloc_printerr (check_action, "malloc(): memory corruption",
fff94fa2 3727 chunk2mem (victim), av);
6c8dbf00
OB
3728 size = chunksize (victim);
3729
3730 /*
3731 If a small request, try to use last remainder if it is the
3732 only chunk in unsorted bin. This helps promote locality for
3733 runs of consecutive small requests. This is the only
3734 exception to best-fit, and applies only when there is
3735 no exact fit for a small chunk.
3736 */
3737
3738 if (in_smallbin_range (nb) &&
3739 bck == unsorted_chunks (av) &&
3740 victim == av->last_remainder &&
3741 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3742 {
3743 /* split and reattach remainder */
3744 remainder_size = size - nb;
3745 remainder = chunk_at_offset (victim, nb);
3746 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3747 av->last_remainder = remainder;
3748 remainder->bk = remainder->fd = unsorted_chunks (av);
3749 if (!in_smallbin_range (remainder_size))
3750 {
3751 remainder->fd_nextsize = NULL;
3752 remainder->bk_nextsize = NULL;
3753 }
3754
3755 set_head (victim, nb | PREV_INUSE |
3756 (av != &main_arena ? NON_MAIN_ARENA : 0));
3757 set_head (remainder, remainder_size | PREV_INUSE);
3758 set_foot (remainder, remainder_size);
3759
3760 check_malloced_chunk (av, victim, nb);
3761 void *p = chunk2mem (victim);
3762 alloc_perturb (p, bytes);
3763 return p;
3764 }
3765
3766 /* remove from unsorted list */
3767 unsorted_chunks (av)->bk = bck;
3768 bck->fd = unsorted_chunks (av);
3769
3770 /* Take now instead of binning if exact fit */
3771
3772 if (size == nb)
3773 {
3774 set_inuse_bit_at_offset (victim, size);
3775 if (av != &main_arena)
e9c4fe93 3776 set_non_main_arena (victim);
d5c3fafc
DD
3777#if USE_TCACHE
3778 /* Fill cache first, return to user only if cache fills.
3779 We may return one of these chunks later. */
3780 if (tcache_nb
3781 && tcache->counts[tc_idx] < mp_.tcache_count)
3782 {
3783 tcache_put (victim, tc_idx);
3784 return_cached = 1;
3785 continue;
3786 }
3787 else
3788 {
3789#endif
6c8dbf00
OB
3790 check_malloced_chunk (av, victim, nb);
3791 void *p = chunk2mem (victim);
3792 alloc_perturb (p, bytes);
3793 return p;
d5c3fafc
DD
3794#if USE_TCACHE
3795 }
3796#endif
6c8dbf00
OB
3797 }
3798
3799 /* place chunk in bin */
3800
3801 if (in_smallbin_range (size))
3802 {
3803 victim_index = smallbin_index (size);
3804 bck = bin_at (av, victim_index);
3805 fwd = bck->fd;
3806 }
3807 else
3808 {
3809 victim_index = largebin_index (size);
3810 bck = bin_at (av, victim_index);
3811 fwd = bck->fd;
3812
3813 /* maintain large bins in sorted order */
3814 if (fwd != bck)
3815 {
3816 /* Or with inuse bit to speed comparisons */
3817 size |= PREV_INUSE;
3818 /* if smaller than smallest, bypass loop below */
e9c4fe93
FW
3819 assert (chunk_main_arena (bck->bk));
3820 if ((unsigned long) (size)
3821 < (unsigned long) chunksize_nomask (bck->bk))
6c8dbf00
OB
3822 {
3823 fwd = bck;
3824 bck = bck->bk;
3825
3826 victim->fd_nextsize = fwd->fd;
3827 victim->bk_nextsize = fwd->fd->bk_nextsize;
3828 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3829 }
3830 else
3831 {
e9c4fe93
FW
3832 assert (chunk_main_arena (fwd));
3833 while ((unsigned long) size < chunksize_nomask (fwd))
6c8dbf00
OB
3834 {
3835 fwd = fwd->fd_nextsize;
e9c4fe93 3836 assert (chunk_main_arena (fwd));
6c8dbf00
OB
3837 }
3838
e9c4fe93
FW
3839 if ((unsigned long) size
3840 == (unsigned long) chunksize_nomask (fwd))
6c8dbf00
OB
3841 /* Always insert in the second position. */
3842 fwd = fwd->fd;
3843 else
3844 {
3845 victim->fd_nextsize = fwd;
3846 victim->bk_nextsize = fwd->bk_nextsize;
3847 fwd->bk_nextsize = victim;
3848 victim->bk_nextsize->fd_nextsize = victim;
3849 }
3850 bck = fwd->bk;
3851 }
3852 }
3853 else
3854 victim->fd_nextsize = victim->bk_nextsize = victim;
3855 }
3856
3857 mark_bin (av, victim_index);
3858 victim->bk = bck;
3859 victim->fd = fwd;
3860 fwd->bk = victim;
3861 bck->fd = victim;
3862
d5c3fafc
DD
3863#if USE_TCACHE
3864 /* If we've processed as many chunks as we're allowed while
3865 filling the cache, return one of the cached ones. */
3866 ++tcache_unsorted_count;
3867 if (return_cached
3868 && mp_.tcache_unsorted_limit > 0
3869 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
3870 {
3871 return tcache_get (tc_idx);
3872 }
3873#endif
3874
6c8dbf00
OB
3875#define MAX_ITERS 10000
3876 if (++iters >= MAX_ITERS)
3877 break;
3878 }
fa8d436c 3879
d5c3fafc
DD
3880#if USE_TCACHE
3881 /* If all the small chunks we found ended up cached, return one now. */
3882 if (return_cached)
3883 {
3884 return tcache_get (tc_idx);
3885 }
3886#endif
3887
a9177ff5 3888 /*
6c8dbf00
OB
3889 If a large request, scan through the chunks of current bin in
3890 sorted order to find smallest that fits. Use the skip list for this.
3891 */
3892
3893 if (!in_smallbin_range (nb))
3894 {
3895 bin = bin_at (av, idx);
3896
3897 /* skip scan if empty or largest chunk is too small */
e9c4fe93
FW
3898 if ((victim = first (bin)) != bin
3899 && (unsigned long) chunksize_nomask (victim)
3900 >= (unsigned long) (nb))
6c8dbf00
OB
3901 {
3902 victim = victim->bk_nextsize;
3903 while (((unsigned long) (size = chunksize (victim)) <
3904 (unsigned long) (nb)))
3905 victim = victim->bk_nextsize;
3906
3907 /* Avoid removing the first entry for a size so that the skip
3908 list does not have to be rerouted. */
e9c4fe93
FW
3909 if (victim != last (bin)
3910 && chunksize_nomask (victim)
3911 == chunksize_nomask (victim->fd))
6c8dbf00
OB
3912 victim = victim->fd;
3913
3914 remainder_size = size - nb;
fff94fa2 3915 unlink (av, victim, bck, fwd);
6c8dbf00
OB
3916
3917 /* Exhaust */
3918 if (remainder_size < MINSIZE)
3919 {
3920 set_inuse_bit_at_offset (victim, size);
3921 if (av != &main_arena)
e9c4fe93 3922 set_non_main_arena (victim);
6c8dbf00
OB
3923 }
3924 /* Split */
3925 else
3926 {
3927 remainder = chunk_at_offset (victim, nb);
3928 /* We cannot assume the unsorted list is empty and therefore
3929 have to perform a complete insert here. */
3930 bck = unsorted_chunks (av);
3931 fwd = bck->fd;
a1ffb40e 3932 if (__glibc_unlikely (fwd->bk != bck))
6c8dbf00
OB
3933 {
3934 errstr = "malloc(): corrupted unsorted chunks";
3935 goto errout;
3936 }
3937 remainder->bk = bck;
3938 remainder->fd = fwd;
3939 bck->fd = remainder;
3940 fwd->bk = remainder;
3941 if (!in_smallbin_range (remainder_size))
3942 {
3943 remainder->fd_nextsize = NULL;
3944 remainder->bk_nextsize = NULL;
3945 }
3946 set_head (victim, nb | PREV_INUSE |
3947 (av != &main_arena ? NON_MAIN_ARENA : 0));
3948 set_head (remainder, remainder_size | PREV_INUSE);
3949 set_foot (remainder, remainder_size);
3950 }
3951 check_malloced_chunk (av, victim, nb);
3952 void *p = chunk2mem (victim);
3953 alloc_perturb (p, bytes);
3954 return p;
3955 }
3956 }
f65fd747 3957
6c8dbf00
OB
3958 /*
3959 Search for a chunk by scanning bins, starting with next largest
3960 bin. This search is strictly by best-fit; i.e., the smallest
3961 (with ties going to approximately the least recently used) chunk
3962 that fits is selected.
3963
3964 The bitmap avoids needing to check that most blocks are nonempty.
3965 The particular case of skipping all bins during warm-up phases
3966 when no chunks have been returned yet is faster than it might look.
3967 */
3968
3969 ++idx;
3970 bin = bin_at (av, idx);
3971 block = idx2block (idx);
3972 map = av->binmap[block];
3973 bit = idx2bit (idx);
3974
3975 for (;; )
3976 {
3977 /* Skip rest of block if there are no more set bits in this block. */
3978 if (bit > map || bit == 0)
3979 {
3980 do
3981 {
3982 if (++block >= BINMAPSIZE) /* out of bins */
3983 goto use_top;
3984 }
3985 while ((map = av->binmap[block]) == 0);
3986
3987 bin = bin_at (av, (block << BINMAPSHIFT));
3988 bit = 1;
3989 }
3990
3991 /* Advance to bin with set bit. There must be one. */
3992 while ((bit & map) == 0)
3993 {
3994 bin = next_bin (bin);
3995 bit <<= 1;
3996 assert (bit != 0);
3997 }
3998
3999 /* Inspect the bin. It is likely to be non-empty */
4000 victim = last (bin);
4001
4002 /* If a false alarm (empty bin), clear the bit. */
4003 if (victim == bin)
4004 {
4005 av->binmap[block] = map &= ~bit; /* Write through */
4006 bin = next_bin (bin);
4007 bit <<= 1;
4008 }
4009
4010 else
4011 {
4012 size = chunksize (victim);
4013
4014 /* We know the first chunk in this bin is big enough to use. */
4015 assert ((unsigned long) (size) >= (unsigned long) (nb));
4016
4017 remainder_size = size - nb;
4018
4019 /* unlink */
fff94fa2 4020 unlink (av, victim, bck, fwd);
6c8dbf00
OB
4021
4022 /* Exhaust */
4023 if (remainder_size < MINSIZE)
4024 {
4025 set_inuse_bit_at_offset (victim, size);
4026 if (av != &main_arena)
e9c4fe93 4027 set_non_main_arena (victim);
6c8dbf00
OB
4028 }
4029
4030 /* Split */
4031 else
4032 {
4033 remainder = chunk_at_offset (victim, nb);
4034
4035 /* We cannot assume the unsorted list is empty and therefore
4036 have to perform a complete insert here. */
4037 bck = unsorted_chunks (av);
4038 fwd = bck->fd;
a1ffb40e 4039 if (__glibc_unlikely (fwd->bk != bck))
6c8dbf00
OB
4040 {
4041 errstr = "malloc(): corrupted unsorted chunks 2";
4042 goto errout;
4043 }
4044 remainder->bk = bck;
4045 remainder->fd = fwd;
4046 bck->fd = remainder;
4047 fwd->bk = remainder;
4048
4049 /* advertise as last remainder */
4050 if (in_smallbin_range (nb))
4051 av->last_remainder = remainder;
4052 if (!in_smallbin_range (remainder_size))
4053 {
4054 remainder->fd_nextsize = NULL;
4055 remainder->bk_nextsize = NULL;
4056 }
4057 set_head (victim, nb | PREV_INUSE |
4058 (av != &main_arena ? NON_MAIN_ARENA : 0));
4059 set_head (remainder, remainder_size | PREV_INUSE);
4060 set_foot (remainder, remainder_size);
4061 }
4062 check_malloced_chunk (av, victim, nb);
4063 void *p = chunk2mem (victim);
4064 alloc_perturb (p, bytes);
4065 return p;
4066 }
4067 }
4068
4069 use_top:
4070 /*
4071 If large enough, split off the chunk bordering the end of memory
4072 (held in av->top). Note that this is in accord with the best-fit
4073 search rule. In effect, av->top is treated as larger (and thus
4074 less well fitting) than any other available chunk since it can
4075 be extended to be as large as necessary (up to system
4076 limitations).
4077
4078 We require that av->top always exists (i.e., has size >=
4079 MINSIZE) after initialization, so if it would otherwise be
4080 exhausted by current request, it is replenished. (The main
4081 reason for ensuring it exists is that we may need MINSIZE space
4082 to put in fenceposts in sysmalloc.)
4083 */
4084
4085 victim = av->top;
4086 size = chunksize (victim);
4087
4088 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4089 {
4090 remainder_size = size - nb;
4091 remainder = chunk_at_offset (victim, nb);
4092 av->top = remainder;
4093 set_head (victim, nb | PREV_INUSE |
4094 (av != &main_arena ? NON_MAIN_ARENA : 0));
4095 set_head (remainder, remainder_size | PREV_INUSE);
4096
4097 check_malloced_chunk (av, victim, nb);
4098 void *p = chunk2mem (victim);
4099 alloc_perturb (p, bytes);
4100 return p;
4101 }
4102
4103 /* When we are using atomic ops to free fast chunks we can get
4104 here for all block sizes. */
4105 else if (have_fastchunks (av))
4106 {
4107 malloc_consolidate (av);
4108 /* restore original bin index */
4109 if (in_smallbin_range (nb))
4110 idx = smallbin_index (nb);
4111 else
4112 idx = largebin_index (nb);
4113 }
f65fd747 4114
6c8dbf00
OB
4115 /*
4116 Otherwise, relay to handle system-dependent cases
4117 */
425ce2ed 4118 else
6c8dbf00
OB
4119 {
4120 void *p = sysmalloc (nb, av);
4121 if (p != NULL)
4122 alloc_perturb (p, bytes);
4123 return p;
4124 }
425ce2ed 4125 }
fa8d436c 4126}
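/* Illustrative sketch (not part of glibc) of the lock-free fastbin pop
   performed by the REMOVE_FB macro in _int_malloc above, written here
   with the standard GCC/Clang __atomic builtins instead of glibc's
   internal catomic_compare_and_exchange_val_acq.  On a failed compare
   and swap the expected value is refreshed with the current head, so
   the loop simply retries until it wins or the bin is empty.  */
struct example_fb_chunk
{
  struct example_fb_chunk *fd;
};

static struct example_fb_chunk *
example_fastbin_pop (struct example_fb_chunk **fb)
{
  struct example_fb_chunk *victim = __atomic_load_n (fb, __ATOMIC_ACQUIRE);

  while (victim != NULL
         && !__atomic_compare_exchange_n (fb, &victim, victim->fd,
                                          0 /* strong */, __ATOMIC_ACQUIRE,
                                          __ATOMIC_ACQUIRE))
    continue;   /* victim now holds the new head; try again */

  return victim;
}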
f65fd747 4127
fa8d436c 4128/*
6c8dbf00
OB
4129 ------------------------------ free ------------------------------
4130 */
f65fd747 4131
78ac92ad 4132static void
6c8dbf00 4133_int_free (mstate av, mchunkptr p, int have_lock)
f65fd747 4134{
fa8d436c 4135 INTERNAL_SIZE_T size; /* its size */
6c8dbf00
OB
4136 mfastbinptr *fb; /* associated fastbin */
4137 mchunkptr nextchunk; /* next contiguous chunk */
fa8d436c 4138 INTERNAL_SIZE_T nextsize; /* its size */
6c8dbf00 4139 int nextinuse; /* true if nextchunk is used */
fa8d436c 4140 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
6c8dbf00
OB
4141 mchunkptr bck; /* misc temp for linking */
4142 mchunkptr fwd; /* misc temp for linking */
fa8d436c 4143
37fa1953 4144 const char *errstr = NULL;
425ce2ed 4145 int locked = 0;
f65fd747 4146
6c8dbf00 4147 size = chunksize (p);
f65fd747 4148
37fa1953
UD
4149 /* Little security check which won't hurt performance: the
4150 allocator never wraps around at the end of the address space.
4151 Therefore we can exclude some size values which might appear
4152 here by accident or by "design" from some intruder. */
dc165f7b 4153 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
073f560e 4154 || __builtin_expect (misaligned_chunk (p), 0))
37fa1953
UD
4155 {
4156 errstr = "free(): invalid pointer";
4157 errout:
6c8dbf00 4158 if (!have_lock && locked)
4bf5f222 4159 __libc_lock_unlock (av->mutex);
fff94fa2 4160 malloc_printerr (check_action, errstr, chunk2mem (p), av);
37fa1953 4161 return;
fa8d436c 4162 }
347c92e9
L
4163 /* We know that each chunk is at least MINSIZE bytes in size and a
4164 multiple of MALLOC_ALIGNMENT. */
a1ffb40e 4165 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
bf589066
UD
4166 {
4167 errstr = "free(): invalid size";
4168 goto errout;
4169 }
f65fd747 4170
37fa1953 4171 check_inuse_chunk(av, p);
f65fd747 4172
d5c3fafc
DD
4173#if USE_TCACHE
4174 {
4175 size_t tc_idx = csize2tidx (size);
4176
4177 if (tcache
4178 && tc_idx < mp_.tcache_bins
4179 && tcache->counts[tc_idx] < mp_.tcache_count)
4180 {
4181 tcache_put (p, tc_idx);
4182 return;
4183 }
4184 }
4185#endif
4186
37fa1953
UD
4187 /*
4188 If eligible, place chunk on a fastbin so it can be found
4189 and used quickly in malloc.
4190 */
6bf4302e 4191
9bf248c6 4192 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
6bf4302e 4193
37fa1953
UD
4194#if TRIM_FASTBINS
4195 /*
4196 If TRIM_FASTBINS set, don't place chunks
4197 bordering top into fastbins
4198 */
4199 && (chunk_at_offset(p, size) != av->top)
4200#endif
4201 ) {
fa8d436c 4202
e9c4fe93
FW
4203 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4204 <= 2 * SIZE_SZ, 0)
893e6098
UD
4205 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4206 >= av->system_mem, 0))
4207 {
bec466d9
UD
4208 /* We might not have a lock at this point and concurrent modifications
4209 of system_mem might have led to a false positive. Redo the test
4210 after getting the lock. */
4211 if (have_lock
4212 || ({ assert (locked == 0);
4bf5f222 4213 __libc_lock_lock (av->mutex);
bec466d9 4214 locked = 1;
e9c4fe93 4215 chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
bec466d9
UD
4216 || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
4217 }))
bec466d9
UD
4218 {
4219 errstr = "free(): invalid next size (fast)";
4220 goto errout;
4221 }
bec466d9
UD
4222 if (! have_lock)
4223 {
4bf5f222 4224 __libc_lock_unlock (av->mutex);
bec466d9
UD
4225 locked = 0;
4226 }
893e6098
UD
4227 }
4228
e8349efd 4229 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
425ce2ed 4230
37fa1953 4231 set_fastchunks(av);
90a3055e
UD
4232 unsigned int idx = fastbin_index(size);
4233 fb = &fastbin (av, idx);
425ce2ed 4234
362b47fe
MK
4235 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4236 mchunkptr old = *fb, old2;
5f24d53a 4237 unsigned int old_idx = ~0u;
425ce2ed
UD
4238 do
4239 {
362b47fe
MK
4240 /* Check that the top of the bin is not the record we are going to add
4241 (i.e., double free). */
425ce2ed
UD
4242 if (__builtin_expect (old == p, 0))
4243 {
4244 errstr = "double free or corruption (fasttop)";
4245 goto errout;
4246 }
362b47fe
MK
4247 /* Check that size of fastbin chunk at the top is the same as
4248 size of the chunk that we are adding. We can dereference OLD
4249 only if we have the lock, otherwise it might have already been
4250 deallocated. See use of OLD_IDX below for the actual check. */
4251 if (have_lock && old != NULL)
5f24d53a 4252 old_idx = fastbin_index(chunksize(old));
362b47fe 4253 p->fd = old2 = old;
425ce2ed 4254 }
362b47fe 4255 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
5f24d53a 4256
362b47fe 4257 if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
5f24d53a
UD
4258 {
4259 errstr = "invalid fastbin entry (free)";
4260 goto errout;
4261 }
37fa1953 4262 }
f65fd747 4263
37fa1953
UD
4264 /*
4265 Consolidate other non-mmapped chunks as they arrive.
4266 */
fa8d436c 4267
37fa1953 4268 else if (!chunk_is_mmapped(p)) {
425ce2ed 4269 if (! have_lock) {
4bf5f222 4270 __libc_lock_lock (av->mutex);
425ce2ed
UD
4271 locked = 1;
4272 }
425ce2ed 4273
37fa1953 4274 nextchunk = chunk_at_offset(p, size);
fa8d436c 4275
37fa1953
UD
4276 /* Lightweight tests: check whether the block is already the
4277 top block. */
a1ffb40e 4278 if (__glibc_unlikely (p == av->top))
37fa1953
UD
4279 {
4280 errstr = "double free or corruption (top)";
4281 goto errout;
4282 }
4283 /* Or whether the next chunk is beyond the boundaries of the arena. */
4284 if (__builtin_expect (contiguous (av)
4285 && (char *) nextchunk
4286 >= ((char *) av->top + chunksize(av->top)), 0))
4287 {
4288 errstr = "double free or corruption (out)";
4289 goto errout;
4290 }
4291 /* Or whether the block is actually not marked used. */
a1ffb40e 4292 if (__glibc_unlikely (!prev_inuse(nextchunk)))
37fa1953
UD
4293 {
4294 errstr = "double free or corruption (!prev)";
4295 goto errout;
4296 }
fa8d436c 4297
37fa1953 4298 nextsize = chunksize(nextchunk);
e9c4fe93 4299 if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
893e6098
UD
4300 || __builtin_expect (nextsize >= av->system_mem, 0))
4301 {
76761b63 4302 errstr = "free(): invalid next size (normal)";
893e6098
UD
4303 goto errout;
4304 }
fa8d436c 4305
e8349efd 4306 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
854278df 4307
37fa1953
UD
4308 /* consolidate backward */
4309 if (!prev_inuse(p)) {
e9c4fe93 4310 prevsize = prev_size (p);
37fa1953
UD
4311 size += prevsize;
4312 p = chunk_at_offset(p, -((long) prevsize));
fff94fa2 4313 unlink(av, p, bck, fwd);
37fa1953 4314 }
a9177ff5 4315
37fa1953
UD
4316 if (nextchunk != av->top) {
4317 /* get and clear inuse bit */
4318 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4319
4320 /* consolidate forward */
4321 if (!nextinuse) {
fff94fa2 4322 unlink(av, nextchunk, bck, fwd);
37fa1953
UD
4323 size += nextsize;
4324 } else
4325 clear_inuse_bit_at_offset(nextchunk, 0);
10dc2a90 4326
fa8d436c 4327 /*
37fa1953
UD
4328 Place the chunk in unsorted chunk list. Chunks are
4329 not placed into regular bins until after they have
4330 been given one chance to be used in malloc.
fa8d436c 4331 */
f65fd747 4332
37fa1953
UD
4333 bck = unsorted_chunks(av);
4334 fwd = bck->fd;
a1ffb40e 4335 if (__glibc_unlikely (fwd->bk != bck))
f6887a0d
UD
4336 {
4337 errstr = "free(): corrupted unsorted chunks";
4338 goto errout;
4339 }
37fa1953 4340 p->fd = fwd;
7ecfbd38
UD
4341 p->bk = bck;
4342 if (!in_smallbin_range(size))
4343 {
4344 p->fd_nextsize = NULL;
4345 p->bk_nextsize = NULL;
4346 }
37fa1953
UD
4347 bck->fd = p;
4348 fwd->bk = p;
8a4b65b4 4349
37fa1953
UD
4350 set_head(p, size | PREV_INUSE);
4351 set_foot(p, size);
4352
4353 check_free_chunk(av, p);
4354 }
4355
4356 /*
4357 If the chunk borders the current high end of memory,
4358 consolidate into top
4359 */
4360
4361 else {
4362 size += nextsize;
4363 set_head(p, size | PREV_INUSE);
4364 av->top = p;
4365 check_chunk(av, p);
4366 }
4367
4368 /*
4369 If freeing a large space, consolidate possibly-surrounding
4370 chunks. Then, if the total unused topmost memory exceeds trim
4371 threshold, ask malloc_trim to reduce top.
4372
4373 Unless max_fast is 0, we don't know if there are fastbins
4374 bordering top, so we cannot tell for sure whether threshold
4375 has been reached unless fastbins are consolidated. But we
4376 don't want to consolidate on each free. As a compromise,
4377 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4378 is reached.
4379 */
fa8d436c 4380
37fa1953
UD
4381 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4382 if (have_fastchunks(av))
4383 malloc_consolidate(av);
fa8d436c 4384
37fa1953 4385 if (av == &main_arena) {
a9177ff5 4386#ifndef MORECORE_CANNOT_TRIM
37fa1953
UD
4387 if ((unsigned long)(chunksize(av->top)) >=
4388 (unsigned long)(mp_.trim_threshold))
3b49edc0 4389 systrim(mp_.top_pad, av);
fa8d436c 4390#endif
37fa1953
UD
4391 } else {
4392 /* Always try heap_trim(), even if the top chunk is not
4393 large, because the corresponding heap might go away. */
4394 heap_info *heap = heap_for_ptr(top(av));
fa8d436c 4395
37fa1953
UD
4396 assert(heap->ar_ptr == av);
4397 heap_trim(heap, mp_.top_pad);
fa8d436c 4398 }
fa8d436c 4399 }
10dc2a90 4400
425ce2ed
UD
4401 if (! have_lock) {
4402 assert (locked);
4bf5f222 4403 __libc_lock_unlock (av->mutex);
425ce2ed 4404 }
37fa1953
UD
4405 }
4406 /*
22a89187 4407 If the chunk was allocated via mmap, release via munmap().
37fa1953
UD
4408 */
4409
4410 else {
c120d94d 4411 munmap_chunk (p);
fa8d436c 4412 }
10dc2a90
UD
4413}
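/* Illustrative sketch (not part of glibc) of the matching lock-free
   fastbin push from _int_free above ("P->FD = *FB; *FB = P"), again
   using the __atomic builtins rather than glibc's
   catomic_compare_and_exchange_val_rel.  */
struct example_fb_node
{
  struct example_fb_node *fd;
};

static void
example_fastbin_push (struct example_fb_node **fb, struct example_fb_node *p)
{
  struct example_fb_node *old = __atomic_load_n (fb, __ATOMIC_RELAXED);

  do
    p->fd = old;   /* tentatively link the new chunk to the current head */
  while (!__atomic_compare_exchange_n (fb, &old, p, 0 /* strong */,
                                       __ATOMIC_RELEASE, __ATOMIC_RELAXED));
}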
4414
fa8d436c
UD
4415/*
4416 ------------------------- malloc_consolidate -------------------------
4417
4418 malloc_consolidate is a specialized version of free() that tears
4419 down chunks held in fastbins. Free itself cannot be used for this
4420 purpose since, among other things, it might place chunks back onto
4421 fastbins. So, instead, we need to use a minor variant of the same
4422 code.
a9177ff5 4423
fa8d436c
UD
4424 Also, because this routine needs to be called the first time through
4425 malloc anyway, it turns out to be the perfect place to trigger
4426 initialization code.
4427*/
4428
fa8d436c 4429static void malloc_consolidate(mstate av)
10dc2a90 4430{
fa8d436c
UD
4431 mfastbinptr* fb; /* current fastbin being consolidated */
4432 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4433 mchunkptr p; /* current chunk being consolidated */
4434 mchunkptr nextp; /* next chunk to consolidate */
4435 mchunkptr unsorted_bin; /* bin header */
4436 mchunkptr first_unsorted; /* chunk to link to */
4437
4438 /* These have same use as in free() */
4439 mchunkptr nextchunk;
4440 INTERNAL_SIZE_T size;
4441 INTERNAL_SIZE_T nextsize;
4442 INTERNAL_SIZE_T prevsize;
4443 int nextinuse;
4444 mchunkptr bck;
4445 mchunkptr fwd;
10dc2a90 4446
fa8d436c
UD
4447 /*
4448 If max_fast is 0, we know that av hasn't
4449 yet been initialized, in which case do so below
4450 */
10dc2a90 4451
9bf248c6 4452 if (get_max_fast () != 0) {
fa8d436c 4453 clear_fastchunks(av);
10dc2a90 4454
fa8d436c 4455 unsorted_bin = unsorted_chunks(av);
10dc2a90 4456
fa8d436c
UD
4457 /*
4458 Remove each chunk from fast bin and consolidate it, placing it
4459 then in unsorted bin. Among other reasons for doing this,
4460 placing in unsorted bin avoids needing to calculate actual bins
4461 until malloc is sure that chunks aren't immediately going to be
4462 reused anyway.
4463 */
a9177ff5 4464
425ce2ed 4465 maxfb = &fastbin (av, NFASTBINS - 1);
425ce2ed 4466 fb = &fastbin (av, 0);
fa8d436c 4467 do {
b43f552a 4468 p = atomic_exchange_acq (fb, NULL);
425ce2ed 4469 if (p != 0) {
72f90263
UD
4470 do {
4471 check_inuse_chunk(av, p);
4472 nextp = p->fd;
4473
4474 /* Slightly streamlined version of consolidation code in free() */
e9c4fe93 4475 size = chunksize (p);
72f90263
UD
4476 nextchunk = chunk_at_offset(p, size);
4477 nextsize = chunksize(nextchunk);
4478
4479 if (!prev_inuse(p)) {
e9c4fe93 4480 prevsize = prev_size (p);
72f90263
UD
4481 size += prevsize;
4482 p = chunk_at_offset(p, -((long) prevsize));
fff94fa2 4483 unlink(av, p, bck, fwd);
72f90263
UD
4484 }
4485
4486 if (nextchunk != av->top) {
4487 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4488
4489 if (!nextinuse) {
4490 size += nextsize;
fff94fa2 4491 unlink(av, nextchunk, bck, fwd);
72f90263 4492 } else
fa8d436c 4493 clear_inuse_bit_at_offset(nextchunk, 0);
a9177ff5 4494
72f90263
UD
4495 first_unsorted = unsorted_bin->fd;
4496 unsorted_bin->fd = p;
4497 first_unsorted->bk = p;
a9177ff5 4498
72f90263 4499 if (!in_smallbin_range (size)) {
7ecfbd38
UD
4500 p->fd_nextsize = NULL;
4501 p->bk_nextsize = NULL;
4502 }
4503
72f90263
UD
4504 set_head(p, size | PREV_INUSE);
4505 p->bk = unsorted_bin;
4506 p->fd = first_unsorted;
4507 set_foot(p, size);
4508 }
a9177ff5 4509
72f90263
UD
4510 else {
4511 size += nextsize;
4512 set_head(p, size | PREV_INUSE);
4513 av->top = p;
4514 }
a9177ff5 4515
72f90263 4516 } while ( (p = nextp) != 0);
a9177ff5 4517
fa8d436c
UD
4518 }
4519 } while (fb++ != maxfb);
4520 }
4521 else {
4522 malloc_init_state(av);
4523 check_malloc_state(av);
4524 }
4525}
10dc2a90 4526
fa8d436c
UD
4527/*
4528 ------------------------------ realloc ------------------------------
4529*/
f65fd747 4530
22a89187 4531void*
4c8b8cc3
UD
4532_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4533 INTERNAL_SIZE_T nb)
fa8d436c 4534{
fa8d436c
UD
4535 mchunkptr newp; /* chunk to return */
4536 INTERNAL_SIZE_T newsize; /* its size */
22a89187 4537 void* newmem; /* corresponding user mem */
f65fd747 4538
fa8d436c 4539 mchunkptr next; /* next contiguous chunk after oldp */
f65fd747 4540
fa8d436c
UD
4541 mchunkptr remainder; /* extra space at end of newp */
4542 unsigned long remainder_size; /* its size */
f65fd747 4543
fa8d436c
UD
4544 mchunkptr bck; /* misc temp for linking */
4545 mchunkptr fwd; /* misc temp for linking */
2ed5fd9a 4546
fa8d436c
UD
4547 unsigned long copysize; /* bytes to copy */
4548 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
a9177ff5 4549 INTERNAL_SIZE_T* s; /* copy source */
fa8d436c 4550 INTERNAL_SIZE_T* d; /* copy destination */
f65fd747 4551
76761b63 4552 const char *errstr = NULL;
f65fd747 4553
6dd6a580 4554 /* oldmem size */
e9c4fe93 4555 if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
76761b63
UD
4556 || __builtin_expect (oldsize >= av->system_mem, 0))
4557 {
4b04154d 4558 errstr = "realloc(): invalid old size";
4c8b8cc3 4559 errout:
fff94fa2 4560 malloc_printerr (check_action, errstr, chunk2mem (oldp), av);
4c8b8cc3 4561 return NULL;
76761b63
UD
4562 }
4563
6c8dbf00 4564 check_inuse_chunk (av, oldp);
f65fd747 4565
4c8b8cc3 4566 /* All callers already filter out mmap'ed chunks. */
6c8dbf00 4567 assert (!chunk_is_mmapped (oldp));
f65fd747 4568
6c8dbf00
OB
4569 next = chunk_at_offset (oldp, oldsize);
4570 INTERNAL_SIZE_T nextsize = chunksize (next);
e9c4fe93 4571 if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
22a89187
UD
4572 || __builtin_expect (nextsize >= av->system_mem, 0))
4573 {
4574 errstr = "realloc(): invalid next size";
4575 goto errout;
4576 }
4577
6c8dbf00
OB
4578 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4579 {
4580 /* already big enough; split below */
fa8d436c 4581 newp = oldp;
6c8dbf00 4582 newsize = oldsize;
7799b7b3 4583 }
f65fd747 4584
6c8dbf00
OB
4585 else
4586 {
4587 /* Try to expand forward into top */
4588 if (next == av->top &&
4589 (unsigned long) (newsize = oldsize + nextsize) >=
4590 (unsigned long) (nb + MINSIZE))
4591 {
4592 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4593 av->top = chunk_at_offset (oldp, nb);
4594 set_head (av->top, (newsize - nb) | PREV_INUSE);
4595 check_inuse_chunk (av, oldp);
4596 return chunk2mem (oldp);
4597 }
4598
4599 /* Try to expand forward into next chunk; split off remainder below */
4600 else if (next != av->top &&
4601 !inuse (next) &&
4602 (unsigned long) (newsize = oldsize + nextsize) >=
4603 (unsigned long) (nb))
4604 {
4605 newp = oldp;
fff94fa2 4606 unlink (av, next, bck, fwd);
6c8dbf00
OB
4607 }
4608
4609 /* allocate, copy, free */
4610 else
4611 {
4612 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4613 if (newmem == 0)
4614 return 0; /* propagate failure */
4615
4616 newp = mem2chunk (newmem);
4617 newsize = chunksize (newp);
4618
4619 /*
4620 Avoid copy if newp is next chunk after oldp.
4621 */
4622 if (newp == next)
4623 {
4624 newsize += oldsize;
4625 newp = oldp;
4626 }
4627 else
4628 {
4629 /*
4630 Unroll copy of <= 36 bytes (72 if 8-byte sizes).
4631 We know that contents have an odd number of
4632 INTERNAL_SIZE_T-sized words; minimally 3.
4633 */
4634
4635 copysize = oldsize - SIZE_SZ;
4636 s = (INTERNAL_SIZE_T *) (chunk2mem (oldp));
4637 d = (INTERNAL_SIZE_T *) (newmem);
4638 ncopies = copysize / sizeof (INTERNAL_SIZE_T);
4639 assert (ncopies >= 3);
4640
4641 if (ncopies > 9)
4642 memcpy (d, s, copysize);
4643
4644 else
4645 {
4646 *(d + 0) = *(s + 0);
4647 *(d + 1) = *(s + 1);
4648 *(d + 2) = *(s + 2);
4649 if (ncopies > 4)
4650 {
4651 *(d + 3) = *(s + 3);
4652 *(d + 4) = *(s + 4);
4653 if (ncopies > 6)
4654 {
4655 *(d + 5) = *(s + 5);
4656 *(d + 6) = *(s + 6);
4657 if (ncopies > 8)
4658 {
4659 *(d + 7) = *(s + 7);
4660 *(d + 8) = *(s + 8);
4661 }
4662 }
4663 }
4664 }
4665
4666 _int_free (av, oldp, 1);
4667 check_inuse_chunk (av, newp);
4668 return chunk2mem (newp);
4669 }
4670 }
fa8d436c 4671 }
f65fd747 4672
22a89187 4673 /* If possible, free extra space in old or extended chunk */
f65fd747 4674
6c8dbf00 4675 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
f65fd747 4676
22a89187 4677 remainder_size = newsize - nb;
10dc2a90 4678
6c8dbf00
OB
4679 if (remainder_size < MINSIZE) /* not enough extra to split off */
4680 {
4681 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4682 set_inuse_bit_at_offset (newp, newsize);
4683 }
4684 else /* split remainder */
4685 {
4686 remainder = chunk_at_offset (newp, nb);
4687 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4688 set_head (remainder, remainder_size | PREV_INUSE |
4689 (av != &main_arena ? NON_MAIN_ARENA : 0));
4690 /* Mark remainder as inuse so free() won't complain */
4691 set_inuse_bit_at_offset (remainder, remainder_size);
4692 _int_free (av, remainder, 1);
4693 }
22a89187 4694
6c8dbf00
OB
4695 check_inuse_chunk (av, newp);
4696 return chunk2mem (newp);
fa8d436c
UD
4697}
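
/* Illustrative sketch (not part of glibc): from the caller's point of view
   the three strategies above -- extend into top, absorb the following free
   chunk, or allocate-copy-free -- are invisible; realloc () either resizes
   in place or returns a different pointer.  Typical defensive usage:

     void *p = malloc (100);
     void *q = realloc (p, 4000);
     if (q == NULL)
       free (p);          // p is still valid after a failed realloc
     else
       p = q;             // the block may have moved
 */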

/*
   ------------------------------ memalign ------------------------------
 */

static void *
_int_memalign (mstate av, size_t alignment, size_t bytes)
{
  INTERNAL_SIZE_T nb;             /* padded request size */
  char *m;                        /* memory returned by malloc call */
  mchunkptr p;                    /* corresponding chunk */
  char *brk;                      /* alignment point within p */
  mchunkptr newp;                 /* chunk to return */
  INTERNAL_SIZE_T newsize;        /* its size */
  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
  mchunkptr remainder;            /* spare room at end to split off */
  unsigned long remainder_size;   /* its size */
  INTERNAL_SIZE_T size;


  checked_request2size (bytes, nb);

  /*
     Strategy: find a spot within that chunk that meets the alignment
     request, and then possibly free the leading and trailing space.
   */


  /* Call malloc with worst case padding to hit alignment. */

  m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));

  if (m == 0)
    return 0;           /* propagate failure */

  p = mem2chunk (m);

  if ((((unsigned long) (m)) % alignment) != 0)   /* misaligned */
    {
      /* Find an aligned spot inside chunk.  Since we need to give back
         leading space in a chunk of at least MINSIZE, if the first
         calculation places us at a spot with less than MINSIZE leader,
         we can move to the next aligned spot -- we've allocated enough
         total room so that this is always possible.  */
      brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
                                - ((signed long) alignment));
      if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
        brk += alignment;

      newp = (mchunkptr) brk;
      leadsize = brk - (char *) (p);
      newsize = chunksize (p) - leadsize;

      /* For mmapped chunks, just adjust offset */
      if (chunk_is_mmapped (p))
        {
          set_prev_size (newp, prev_size (p) + leadsize);
          set_head (newp, newsize | IS_MMAPPED);
          return chunk2mem (newp);
        }

      /* Otherwise, give back leader, use the rest */
      set_head (newp, newsize | PREV_INUSE |
                (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_inuse_bit_at_offset (newp, newsize);
      set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
      _int_free (av, p, 1);
      p = newp;

      assert (newsize >= nb &&
              (((unsigned long) (chunk2mem (p))) % alignment) == 0);
    }

  /* Also give back spare room at the end */
  if (!chunk_is_mmapped (p))
    {
      size = chunksize (p);
      if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
        {
          remainder_size = size - nb;
          remainder = chunk_at_offset (p, nb);
          set_head (remainder, remainder_size | PREV_INUSE |
                    (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head_size (p, nb);
          _int_free (av, remainder, 1);
        }
    }

  check_inuse_chunk (av, p);
  return chunk2mem (p);
}
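
/* Worked example of the alignment arithmetic above (assumed numbers, for
   illustration only): with the user pointer m == 0x100010 and
   alignment == 0x100, (m + alignment - 1) & -alignment == 0x100100, i.e. m
   rounded up to the next multiple of the alignment; mem2chunk of that
   address becomes the new chunk header.  If the resulting leading gap were
   smaller than MINSIZE, brk is advanced by one more alignment step, which
   the extra "alignment + MINSIZE" padding requested from _int_malloc always
   leaves room for.  */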


/*
   ------------------------------ malloc_trim ------------------------------
 */

static int
mtrim (mstate av, size_t pad)
{
  /* Don't touch corrupt arenas.  */
  if (arena_is_corrupt (av))
    return 0;

  /* Ensure initialization/consolidation */
  malloc_consolidate (av);

  const size_t ps = GLRO (dl_pagesize);
  int psindex = bin_index (ps);
  const size_t psm1 = ps - 1;

  int result = 0;
  for (int i = 1; i < NBINS; ++i)
    if (i == 1 || i >= psindex)
      {
        mbinptr bin = bin_at (av, i);

        for (mchunkptr p = last (bin); p != bin; p = p->bk)
          {
            INTERNAL_SIZE_T size = chunksize (p);

            if (size > psm1 + sizeof (struct malloc_chunk))
              {
                /* See whether the chunk contains at least one unused page.  */
                char *paligned_mem = (char *) (((uintptr_t) p
                                                + sizeof (struct malloc_chunk)
                                                + psm1) & ~psm1);

                assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
                assert ((char *) p + size > paligned_mem);

                /* This is the size we could potentially free.  */
                size -= paligned_mem - (char *) p;

                if (size > psm1)
                  {
#if MALLOC_DEBUG
                    /* When debugging we simulate destroying the memory
                       content.  */
                    memset (paligned_mem, 0x89, size & ~psm1);
#endif
                    __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);

                    result = 1;
                  }
              }
          }
      }

#ifndef MORECORE_CANNOT_TRIM
  return result | (av == &main_arena ? systrim (pad, av) : 0);

#else
  return result;
#endif
}


int
__malloc_trim (size_t s)
{
  int result = 0;

  if (__malloc_initialized < 0)
    ptmalloc_init ();

  mstate ar_ptr = &main_arena;
  do
    {
      __libc_lock_lock (ar_ptr->mutex);
      result |= mtrim (ar_ptr, s);
      __libc_lock_unlock (ar_ptr->mutex);

      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  return result;
}
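
/* Illustrative usage sketch (not part of glibc): malloc_trim is the public
   entry point for the per-arena mtrim pass above.  A pad of 0 asks the
   allocator to return as much unused memory as possible to the system:

     #include <malloc.h>

     if (malloc_trim (0))
       puts ("some memory was released");   // nonzero if anything was freed
 */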


/*
   ------------------------- malloc_usable_size -------------------------
 */

static size_t
musable (void *mem)
{
  mchunkptr p;
  if (mem != 0)
    {
      p = mem2chunk (mem);

      if (__builtin_expect (using_malloc_checking == 1, 0))
        return malloc_check_get_size (p);

      if (chunk_is_mmapped (p))
        {
          if (DUMPED_MAIN_ARENA_CHUNK (p))
            return chunksize (p) - SIZE_SZ;
          else
            return chunksize (p) - 2 * SIZE_SZ;
        }
      else if (inuse (p))
        return chunksize (p) - SIZE_SZ;
    }
  return 0;
}


size_t
__malloc_usable_size (void *m)
{
  size_t result;

  result = musable (m);
  return result;
}
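
/* Illustrative usage sketch (not part of glibc): because chunks are rounded
   up to the allocator's granularity, malloc_usable_size () may report more
   bytes than were requested; the numbers in the comment below are assumed
   examples, not guaranteed values:

     #include <malloc.h>

     void *p = malloc (100);
     if (p != NULL)
       {
         size_t n = malloc_usable_size (p);   // e.g. 104 or 120, never < 100
         free (p);
       }
 */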

/*
   ------------------------------ mallinfo ------------------------------
   Accumulate malloc statistics for arena AV into M.
 */

static void
int_mallinfo (mstate av, struct mallinfo *m)
{
  size_t i;
  mbinptr b;
  mchunkptr p;
  INTERNAL_SIZE_T avail;
  INTERNAL_SIZE_T fastavail;
  int nblocks;
  int nfastblocks;

  /* Ensure initialization */
  if (av->top == 0)
    malloc_consolidate (av);

  check_malloc_state (av);

  /* Account for top */
  avail = chunksize (av->top);
  nblocks = 1;  /* top always exists */

  /* traverse fastbins */
  nfastblocks = 0;
  fastavail = 0;

  for (i = 0; i < NFASTBINS; ++i)
    {
      for (p = fastbin (av, i); p != 0; p = p->fd)
        {
          ++nfastblocks;
          fastavail += chunksize (p);
        }
    }

  avail += fastavail;

  /* traverse regular bins */
  for (i = 1; i < NBINS; ++i)
    {
      b = bin_at (av, i);
      for (p = last (b); p != b; p = p->bk)
        {
          ++nblocks;
          avail += chunksize (p);
        }
    }

  m->smblks += nfastblocks;
  m->ordblks += nblocks;
  m->fordblks += avail;
  m->uordblks += av->system_mem - avail;
  m->arena += av->system_mem;
  m->fsmblks += fastavail;
  if (av == &main_arena)
    {
      m->hblks = mp_.n_mmaps;
      m->hblkhd = mp_.mmapped_mem;
      m->usmblks = 0;
      m->keepcost = chunksize (av->top);
    }
}


struct mallinfo
__libc_mallinfo (void)
{
  struct mallinfo m;
  mstate ar_ptr;

  if (__malloc_initialized < 0)
    ptmalloc_init ();

  memset (&m, 0, sizeof (m));
  ar_ptr = &main_arena;
  do
    {
      __libc_lock_lock (ar_ptr->mutex);
      int_mallinfo (ar_ptr, &m);
      __libc_lock_unlock (ar_ptr->mutex);

      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  return m;
}
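
/* Illustrative usage sketch (not part of glibc): mallinfo () aggregates the
   per-arena statistics computed by int_mallinfo above into one struct.  Note
   that the fields are plain ints, so they can wrap on large heaps:

     #include <malloc.h>
     #include <stdio.h>

     struct mallinfo mi = mallinfo ();
     printf ("arena=%d in-use=%d free=%d\n",
             mi.arena, mi.uordblks, mi.fordblks);
 */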

/*
   ------------------------------ malloc_stats ------------------------------
 */

void
__malloc_stats (void)
{
  int i;
  mstate ar_ptr;
  unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;

  if (__malloc_initialized < 0)
    ptmalloc_init ();
  _IO_flockfile (stderr);
  int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
  ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
  for (i = 0, ar_ptr = &main_arena;; i++)
    {
      struct mallinfo mi;

      memset (&mi, 0, sizeof (mi));
      __libc_lock_lock (ar_ptr->mutex);
      int_mallinfo (ar_ptr, &mi);
      fprintf (stderr, "Arena %d:\n", i);
      fprintf (stderr, "system bytes     = %10u\n", (unsigned int) mi.arena);
      fprintf (stderr, "in use bytes     = %10u\n", (unsigned int) mi.uordblks);
#if MALLOC_DEBUG > 1
      if (i > 0)
        dump_heap (heap_for_ptr (top (ar_ptr)));
#endif
      system_b += mi.arena;
      in_use_b += mi.uordblks;
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  fprintf (stderr, "Total (incl. mmap):\n");
  fprintf (stderr, "system bytes     = %10u\n", system_b);
  fprintf (stderr, "in use bytes     = %10u\n", in_use_b);
  fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
  fprintf (stderr, "max mmap bytes   = %10lu\n",
           (unsigned long) mp_.max_mmapped_mem);
  ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
  _IO_funlockfile (stderr);
}


/*
   ------------------------------ mallopt ------------------------------
 */
static inline int
__always_inline
do_set_trim_threshold (size_t value)
{
  LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
              mp_.no_dyn_threshold);
  mp_.trim_threshold = value;
  mp_.no_dyn_threshold = 1;
  return 1;
}

static inline int
__always_inline
do_set_top_pad (size_t value)
{
  LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
              mp_.no_dyn_threshold);
  mp_.top_pad = value;
  mp_.no_dyn_threshold = 1;
  return 1;
}

static inline int
__always_inline
do_set_mmap_threshold (size_t value)
{
  /* Forbid setting the threshold too high.  */
  if (value <= HEAP_MAX_SIZE / 2)
    {
      LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
                  mp_.no_dyn_threshold);
      mp_.mmap_threshold = value;
      mp_.no_dyn_threshold = 1;
      return 1;
    }
  return 0;
}

static inline int
__always_inline
do_set_mmaps_max (int32_t value)
{
  LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
              mp_.no_dyn_threshold);
  mp_.n_mmaps_max = value;
  mp_.no_dyn_threshold = 1;
  return 1;
}

static inline int
__always_inline
do_set_mallopt_check (int32_t value)
{
  LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
  check_action = value;
  return 1;
}

static inline int
__always_inline
do_set_perturb_byte (int32_t value)
{
  LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
  perturb_byte = value;
  return 1;
}

static inline int
__always_inline
do_set_arena_test (size_t value)
{
  LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
  mp_.arena_test = value;
  return 1;
}

static inline int
__always_inline
do_set_arena_max (size_t value)
{
  LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
  mp_.arena_max = value;
  return 1;
}

#if USE_TCACHE
static inline int
__always_inline
do_set_tcache_max (size_t value)
{
  if (value >= 0 && value <= MAX_TCACHE_SIZE)
    {
      LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
      mp_.tcache_max_bytes = value;
      mp_.tcache_bins = csize2tidx (request2size (value)) + 1;
    }
  return 1;
}

static inline int
__always_inline
do_set_tcache_count (size_t value)
{
  LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
  mp_.tcache_count = value;
  return 1;
}

static inline int
__always_inline
do_set_tcache_unsorted_limit (size_t value)
{
  LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
  mp_.tcache_unsorted_limit = value;
  return 1;
}
#endif
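
/* Illustrative note (not part of glibc, tunable names assumed for this
   snapshot): unlike the do_set_* helpers used by mallopt, the three tcache
   setters above are reached through the tunables framework, so they are
   normally driven from the environment rather than from program code, e.g.:

     GLIBC_TUNABLES=glibc.malloc.tcache_count=0 ./a.out
 */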

int
__libc_mallopt (int param_number, int value)
{
  mstate av = &main_arena;
  int res = 1;

  if (__malloc_initialized < 0)
    ptmalloc_init ();
  __libc_lock_lock (av->mutex);
  /* Ensure initialization/consolidation */
  malloc_consolidate (av);

  LIBC_PROBE (memory_mallopt, 2, param_number, value);

  switch (param_number)
    {
    case M_MXFAST:
      if (value >= 0 && value <= MAX_FAST_SIZE)
        {
          LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
          set_max_fast (value);
        }
      else
        res = 0;
      break;

    case M_TRIM_THRESHOLD:
      do_set_trim_threshold (value);
      break;

    case M_TOP_PAD:
      do_set_top_pad (value);
      break;

    case M_MMAP_THRESHOLD:
      res = do_set_mmap_threshold (value);
      break;

    case M_MMAP_MAX:
      do_set_mmaps_max (value);
      break;

    case M_CHECK_ACTION:
      do_set_mallopt_check (value);
      break;

    case M_PERTURB:
      do_set_perturb_byte (value);
      break;

    case M_ARENA_TEST:
      if (value > 0)
        do_set_arena_test (value);
      break;

    case M_ARENA_MAX:
      if (value > 0)
        do_set_arena_max (value);
      break;
    }
  __libc_lock_unlock (av->mutex);
  return res;
}
libc_hidden_def (__libc_mallopt)
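
/* Illustrative usage sketch (not part of glibc): mallopt () is the public
   wrapper around the do_set_* helpers above.  Setting either threshold
   explicitly also disables the dynamic mmap/trim threshold adjustment
   (mp_.no_dyn_threshold):

     #include <malloc.h>

     mallopt (M_MMAP_THRESHOLD, 256 * 1024);   // mmap requests >= 256 KiB
     mallopt (M_TRIM_THRESHOLD, 1024 * 1024);  // trim when top exceeds 1 MiB
 */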


/*
   -------------------- Alternative MORECORE functions --------------------
 */


/*
   General Requirements for MORECORE.

   The MORECORE function must have the following properties:

   If MORECORE_CONTIGUOUS is false:

   * MORECORE must allocate in multiples of pagesize.  It will
     only be called with arguments that are multiples of pagesize.

   * MORECORE(0) must return an address that is at least
     MALLOC_ALIGNMENT aligned.  (Page-aligning always suffices.)

   else (i.e. if MORECORE_CONTIGUOUS is true):

   * Consecutive calls to MORECORE with positive arguments
     return increasing addresses, indicating that space has been
     contiguously extended.

   * MORECORE need not allocate in multiples of pagesize.
     Calls to MORECORE need not have args of multiples of pagesize.

   * MORECORE need not page-align.

   In either case:

   * MORECORE may allocate more memory than requested.  (Or even less,
     but this will generally result in a malloc failure.)

   * MORECORE must not allocate memory when given argument zero, but
     instead return one past the end address of memory from the previous
     nonzero call.  This malloc does NOT call MORECORE(0)
     until at least one call with positive arguments is made, so
     the initial value returned is not important.

   * Even though consecutive calls to MORECORE need not return contiguous
     addresses, it must be OK for malloc'ed chunks to span multiple
     regions in those cases where they do happen to be contiguous.

   * MORECORE need not handle negative arguments -- it may instead
     just return MORECORE_FAILURE when given negative arguments.
     Negative arguments are always multiples of pagesize.  MORECORE
     must not misinterpret negative args as large positive unsigned
     args.  You can suppress all such calls from even occurring by
     defining MORECORE_CANNOT_TRIM.

   There is some variation across systems about the type of the
   argument to sbrk/MORECORE.  If size_t is unsigned, then it cannot
   actually be size_t, because sbrk supports negative args, so it is
   normally the signed type of the same width as size_t (sometimes
   declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
   matter though.  Internally, we use "long" as arguments, which should
   work across all reasonable possibilities.

   Additionally, if MORECORE ever returns failure for a positive
   request, then mmap is used as a noncontiguous system allocator.  This
   is a useful backup strategy for systems with holes in address spaces
   -- in this case sbrk cannot contiguously expand the heap, but mmap
   may be able to map noncontiguous space.

   If you'd like mmap to ALWAYS be used, you can define MORECORE to be
   a function that always returns MORECORE_FAILURE.

   If you are using this malloc with something other than sbrk (or its
   emulation) to supply memory regions, you probably want to set
   MORECORE_CONTIGUOUS as false.  As an example, here is a custom
   allocator kindly contributed for pre-OSX macOS.  It uses virtually
   but not necessarily physically contiguous non-paged memory (locked
   in, present and won't get swapped out).  You can use it by
   uncommenting this section, adding some #includes, and setting up the
   appropriate defines above:

   #define MORECORE osMoreCore
   #define MORECORE_CONTIGUOUS 0

   There is also a shutdown routine that should somehow be called for
   cleanup upon program exit.

   #define MAX_POOL_ENTRIES 100
   #define MINIMUM_MORECORE_SIZE  (64 * 1024)
   static int next_os_pool;
   void *our_os_pools[MAX_POOL_ENTRIES];

   void *osMoreCore(int size)
   {
     void *ptr = 0;
     static void *sbrk_top = 0;

     if (size > 0)
     {
       if (size < MINIMUM_MORECORE_SIZE)
         size = MINIMUM_MORECORE_SIZE;
       if (CurrentExecutionLevel() == kTaskLevel)
         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
       if (ptr == 0)
       {
         return (void *) MORECORE_FAILURE;
       }
       // save ptrs so they can be freed during cleanup
       our_os_pools[next_os_pool] = ptr;
       next_os_pool++;
       ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
       sbrk_top = (char *) ptr + size;
       return ptr;
     }
     else if (size < 0)
     {
       // we don't currently support shrink behavior
       return (void *) MORECORE_FAILURE;
     }
     else
     {
       return sbrk_top;
     }
   }

   // cleanup any allocated memory pools
   // called as last thing before shutting down driver

   void osCleanupMem(void)
   {
     void **ptr;

     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
       if (*ptr)
       {
         PoolDeallocate(*ptr);
         *ptr = 0;
       }
   }

 */


/* Helper code.  */

extern char **__libc_argv attribute_hidden;

static void
malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr)
{
  /* Avoid using this arena in future.  We do not attempt to synchronize this
     with anything else because we minimally want to ensure that __libc_message
     gets its resources safely without stumbling on the current corruption.  */
  if (ar_ptr)
    set_arena_corrupt (ar_ptr);

  if ((action & 5) == 5)
    __libc_message ((action & 2) ? (do_abort | do_backtrace) : do_message,
                    "%s\n", str);
  else if (action & 1)
    {
      char buf[2 * sizeof (uintptr_t) + 1];

      buf[sizeof (buf) - 1] = '\0';
      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
      while (cp > buf)
        *--cp = '0';

      __libc_message ((action & 2) ? (do_abort | do_backtrace) : do_message,
                      "*** Error in `%s': %s: 0x%s ***\n",
                      __libc_argv[0] ? : "<unknown>", str, cp);
    }
  else if (action & 2)
    abort ();
}

/* We need a wrapper function for one of the additions of POSIX.  */
int
__posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* Test whether the SIZE argument is valid.  It must be a power of
     two multiple of sizeof (void *).  */
  if (alignment % sizeof (void *) != 0
      || !powerof2 (alignment / sizeof (void *))
      || alignment == 0)
    return EINVAL;


  void *address = RETURN_ADDRESS (0);
  mem = _mid_memalign (alignment, size, address);

  if (mem != NULL)
    {
      *memptr = mem;
      return 0;
    }

  return ENOMEM;
}
weak_alias (__posix_memalign, posix_memalign)
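
/* Illustrative usage sketch (not part of glibc): the wrapper above enforces
   the POSIX rule that the alignment be a power-of-two multiple of
   sizeof (void *) and reports errors through the return value rather than
   through errno:

     #include <stdlib.h>

     void *buf;
     int err = posix_memalign (&buf, 64, 1024);   // 64-byte aligned block
     if (err == 0)
       free (buf);
 */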


int
__malloc_info (int options, FILE *fp)
{
  /* For now, at least.  */
  if (options != 0)
    return EINVAL;

  int n = 0;
  size_t total_nblocks = 0;
  size_t total_nfastblocks = 0;
  size_t total_avail = 0;
  size_t total_fastavail = 0;
  size_t total_system = 0;
  size_t total_max_system = 0;
  size_t total_aspace = 0;
  size_t total_aspace_mprotect = 0;


  if (__malloc_initialized < 0)
    ptmalloc_init ();

  fputs ("<malloc version=\"1\">\n", fp);

  /* Iterate over all arenas currently in use.  */
  mstate ar_ptr = &main_arena;
  do
    {
      fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);

      size_t nblocks = 0;
      size_t nfastblocks = 0;
      size_t avail = 0;
      size_t fastavail = 0;
      struct
      {
        size_t from;
        size_t to;
        size_t total;
        size_t count;
      } sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))

      __libc_lock_lock (ar_ptr->mutex);

      for (size_t i = 0; i < NFASTBINS; ++i)
        {
          mchunkptr p = fastbin (ar_ptr, i);
          if (p != NULL)
            {
              size_t nthissize = 0;
              size_t thissize = chunksize (p);

              while (p != NULL)
                {
                  ++nthissize;
                  p = p->fd;
                }

              fastavail += nthissize * thissize;
              nfastblocks += nthissize;
              sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
              sizes[i].to = thissize;
              sizes[i].count = nthissize;
            }
          else
            sizes[i].from = sizes[i].to = sizes[i].count = 0;

          sizes[i].total = sizes[i].count * sizes[i].to;
        }


      mbinptr bin;
      struct malloc_chunk *r;

      for (size_t i = 1; i < NBINS; ++i)
        {
          bin = bin_at (ar_ptr, i);
          r = bin->fd;
          sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
          sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
            = sizes[NFASTBINS - 1 + i].count = 0;

          if (r != NULL)
            while (r != bin)
              {
                size_t r_size = chunksize_nomask (r);
                ++sizes[NFASTBINS - 1 + i].count;
                sizes[NFASTBINS - 1 + i].total += r_size;
                sizes[NFASTBINS - 1 + i].from
                  = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
                sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
                                                   r_size);

                r = r->fd;
              }

          if (sizes[NFASTBINS - 1 + i].count == 0)
            sizes[NFASTBINS - 1 + i].from = 0;
          nblocks += sizes[NFASTBINS - 1 + i].count;
          avail += sizes[NFASTBINS - 1 + i].total;
        }

      __libc_lock_unlock (ar_ptr->mutex);

      total_nfastblocks += nfastblocks;
      total_fastavail += fastavail;

      total_nblocks += nblocks;
      total_avail += avail;

      for (size_t i = 0; i < nsizes; ++i)
        if (sizes[i].count != 0 && i != NFASTBINS)
          fprintf (fp, "\
  <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                   sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);

      if (sizes[NFASTBINS].count != 0)
        fprintf (fp, "\
  <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
                 sizes[NFASTBINS].total, sizes[NFASTBINS].count);

      total_system += ar_ptr->system_mem;
      total_max_system += ar_ptr->max_system_mem;

      fprintf (fp,
               "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
               "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
               "<system type=\"current\" size=\"%zu\"/>\n"
               "<system type=\"max\" size=\"%zu\"/>\n",
               nfastblocks, fastavail, nblocks, avail,
               ar_ptr->system_mem, ar_ptr->max_system_mem);

      if (ar_ptr != &main_arena)
        {
          heap_info *heap = heap_for_ptr (top (ar_ptr));
          fprintf (fp,
                   "<aspace type=\"total\" size=\"%zu\"/>\n"
                   "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
                   heap->size, heap->mprotect_size);
          total_aspace += heap->size;
          total_aspace_mprotect += heap->mprotect_size;
        }
      else
        {
          fprintf (fp,
                   "<aspace type=\"total\" size=\"%zu\"/>\n"
                   "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
                   ar_ptr->system_mem, ar_ptr->system_mem);
          total_aspace += ar_ptr->system_mem;
          total_aspace_mprotect += ar_ptr->system_mem;
        }

      fputs ("</heap>\n", fp);
      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  fprintf (fp,
           "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
           "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
           "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
           "<system type=\"current\" size=\"%zu\"/>\n"
           "<system type=\"max\" size=\"%zu\"/>\n"
           "<aspace type=\"total\" size=\"%zu\"/>\n"
           "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
           "</malloc>\n",
           total_nfastblocks, total_fastavail, total_nblocks, total_avail,
           mp_.n_mmaps, mp_.mmapped_mem,
           total_system, total_max_system,
           total_aspace, total_aspace_mprotect);

  return 0;
}
weak_alias (__malloc_info, malloc_info)
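
/* Illustrative usage sketch (not part of glibc): malloc_info () writes the
   XML document assembled above to an arbitrary stdio stream; options must
   currently be 0:

     #include <malloc.h>
     #include <stdio.h>

     if (malloc_info (0, stdout) != 0)
       fputs ("malloc_info failed\n", stderr);
 */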


strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
strong_alias (__libc_memalign, __memalign)
weak_alias (__libc_memalign, memalign)
strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
strong_alias (__libc_mallinfo, __mallinfo)
weak_alias (__libc_mallinfo, mallinfo)
strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)

weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
#endif

/* ------------------------------------------------------------
   History:

   [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]

 */
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */